hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
795887d75505bc9eb0ef46449b5233ebcd73dfb7 | 2,942 | py | Python | Snippets/Gtk/Python GooCanvas/src/SimpleItem.py | fredmorcos/attic | 0da3b94aa525df59ddc977c32cb71c243ffd0dbd | [
"Unlicense"
] | 2 | 2021-01-24T09:00:51.000Z | 2022-01-23T20:52:17.000Z | Snippets/Gtk/Python GooCanvas/src/SimpleItem.py | fredmorcos/attic | 0da3b94aa525df59ddc977c32cb71c243ffd0dbd | [
"Unlicense"
] | 6 | 2020-02-29T01:59:03.000Z | 2022-02-15T10:25:40.000Z | Snippets/Gtk/Python GooCanvas/src/SimpleItem.py | fredmorcos/attic | 0da3b94aa525df59ddc977c32cb71c243ffd0dbd | [
"Unlicense"
] | 1 | 2019-03-22T14:41:21.000Z | 2019-03-22T14:41:21.000Z | '''
This file is part of Camarillo.
Copyright (C) 2008 Frederic-Gerald Morcos <fred.morcos@gmail.com>
Copyright (C) 2008 Mohammed Hazem <cviruss@gmail.com>
Camarillo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Camarillo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Camarillo. If not, see <http://www.gnu.org/licenses/>.
'''
from goocanvas import Canvas, Rect, Text
from gtk import ANCHOR_CENTER, STATE_PRELIGHT
from pango import SCALE
class SimpleItem(Canvas):
    """Base canvas widget shared by Item (quiz, assignment, ...) and the
    timeline Slot classes: a rounded, outlined rectangle with centered text,
    drawn over a theme-colored backdrop.
    """

    def __init__(self, text, bgColor='#96abcb', fgColor='#384a5c'):
        Canvas.__init__(self)
        canvas_root = self.get_root_item()
        # Colors and font taken from the current GTK theme.
        self.styleBgColor = self.style.bg[STATE_PRELIGHT].to_string()
        self.styleFont = self.style.font_desc
        self.textString = text
        self.bgColor = bgColor
        self.fgColor = fgColor
        # Backdrop filling the whole widget, painted in the theme color.
        self.bgRect = Rect(stroke_color=self.styleBgColor,
                           fill_color=self.styleBgColor,
                           line_width=0.0)
        # Rounded, outlined box drawn on top of the backdrop.
        self.rect = Rect(stroke_color=self.fgColor,
                         fill_color=self.bgColor,
                         line_width=2.0,
                         radius_x=5,
                         radius_y=5)
        # Centered text label.
        self.text = Text(text=self.textString,
                         anchor=ANCHOR_CENTER,
                         fill_color=self.fgColor,
                         font_desc=self.styleFont)
        # Stack the three items bottom-to-top on the canvas root.
        for canvas_item in (self.bgRect, self.rect, self.text):
            canvas_root.add_child(canvas_item, -1)
        self.connect('size-allocate', self.size_allocated, None)
        self.connect('size-request', self.size_request, None)

    def size_allocated(self, widget, allocation, data):
        """Re-fit the backdrop, box and label to the allocated size."""
        width = allocation.width
        height = allocation.height
        border = self.getBorderWidth()
        self.bgRect.set_properties(x=0,
                                   y=0,
                                   width=width,
                                   height=height)
        # Inset the rounded box by its own border width on every side.
        self.rect.set_properties(x=border,
                                 y=border,
                                 width=width - 2 * border,
                                 height=height - 2 * border)
        # Anchor the label at the widget center.
        self.text.set_properties(x=width / 2,
                                 y=height / 2)

    def size_request(self, widget, requisition, data):
        """Report a preferred size derived from border, text and font size."""
        padding = self.getBorderWidth() * 6
        requisition.width = padding + self.getTextWidth()
        requisition.height = padding + self.fontSizePixels() * 2

    def getBorderWidth(self):
        """Border line width of the inner rectangle, as a whole number."""
        return int(self.rect.get_property('line_width'))

    def getTextWidth(self):
        """Rough pixel width of the label: glyph count times font size."""
        label = self.text.get_property('text')
        return int(len(label) * self.fontSizePixels())

    def fontSizePixels(self):
        """Font size in pixels (pango size units divided by pango.SCALE)."""
        return int(self.text.get_property('font-desc').get_size() / SCALE)
| 29.42 | 73 | 0.689667 |
795888da11aa9efee0d4b805f9b6f3a9e0b5e241 | 210 | py | Python | courses/api/permissions.py | esiebomaj/skillNG | f0133615a7b397205d988b3fc4352d099cc5f166 | [
"MIT"
] | null | null | null | courses/api/permissions.py | esiebomaj/skillNG | f0133615a7b397205d988b3fc4352d099cc5f166 | [
"MIT"
] | 19 | 2020-07-29T21:36:11.000Z | 2022-03-12T00:46:40.000Z | courses/api/permissions.py | esiebomaj/skillNG | f0133615a7b397205d988b3fc4352d099cc5f166 | [
"MIT"
] | null | null | null | from rest_framework.permissions import BasePermission
class IsEnrolled(BasePermission):
    """Object-level permission granting access only to enrolled users.

    Assumes ``obj`` exposes a ``students`` relation of users (e.g. a
    course) -- confirm against the views that attach this permission.
    """

    def has_object_permission(self, request, view, obj):
        # Allowed iff the requesting user appears among the object's students.
        matching = obj.students.filter(id=request.user.id)
        return matching.exists()
795888f3c3687c8d57b25ce07caea89a087554f6 | 313 | py | Python | oop_advance/use_metaclass.py | LaurenceYang/learn-python | 819994039abd3af298f73b1a73976eaa95071096 | [
"Apache-2.0"
] | 2 | 2018-01-20T03:38:58.000Z | 2019-07-21T11:33:24.000Z | oop_advance/use_metaclass.py | LaurenceYang/learn-python | 819994039abd3af298f73b1a73976eaa95071096 | [
"Apache-2.0"
] | null | null | null | oop_advance/use_metaclass.py | LaurenceYang/learn-python | 819994039abd3af298f73b1a73976eaa95071096 | [
"Apache-2.0"
class ListMetaClass(type):
    """Metaclass that injects an ``add`` method (an alias for ``append``)
    into every class it creates."""

    def __new__(mcs, clsname, parents, namespace):
        # Inject the helper before the class object is materialised.
        namespace['add'] = lambda self, item: self.append(item)
        return super().__new__(mcs, clsname, parents, namespace)
# List subclass whose ``add`` method is injected by ListMetaClass at
# class-creation time, so instances support ``my_list.add(value)``.
class MyList(list, metaclass=ListMetaClass):
    pass
# Demo: each value is appended through the metaclass-injected ``add`` method.
L = MyList()
for value in (1, 2, 3, 4, 'END'):
    L.add(value)
print(L)
795889f081072bbd6963492e1ec68a9f7134f25d | 12,440 | py | Python | test/opsgenie_swagger/models/thousand_eyes_integration.py | Logicworks/opsgenie-python-sdk | 244c4c40ddcc25e70df5ba4425ab8d7c8da59c18 | [
"Apache-2.0"
] | null | null | null | test/opsgenie_swagger/models/thousand_eyes_integration.py | Logicworks/opsgenie-python-sdk | 244c4c40ddcc25e70df5ba4425ab8d7c8da59c18 | [
"Apache-2.0"
] | null | null | null | test/opsgenie_swagger/models/thousand_eyes_integration.py | Logicworks/opsgenie-python-sdk | 244c4c40ddcc25e70df5ba4425ab8d7c8da59c18 | [
"Apache-2.0"
] | 1 | 2020-11-07T11:27:13.000Z | 2020-11-07T11:27:13.000Z | # coding: utf-8
"""
OpsGenie REST API
OpsGenie OpenAPI Specification # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from opsgenie_swagger.models.integration import Integration # noqa: F401,E501
from opsgenie_swagger.models.recipient import Recipient # noqa: F401,E501
from opsgenie_swagger.models.team_meta import TeamMeta # noqa: F401,E501
from opsgenie_swagger.models.token_based_incoming_feature import TokenBasedIncomingFeature # noqa: F401,E501
# NOTE: auto-generated Swagger model (see the class docstring below);
# regenerate from the OpenAPI spec rather than hand-editing.
class ThousandEyesIntegration(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> declared Swagger type; iterated by to_dict() below.
    swagger_types = {
        'suppress_notifications': 'bool',
        'ignore_teams_from_payload': 'bool',
        'ignore_recipients_from_payload': 'bool',
        'recipients': 'list[Recipient]',
        'is_advanced': 'bool',
        'feature_type': 'str',
        'allow_configuration_access': 'bool',
        'allow_write_access': 'bool'
    }

    # Attribute name -> JSON key used in the serialized API payload.
    attribute_map = {
        'suppress_notifications': 'suppressNotifications',
        'ignore_teams_from_payload': 'ignoreTeamsFromPayload',
        'ignore_recipients_from_payload': 'ignoreRecipientsFromPayload',
        'recipients': 'recipients',
        'is_advanced': 'isAdvanced',
        'feature_type': 'feature-type',
        'allow_configuration_access': 'allowConfigurationAccess',
        'allow_write_access': 'allowWriteAccess'
    }

    def __init__(self, suppress_notifications=None, ignore_teams_from_payload=None, ignore_recipients_from_payload=None, recipients=None, is_advanced=None, feature_type=None, allow_configuration_access=None, allow_write_access=None):  # noqa: E501
        """ThousandEyesIntegration - a model defined in Swagger"""  # noqa: E501
        # Private backing fields for the generated properties below.
        self._suppress_notifications = None
        self._ignore_teams_from_payload = None
        self._ignore_recipients_from_payload = None
        self._recipients = None
        self._is_advanced = None
        self._feature_type = None
        self._allow_configuration_access = None
        self._allow_write_access = None
        self.discriminator = None
        # Assign through the property setters so their validation applies
        # (e.g. the feature_type enum check); None means "not supplied".
        if suppress_notifications is not None:
            self.suppress_notifications = suppress_notifications
        if ignore_teams_from_payload is not None:
            self.ignore_teams_from_payload = ignore_teams_from_payload
        if ignore_recipients_from_payload is not None:
            self.ignore_recipients_from_payload = ignore_recipients_from_payload
        if recipients is not None:
            self.recipients = recipients
        if is_advanced is not None:
            self.is_advanced = is_advanced
        if feature_type is not None:
            self.feature_type = feature_type
        if allow_configuration_access is not None:
            self.allow_configuration_access = allow_configuration_access
        if allow_write_access is not None:
            self.allow_write_access = allow_write_access

    @property
    def suppress_notifications(self):
        """Gets the suppress_notifications of this ThousandEyesIntegration.  # noqa: E501

        If enabled, notifications that come from alerts will be suppressed. Defaults to false  # noqa: E501

        :return: The suppress_notifications of this ThousandEyesIntegration.  # noqa: E501
        :rtype: bool
        """
        return self._suppress_notifications

    @suppress_notifications.setter
    def suppress_notifications(self, suppress_notifications):
        """Sets the suppress_notifications of this ThousandEyesIntegration.

        If enabled, notifications that come from alerts will be suppressed. Defaults to false  # noqa: E501

        :param suppress_notifications: The suppress_notifications of this ThousandEyesIntegration.  # noqa: E501
        :type: bool
        """

        self._suppress_notifications = suppress_notifications

    @property
    def ignore_teams_from_payload(self):
        """Gets the ignore_teams_from_payload of this ThousandEyesIntegration.  # noqa: E501

        If enabled, the integration will ignore teams sent in request payloads. Defaults to false  # noqa: E501

        :return: The ignore_teams_from_payload of this ThousandEyesIntegration.  # noqa: E501
        :rtype: bool
        """
        return self._ignore_teams_from_payload

    @ignore_teams_from_payload.setter
    def ignore_teams_from_payload(self, ignore_teams_from_payload):
        """Sets the ignore_teams_from_payload of this ThousandEyesIntegration.

        If enabled, the integration will ignore teams sent in request payloads. Defaults to false  # noqa: E501

        :param ignore_teams_from_payload: The ignore_teams_from_payload of this ThousandEyesIntegration.  # noqa: E501
        :type: bool
        """

        self._ignore_teams_from_payload = ignore_teams_from_payload

    @property
    def ignore_recipients_from_payload(self):
        """Gets the ignore_recipients_from_payload of this ThousandEyesIntegration.  # noqa: E501

        If enabled, the integration will ignore recipients sent in request payloads. Defaults to false  # noqa: E501

        :return: The ignore_recipients_from_payload of this ThousandEyesIntegration.  # noqa: E501
        :rtype: bool
        """
        return self._ignore_recipients_from_payload

    @ignore_recipients_from_payload.setter
    def ignore_recipients_from_payload(self, ignore_recipients_from_payload):
        """Sets the ignore_recipients_from_payload of this ThousandEyesIntegration.

        If enabled, the integration will ignore recipients sent in request payloads. Defaults to false  # noqa: E501

        :param ignore_recipients_from_payload: The ignore_recipients_from_payload of this ThousandEyesIntegration.  # noqa: E501
        :type: bool
        """

        self._ignore_recipients_from_payload = ignore_recipients_from_payload

    @property
    def recipients(self):
        """Gets the recipients of this ThousandEyesIntegration.  # noqa: E501

        Optional user, schedule, teams or escalation names to calculate which users will receive the notifications of the alert. Recipients which are exceeding the limit are ignored  # noqa: E501

        :return: The recipients of this ThousandEyesIntegration.  # noqa: E501
        :rtype: list[Recipient]
        """
        return self._recipients

    @recipients.setter
    def recipients(self, recipients):
        """Sets the recipients of this ThousandEyesIntegration.

        Optional user, schedule, teams or escalation names to calculate which users will receive the notifications of the alert. Recipients which are exceeding the limit are ignored  # noqa: E501

        :param recipients: The recipients of this ThousandEyesIntegration.  # noqa: E501
        :type: list[Recipient]
        """

        self._recipients = recipients

    @property
    def is_advanced(self):
        """Gets the is_advanced of this ThousandEyesIntegration.  # noqa: E501

        :return: The is_advanced of this ThousandEyesIntegration.  # noqa: E501
        :rtype: bool
        """
        return self._is_advanced

    @is_advanced.setter
    def is_advanced(self, is_advanced):
        """Sets the is_advanced of this ThousandEyesIntegration.

        :param is_advanced: The is_advanced of this ThousandEyesIntegration.  # noqa: E501
        :type: bool
        """

        self._is_advanced = is_advanced

    @property
    def feature_type(self):
        """Gets the feature_type of this ThousandEyesIntegration.  # noqa: E501

        :return: The feature_type of this ThousandEyesIntegration.  # noqa: E501
        :rtype: str
        """
        return self._feature_type

    @feature_type.setter
    def feature_type(self, feature_type):
        """Sets the feature_type of this ThousandEyesIntegration.

        :param feature_type: The feature_type of this ThousandEyesIntegration.  # noqa: E501
        :type: str
        """
        # Only the Swagger-enumerated feature types are accepted; anything
        # else (including None) raises ValueError.
        allowed_values = ["email-based", "token-based"]  # noqa: E501
        if feature_type not in allowed_values:
            raise ValueError(
                "Invalid value for `feature_type` ({0}), must be one of {1}"  # noqa: E501
                .format(feature_type, allowed_values)
            )

        self._feature_type = feature_type

    @property
    def allow_configuration_access(self):
        """Gets the allow_configuration_access of this ThousandEyesIntegration.  # noqa: E501

        This parameter is for allowing or restricting the configuration access. If configuration access is restricted, the integration will be limited to Alert API requests and sending heartbeats. Defaults to false  # noqa: E501

        :return: The allow_configuration_access of this ThousandEyesIntegration.  # noqa: E501
        :rtype: bool
        """
        return self._allow_configuration_access

    @allow_configuration_access.setter
    def allow_configuration_access(self, allow_configuration_access):
        """Sets the allow_configuration_access of this ThousandEyesIntegration.

        This parameter is for allowing or restricting the configuration access. If configuration access is restricted, the integration will be limited to Alert API requests and sending heartbeats. Defaults to false  # noqa: E501

        :param allow_configuration_access: The allow_configuration_access of this ThousandEyesIntegration.  # noqa: E501
        :type: bool
        """

        self._allow_configuration_access = allow_configuration_access

    @property
    def allow_write_access(self):
        """Gets the allow_write_access of this ThousandEyesIntegration.  # noqa: E501

        This parameter is for configuring the read-only access of integration. If the integration is limited to read-only access, the integration will not be authorized to perform any create, update or delete action within any domain. Defaults to true  # noqa: E501

        :return: The allow_write_access of this ThousandEyesIntegration.  # noqa: E501
        :rtype: bool
        """
        return self._allow_write_access

    @allow_write_access.setter
    def allow_write_access(self, allow_write_access):
        """Sets the allow_write_access of this ThousandEyesIntegration.

        This parameter is for configuring the read-only access of integration. If the integration is limited to read-only access, the integration will not be authorized to perform any create, update or delete action within any domain. Defaults to true  # noqa: E501

        :param allow_write_access: The allow_write_access of this ThousandEyesIntegration.  # noqa: E501
        :type: bool
        """

        self._allow_write_access = allow_write_access

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize any model instances inside lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize model instances stored as dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ThousandEyesIntegration):
            return False

        # Generated models compare by full attribute state.
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 39.119497 | 265 | 0.683923 |
79588b412e3ecec0dcabb06078d17b2e965e512c | 2,123 | py | Python | ros_bt_py/setup.py | fzi-forschungszentrum-informatik/ros_bt_py | ed65e2b2f0a03411101f455c0ab38401ba50bada | [
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | 4 | 2022-03-11T14:30:43.000Z | 2022-03-31T07:21:35.000Z | ros_bt_py/setup.py | fzi-forschungszentrum-informatik/ros_bt_py | ed65e2b2f0a03411101f455c0ab38401ba50bada | [
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | ros_bt_py/setup.py | fzi-forschungszentrum-informatik/ros_bt_py | ed65e2b2f0a03411101f455c0ab38401ba50bada | [
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -------- BEGIN LICENSE BLOCK --------
# Copyright 2022 FZI Forschungszentrum Informatik
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the {copyright_holder} nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -------- END LICENSE BLOCK --------
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# Catkin/distutils glue for the ros_bt_py package; typically invoked by the
# catkin build system rather than run directly -- confirm against the build.
setup_args = generate_distutils_setup(
    version='1.0.0',
    scripts=[],
    packages=[
        'ros_bt_py',
        'ros_bt_py.nodes',
        'ros_bt_py.nodes.migrations',
        'ros_bt_py.ros_nodes',
        'ros_bt_py.ros_nodes.migrations',
    ],
    package_dir={'': 'src'},
    install_requires=['requests'],
)

setup(**setup_args)
| 42.46 | 77 | 0.728686 |
79588b922ef95b4ea2c7a370c20c3a25b7a0e146 | 1,918 | py | Python | goldsrc/mdl_v4/structs/sequence.py | tltneon/SourceIO | 418224918c2b062a4c78a41d4d65329ba2decb22 | [
"MIT"
] | 1 | 2021-07-12T12:55:27.000Z | 2021-07-12T12:55:27.000Z | goldsrc/mdl_v4/structs/sequence.py | syborg64/SourceIO | e4ba86d801f518e192260af08ef533759c2e1cc3 | [
"MIT"
] | null | null | null | goldsrc/mdl_v4/structs/sequence.py | syborg64/SourceIO | e4ba86d801f518e192260af08ef533759c2e1cc3 | [
"MIT"
] | null | null | null | import math
from typing import List
import numpy as np
from ....source_shared.base import Base
from ....utilities.byte_io_mdl import ByteIO
def euler_to_quat(euler):
    """Convert a triple of Euler angles (radians) to a quaternion.

    :param euler: sequence of three rotation angles in radians, in the
                  (x, y, z) order used by the MDL sequence data
    :return: quaternion as a (w, x, y, z) tuple
    """
    half_x = euler[0] * 0.5
    half_y = euler[1] * 0.5
    half_z = euler[2] * 0.5
    sz, cz = math.sin(half_z), math.cos(half_z)
    sy, cy = math.sin(half_y), math.cos(half_y)
    sx, cx = math.sin(half_x), math.cos(half_x)
    # Products of half-angle sines/cosines shared between components.
    sx_cy = sx * cy
    cx_sy = cx * sy
    cy_cx = cy * cx
    sx_sy = sx * sy
    qx = cz * sx_cy - sz * cx_sy
    qy = sx_cy * sz + cx_sy * cz
    qz = sz * cy_cx - cz * sx_sy
    qw = sx_sy * sz + cz * cy_cx
    return qw, qx, qy, qz
class SequenceFrame:
    """One animation frame: a small header plus packed per-bone rotations."""

    def __init__(self):
        self.sequence_id = 0.0
        self.unk = []       # eleven unidentified uint32 header values
        self.unk_vec = []   # unidentified 3-float vector
        # Filled by read(): per-bone rotations, shape (bone_count, 3), radians.
        self.animation_per_bone_rot = np.array([])

    def read(self, reader: ByteIO, bone_count):
        """Parse one frame from *reader* for *bone_count* bones.

        Fields are consumed in stream order; do not reorder the reads.
        """
        self.sequence_id = reader.read_float()
        self.unk = reader.read_fmt('11I')
        self.unk_vec = reader.read_fmt('3f')
        # Rotations are int16 triples; the scale 1.745329e-4 equals
        # 0.01 degree expressed in radians.
        raw = np.frombuffer(reader.read(6 * bone_count), dtype=np.int16)
        radians = raw.astype(np.float32) * 0.0001745329354889691
        self.animation_per_bone_rot = radians.reshape((-1, 3))
class StudioSequence(Base):
    """A named animation sequence and its per-frame bone rotations."""

    def __init__(self):
        self.name = ''
        self.frame_count = 0
        self.unk = 0  # unidentified int32 header field
        self.frame_helpers: List[SequenceFrame] = []  # parsed frame records
        self.frames = []  # per-frame (bone_count, 3) rotation arrays

    def read(self, reader: ByteIO):
        """Parse the fixed-size sequence header from *reader*."""
        self.name = reader.read_ascii_string(32)
        self.frame_count = reader.read_int32()
        self.unk = reader.read_int32()

    def read_anim_values(self, reader, bone_count):
        """Parse ``frame_count`` SequenceFrame records from *reader*."""
        for _ in range(self.frame_count):
            helper = SequenceFrame()
            helper.read(reader, bone_count)
            self.frame_helpers.append(helper)
            self.frames.append(helper.animation_per_bone_rot)
79588c3f969326716acbf5fae00ef28966606f03 | 108,536 | py | Python | pygeoapi/api.py | Canadian-Geospatial-Platform/pygeoapi | 56ccf43d25a9e06ec351a57d0ebc73dc09110013 | [
"MIT"
] | 3 | 2021-02-22T13:19:14.000Z | 2021-06-04T17:51:13.000Z | pygeoapi/api.py | Canadian-Geospatial-Platform/pygeoapi | 56ccf43d25a9e06ec351a57d0ebc73dc09110013 | [
"MIT"
] | null | null | null | pygeoapi/api.py | Canadian-Geospatial-Platform/pygeoapi | 56ccf43d25a9e06ec351a57d0ebc73dc09110013 | [
"MIT"
] | null | null | null | # =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
# Francesco Bartoli <xbartolone@gmail.com>
# Sander Schaminee <sander.schaminee@geocat.net>
#
# Copyright (c) 2021 Tom Kralidis
# Copyright (c) 2020 Francesco Bartoli
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
""" Root level code of pygeoapi, parsing content provided by web framework.
Returns content from plugins and sets responses.
"""
from datetime import datetime, timezone
from functools import partial
import json
import logging
import os
import uuid
import re
import urllib.parse
from copy import deepcopy
from typing import Union, Any
from collections import OrderedDict
from dateutil.parser import parse as dateparse
from shapely.wkt import loads as shapely_loads
from shapely.errors import WKTReadingError
import pytz
from pygeoapi import __version__
from pygeoapi import l10n
from pygeoapi.linked_data import (geojson2geojsonld, jsonldify,
jsonldify_collection)
from pygeoapi.log import setup_logger
from pygeoapi.process.base import (
ProcessorExecuteError
)
from pygeoapi.plugin import load_plugin, PLUGINS
from pygeoapi.provider.base import (
ProviderGenericError, ProviderConnectionError, ProviderNotFoundError,
ProviderInvalidQueryError, ProviderNoDataError, ProviderQueryError,
ProviderItemNotFoundError, ProviderTypeError)
from pygeoapi.provider.tile import (ProviderTileNotFoundError,
ProviderTileQueryError,
ProviderTilesetIdNotFoundError)
from pygeoapi.util import (dategetter, DATETIME_FORMAT,
filter_dict_by_key_value, get_provider_by_type,
get_provider_default, get_typed_value, JobStatus,
json_serial, render_j2_template, str2bool,
TEMPLATES, to_json)
LOGGER = logging.getLogger(__name__)

#: Return headers for requests (e.g:X-Powered-By)
HEADERS = {
    'Content-Type': 'application/json',
    'X-Powered-By': 'pygeoapi {}'.format(__version__)
}

#: Formats allowed for ?f= requests
#: (maps the short query-parameter value to its MIME type)
FORMATS = OrderedDict((
    ('html', 'text/html'),
    ('jsonld', 'application/ld+json'),
    ('json', 'application/json'),
))

#: OGC API conformance class URIs declared by this server
CONFORMANCE = [
    'http://www.opengis.net/spec/ogcapi-common-1/1.0/conf/core',
    'http://www.opengis.net/spec/ogcapi-common-2/1.0/conf/collections',
    'http://www.opengis.net/spec/ogcapi-features-1/1.0/conf/core',
    'http://www.opengis.net/spec/ogcapi-features-1/1.0/conf/oas30',
    'http://www.opengis.net/spec/ogcapi-features-1/1.0/conf/html',
    'http://www.opengis.net/spec/ogcapi-features-1/1.0/conf/geojson',
    'http://www.opengis.net/spec/ogcapi_coverages-1/1.0/conf/core',
    'http://www.opengis.net/spec/ogcapi-coverages-1/1.0/conf/oas30',
    'http://www.opengis.net/spec/ogcapi-coverages-1/1.0/conf/html',
    'http://www.opengis.net/spec/ogcapi-tiles-1/1.0/req/core',
    'http://www.opengis.net/spec/ogcapi-tiles-1/1.0/req/collections',
    'http://www.opengis.net/spec/ogcapi-records-1/1.0/conf/core',
    'http://www.opengis.net/spec/ogcapi-records-1/1.0/conf/sorting',
    'http://www.opengis.net/spec/ogcapi-records-1/1.0/conf/opensearch',
    'http://www.opengis.net/spec/ogcapi-records-1/1.0/conf/json',
    'http://www.opengis.net/spec/ogcapi-records-1/1.0/conf/html',
    'http://www.opengis.net/spec/ogcapi-edr-1/1.0/conf/core'
]

#: Base URI for OGC link relation types
OGC_RELTYPES_BASE = 'http://www.opengis.net/def/rel/ogc/1.0'
def pre_process(func):
    """Decorator that swaps the framework-specific Request argument
    (Flask, Starlette) for an :class:`APIRequest` before invoking *func*.

    :param func: decorated function

    :returns: wrapped function
    """

    def inner(*args):
        cls, request = args[0], args[1]
        req = APIRequest(request, getattr(cls, 'locales', set()))
        # Forward any remaining positional arguments unchanged.
        return func(cls, req, *args[2:])

    return inner
class APIRequest:
    """ Transforms an incoming server-specific Request into an object
    with some generic helper methods and properties.

    :param request: The web platform specific Request instance.
    :param supported_locales: List or set of supported Locale instances.
    """

    def __init__(self, request, supported_locales):
        # Get request data (if any)
        self._data = getattr(request, 'data', None) or None

        # Copy request query parameters
        self._args = self._get_params(request)

        # Get path info (slashes at either end are stripped)
        self._path_info = request.headers.environ['PATH_INFO'].strip('/')

        # Extract locale from params or headers
        self._raw_locale, self._locale = self._get_locale(request.headers,
                                                          supported_locales)

        # Determine format
        self._format = self._get_format(request.headers)

    @staticmethod
    def _get_params(request):
        """ Extracts the query parameters from the Request object.

        :param request: A Flask or Starlette Request instance

        :returns: ImmutableMultiDict or empty dict
        """
        if hasattr(request, 'args'):
            # Return ImmutableMultiDict from Flask request
            return request.args
        elif hasattr(request, 'query_params'):
            # Return ImmutableMultiDict from Starlette request
            return request.query_params
        LOGGER.debug('No query parameters found')
        return {}

    def _get_locale(self, headers, supported_locales):
        """ Detects locale from "l=<language>" param or Accept-Language header.
        Returns a tuple of (raw, locale) if found in params or headers.
        Returns a tuple of (raw default, default locale) if not found.

        :param headers: A dict with Request headers
        :param supported_locales: List or set of supported Locale instances

        :returns: A tuple of (str, Locale)
        """
        raw = None
        try:
            # First supported locale acts as the server default
            default_locale = l10n.str2locale(supported_locales[0])
            default_str = l10n.locale2str(default_locale)
        except (TypeError, IndexError, l10n.LocaleError) as err:
            # This should normally not happen, since the API class already
            # loads the supported languages from the config, which raises
            # a LocaleError if any of these languages are invalid.
            LOGGER.error(err)
            # NOTE(review): the two f-string parts concatenate without a
            # space ("initializedwith") -- message typo to confirm/fix.
            raise ValueError(f"{self.__class__.__name__} must be initialized"
                             f"with a list of valid supported locales")

        # Query parameter takes precedence over the Accept-Language header
        for func, mapping in ((l10n.locale_from_params, self._args),
                              (l10n.locale_from_headers, headers)):
            loc_str = func(mapping)
            if loc_str:
                if not raw:
                    # This is the first-found locale string: set as raw
                    raw = loc_str
                # Check if locale string is a good match for the UI
                loc = l10n.best_match(loc_str, supported_locales)
                is_override = func is l10n.locale_from_params
                if loc != default_locale or is_override:
                    return raw, loc

        return raw or default_str, default_locale

    def _get_format(self, headers) -> Union[str, None]:
        """
        Get Request format type from query parameters or headers.

        :param headers: Dict of Request headers

        :returns: format value or None if not found/specified
        """

        # Optional f=html or f=json query param
        # Overrides Accept header and might differ from FORMATS
        format_ = (self._args.get('f') or '').strip()
        if format_:
            return format_

        # Format not specified: get from Accept headers (MIME types)
        # e.g. format_ = 'text/html'
        for h in (v.strip() for k, v in headers.items() if k.lower() == 'accept'):  # noqa
            for fmt, mime in FORMATS.items():
                # basic support for complex types (i.e. with "q=0.x")
                types_ = (t.split(';')[0].strip() for t in h.split(',') if t)
                if mime.strip() in types_:
                    format_ = fmt
                    # NOTE(review): this only exits the inner loop; a later
                    # Accept header could still override format_ -- confirm
                    # this is intended.
                    break

        return format_ or None

    @property
    def data(self):
        """ Returns the additional data send with the Request. """
        return self._data

    @property
    def params(self):
        """ Returns the Request query parameters dict. """
        return self._args

    @property
    def path_info(self):
        """ Returns the web server request path info part. """
        return self._path_info

    @property
    def locale(self) -> l10n.Locale:
        """ Returns the user-defined locale from the request object.
        If no locale has been defined or if it is invalid,
        the default server locale is returned.

        .. note::   The locale here determines the language in which pygeoapi
                    should return its responses. This may not be the language
                    that the user requested. It may also not be the language
                    that is supported by a collection provider, for example.
                    For this reason, you should pass the `raw_locale` property
                    to all plugins, so (e.g.) providers can determine the best
                    matching locale themselves.

        :returns: babel.core.Locale
        """
        return self._locale

    @property
    def raw_locale(self) -> str:
        """ Returns the raw locale string from the Request object.
        If no "l" query parameter or Accept-Language header was found,
        the first (= default) language tag from the server configuration is
        returned.
        Pass this value to the :func:`load_plugin` function to let the plugin
        determine a best match for the locale, which may be different from the
        locale used by pygeoapi's UI.

        :returns: a locale string
        """
        return self._raw_locale

    @property
    def format(self) -> Union[str, None]:
        """ Returns the content type format from the
        request query parameters or headers.

        :returns: Format name or None
        """
        return self._format

    def is_valid(self, additional_formats=None) -> bool:
        """ Returns True if:
            - the format is not set (None)
            - the requested format is supported
            - the requested format exists in a list if additional formats

        :param additional_formats: Optional additional supported formats list

        :returns: A boolean
        """
        if not self._format:
            return True
        if self._format in FORMATS.keys():
            return True
        if self._format in (f.lower() for f in (additional_formats or ())):
            return True
        return False

    def get_response_headers(self, content_type: str = None,
                             locale: l10n.Locale = None) -> dict:
        """ Prepares and returns a dictionary with Response object headers.

        This method always adds a 'Content-Language' header, where the value
        is determined by the 'l' query parameter or 'Accept-Language' header
        from the request.
        If no language was requested, the default pygeoapi language is used,
        unless a locale was explicitly provided.

        Adds a 'Content-Language' header if the Request had a valid
        language query parameter or if a `locale` override is provided.

        If the user does not specify `content_type`, the 'Content-Type` header
        is based on the `format` property. If that is invalid, the default
        'application/json' is used.

        :param content_type: An optional Content-Type header override.
        :param locale: An optional Locale for the Content-Language.

        :returns: A header dict
        """

        headers = HEADERS.copy()
        # Always add a Content-Language response header:
        # use user-override if specified
        response_loc = locale if locale else self._locale
        l10n.set_response_language(headers, response_loc)
        if content_type:
            # Set custom MIME type if specified
            headers['Content-Type'] = content_type
        elif self.is_valid() and self.format:
            # Set MIME type for valid formats
            headers['Content-Type'] = FORMATS[self.format]

        return headers
class API:
"""API object"""
def __init__(self, config):
"""
constructor
:param config: configuration dict
:returns: `pygeoapi.API` instance
"""
self.config = config
self.config['server']['url'] = self.config['server']['url'].rstrip('/')
# Process language settings
self.locales = l10n.get_locales(config)
self.default_locale = self.locales[0]
if 'templates' not in self.config['server']:
self.config['server']['templates'] = TEMPLATES
if 'pretty_print' not in self.config['server']:
self.config['server']['pretty_print'] = False
self.pretty_print = self.config['server']['pretty_print']
setup_logger(self.config['logging'])
# TODO: add as decorator
if 'manager' in self.config['server']:
manager_def = self.config['server']['manager']
else:
LOGGER.info('No process manager defined; starting dummy manager')
manager_def = {
'name': 'Dummy',
'connection': None,
'output_dir': None
}
LOGGER.debug('Loading process manager {}'.format(manager_def['name']))
self.manager = load_plugin('process_manager', manager_def)
LOGGER.info('Process manager plugin loaded')
@pre_process
@jsonldify
def landing_page(self, request: Union[APIRequest, Any]):
"""
Provide API
:param request: A request object
:returns: tuple of headers, status code, content
"""
if not request.is_valid():
return self.get_format_exception(request)
fcm = {
'links': [],
'title': l10n.translate(
self.config['metadata']['identification']['title'],
request.locale),
'description':
l10n.translate(
self.config['metadata']['identification']['description'],
request.locale)
}
LOGGER.debug('Creating links')
# TODO: put title text in config or translatable files?
fcm['links'] = [{
'rel': 'self' if request.format in (None, 'json') else 'alternate',
'type': FORMATS['json'],
'title': 'This document as JSON',
'href': '{}?f=json'.format(self.config['server']['url'])
}, {
'rel': 'self' if request.format == 'jsonld' else 'alternate',
'type': FORMATS['jsonld'],
'title': 'This document as RDF (JSON-LD)',
'href': '{}?f=jsonld'.format(self.config['server']['url'])
}, {
'rel': 'self' if request.format == 'html' else 'alternate',
'type': FORMATS['html'],
'title': 'This document as HTML',
'href': '{}?f=html'.format(self.config['server']['url']),
'hreflang': self.default_locale
}, {
'rel': 'service-desc',
'type': 'application/vnd.oai.openapi+json;version=3.0',
'title': 'The OpenAPI definition as JSON',
'href': '{}/openapi'.format(self.config['server']['url'])
}, {
'rel': 'service-doc',
'type': 'text/html',
'title': 'The OpenAPI definition as HTML',
'href': '{}/openapi?f=html'.format(self.config['server']['url']),
'hreflang': self.default_locale
}, {
'rel': 'conformance',
'type': 'application/json',
'title': 'Conformance',
'href': '{}/conformance'.format(self.config['server']['url'])
}, {
'rel': 'data',
'type': 'application/json',
'title': 'Collections',
'href': '{}/collections'.format(self.config['server']['url'])
}]
headers = request.get_response_headers()
if request.format == 'html': # render
fcm['processes'] = False
fcm['stac'] = False
if filter_dict_by_key_value(self.config['resources'],
'type', 'process'):
fcm['processes'] = True
if filter_dict_by_key_value(self.config['resources'],
'type', 'stac-collection'):
fcm['stac'] = True
content = render_j2_template(self.config, 'landing_page.html', fcm,
request.locale)
return headers, 200, content
if request.format == 'jsonld':
return headers, 200, to_json(self.fcmld, self.pretty_print) # noqa
return headers, 200, to_json(fcm, self.pretty_print)
@pre_process
def openapi(self, request: Union[APIRequest, Any], openapi):
"""
Provide OpenAPI document
:param request: A request object
:param openapi: dict of OpenAPI definition
:returns: tuple of headers, status code, content
"""
if not request.is_valid():
return self.get_format_exception(request)
headers = request.get_response_headers()
if request.format == 'html':
path = '/'.join([self.config['server']['url'].rstrip('/'),
'openapi'])
data = {
'openapi-document-path': path
}
content = render_j2_template(self.config, 'openapi.html', data,
request.locale)
return headers, 200, content
headers['Content-Type'] = 'application/vnd.oai.openapi+json;version=3.0' # noqa
if isinstance(openapi, dict):
return headers, 200, to_json(openapi, self.pretty_print)
else:
return headers, 200, openapi.read()
@pre_process
def conformance(self, request: Union[APIRequest, Any]):
"""
Provide conformance definition
:param request: A request object
:returns: tuple of headers, status code, content
"""
if not request.is_valid():
return self.get_format_exception(request)
conformance = {
'conformsTo': CONFORMANCE
}
headers = request.get_response_headers()
if request.format == 'html': # render
content = render_j2_template(self.config, 'conformance.html',
conformance, request.locale)
return headers, 200, content
return headers, 200, to_json(conformance, self.pretty_print)
@pre_process
@jsonldify
def describe_collections(self, request: Union[APIRequest, Any], dataset=None): # noqa
"""
Provide collection metadata
:param request: A request object
:param dataset: name of collection
:returns: tuple of headers, status code, content
"""
if not request.is_valid():
return self.get_format_exception(request)
headers = request.get_response_headers()
fcm = {
'collections': [],
'links': []
}
# Used to store provider locale (if supported)
prv_locale = None
collections = filter_dict_by_key_value(self.config['resources'],
'type', 'collection')
if all([dataset is not None, dataset not in collections.keys()]):
msg = 'Invalid collection'
return self.get_exception(
400, headers, request.format, 'InvalidParameterValue', msg)
LOGGER.debug('Creating collections')
for k, v in collections.items():
collection_data = get_provider_default(v['providers'])
collection_data_type = collection_data['type']
collection_data_format = None
if 'format' in collection_data:
collection_data_format = collection_data['format']
collection = {
'id': k,
'title': l10n.translate(v['title'], request.locale),
'description': l10n.translate(v['description'], request.locale), # noqa
'keywords': l10n.translate(v['keywords'], request.locale),
'links': []
}
bbox = v['extents']['spatial']['bbox']
# The output should be an array of bbox, so if the user only
# provided a single bbox, wrap it in a array.
if not isinstance(bbox[0], list):
bbox = [bbox]
collection['extent'] = {
'spatial': {
'bbox': bbox
}
}
if 'crs' in v['extents']['spatial']:
collection['extent']['spatial']['crs'] = \
v['extents']['spatial']['crs']
t_ext = v.get('extents', {}).get('temporal', {})
if t_ext:
begins = dategetter('begin', t_ext)
ends = dategetter('end', t_ext)
collection['extent']['temporal'] = {
'interval': [[begins, ends]]
}
if 'trs' in t_ext:
collection['extent']['temporal']['trs'] = t_ext['trs']
for link in l10n.translate(v['links'], request.locale):
lnk = {
'type': link['type'],
'rel': link['rel'],
'title': link['title'],
'href': link['href']
}
if 'hreflang' in link:
lnk['hreflang'] = link['hreflang']
collection['links'].append(lnk)
LOGGER.debug('Adding JSON and HTML link relations')
collection['links'].append({
'type': 'application/json',
'rel': 'self' if not request.format
or request.format == 'json' else 'alternate',
'title': 'This document as JSON',
'href': '{}/collections/{}?f=json'.format(
self.config['server']['url'], k)
})
collection['links'].append({
'type': 'application/ld+json',
'rel': 'self' if request.format == 'jsonld' else 'alternate',
'title': 'This document as RDF (JSON-LD)',
'href': '{}/collections/{}?f=jsonld'.format(
self.config['server']['url'], k)
})
collection['links'].append({
'type': 'text/html',
'rel': 'self' if request.format == 'html' else 'alternate',
'title': 'This document as HTML',
'href': '{}/collections/{}?f=html'.format(
self.config['server']['url'], k)
})
if collection_data_type in ['feature', 'record']:
collection['itemType'] = collection_data_type
LOGGER.debug('Adding feature/record based links')
collection['links'].append({
'type': 'application/json',
'rel': 'queryables',
'title': 'Queryables for this collection as JSON',
'href': '{}/collections/{}/queryables?f=json'.format(
self.config['server']['url'], k)
})
collection['links'].append({
'type': 'text/html',
'rel': 'queryables',
'title': 'Queryables for this collection as HTML',
'href': '{}/collections/{}/queryables?f=html'.format(
self.config['server']['url'], k)
})
collection['links'].append({
'type': 'application/geo+json',
'rel': 'items',
'title': 'items as GeoJSON',
'href': '{}/collections/{}/items?f=json'.format(
self.config['server']['url'], k)
})
collection['links'].append({
'type': 'application/ld+json',
'rel': 'items',
'title': 'items as RDF (GeoJSON-LD)',
'href': '{}/collections/{}/items?f=jsonld'.format(
self.config['server']['url'], k)
})
collection['links'].append({
'type': 'text/html',
'rel': 'items',
'title': 'Items as HTML',
'href': '{}/collections/{}/items?f=html'.format(
self.config['server']['url'], k)
})
elif collection_data_type == 'coverage':
LOGGER.debug('Adding coverage based links')
collection['links'].append({
'type': 'application/json',
'rel': 'collection',
'title': 'Detailed Coverage metadata in JSON',
'href': '{}/collections/{}?f=json'.format(
self.config['server']['url'], k)
})
collection['links'].append({
'type': 'text/html',
'rel': 'collection',
'title': 'Detailed Coverage metadata in HTML',
'href': '{}/collections/{}?f=html'.format(
self.config['server']['url'], k)
})
coverage_url = '{}/collections/{}/coverage'.format(
self.config['server']['url'], k)
collection['links'].append({
'type': 'application/json',
'rel': '{}/coverage-domainset'.format(OGC_RELTYPES_BASE),
'title': 'Coverage domain set of collection in JSON',
'href': '{}/domainset?f=json'.format(coverage_url)
})
collection['links'].append({
'type': 'text/html',
'rel': '{}/coverage-domainset'.format(OGC_RELTYPES_BASE),
'title': 'Coverage domain set of collection in HTML',
'href': '{}/domainset?f=html'.format(coverage_url)
})
collection['links'].append({
'type': 'application/json',
'rel': '{}/coverage-rangetype'.format(OGC_RELTYPES_BASE),
'title': 'Coverage range type of collection in JSON',
'href': '{}/rangetype?f=json'.format(coverage_url)
})
collection['links'].append({
'type': 'text/html',
'rel': '{}/coverage-rangetype'.format(OGC_RELTYPES_BASE),
'title': 'Coverage range type of collection in HTML',
'href': '{}/rangetype?f=html'.format(coverage_url)
})
collection['links'].append({
'type': 'application/prs.coverage+json',
'rel': '{}/coverage'.format(OGC_RELTYPES_BASE),
'title': 'Coverage data',
'href': '{}/collections/{}/coverage?f=json'.format(
self.config['server']['url'], k)
})
if collection_data_format is not None:
collection['links'].append({
'type': collection_data_format['mimetype'],
'rel': '{}/coverage'.format(OGC_RELTYPES_BASE),
'title': 'Coverage data as {}'.format(
collection_data_format['name']),
'href': '{}/collections/{}/coverage?f={}'.format(
self.config['server']['url'], k,
collection_data_format['name'])
})
if dataset is not None:
LOGGER.debug('Creating extended coverage metadata')
try:
provider_def = get_provider_by_type(
self.config['resources'][k]['providers'],
'coverage')
p = load_plugin('provider', provider_def)
except ProviderConnectionError:
msg = 'connection error (check logs)'
return self.get_exception(500, headers, request.format,
'NoApplicableCode', msg)
except ProviderTypeError:
pass
else:
# Get provider language (if any)
prv_locale = l10n.get_plugin_locale(provider_def,
request.raw_locale)
collection['crs'] = [p.crs]
collection['domainset'] = p.get_coverage_domainset(
language=prv_locale)
collection['rangetype'] = p.get_coverage_rangetype(
language=prv_locale)
try:
tile = get_provider_by_type(v['providers'], 'tile')
except ProviderTypeError:
tile = None
if tile:
LOGGER.debug('Adding tile links')
collection['links'].append({
'type': 'application/json',
'rel': 'tiles',
'title': 'Tiles as JSON',
'href': '{}/collections/{}/tiles?f=json'.format(
self.config['server']['url'], k)
})
collection['links'].append({
'type': 'text/html',
'rel': 'tiles',
'title': 'Tiles as HTML',
'href': '{}/collections/{}/tiles?f=html'.format(
self.config['server']['url'], k)
})
try:
edr = get_provider_by_type(v['providers'], 'edr')
except ProviderTypeError:
edr = None
if edr and dataset is not None:
LOGGER.debug('Adding EDR links')
try:
p = load_plugin('provider', get_provider_by_type(
self.config['resources'][dataset]['providers'], 'edr'))
parameters = p.get_fields()
if parameters:
collection['parameters'] = {}
for f in parameters['field']:
collection['parameters'][f['id']] = f
for qt in p.get_query_types():
collection['links'].append({
'type': 'text/json',
'rel': 'data',
'title': '{} query for this collection as JSON'.format(qt), # noqa
'href': '{}/collections/{}/{}?f=json'.format(
self.config['server']['url'], k, qt)
})
collection['links'].append({
'type': 'text/html',
'rel': 'data',
'title': '{} query for this collection as HTML'.format(qt), # noqa
'href': '{}/collections/{}/{}?f=html'.format(
self.config['server']['url'], k, qt)
})
except ProviderConnectionError:
msg = 'connection error (check logs)'
return self.get_exception(
500, headers, request.format, 'NoApplicableCode', msg)
except ProviderTypeError:
pass
if dataset is not None and k == dataset:
fcm = collection
break
fcm['collections'].append(collection)
if dataset is None:
fcm['links'].append({
'type': 'application/json',
'rel': 'self' if not format
or request.format == 'json' else 'alternate',
'title': 'This document as JSON',
'href': '{}/collections?f=json'.format(
self.config['server']['url'])
})
fcm['links'].append({
'type': 'application/ld+json',
'rel': 'self' if request.format == 'jsonld' else 'alternate',
'title': 'This document as RDF (JSON-LD)',
'href': '{}/collections?f=jsonld'.format(
self.config['server']['url'])
})
fcm['links'].append({
'type': 'text/html',
'rel': 'self' if request.format == 'html' else 'alternate',
'title': 'This document as HTML',
'href': '{}/collections?f=html'.format(
self.config['server']['url'])
})
if request.format == 'html': # render
l10n.set_response_language(headers, prv_locale)
if dataset is not None:
content = render_j2_template(self.config,
'collections/collection.html',
fcm, request.locale)
else:
content = render_j2_template(self.config,
'collections/index.html', fcm,
request.locale)
return headers, 200, content
if request.format == 'jsonld':
l10n.set_response_language(headers, prv_locale, True)
jsonld = self.fcmld.copy() # noqa
if dataset is not None:
jsonld['dataset'] = jsonldify_collection(self, fcm,
request.locale)
else:
jsonld['dataset'] = [
jsonldify_collection(self, c, request.locale)
for c in fcm.get('collections', [])
]
return headers, 200, to_json(jsonld, self.pretty_print)
l10n.set_response_language(headers, prv_locale, True)
return headers, 200, to_json(fcm, self.pretty_print)
@pre_process
@jsonldify
def get_collection_queryables(self, request: Union[APIRequest, Any], dataset=None): # noqa
"""
Provide collection queryables
:param request: A request object
:param dataset: name of collection
:returns: tuple of headers, status code, content
"""
if not request.is_valid():
return self.get_format_exception(request)
headers = request.get_response_headers()
if any([dataset is None,
dataset not in self.config['resources'].keys()]):
msg = 'Invalid collection'
return self.get_exception(
400, headers, request.format, 'InvalidParameterValue', msg)
LOGGER.debug('Creating collection queryables')
try:
LOGGER.debug('Loading feature provider')
p = load_plugin('provider', get_provider_by_type(
self.config['resources'][dataset]['providers'], 'feature'))
except ProviderTypeError:
LOGGER.debug('Loading record provider')
p = load_plugin('provider', get_provider_by_type(
self.config['resources'][dataset]['providers'], 'record'))
except ProviderConnectionError:
msg = 'connection error (check logs)'
return self.get_exception(
500, headers, request.format, 'NoApplicableCode', msg)
except ProviderQueryError:
msg = 'query error (check logs)'
return self.get_exception(
500, headers, request.format, 'NoApplicableCode', msg)
queryables = {
'type': 'object',
'title': self.config['resources'][dataset]['title'],
'properties': {},
'$schema': 'http://json-schema.org/draft/2019-09/schema',
'$id': '{}/collections/{}/queryables'.format(
self.config['server']['url'], dataset)
}
for k, v in p.fields.items():
show_field = False
if p.properties:
if k in p.properties:
show_field = True
else:
show_field = True
if show_field:
queryables['properties'][k] = {
'title': k,
'type': v['type']
}
if 'values' in v:
queryables['properties'][k]['enum'] = v['values']
if request.format == 'html': # render
queryables['title'] = l10n.translate(
self.config['resources'][dataset]['title'], request.locale)
content = render_j2_template(self.config,
'collections/queryables.html',
queryables, request.locale)
return headers, 200, content
return headers, 200, to_json(queryables, self.pretty_print)
    @pre_process
    def get_collection_items(self, request: Union[APIRequest, Any], dataset, pathinfo=None):  # noqa
        """
        Queries collection

        Validates the paging/filter query parameters, delegates the query
        to the collection's feature (or record) provider, and decorates
        the result with self/alternate and prev/next paging links before
        serializing to the requested format.

        :param request: A request object
        :param dataset: dataset name
        :param pathinfo: path location

        :returns: tuple of headers, status code, content
        """
        # Formatter plugins (e.g. CSV) extend the set of valid formats
        if not request.is_valid(PLUGINS['formatter'].keys()):
            return self.get_format_exception(request)
        headers = request.get_response_headers()

        properties = []
        # Parameters handled explicitly below; anything else is treated
        # as a property filter (and must exist in the provider's fields)
        reserved_fieldnames = ['bbox', 'f', 'l', 'limit', 'startindex',
                               'resulttype', 'datetime', 'sortby',
                               'properties', 'skipGeometry', 'q']

        collections = filter_dict_by_key_value(self.config['resources'],
                                               'type', 'collection')

        if dataset not in collections.keys():
            msg = 'Invalid collection'
            return self.get_exception(
                400, headers, request.format, 'InvalidParameterValue', msg)

        LOGGER.debug('Processing query parameters')

        LOGGER.debug('Processing startindex parameter')
        try:
            startindex = int(request.params.get('startindex'))
            if startindex < 0:
                msg = 'startindex value should be positive or zero'
                return self.get_exception(
                    400, headers, request.format, 'InvalidParameterValue', msg)
        except TypeError as err:
            # Parameter absent: int(None) raises TypeError -> default 0
            LOGGER.warning(err)
            startindex = 0
        except ValueError:
            msg = 'startindex value should be an integer'
            return self.get_exception(
                400, headers, request.format, 'InvalidParameterValue', msg)

        LOGGER.debug('Processing limit parameter')
        try:
            limit = int(request.params.get('limit'))
            # TODO: We should do more validation, against the min and max
            # allowed by the server configuration
            if limit <= 0:
                msg = 'limit value should be strictly positive'
                return self.get_exception(
                    400, headers, request.format, 'InvalidParameterValue', msg)
        except TypeError as err:
            # Parameter absent: fall back to the server-configured limit
            LOGGER.warning(err)
            limit = int(self.config['server']['limit'])
        except ValueError:
            msg = 'limit value should be an integer'
            return self.get_exception(
                400, headers, request.format, 'InvalidParameterValue', msg)

        resulttype = request.params.get('resulttype') or 'results'

        LOGGER.debug('Processing bbox parameter')

        bbox = request.params.get('bbox')

        if bbox is None:
            bbox = []
        else:
            try:
                bbox = validate_bbox(bbox)
            except ValueError as err:
                msg = str(err)
                return self.get_exception(
                    400, headers, request.format, 'InvalidParameterValue', msg)

        LOGGER.debug('Processing datetime parameter')
        datetime_ = request.params.get('datetime')
        try:
            # Also checked against the collection's configured temporal extent
            datetime_ = validate_datetime(collections[dataset]['extents'],
                                          datetime_)
        except ValueError as err:
            msg = str(err)
            return self.get_exception(
                400, headers, request.format, 'InvalidParameterValue', msg)

        LOGGER.debug('processing q parameter')
        q = request.params.get('q') or None

        LOGGER.debug('Loading provider')

        try:
            provider_def = get_provider_by_type(
                collections[dataset]['providers'], 'feature')
            p = load_plugin('provider', provider_def)
        except ProviderTypeError:
            # No feature provider: fall back to a record provider
            try:
                provider_def = get_provider_by_type(
                    collections[dataset]['providers'], 'record')
                p = load_plugin('provider', provider_def)
            except ProviderTypeError:
                msg = 'Invalid provider type'
                return self.get_exception(
                    400, headers, request.format, 'NoApplicableCode', msg)
        except ProviderConnectionError:
            msg = 'connection error (check logs)'
            return self.get_exception(
                500, headers, request.format, 'NoApplicableCode', msg)
        except ProviderQueryError:
            msg = 'query error (check logs)'
            return self.get_exception(
                500, headers, request.format, 'NoApplicableCode', msg)

        LOGGER.debug('processing property parameters')
        for k, v in request.params.items():
            # Non-reserved parameters must match a provider field
            if k not in reserved_fieldnames and k not in p.fields.keys():
                msg = 'unknown query parameter: {}'.format(k)
                return self.get_exception(
                    400, headers, request.format, 'InvalidParameterValue', msg)
            elif k not in reserved_fieldnames and k in p.fields.keys():
                LOGGER.debug('Add property filter {}={}'.format(k, v))
                properties.append((k, v))

        LOGGER.debug('processing sort parameter')
        val = request.params.get('sortby')

        if val is not None:
            sortby = []
            sorts = val.split(',')
            for s in sorts:
                prop = s
                order = '+'
                # Leading '+'/'-' selects ascending/descending order
                if s[0] in ['+', '-']:
                    order = s[0]
                    prop = s[1:]

                if prop not in p.fields.keys():
                    msg = 'bad sort property'
                    return self.get_exception(
                        400, headers, request.format,
                        'InvalidParameterValue', msg)

                sortby.append({'property': prop, 'order': order})
        else:
            sortby = []

        LOGGER.debug('processing properties parameter')
        val = request.params.get('properties')
        if val is not None:
            select_properties = val.split(',')
            properties_to_check = set(p.properties) | set(p.fields.keys())

            # Every selected property must be known to the provider
            if (len(list(set(select_properties) -
                         set(properties_to_check))) > 0):
                msg = 'unknown properties specified'
                return self.get_exception(
                    400, headers, request.format, 'InvalidParameterValue', msg)
        else:
            select_properties = []

        LOGGER.debug('processing skipGeometry parameter')
        val = request.params.get('skipGeometry')
        if val is not None:
            skip_geometry = str2bool(val)
        else:
            skip_geometry = False

        # Get provider locale (if any)
        prv_locale = l10n.get_plugin_locale(provider_def, request.raw_locale)

        LOGGER.debug('Querying provider')
        LOGGER.debug('startindex: {}'.format(startindex))
        LOGGER.debug('limit: {}'.format(limit))
        LOGGER.debug('resulttype: {}'.format(resulttype))
        LOGGER.debug('sortby: {}'.format(sortby))
        LOGGER.debug('bbox: {}'.format(bbox))
        LOGGER.debug('datetime: {}'.format(datetime_))
        LOGGER.debug('properties: {}'.format(select_properties))
        LOGGER.debug('skipGeometry: {}'.format(skip_geometry))
        LOGGER.debug('language: {}'.format(prv_locale))
        LOGGER.debug('q: {}'.format(q))

        try:
            content = p.query(startindex=startindex, limit=limit,
                              resulttype=resulttype, bbox=bbox,
                              datetime_=datetime_, properties=properties,
                              sortby=sortby,
                              select_properties=select_properties,
                              skip_geometry=skip_geometry,
                              q=q, language=prv_locale)
        except ProviderConnectionError as err:
            LOGGER.error(err)
            msg = 'connection error (check logs)'
            return self.get_exception(
                500, headers, request.format, 'NoApplicableCode', msg)
        except ProviderQueryError as err:
            LOGGER.error(err)
            msg = 'query error (check logs)'
            return self.get_exception(
                500, headers, request.format, 'NoApplicableCode', msg)
        except ProviderGenericError as err:
            LOGGER.error(err)
            msg = 'generic error (check logs)'
            return self.get_exception(
                500, headers, request.format, 'NoApplicableCode', msg)

        # Re-serialize the incoming query parameters (minus 'f' and
        # 'startindex') for appending to the paging links below
        serialized_query_params = ''
        for k, v in request.params.items():
            if k not in ('f', 'startindex'):
                serialized_query_params += '&'
                serialized_query_params += urllib.parse.quote(k, safe='')
                serialized_query_params += '='
                serialized_query_params += urllib.parse.quote(str(v), safe=',')

        content['links'] = [{
            'type': 'application/geo+json',
            'rel': 'self' if request.format in (None, 'json') else 'alternate',
            'title': 'This document as GeoJSON',
            'href': '{}/collections/{}/items?f=json{}'.format(
                self.config['server']['url'], dataset, serialized_query_params)
        }, {
            'rel': 'self' if request.format == 'jsonld' else 'alternate',
            'type': 'application/ld+json',
            'title': 'This document as RDF (JSON-LD)',
            'href': '{}/collections/{}/items?f=jsonld{}'.format(
                self.config['server']['url'], dataset, serialized_query_params)
        }, {
            'type': 'text/html',
            'rel': 'self' if request.format == 'html' else 'alternate',
            'title': 'This document as HTML',
            'href': '{}/collections/{}/items?f=html{}'.format(
                self.config['server']['url'], dataset, serialized_query_params)
        }]

        # Paging: prev whenever we are beyond the first page ...
        if startindex > 0:
            prev = max(0, startindex - limit)
            content['links'].append(
                {
                    'type': 'application/geo+json',
                    'rel': 'prev',
                    'title': 'items (prev)',
                    'href': '{}/collections/{}/items?startindex={}{}'
                    .format(self.config['server']['url'], dataset, prev,
                            serialized_query_params)
                })

        # ... next whenever the page came back full (heuristic: a full page
        # suggests more results may exist)
        if len(content['features']) == limit:
            next_ = startindex + limit
            content['links'].append(
                {
                    'type': 'application/geo+json',
                    'rel': 'next',
                    'title': 'items (next)',
                    'href': '{}/collections/{}/items?startindex={}{}'
                    .format(
                        self.config['server']['url'], dataset, next_,
                        serialized_query_params)
                })

        content['links'].append(
            {
                'type': 'application/json',
                'title': l10n.translate(
                    collections[dataset]['title'], request.locale),
                'rel': 'collection',
                'href': '{}/collections/{}'.format(
                    self.config['server']['url'], dataset)
            })

        content['timeStamp'] = datetime.utcnow().strftime(
            '%Y-%m-%dT%H:%M:%S.%fZ')

        if request.format == 'html':  # render
            l10n.set_response_language(headers, prv_locale)

            # For constructing proper URIs to items
            if pathinfo:
                path_info = '/'.join([
                    self.config['server']['url'].rstrip('/'),
                    pathinfo.strip('/')])
            else:
                path_info = '/'.join([
                    self.config['server']['url'].rstrip('/'),
                    request.path_info])

            content['items_path'] = path_info
            content['dataset_path'] = '/'.join(path_info.split('/')[:-1])
            content['collections_path'] = '/'.join(path_info.split('/')[:-2])
            content['startindex'] = startindex

            if p.title_field is not None:
                content['title_field'] = p.title_field
                # When a title field exists, templates use it as the id too
                content['id_field'] = p.title_field

            content = render_j2_template(self.config,
                                         'collections/items/index.html',
                                         content, request.locale)
            return headers, 200, content
        elif request.format == 'csv':  # render
            l10n.set_response_language(headers, prv_locale)
            formatter = load_plugin('formatter',
                                    {'name': 'CSV', 'geom': True})

            content = formatter.write(
                data=content,
                options={
                    'provider_def': get_provider_by_type(
                        collections[dataset]['providers'],
                        'feature')
                }
            )

            headers['Content-Type'] = '{}; charset={}'.format(
                formatter.mimetype, self.config['server']['encoding'])

            # Serve the CSV as a download
            cd = 'attachment; filename="{}.csv"'.format(dataset)
            headers['Content-Disposition'] = cd

            return headers, 200, content

        elif request.format == 'jsonld':
            l10n.set_response_language(headers, prv_locale, True)
            content = geojson2geojsonld(self.config, content, dataset)
            return headers, 200, content

        l10n.set_response_language(headers, prv_locale, True)
        return headers, 200, to_json(content, self.pretty_print)
@pre_process
def get_collection_item(self, request: Union[APIRequest, Any], dataset, identifier): # noqa
"""
Get a single collection item
:param request: A request object
:param dataset: dataset name
:param identifier: item identifier
:returns: tuple of headers, status code, content
"""
if not request.is_valid():
return self.get_format_exception(request)
headers = request.get_response_headers()
LOGGER.debug('Processing query parameters')
collections = filter_dict_by_key_value(self.config['resources'],
'type', 'collection')
if dataset not in collections.keys():
msg = 'Invalid collection'
return self.get_exception(
400, headers, request.format, 'InvalidParameterValue', msg)
LOGGER.debug('Loading provider')
try:
provider_def = get_provider_by_type(
collections[dataset]['providers'], 'feature')
p = load_plugin('provider', provider_def)
except ProviderTypeError:
try:
provider_def = get_provider_by_type(
collections[dataset]['providers'], 'record')
p = load_plugin('provider', provider_def)
except ProviderTypeError:
msg = 'Invalid provider type'
return self.get_exception(
400, headers, request.format, 'InvalidParameterValue', msg)
# Get provider language (if any)
prv_locale = l10n.get_plugin_locale(provider_def, request.raw_locale)
try:
LOGGER.debug('Fetching id {}'.format(identifier))
content = p.get(identifier, language=prv_locale)
except ProviderConnectionError as err:
LOGGER.error(err)
msg = 'connection error (check logs)'
return self.get_exception(
500, headers, request.format, 'NoApplicableCode', msg)
except ProviderItemNotFoundError:
msg = 'identifier not found'
return self.get_exception(404, headers, request.format,
'NotFound', msg)
except ProviderQueryError as err:
LOGGER.error(err)
msg = 'query error (check logs)'
return self.get_exception(
500, headers, request.format, 'NoApplicableCode', msg)
except ProviderGenericError as err:
LOGGER.error(err)
msg = 'generic error (check logs)'
return self.get_exception(
500, headers, request.format, 'NoApplicableCode', msg)
if content is None:
msg = 'identifier not found'
return self.get_exception(400, headers, request.format,
'NotFound', msg)
content['links'] = [{
'rel': 'self' if request.format in (None, 'json') else 'alternate',
'type': 'application/geo+json',
'title': 'This document as GeoJSON',
'href': '{}/collections/{}/items/{}?f=json'.format(
self.config['server']['url'], dataset, identifier)
}, {
'rel': 'self' if request.format == 'jsonld' else 'alternate',
'type': 'application/ld+json',
'title': 'This document as RDF (JSON-LD)',
'href': '{}/collections/{}/items/{}?f=jsonld'.format(
self.config['server']['url'], dataset, identifier)
}, {
'rel': 'self' if request.format == 'html' else 'alternate',
'type': 'text/html',
'title': 'This document as HTML',
'href': '{}/collections/{}/items/{}?f=html'.format(
self.config['server']['url'], dataset, identifier)
}, {
'rel': 'collection',
'type': 'application/json',
'title': l10n.translate(collections[dataset]['title'],
request.locale),
'href': '{}/collections/{}'.format(
self.config['server']['url'], dataset)
}, {
'rel': 'prev',
'type': 'application/geo+json',
'href': '{}/collections/{}/items/{}'.format(
self.config['server']['url'], dataset, identifier)
}, {
'rel': 'next',
'type': 'application/geo+json',
'href': '{}/collections/{}/items/{}'.format(
self.config['server']['url'], dataset, identifier)
}]
if request.format == 'html': # render
l10n.set_response_language(headers, prv_locale)
content['title'] = l10n.translate(collections[dataset]['title'],
request.locale)
content['id_field'] = p.id_field
if p.title_field is not None:
content['title_field'] = p.title_field
content = render_j2_template(self.config,
'collections/items/item.html',
content, request.locale)
return headers, 200, content
elif request.format == 'jsonld':
l10n.set_response_language(headers, prv_locale, True)
content = geojson2geojsonld(
self.config, content, dataset, identifier=identifier
)
return headers, 200, content
l10n.set_response_language(headers, prv_locale, True)
return headers, 200, to_json(content, self.pretty_print)
    @pre_process
    @jsonldify
    def get_collection_coverage(self, request: Union[APIRequest, Any], dataset):  # noqa
        """
        Returns a subset of a collection coverage

        Parses the ``bbox``, ``datetime``, ``rangeSubset`` and ``subset``
        query parameters, queries the collection's coverage provider, and
        returns the result either in the provider's native format or as
        CoverageJSON.

        :param request: A request object
        :param dataset: dataset name

        :returns: tuple of headers, status code, content
        """
        query_args = {}

        # default to JSON until an explicit format is requested
        format_ = 'json'
        headers = request.get_response_headers(FORMATS['json'])

        LOGGER.debug('Loading provider')
        try:
            collection_def = get_provider_by_type(
                self.config['resources'][dataset]['providers'], 'coverage')

            p = load_plugin('provider', collection_def)
        except KeyError:
            msg = 'collection does not exist'
            return self.get_exception(
                400, headers, request.format,
                'NotFound', msg)
        except ProviderTypeError:
            msg = 'invalid provider type'
            return self.get_exception(
                400, headers, format_, 'NoApplicableCode', msg)
        except ProviderConnectionError:
            msg = 'connection error (check logs)'
            return self.get_exception(
                500, headers, format_, 'NoApplicableCode', msg)

        LOGGER.debug('Processing bbox parameter')

        bbox = request.params.get('bbox')
        if bbox is None:
            bbox = []
        else:
            try:
                bbox = validate_bbox(bbox)
            except ValueError as err:
                msg = str(err)
                return self.get_exception(
                    500, headers, format_, 'InvalidParameterValue', msg)

        query_args['bbox'] = bbox

        LOGGER.debug('Processing datetime parameter')
        datetime_ = request.params.get('datetime', None)

        try:
            # validated against the collection's configured temporal extent
            datetime_ = validate_datetime(
                self.config['resources'][dataset]['extents'], datetime_)
        except ValueError as err:
            msg = str(err)
            return self.get_exception(
                400, headers, format_, 'InvalidParameterValue', msg)

        query_args['datetime_'] = datetime_
        if request.format:
            query_args['format_'] = format_ = request.format

        range_subset = request.params.get('rangeSubset')
        if range_subset:
            LOGGER.debug('Processing rangeSubset parameter')
            # drop empty tokens (e.g. from a trailing comma)
            query_args['range_subset'] = [rs for
                                          rs in range_subset.split(',') if rs]
            LOGGER.debug('Fields: {}'.format(query_args['range_subset']))

            # every requested field must exist on the provider
            for a in query_args['range_subset']:
                if a not in p.fields:
                    msg = 'Invalid field specified'
                    return self.get_exception(
                        400, headers, format_, 'InvalidParameterValue', msg)

        if 'subset' in request.params:
            subsets = {}
            LOGGER.debug('Processing subset parameter')

            for s in (request.params['subset'] or '').split(','):
                try:
                    # subset syntax is axis(min:max); bounds may optionally
                    # be double-quoted strings
                    if '"' not in s:
                        m = re.search(r'(.*)\((.*):(.*)\)', s)
                    else:
                        m = re.search(r'(.*)\(\"(\S+)\":\"(\S+.*)\"\)', s)

                    subset_name = m.group(1)
                    if subset_name not in p.axes:
                        msg = 'Invalid axis name'
                        return self.get_exception(
                            400, headers, format_,
                            'InvalidParameterValue', msg)

                    subsets[subset_name] = list(map(
                        get_typed_value, m.group(2, 3)))
                except AttributeError:
                    # re.search returned None: malformed subset expression
                    msg = 'subset should be like "axis(min:max)"'
                    return self.get_exception(
                        400, headers, format_, 'InvalidParameterValue', msg)

            query_args['subsets'] = subsets
            LOGGER.debug('Subsets: {}'.format(query_args['subsets']))

        LOGGER.debug('Querying coverage')
        try:
            data = p.query(**query_args)
        except ProviderInvalidQueryError as err:
            msg = 'query error: {}'.format(err)
            return self.get_exception(
                400, headers, format_, 'InvalidParameterValue', msg)
        except ProviderNoDataError:
            msg = 'No data found'
            return self.get_exception(
                204, headers, format_, 'InvalidParameterValue', msg)
        except ProviderQueryError:
            msg = 'query error (check logs)'
            return self.get_exception(
                500, headers, format_, 'NoApplicableCode', msg)

        mt = collection_def['format']['name']
        if format_ == mt:  # native provider format: pass bytes through as-is
            headers['Content-Type'] = collection_def['format']['mimetype']
            return headers, 200, data
        elif format_ == 'json':
            headers['Content-Type'] = 'application/prs.coverage+json'
            return headers, 200, to_json(data, self.pretty_print)
        else:
            return self.get_format_exception(request)
@pre_process
@jsonldify
def get_collection_coverage_domainset(self, request: Union[APIRequest, Any], dataset): # noqa
"""
Returns a collection coverage domainset
:param request: A request object
:param dataset: dataset name
:returns: tuple of headers, status code, content
"""
format_ = request.format or 'json'
headers = request.get_response_headers()
LOGGER.debug('Loading provider')
try:
collection_def = get_provider_by_type(
self.config['resources'][dataset]['providers'], 'coverage')
p = load_plugin('provider', collection_def)
# Get provider language (if any)
prv_locale = l10n.get_plugin_locale(collection_def,
request.raw_locale)
data = p.get_coverage_domainset(language=prv_locale)
except KeyError:
msg = 'collection does not exist'
return self.get_exception(
404, headers, format_, 'InvalidParameterValue', msg)
except ProviderTypeError:
msg = 'invalid provider type'
return self.get_exception(
500, headers, format_, 'NoApplicableCode', msg)
except ProviderConnectionError:
msg = 'connection error (check logs)'
return self.get_exception(
500, headers, format_, 'NoApplicableCode', msg)
if format_ == 'json':
l10n.set_response_language(headers, prv_locale, True)
return headers, 200, to_json(data, self.pretty_print)
elif format_ == 'html':
l10n.set_response_language(headers, prv_locale)
data['id'] = dataset
data['title'] = l10n.translate(
self.config['resources'][dataset]['title'], request.locale)
content = render_j2_template(self.config,
'collections/coverage/domainset.html',
data, request.locale)
return headers, 200, content
else:
return self.get_format_exception(request)
@pre_process
@jsonldify
def get_collection_coverage_rangetype(self, request: Union[APIRequest, Any], dataset): # noqa
"""
Returns a collection coverage rangetype
:param request: A request object
:param dataset: dataset name
:returns: tuple of headers, status code, content
"""
format_ = request.format or 'json'
headers = request.get_response_headers()
LOGGER.debug('Loading provider')
try:
collection_def = get_provider_by_type(
self.config['resources'][dataset]['providers'], 'coverage')
p = load_plugin('provider', collection_def)
# Get provider language (if any)
prv_locale = l10n.get_plugin_locale(collection_def,
request.raw_locale)
data = p.get_coverage_rangetype(language=prv_locale)
except KeyError:
msg = 'collection does not exist'
return self.get_exception(
404, headers, format_, 'InvalidParameterValue', msg)
except ProviderTypeError:
msg = 'invalid provider type'
return self.get_exception(
500, headers, format_, 'NoApplicableCode', msg)
except ProviderConnectionError:
msg = 'connection error (check logs)'
return self.get_exception(
500, headers, format_, 'NoApplicableCode', msg)
if format_ == 'json':
l10n.set_response_language(headers, prv_locale, True)
return headers, 200, to_json(data, self.pretty_print)
elif format_ == 'html':
l10n.set_response_language(headers, prv_locale)
data['id'] = dataset
data['title'] = l10n.translate(
self.config['resources'][dataset]['title'], request.locale)
content = render_j2_template(self.config,
'collections/coverage/rangetype.html',
data, request.locale)
return headers, 200, content
else:
return self.get_format_exception(request)
    @pre_process
    @jsonldify
    def get_collection_tiles(self, request: Union[APIRequest, Any], dataset=None):  # noqa
        """
        Provide collection tiles

        Builds the tiles landing document for a collection: self/alternate
        links, tile service URL templates from the tile provider, and the
        supported tile matrix sets.

        :param request: A request object
        :param dataset: name of collection

        :returns: tuple of headers, status code, content
        """
        if not request.is_valid():
            return self.get_format_exception(request)
        headers = request.get_response_headers()
        if any([dataset is None,
                dataset not in self.config['resources'].keys()]):
            msg = 'Invalid collection'
            return self.get_exception(
                400, headers, request.format, 'InvalidParameterValue', msg)

        LOGGER.debug('Creating collection tiles')
        LOGGER.debug('Loading provider')
        try:
            t = get_provider_by_type(
                self.config['resources'][dataset]['providers'], 'tile')
            p = load_plugin('provider', t)
        except (KeyError, ProviderTypeError):
            # no tile provider configured for this collection
            msg = 'Invalid collection tiles'
            return self.get_exception(
                400, headers, request.format, 'InvalidParameterValue', msg)
        except ProviderConnectionError:
            msg = 'connection error (check logs)'
            return self.get_exception(
                500, headers, request.format, 'NoApplicableCode', msg)
        except ProviderQueryError:
            msg = 'query error (check logs)'
            return self.get_exception(
                500, headers, request.format, 'NoApplicableCode', msg)

        tiles = {
            'title': dataset,
            'description': l10n.translate(
                self.config['resources'][dataset]['description'],
                request.locale),
            'links': [],
            'tileMatrixSetLinks': []
        }

        # self/alternate links for the three supported representations
        tiles['links'].append({
            'type': 'application/json',
            'rel': 'self' if request.format == 'json' else 'alternate',
            'title': 'This document as JSON',
            'href': '{}/collections/{}/tiles?f=json'.format(
                self.config['server']['url'], dataset)
        })
        tiles['links'].append({
            'type': 'application/ld+json',
            'rel': 'self' if request.format == 'jsonld' else 'alternate',
            'title': 'This document as RDF (JSON-LD)',
            'href': '{}/collections/{}/tiles?f=jsonld'.format(
                self.config['server']['url'], dataset)
        })
        tiles['links'].append({
            'type': 'text/html',
            'rel': 'self' if request.format == 'html' else 'alternate',
            'title': 'This document as HTML',
            'href': '{}/collections/{}/tiles?f=html'.format(
                self.config['server']['url'], dataset)
        })

        # URL template with tileMatrixSetId/tileMatrix/tileRow/tileCol
        # placeholders, expanded by the tile provider
        for service in p.get_tiles_service(
                baseurl=self.config['server']['url'],
                servicepath='/collections/{}/tiles/{{{}}}/{{{}}}/{{{}}}/{{{}}}?f=mvt'  # noqa
                .format(dataset, 'tileMatrixSetId',
                        'tileMatrix', 'tileRow', 'tileCol'))['links']:
            tiles['links'].append(service)

        tiles['tileMatrixSetLinks'] = p.get_tiling_schemes()

        metadata_format = p.options['metadata_format']

        if request.format == 'html':  # render
            tiles['id'] = dataset
            tiles['title'] = l10n.translate(
                self.config['resources'][dataset]['title'], request.locale)
            tiles['tilesets'] = [
                scheme['tileMatrixSet'] for scheme in p.get_tiling_schemes()]
            tiles['format'] = metadata_format
            tiles['bounds'] = \
                self.config['resources'][dataset]['extents']['spatial']['bbox']
            tiles['minzoom'] = p.options['zoom']['min']
            tiles['maxzoom'] = p.options['zoom']['max']

            content = render_j2_template(self.config,
                                         'collections/tiles/index.html', tiles,
                                         request.locale)

            return headers, 200, content

        return headers, 200, to_json(tiles, self.pretty_print)
@pre_process
@jsonldify
def get_collection_tiles_data(self, request: Union[APIRequest, Any],
dataset=None, matrix_id=None,
z_idx=None, y_idx=None, x_idx=None):
"""
Get collection items tiles
:param request: A request object
:param dataset: dataset name
:param matrix_id: matrix identifier
:param z_idx: z index
:param y_idx: y index
:param x_idx: x index
:returns: tuple of headers, status code, content
"""
format_ = request.format
if not format_:
return self.get_format_exception(request)
headers = request.get_response_headers()
LOGGER.debug('Processing tiles')
collections = filter_dict_by_key_value(self.config['resources'],
'type', 'collection')
if dataset not in collections.keys():
msg = 'Invalid collection'
return self.get_exception(
400, headers, request.format, 'InvalidParameterValue', msg)
LOGGER.debug('Loading tile provider')
try:
t = get_provider_by_type(
self.config['resources'][dataset]['providers'], 'tile')
p = load_plugin('provider', t)
format_ = p.format_type
headers['Content-Type'] = format_
LOGGER.debug('Fetching tileset id {} and tile {}/{}/{}'.format(
matrix_id, z_idx, y_idx, x_idx))
content = p.get_tiles(layer=p.get_layer(), tileset=matrix_id,
z=z_idx, y=y_idx, x=x_idx, format_=format_)
if content is None:
msg = 'identifier not found'
return self.get_exception(
404, headers, format_, 'NotFound', msg)
else:
return headers, 202, content
# @TODO: figure out if the spec requires to return json errors
except KeyError:
msg = 'Invalid collection tiles'
return self.get_exception(
400, headers, format_, 'InvalidParameterValue', msg)
except ProviderConnectionError as err:
LOGGER.error(err)
msg = 'connection error (check logs)'
return self.get_exception(
500, headers, format_, 'NoApplicableCode', msg)
except ProviderTilesetIdNotFoundError:
msg = 'Tileset id not found'
return self.get_exception(
404, headers, format_, 'NotFound', msg)
except ProviderTileQueryError as err:
LOGGER.error(err)
msg = 'Tile not found'
return self.get_exception(
500, headers, format_, 'NoApplicableCode', msg)
except ProviderTileNotFoundError as err:
LOGGER.error(err)
msg = 'tile not found (check logs)'
return self.get_exception(
404, headers, format_, 'NoMatch', msg)
except ProviderGenericError as err:
LOGGER.error(err)
msg = 'generic error (check logs)'
return self.get_exception(
500, headers, format_, 'NoApplicableCode', msg)
    @pre_process
    @jsonldify
    def get_collection_tiles_metadata(self, request: Union[APIRequest, Any],
                                      dataset=None, matrix_id=None):
        """
        Get collection items tiles

        Returns the metadata document for one tileset of a collection,
        either in the provider's configured metadata format or rendered
        as HTML.

        :param request: A request object
        :param dataset: dataset name
        :param matrix_id: matrix identifier

        :returns: tuple of headers, status code, content
        """
        if not request.is_valid():
            return self.get_format_exception(request)
        headers = request.get_response_headers()
        if any([dataset is None,
                dataset not in self.config['resources'].keys()]):
            msg = 'Invalid collection'
            return self.get_exception(
                400, headers, request.format, 'InvalidParameterValue', msg)

        LOGGER.debug('Creating collection tiles')
        LOGGER.debug('Loading provider')
        try:
            t = get_provider_by_type(
                self.config['resources'][dataset]['providers'], 'tile')
            p = load_plugin('provider', t)
        except KeyError:
            msg = 'Invalid collection tiles'
            return self.get_exception(
                400, headers, request.format, 'InvalidParameterValue', msg)
        except ProviderConnectionError:
            msg = 'connection error (check logs)'
            return self.get_exception(
                500, headers, request.format, 'InvalidParameterValue', msg)
        except ProviderQueryError:
            msg = 'query error (check logs)'
            return self.get_exception(
                500, headers, request.format, 'InvalidParameterValue', msg)

        # Get provider language (if any)
        prv_locale = l10n.get_plugin_locale(t, request.raw_locale)

        # the requested tileset must be one of the provider's schemes
        if matrix_id not in p.options['schemes']:
            msg = 'tileset not found'
            return self.get_exception(404, headers, request.format,
                                      'NotFound', msg)

        metadata_format = p.options['metadata_format']
        tilejson = True if (metadata_format == 'tilejson') else False

        tiles_metadata = p.get_metadata(
            dataset=dataset, server_url=self.config['server']['url'],
            layer=p.get_layer(), tileset=matrix_id, tilejson=tilejson,
            language=prv_locale)

        if request.format == 'html':  # render
            metadata = dict(metadata=tiles_metadata)
            metadata['id'] = dataset
            metadata['title'] = l10n.translate(
                self.config['resources'][dataset]['title'], request.locale)
            metadata['tileset'] = matrix_id
            metadata['format'] = metadata_format

            content = render_j2_template(self.config,
                                         'collections/tiles/metadata.html',
                                         metadata, request.locale)

            return headers, 200, content

        return headers, 200, to_json(tiles_metadata, self.pretty_print)
    @pre_process
    @jsonldify
    def describe_processes(self, request: Union[APIRequest, Any], process=None):  # noqa
        """
        Provide processes metadata

        Serializes the metadata of one process (when ``process`` is given)
        or of all configured processes, augmenting each with job control
        options, output transmission modes and job-list links.

        :param request: A request object
        :param process: process identifier, defaults to None to obtain
                        information about all processes

        :returns: tuple of headers, status code, content
        """
        processes = []

        if not request.is_valid():
            return self.get_format_exception(request)
        headers = request.get_response_headers()

        processes_config = filter_dict_by_key_value(self.config['resources'],
                                                    'type', 'process')

        if process is not None:
            if process not in processes_config.keys() or not processes_config:
                msg = 'Identifier not found'
                return self.get_exception(
                    404, headers, request.format, 'NoSuchProcess', msg)

        if processes_config:
            if process is not None:
                relevant_processes = [(process, processes_config[process])]
            else:
                relevant_processes = processes_config.items()
            for key, value in relevant_processes:
                p = load_plugin('process',
                                processes_config[key]['processor'])

                # work on a copy so the plugin's own metadata stays pristine
                p2 = deepcopy(p.metadata)
                p2['jobControlOptions'] = ['sync-execute']
                # async execution is only advertised when the job manager
                # supports it
                if self.manager.is_async:
                    p2['jobControlOptions'].append('async-execute')

                p2['outputTransmission'] = ['value']
                p2['links'] = l10n.translate(p2.get('links', []),
                                             request.locale)
                jobs_url = '{}/processes/{}/jobs'.format(
                    self.config['server']['url'], key)

                # TODO translation support
                link = {
                    'type': 'text/html',
                    'rel': 'collection',
                    'href': '{}?f=html'.format(jobs_url),
                    'title': 'jobs for this process as HTML',
                    'hreflang': self.default_locale
                }
                p2['links'].append(link)
                link = {
                    'type': 'application/json',
                    'rel': 'collection',
                    'href': '{}?f=json'.format(jobs_url),
                    'title': 'jobs for this process as JSON',
                    'hreflang': self.default_locale
                }
                p2['links'].append(link)

                processes.append(p2)

        # single process: return the bare description, not a wrapper object
        if process is not None:
            response = processes[0]
        else:
            response = {
                'processes': processes
            }

        if request.format == 'html':  # render
            if process is not None:
                response = render_j2_template(self.config,
                                              'processes/process.html',
                                              response, request.locale)
            else:
                response = render_j2_template(self.config,
                                              'processes/index.html', response,
                                              request.locale)

            return headers, 200, response

        return headers, 200, to_json(response, self.pretty_print)
@pre_process
def get_process_jobs(self, request: Union[APIRequest, Any], process_id, job_id=None): # noqa
"""
Get process jobs
:param request: A request object
:param process_id: id of process
:param job_id: id of job
:returns: tuple of headers, status code, content
"""
if not request.is_valid():
return self.get_format_exception(request)
headers = request.get_response_headers()
processes = filter_dict_by_key_value(
self.config['resources'], 'type', 'process')
if process_id not in processes:
msg = 'identifier not found'
return self.get_exception(
404, headers, request.format, 'NoSuchProcess', msg)
p = load_plugin('process', processes[process_id]['processor'])
if self.manager:
if job_id is None:
jobs = sorted(self.manager.get_jobs(process_id),
key=lambda k: k['job_start_datetime'],
reverse=True)
else:
jobs = [self.manager.get_job(process_id, job_id)]
else:
LOGGER.debug('Process management not configured')
jobs = []
serialized_jobs = []
for job_ in jobs:
job2 = {
'jobID': job_['identifier'],
'status': job_['status'],
'message': job_['message'],
'progress': job_['progress'],
'parameters': job_.get('parameters'),
'job_start_datetime': job_['job_start_datetime'],
'job_end_datetime': job_['job_end_datetime']
}
if JobStatus[job_['status']] in [
JobStatus.successful, JobStatus.running, JobStatus.accepted]:
job_result_url = '{}/processes/{}/jobs/{}/results'.format(
self.config['server']['url'],
process_id, job_['identifier'])
job2['links'] = [{
'href': '{}?f=html'.format(job_result_url),
'rel': 'about',
'type': 'text/html',
'title': 'results of job {} as HTML'.format(job_id)
}, {
'href': '{}?f=json'.format(job_result_url),
'rel': 'about',
'type': 'application/json',
'title': 'results of job {} as JSON'.format(job_id)
}]
if job_['mimetype'] not in ['application/json', 'text/html']:
job2['links'].append({
'href': job_result_url,
'rel': 'about',
'type': job_['mimetype'],
'title': 'results of job {} as {}'.format(
job_id, job_['mimetype'])
})
serialized_jobs.append(job2)
if job_id is None:
j2_template = 'processes/jobs/index.html'
else:
serialized_jobs = serialized_jobs[0]
j2_template = 'processes/jobs/job.html'
if request.format == 'html':
data = {
'process': {
'id': process_id,
'title': l10n.translate(p.metadata['title'],
request.locale)
},
'jobs': serialized_jobs,
'now': datetime.now(timezone.utc).strftime(DATETIME_FORMAT)
}
response = render_j2_template(self.config, j2_template, data,
request.locale)
return headers, 200, response
return headers, 200, to_json(serialized_jobs, self.pretty_print)
    @pre_process
    def execute_process(self, request: Union[APIRequest, Any], process_id):
        """
        Execute process

        Parses the JSON execution request body, collects inputs (repeated
        input ids are folded into lists), and dispatches execution to the
        job manager in sync or async mode.

        :param request: A request object
        :param process_id: id of process

        :returns: tuple of headers, status code, content
        """
        if not request.is_valid():
            return self.get_format_exception(request)
        headers = request.get_response_headers()

        processes_config = filter_dict_by_key_value(
            self.config['resources'], 'type', 'process'
        )
        if process_id not in processes_config:
            msg = 'identifier not found'
            return self.get_exception(
                404, headers, request.format, 'NoSuchProcess', msg)
        if not self.manager:
            msg = 'Process manager is undefined'
            return self.get_exception(
                500, headers, request.format, 'NoApplicableCode', msg)

        process = load_plugin('process',
                              processes_config[process_id]['processor'])
        if not request.data:
            # TODO not all processes require input, e.g. time-dependent or
            # random value generators
            msg = 'missing request data'
            return self.get_exception(
                400, headers, request.format, 'MissingParameterValue', msg)

        data = request.data
        try:
            # Parse bytes data, if applicable
            data = data.decode()
            LOGGER.debug(data)
        except (UnicodeDecodeError, AttributeError):
            # already a str (AttributeError) or non-UTF-8 bytes; let the
            # JSON parser below report any real problem
            pass

        try:
            data = json.loads(data)
        except (json.decoder.JSONDecodeError, TypeError) as err:
            # Input does not appear to be valid JSON
            LOGGER.error(err)
            msg = 'invalid request data'
            return self.get_exception(
                400, headers, request.format, 'InvalidParameterValue', msg)

        try:
            # fold inputs into a dict; a repeated id accumulates its values
            # into a list
            data_dict = {}
            for input in data.get('inputs', []):
                id = input['id']
                value = input['value']
                if id not in data_dict:
                    data_dict[id] = value
                elif id in data_dict and isinstance(data_dict[id], list):
                    data_dict[id].append(value)
                else:
                    data_dict[id] = [data_dict[id], value]
        except KeyError:
            # Return 4XX client error for missing 'id' or 'value' in an input
            msg = 'invalid request data'
            return self.get_exception(
                400, headers, request.format, 'InvalidParameterValue', msg)
        else:
            LOGGER.debug(data_dict)

        job_id = str(uuid.uuid1())
        # advertise the job resource location regardless of execution mode
        url = '{}/processes/{}/jobs/{}'.format(
            self.config['server']['url'], process_id, job_id)
        headers['Location'] = url

        outputs = status = None
        is_async = data.get('mode', 'auto') == 'async'
        if is_async:
            LOGGER.debug('Asynchronous request mode detected')
        try:
            LOGGER.debug('Executing process')
            mime_type, outputs, status = self.manager.execute_process(
                process, job_id, data_dict, is_async)
        except ProcessorExecuteError as err:
            LOGGER.error(err)
            msg = 'Processing error'
            return self.get_exception(
                500, headers, request.format, 'NoApplicableCode', msg)

        response = {}
        if status == JobStatus.failed:
            response = outputs

        if data.get('response', 'document') == 'raw':
            # raw mode: return outputs directly under the provider mimetype
            headers['Content-Type'] = mime_type
            if 'json' in mime_type:
                # NOTE(review): to_json is called here without
                # self.pretty_print, unlike the final return — confirm
                # whether this is intentional
                response = to_json(outputs)
            else:
                response = outputs
        elif status != JobStatus.failed and not is_async:
            response['outputs'] = outputs

        # 201 Created for async jobs (resource at the Location header)
        if is_async:
            http_status = 201
        else:
            http_status = 200

        return headers, http_status, to_json(response, self.pretty_print)
    @pre_process
    def get_process_job_result(self, request: Union[APIRequest, Any], process_id, job_id):  # noqa
        """
        Get result of job (instance of a process)

        Returns the stored output of a finished job; jobs that are still
        running/accepted yield 404 ResultNotReady, failed jobs yield 400.

        :param request: A request object
        :param process_id: name of process
        :param job_id: ID of job

        :returns: tuple of headers, status code, content
        """
        if not request.is_valid():
            return self.get_format_exception(request)
        headers = request.get_response_headers()
        processes_config = filter_dict_by_key_value(self.config['resources'],
                                                    'type', 'process')

        if process_id not in processes_config:
            msg = 'identifier not found'
            return self.get_exception(
                404, headers, request.format, 'NoSuchProcess', msg)

        process = load_plugin('process',
                              processes_config[process_id]['processor'])
        if not process:
            msg = 'identifier not found'
            return self.get_exception(
                404, headers, request.format, 'NoSuchProcess', msg)

        job = self.manager.get_job(process_id, job_id)
        if not job:
            msg = 'job not found'
            return self.get_exception(404, headers, request.format,
                                      'NoSuchJob', msg)

        status = JobStatus[job['status']]

        if status == JobStatus.running:
            msg = 'job still running'
            return self.get_exception(
                404, headers, request.format, 'ResultNotReady', msg)
        elif status == JobStatus.accepted:
            # NOTE: this case is not mentioned in the specification
            msg = 'job accepted but not yet running'
            return self.get_exception(
                404, headers, request.format, 'ResultNotReady', msg)
        elif status == JobStatus.failed:
            msg = 'job failed'
            return self.get_exception(
                400, headers, request.format, 'InvalidParameterValue', msg)

        mimetype, job_output = self.manager.get_job_result(process_id, job_id)

        if mimetype not in [None, 'application/json']:
            # non-JSON output: pass through under the stored mimetype
            headers['Content-Type'] = mimetype
            content = job_output
        else:
            if request.format == 'json':
                content = json.dumps(job_output, sort_keys=True, indent=4,
                                     default=json_serial)
            else:
                data = {
                    'process': {
                        'id': process_id, 'title': process.metadata['title']
                    },
                    'job': {'id': job_id},
                    'result': job_output
                }
                content = render_j2_template(self.config,
                                             'processes/jobs/results/index.html',  # noqa
                                             data, request.locale)

        return headers, 200, content
def delete_process_job(self, process_id, job_id):
"""
:param process_id: process identifier
:param job_id: job identifier
:returns: tuple of headers, status code, content
"""
success = self.manager.delete_job(process_id, job_id)
if not success:
http_status = 404
response = {
'code': 'NoSuchJob',
'description': 'Job identifier not found'
}
else:
http_status = 200
jobs_url = '{}/processes/{}/jobs'.format(
self.config['server']['url'], process_id)
response = {
'jobID': job_id,
'status': JobStatus.dismissed.value,
'message': 'Job dismissed',
'progress': 100,
'links': [{
'href': jobs_url,
'rel': 'up',
'type': 'application/json',
'title': 'The job list for the current process'
}]
}
LOGGER.info(response)
return {}, http_status, response
    @pre_process
    def get_collection_edr_query(self, request: Union[APIRequest, Any],
                                 dataset, instance, query_type):
        """
        Queries collection EDR

        Validates datetime, parameter-name, coords (WKT) and z parameters,
        then dispatches the query to the collection's EDR provider.

        :param request: APIRequest instance with query params
        :param dataset: dataset name
        :param instance: instance name
        :param query_type: EDR query type

        :returns: tuple of headers, status code, content
        """
        if not request.is_valid(PLUGINS['formatter'].keys()):
            return self.get_format_exception(request)
        headers = request.get_response_headers()
        collections = filter_dict_by_key_value(self.config['resources'],
                                               'type', 'collection')

        if dataset not in collections.keys():
            msg = 'Invalid collection'
            return self.get_exception(
                400, headers, request.format, 'InvalidParameterValue', msg)

        LOGGER.debug('Processing query parameters')

        LOGGER.debug('Processing datetime parameter')
        datetime_ = request.params.get('datetime')
        try:
            datetime_ = validate_datetime(collections[dataset]['extents'],
                                          datetime_)
        except ValueError as err:
            msg = str(err)
            return self.get_exception(
                400, headers, request.format, 'InvalidParameterValue', msg)

        LOGGER.debug('Processing parameter-name parameter')
        parameternames = request.params.get('parameter-name') or []
        if isinstance(parameternames, str):
            parameternames = parameternames.split(',')

        LOGGER.debug('Processing coords parameter')
        # coords is mandatory, expressed as WKT
        wkt = request.params.get('coords', None)

        if not wkt:
            msg = 'missing coords parameter'
            return self.get_exception(
                400, headers, request.format, 'InvalidParameterValue', msg)
        try:
            wkt = shapely_loads(wkt)
        except WKTReadingError:
            msg = 'invalid coords parameter'
            return self.get_exception(
                400, headers, request.format, 'InvalidParameterValue', msg)

        LOGGER.debug('Processing z parameter')
        z = request.params.get('z')

        LOGGER.debug('Loading provider')
        try:
            p = load_plugin('provider', get_provider_by_type(
                collections[dataset]['providers'], 'edr'))
        except ProviderTypeError:
            msg = 'invalid provider type'
            return self.get_exception(
                500, headers, request.format, 'NoApplicableCode', msg)
        except ProviderConnectionError:
            msg = 'connection error (check logs)'
            return self.get_exception(
                500, headers, request.format, 'NoApplicableCode', msg)
        except ProviderQueryError:
            msg = 'query error (check logs)'
            return self.get_exception(
                500, headers, request.format, 'NoApplicableCode', msg)

        if instance is not None and not p.get_instance(instance):
            msg = 'Invalid instance identifier'
            return self.get_exception(
                400, headers, request.format, 'InvalidParameterValue', msg)

        if query_type not in p.get_query_types():
            msg = 'Unsupported query type'
            return self.get_exception(
                400, headers, request.format, 'InvalidParameterValue', msg)

        # at least one requested parameter must exist on the provider
        if parameternames and not any((fld['id'] in parameternames)
                                      for fld in p.get_fields()['field']):
            msg = 'Invalid parameter-name'
            return self.get_exception(
                400, headers, request.format, 'InvalidParameterValue', msg)

        query_args = dict(
            query_type=query_type,
            instance=instance,
            format_=request.format,
            datetime_=datetime_,
            select_properties=parameternames,
            wkt=wkt,
            z=z
        )

        try:
            data = p.query(**query_args)
        except ProviderNoDataError:
            msg = 'No data found'
            return self.get_exception(
                204, headers, request.format, 'NoMatch', msg)
        except ProviderQueryError:
            msg = 'query error (check logs)'
            return self.get_exception(
                500, headers, request.format, 'NoApplicableCode', msg)

        if request.format == 'html':  # render
            content = render_j2_template(self.config,
                                         'collections/edr/query.html', data,
                                         request.locale)
        else:
            content = to_json(data, self.pretty_print)

        return headers, 200, content
@pre_process
@jsonldify
def get_stac_root(self, request: Union[APIRequest, Any]):
if not request.is_valid():
return self.get_format_exception(request)
headers = request.get_response_headers()
id_ = 'pygeoapi-stac'
stac_version = '0.6.2'
stac_url = os.path.join(self.config['server']['url'], 'stac')
content = {
'id': id_,
'stac_version': stac_version,
'title': l10n.translate(
self.config['metadata']['identification']['title'],
request.locale),
'description': l10n.translate(
self.config['metadata']['identification']['description'],
request.locale),
'license': l10n.translate(
self.config['metadata']['license']['name'], request.locale),
'providers': [{
'name': l10n.translate(
self.config['metadata']['provider']['name'],
request.locale),
'url': l10n.translate(
self.config['metadata']['provider']['url'], request.locale)
}],
'links': []
}
stac_collections = filter_dict_by_key_value(self.config['resources'],
'type', 'stac-collection')
for key, value in stac_collections.items():
content['links'].append({
'rel': 'collection',
'href': '{}/{}?f=json'.format(stac_url, key),
'type': 'application/json'
})
content['links'].append({
'rel': 'collection',
'href': '{}/{}'.format(stac_url, key),
'type': 'text/html'
})
if request.format == 'html': # render
content = render_j2_template(self.config, 'stac/collection.html',
content, request.locale)
return headers, 200, content
return headers, 200, to_json(content, self.pretty_print)
    @pre_process
    @jsonldify
    def get_stac_path(self, request: Union[APIRequest, Any], path):
        """
        Resolve a STAC catalog/item path within a STAC collection.

        The first path segment selects the stac-collection resource; the
        provider resolves the rest. A dict result is served as a STAC
        catalog/item document (JSON or HTML); any other result is sent
        back as a raw file.

        :param request: A request object
        :param path: STAC path below the collection root

        :returns: tuple of headers, status code, content
        """
        if not request.is_valid():
            return self.get_format_exception(request)
        headers = request.get_response_headers()

        dataset = None
        LOGGER.debug('Path: {}'.format(path))
        # first path token identifies the stac-collection
        dir_tokens = path.split('/')
        if dir_tokens:
            dataset = dir_tokens[0]

        stac_collections = filter_dict_by_key_value(self.config['resources'],
                                                    'type', 'stac-collection')

        if dataset not in stac_collections:
            msg = 'collection not found'
            return self.get_exception(404, headers, request.format,
                                      'NotFound', msg)

        LOGGER.debug('Loading provider')
        try:
            p = load_plugin('provider', get_provider_by_type(
                stac_collections[dataset]['providers'], 'stac'))
        except ProviderConnectionError as err:
            LOGGER.error(err)
            msg = 'connection error (check logs)'
            return self.get_exception(
                500, headers, request.format, 'NoApplicableCode', msg)

        id_ = '{}-stac'.format(dataset)
        stac_version = '0.6.2'
        description = stac_collections[dataset]['description']

        content = {
            'id': id_,
            'stac_version': stac_version,
            'description': l10n.translate(description, request.locale),
            'extent': stac_collections[dataset]['extents'],
            'links': []
        }
        try:
            # the dataset prefix is stripped so the provider sees a
            # collection-relative path
            stac_data = p.get_data_path(
                os.path.join(self.config['server']['url'], 'stac'),
                path,
                path.replace(dataset, '', 1)
            )
        except ProviderNotFoundError as err:
            LOGGER.error(err)
            msg = 'resource not found'
            return self.get_exception(404, headers, request.format,
                                      'NotFound', msg)
        except Exception as err:
            LOGGER.error(err)
            msg = 'data query error'
            return self.get_exception(
                500, headers, request.format, 'NoApplicableCode', msg)

        if isinstance(stac_data, dict):
            # catalog or item document
            content.update(stac_data)
            content['links'].extend(
                l10n.translate(stac_collections[dataset]['links'],
                               request.locale))

            if request.format == 'html':  # render
                content['path'] = path
                # 'assets' distinguishes an item from a catalog
                if 'assets' in content:  # item view
                    content = render_j2_template(self.config,
                                                 'stac/item.html',
                                                 content, request.locale)
                else:
                    content = render_j2_template(self.config,
                                                 'stac/catalog.html',
                                                 content, request.locale)

                return headers, 200, content

            return headers, 200, to_json(content, self.pretty_print)
        else:  # send back file
            # let the server infer the Content-Type from the file itself
            headers.pop('Content-Type', None)
            return headers, 200, stac_data
def get_exception(self, status, headers, format_, code, description):
"""
Exception handler
:param status: HTTP status code
:param headers: dict of HTTP response headers
:param format_: format string
:param code: OGC API exception code
:param description: OGC API exception code
:returns: tuple of headers, status, and message
"""
LOGGER.error(description)
exception = {
'code': code,
'description': description
}
if format_ == 'html':
headers['Content-Type'] = 'text/html'
content = render_j2_template(
self.config, 'exception.html', exception, self.default_locale)
else:
content = to_json(exception, self.pretty_print)
return headers, status, content
def get_format_exception(self, request):
""" Returns a format exception.
:param request: An APIRequest instance.
:returns: tuple of (headers, status, message)
"""
headers = request.get_response_headers()
msg = f'Invalid format: {request.format}'
return self.get_exception(
400, headers, 'json', 'InvalidParameterValue', msg)
def validate_bbox(value=None):
    """
    Validate and parse a bbox query parameter.

    :param value: comma-separated `str` of minx, miny, maxx, maxy
                  (or ``None`` for "no bbox supplied")

    :returns: bbox as `list` of `float` values (empty list when no value)
    :raises ValueError: on wrong arity, non-numeric values, or min > max
    """

    if value is None:
        LOGGER.debug('bbox is empty')
        return []

    parts = value.split(',')
    if len(parts) != 4:
        msg = 'bbox should be 4 values (minx,miny,maxx,maxy)'
        LOGGER.debug(msg)
        raise ValueError(msg)

    try:
        coords = [float(c) for c in parts]
    except ValueError as err:
        msg = 'bbox values must be numbers'
        err.args = (msg,)
        LOGGER.debug(msg)
        raise

    minx, miny, maxx, maxy = coords
    if minx > maxx or miny > maxy:
        msg = 'min values should be less than max values'
        LOGGER.debug(msg)
        raise ValueError(msg)

    return coords
def validate_datetime(resource_def, datetime_=None):
    """
    Helper function to validate temporal parameter

    Compares the requested instant or interval against the configured
    temporal extent of the resource; values outside the extent raise.

    :param resource_def: `dict` of configuration resource definition
        (only its optional ``'temporal'`` entry, with ``'begin'``/``'end'``
        datetimes, is consulted)
    :param datetime_: `str` of datetime parameter; either a single instant
        or an interval ``begin/end`` where either side may be ``..`` (open)

    :returns: `str` of datetime input, if valid (normalized: bare ``/``
        edges are rewritten to ``..``)
    :raises ValueError: if the configured extent is not datetime-like, or
        the requested time falls outside the configured extent
    """

    # TODO: pass datetime to query as a `datetime` object
    # we would need to ensure partial dates work accordingly
    # as well as setting '..' values to `None` so that underlying
    # providers can just assume a `datetime.datetime` object
    #
    # NOTE: needs testing when passing partials from API to backend

    datetime_invalid = False

    # Only validate when a datetime was supplied AND the resource declares
    # a temporal extent; otherwise the input passes through unchanged.
    if (datetime_ is not None and 'temporal' in resource_def):

        # partial parsers: missing date components default to the extreme
        # ends of the datetime range for interval edges, and to the Unix
        # epoch for a single instant
        dateparse_begin = partial(dateparse, default=datetime.min)
        dateparse_end = partial(dateparse, default=datetime.max)
        unix_epoch = datetime(1970, 1, 1, 0, 0, 0)
        dateparse_ = partial(dateparse, default=unix_epoch)

        te = resource_def['temporal']
        try:
            # coerce naive configured bounds to UTC so comparisons below
            # never mix naive and aware datetimes
            if te['begin'] is not None and te['begin'].tzinfo is None:
                te['begin'] = te['begin'].replace(tzinfo=pytz.UTC)
            if te['end'] is not None and te['end'].tzinfo is None:
                te['end'] = te['end'].replace(tzinfo=pytz.UTC)
        except AttributeError:
            # config values lacked .tzinfo/.replace, i.e. not datetimes
            msg = 'Configured times should be RFC3339'
            LOGGER.error(msg)
            raise ValueError(msg)

        if '/' in datetime_:  # envelope (interval)
            LOGGER.debug('detected time range')
            LOGGER.debug('Validating time windows')

            # normalize "" to ".." (actually changes datetime_)
            datetime_ = re.sub(r'^/', '../', datetime_)
            datetime_ = re.sub(r'/$', '/..', datetime_)

            datetime_begin, datetime_end = datetime_.split('/')
            if datetime_begin != '..':
                datetime_begin = dateparse_begin(datetime_begin)
                if datetime_begin.tzinfo is None:
                    datetime_begin = datetime_begin.replace(
                        tzinfo=pytz.UTC)

            if datetime_end != '..':
                datetime_end = dateparse_end(datetime_end)
                if datetime_end.tzinfo is None:
                    datetime_end = datetime_end.replace(tzinfo=pytz.UTC)

            # open-ended ('..') sides are never out of range; otherwise the
            # requested interval must lie within the configured extent
            datetime_invalid = any([
                (te['begin'] is not None and datetime_begin != '..' and
                    datetime_begin < te['begin']),
                (te['end'] is not None and datetime_end != '..' and
                    datetime_end > te['end'])
            ])

        else:  # time instant
            LOGGER.debug('detected time instant')
            datetime__ = dateparse_(datetime_)
            # NOTE(review): dateparse_ returns a datetime, so the '..'
            # comparisons below look vestigial -- preserved as-is
            if datetime__ != '..':
                if datetime__.tzinfo is None:
                    datetime__ = datetime__.replace(tzinfo=pytz.UTC)
            datetime_invalid = any([
                (te['begin'] is not None and datetime__ != '..' and
                    datetime__ < te['begin']),
                (te['end'] is not None and datetime__ != '..' and
                    datetime__ > te['end'])
            ])

    if datetime_invalid:
        msg = 'datetime parameter out of range'
        LOGGER.debug(msg)
        raise ValueError(msg)

    return datetime_
| 38.929699 | 100 | 0.533445 |
79588cb94e7fb4cd6d2ff9484f21fad9e6447f59 | 1,869 | py | Python | tests/test_event.py | TierMobility/dj-stripe | 454a10746197af108ec9f12bbb841b6b28116235 | [
"MIT"
] | null | null | null | tests/test_event.py | TierMobility/dj-stripe | 454a10746197af108ec9f12bbb841b6b28116235 | [
"MIT"
] | null | null | null | tests/test_event.py | TierMobility/dj-stripe | 454a10746197af108ec9f12bbb841b6b28116235 | [
"MIT"
] | null | null | null | """
dj-stripe Event Model Tests.
"""
from copy import deepcopy
from unittest.mock import patch
from django.contrib.auth import get_user_model
from django.test import TestCase
from stripe.error import StripeError
from djstripe import webhooks
from djstripe.models import Event
from . import FAKE_CUSTOMER, FAKE_EVENT_TRANSFER_CREATED
class EventTest(TestCase):
    """Tests for the dj-stripe Event model and its webhook dispatch."""

    def setUp(self):
        self.user = get_user_model().objects.create_user(
            username="pydanny", email="pydanny@gmail.com"
        )
        self.customer = FAKE_CUSTOMER.create_for_user(self.user)

        # patch the webhook dispatcher for the duration of each test
        handlers_patcher = patch.object(webhooks, "call_handlers")
        self.addCleanup(handlers_patcher.stop)
        self.call_handlers = handlers_patcher.start()

    def test_str(self):
        evt = self._create_event(FAKE_EVENT_TRANSFER_CREATED)
        expected = "<type={type}, id={id}>".format(
            type=FAKE_EVENT_TRANSFER_CREATED["type"],
            id=FAKE_EVENT_TRANSFER_CREATED["id"],
        )
        self.assertEqual(expected, str(evt))

    def test_invoke_webhook_handlers_event_with_log_stripe_error(self):
        evt = self._create_event(FAKE_EVENT_TRANSFER_CREATED)
        self.call_handlers.side_effect = StripeError("Boom!")
        with self.assertRaises(StripeError):
            evt.invoke_webhook_handlers()

    def test_invoke_webhook_handlers_event_with_raise_stripe_error(self):
        evt = self._create_event(FAKE_EVENT_TRANSFER_CREATED)
        self.call_handlers.side_effect = StripeError("Boom!")
        with self.assertRaises(StripeError):
            evt.invoke_webhook_handlers()

    def test_invoke_webhook_handlers_event_when_invalid(self):
        evt = self._create_event(FAKE_EVENT_TRANSFER_CREATED)
        evt.valid = False
        evt.invoke_webhook_handlers()

    #
    # Helpers
    #

    @patch("stripe.Event.retrieve")
    def _create_event(self, event_data, event_retrieve_mock):
        # work on a copy so the shared fixture dict stays pristine
        payload = deepcopy(event_data)
        event_retrieve_mock.return_value = payload
        return Event.sync_from_stripe_data(payload)
| 28.753846 | 82 | 0.789727 |
79588dad90065d9b77baf4d892f0b899d77258f1 | 490 | py | Python | dds_web/development/cache_temp.py | pericsson/dds_web | 60e4436b94e1999e09a1f2b81520384b204dc8df | [
"BSD-3-Clause"
] | null | null | null | dds_web/development/cache_temp.py | pericsson/dds_web | 60e4436b94e1999e09a1f2b81520384b204dc8df | [
"BSD-3-Clause"
] | null | null | null | dds_web/development/cache_temp.py | pericsson/dds_web | 60e4436b94e1999e09a1f2b81520384b204dc8df | [
"BSD-3-Clause"
] | null | null | null | """ Temparoy methods for cacheing """
import os
import json
from flask import current_app
# Filename template for per-user/session credential cache files:
# "<LOCAL_TEMP_CACHE>/<username>_<session-id>_cache.json"
tmp_ucache_path = os.path.join(current_app.config.get("LOCAL_TEMP_CACHE"), "{}_{}_cache.json")
def store_temp_ucache(tu, tp, usid):
    """Write username/password for user *tu* and session *usid* to the temp cache."""
    payload = {"username": tu, "password": tp}
    with open(tmp_ucache_path.format(tu, usid), "w") as cache_file:
        json.dump(payload, cache_file)
def clear_temp_ucache(tu, usid):
    """Remove the cached credentials file for user *tu* / session *usid*, if present.

    Uses try/remove instead of exists()+remove() to avoid the race where the
    file disappears between the check and the removal (TOCTOU).
    """
    try:
        os.remove(tmp_ucache_path.format(tu, usid))
    except FileNotFoundError:
        # Absence is the desired end state; nothing to do.
        pass
| 27.222222 | 94 | 0.706122 |
79588e13abfb1cc9e7c21ee0bead598f3edef553 | 3,481 | py | Python | bigmler/tsevaluation.py | mmerce/bigmler | e411bb292a3c8db4cac6754b2b744ffe27fdb47a | [
"Apache-2.0"
] | null | null | null | bigmler/tsevaluation.py | mmerce/bigmler | e411bb292a3c8db4cac6754b2b744ffe27fdb47a | [
"Apache-2.0"
] | null | null | null | bigmler/tsevaluation.py | mmerce/bigmler | e411bb292a3c8db4cac6754b2b744ffe27fdb47a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2017-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Evaluation related functions for BigMLer
"""
from __future__ import absolute_import
import os
import json
import numbers
import math
import bigmler.utils as u
import bigmler.resources as r
import bigmler.checkpoint as c
from bigml.util import slugify
def evaluate(time_series_set, datasets, api, args, resume,
             session_file=None, path=None, log=None,
             fields=None, dataset_fields=None,
             objective_field=None):
    """Evaluates a list of time-series with the given dataset

    :param time_series_set: time-series models to evaluate
    :param datasets: datasets to evaluate against
    :param api: BigML API connection object
    :param args: parsed command-line arguments (uses .predictions,
        .verbosity, .shared)
    :param resume: whether to resume a previously interrupted run
    :returns: the (possibly updated) resume flag
    """
    output = args.predictions
    evaluations, resume = evaluations_process(
        time_series_set, datasets, fields,
        dataset_fields, api, args, resume,
        session_file=session_file, path=path, log=log,
        objective_field=objective_field)
    # iterate directly (was an index-based range(len(...)) loop); also
    # dropped the unused `evaluation_files` and `file_name` locals
    for evaluation in evaluations:
        evaluation = r.get_evaluation(evaluation, api, args.verbosity,
                                      session_file)
        if r.shared_changed(args.shared, evaluation):
            evaluation_args = {"shared": args.shared}
            evaluation = r.update_evaluation(evaluation, evaluation_args,
                                             args, api=api, path=path,
                                             session_file=session_file)
        # NOTE(review): every evaluation is saved to the same `output`
        # path, so only the last one persists -- behavior preserved as-is
        r.save_evaluation(evaluation, output, api)
    return resume
def evaluations_process(time_series_set, datasets,
                        fields, dataset_fields, api, args, resume,
                        session_file=None, path=None, log=None,
                        objective_field=None):
    """Evaluates time-series against datasets

    Creates (or, when resuming, recovers) one evaluation per time-series.

    :returns: tuple of (evaluations, resume) where ``resume`` is False once
        any missing evaluations have been (re)created
    """
    existing_evaluations = 0
    evaluations = []
    number_of_evaluations = len(time_series_set)
    if resume:
        # checkpoint returns (still_resuming, already_created_evaluations);
        # resume flips to False when fewer evaluations exist than expected
        resume, evaluations = c.checkpoint(c.are_evaluations_created, path,
                                           number_of_evaluations,
                                           debug=args.debug)
        if not resume:
            existing_evaluations = len(evaluations)
            message = u.dated("Found %s evaluations from %s. Resuming.\n" %
                              (existing_evaluations,
                               number_of_evaluations))
            # only the remaining evaluations need to be created
            number_of_evaluations -= existing_evaluations
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
    if not resume:
        # create the evaluations that the checkpoint did not account for
        evaluation_args = r.set_evaluation_args(args, fields,
                                                dataset_fields)
        evaluations.extend(r.create_evaluations(
            time_series_set, datasets, evaluation_args,
            args, api, path=path, session_file=session_file,
            log=log, existing_evaluations=existing_evaluations))
    return evaluations, resume
795892047f7fee8bceeeabd0090f9c5703e68d12 | 297 | py | Python | utils/timeit.py | orrinjelo/AdventOfCode2018 | 7455737bebfb56d7912c8f8760a55ea0a5b240a3 | [
"MIT"
] | 1 | 2020-12-14T21:05:28.000Z | 2020-12-14T21:05:28.000Z | utils/timeit.py | orrinjelo/AdventOfCode2018 | 7455737bebfb56d7912c8f8760a55ea0a5b240a3 | [
"MIT"
] | null | null | null | utils/timeit.py | orrinjelo/AdventOfCode2018 | 7455737bebfb56d7912c8f8760a55ea0a5b240a3 | [
"MIT"
] | null | null | null | import time
def timeit(method):
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
print(f'{method.__name__} {(te-ts)*1000:.3f} msec')
return result
return timed | 22.846154 | 59 | 0.451178 |
795892cacaf38933bbb01032934f7569637e1617 | 7,622 | py | Python | tests/notebooks/imodels_demo.py | jiayouwyhit/imodels | 2f3e3cd55b6ac1e6d0f21420bf9c4c29e4d39b06 | [
"MIT"
] | 1 | 2021-05-22T13:04:36.000Z | 2021-05-22T13:04:36.000Z | tests/notebooks/imodels_demo.py | jiayouwyhit/imodels | 2f3e3cd55b6ac1e6d0f21420bf9c4c29e4d39b06 | [
"MIT"
] | null | null | null | tests/notebooks/imodels_demo.py | jiayouwyhit/imodels | 2f3e3cd55b6ac1e6d0f21420bf9c4c29e4d39b06 | [
"MIT"
] | null | null | null | # ---
# jupyter:
# jupytext:
# formats: ipynb,../tests/notebooks//py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.10.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% pycharm={"is_executing": false}
# %load_ext autoreload
# %autoreload 2
import sys
import os
import matplotlib.pyplot as plt
import numpy as np
np.random.seed(13)
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor, plot_tree, DecisionTreeClassifier
from sklearn import metrics
from scipy.io.arff import loadarff
# installable with: `pip install imodels`
from imodels import SLIMRegressor, BayesianRuleListClassifier, RuleFitRegressor, GreedyRuleListClassifier
from imodels import OneRClassifier, BoostedRulesClassifier
from imodels.util.convert import tree_to_code
# change working directory to project root so relative data paths
# (e.g. tests/test_data/diabetes.arff) resolve when launched from a subdir
if os.getcwd().split('/')[-1] != 'imodels':
    os.chdir('..')
def get_reg_boston_data():
    '''Load (regression) data on boston housing prices.

    Returns (X_train, X_test, y_train, y_test, feature_names); only 25% of
    the rows go to the training split (test_size=0.75, as in the demo).
    '''
    dataset = load_boston()  # load once instead of twice (was two calls)
    X_reg, y_reg = dataset['data'], dataset['target']
    feature_names = dataset['feature_names']
    X_train_reg, X_test_reg, y_train_reg, y_test_reg = train_test_split(X_reg, y_reg, test_size=0.75)  # split
    return X_train_reg, X_test_reg, y_train_reg, y_test_reg, feature_names
def get_diabetes_data():
    '''Load (classification) data on diabetes from the bundled ARFF file.

    Returns (X_train, X_test, y_train, y_test, feature_names) with 0/1
    labels (1 == tested_positive); 25% of rows form the training split.
    '''
    data = loadarff("tests/test_data/diabetes.arff")
    data_np = np.array(list(map(lambda x: np.array(list(x)), data[0])))
    X = data_np[:, :-1].astype('float32')
    y_text = data_np[:, -1].astype('str')
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int is the documented replacement and yields the same dtype
    y = (y_text == 'tested_positive').astype(int)  # labels 0-1
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.75)  # split
    feature_names = ["#Pregnant","Glucose concentration test","Blood pressure(mmHg)","Triceps skin fold thickness(mm)",
                "2-Hour serum insulin (mu U/ml)","Body mass index","Diabetes pedigree function","Age (years)"]
    return X_train, X_test, y_train, y_test, feature_names
# build the train/test splits once, up front, for both tasks
X_train_reg, X_test_reg, y_train_reg, y_test_reg, feat_names_reg = get_reg_boston_data()
X_train, X_test, y_train, y_test, feat_names = get_diabetes_data()
def viz_classification_preds(probs, y_test):
    '''Visualize binary classification predictions.

    Left panel: per-class histogram of the predicted probability of class 1.
    Right panel: ROC curve.

    probs: (n, 2) array of class probabilities; y_test: 0/1 labels.
    '''
    plt.subplot(121)
    plt.hist(probs[:, 1][y_test==0], label='Class 0')
    plt.hist(probs[:, 1][y_test==1], label='Class 1', alpha=0.8)
    plt.ylabel('Count')
    plt.xlabel('Predicted probability of class 1')
    plt.legend()

    plt.subplot(122)
    plt.title('ROC curve')
    # roc_curve expects continuous scores; feeding argmax'd 0/1 predictions
    # (as before) collapses the curve to a single operating point
    fpr, tpr, thresholds = metrics.roc_curve(y_test, probs[:, 1])
    plt.xlabel('False positive rate')
    plt.ylabel('True positive rate')
    plt.plot(fpr, tpr)

    plt.tight_layout()
    plt.show()
# load some data
print('regression data', X_train_reg.shape, 'classification data', X_train.shape)

# %% [markdown]
# # rule sets
# Rule sets are models that create a set of (potentially overlapping) rules.

# %% [markdown]
# ### rulefit

# %% pycharm={"is_executing": false}
# fit a rulefit model (regression task)
rulefit = RuleFitRegressor(max_rules=10)
rulefit.fit(X_train_reg, y_train_reg, feature_names=feat_names_reg)

# get test performance
preds = rulefit.predict(X_test_reg)
print(f'test r2: {metrics.r2_score(y_test_reg, preds):0.2f}')

# inspect and print the rules
rules = rulefit.get_rules()
rules = rules[rules.coef != 0].sort_values("support", ascending=False)

# 'rule' is how the feature is constructed
# 'coef' is its weight in the final linear model
# 'support' is the fraction of points it applies to
# (last expression is a pandas Styler; it renders as the notebook cell output)
rules[['rule', 'coef', 'support']].style.background_gradient(cmap='viridis')
# %% [markdown]
# ## boosted stumps

# %%
# fit boosted stumps (classification task)
brc = BoostedRulesClassifier(n_estimators=10)
brc.fit(X_train, y_train, feature_names=feat_names)

print(brc)

# look at performance (probability histograms + ROC)
probs = brc.predict_proba(X_test)
viz_classification_preds(probs, y_test)
# %% [markdown]
# # rule lists

# %% [markdown]
# ### greedy rule lists
# **like a decision tree that only ever splits going left**

# %% pycharm={"is_executing": false}
# fit a greedy rule list
m = GreedyRuleListClassifier()
m.fit(X_train, y=y_train, feature_names=feat_names) # stores into m.rules_
probs = m.predict_proba(X_test)

# print the list
print(m)

# look at prediction breakdown
viz_classification_preds(probs, y_test)
# %% [markdown]
# ### oneR
# **fits a rule list restricted to use only one feature**

# %%
# fit a oneR model
m = OneRClassifier()
m.fit(X_train, y=y_train, feature_names=feat_names) # stores into m.rules_
probs = m.predict_proba(X_test)

# print the rule list
print(m)

# look at prediction breakdown
viz_classification_preds(probs, y_test)
# %% [markdown]
# ### scalable bayesian rule lists

# %%
# train classifier (allow more iterations for better accuracy; use BigDataRuleListClassifier for large datasets)
print('training...')
m = BayesianRuleListClassifier(max_iter=3000, class1label="diabetes", verbose=False)
m.fit(X_train, y_train)
probs = m.predict_proba(X_test)
print("learned model:\n", m)
viz_classification_preds(probs, y_test)
# %% [markdown]
# # rule trees

# %% [markdown]
# ### short decision tree

# %% pycharm={"is_executing": false}
# specify a decision tree with a maximum depth
dt = DecisionTreeClassifier(max_depth=3)
dt.fit(X_train, y_train)

# predicted class probabilities on the held-out test set
probs = dt.predict_proba(X_test)
# print(f'test mse: {np.mean(np.square(preds-y)):0.2f}')

# visualize the fitted tree, then the prediction breakdown
plot_tree(dt)
# plt.savefig('tree.pdf')
plt.show()

viz_classification_preds(probs, y_test)
# %% [markdown]
# ### optimal classification tree
# - docs [here](https://github.com/csinva/interpretability-workshop/tree/master/imodels/optimal_classification_tree)
# - note: this implementation is still somewhat unstable, and can be made faster by installing either `cplex` or `gurobi`
# %%
# sys.path.append('../imodels/optimal_classification_tree/pyoptree')
# sys.path.append('../imodels/optimal_classification_tree/')
# %%
# from optree import OptimalTreeModel
# feature_names = np.array(["x1", "x2"])
# X = np.array([[1, 2, 2, 2, 3], [1, 2, 1, 0, 1]]).T
# y = np.array([1, 1, 0, 0, 0]).reshape(-1, 1)
# X_test = np.array([[1, 1, 2, 2, 2, 3, 3], [1, 2, 2, 1, 0, 1, 0]]).T
# y_test = np.array([1, 1, 1, 0, 0, 0, 0])
# np.random.seed(13)
# model = OptimalTreeModel(tree_depth=3, N_min=1, alpha=0.1) #, solver_name='baron'
# model.fit(X_test, y_test) # this method is currently using the fast, but not optimal solver
# preds = model.predict(X_test)
# # fit on the bigger diabetes dset from above
# # model.fit(Xtrain, ytrain) # this method is currently using the fast, but not optimal solver
# # preds = model.predict(Xtest)
# print('acc', np.mean(preds == y_test))
# %%
# model.print_tree(feature_names)
# %% [markdown]
# # algebraic models

# %% [markdown]
# ### integer linear models

# %% pycharm={"is_executing": false}
np.random.seed(123)

# generate X and y: y depends linearly on the first three features only
n, p = 500, 10
X_sim = np.random.randn(n, p)
y_sim = 1 * X_sim[:, 0] + 2 * X_sim[:, 1] - 1 * X_sim[:, 2] + np.random.randn(n)

# fit linear models with different regularization parameters
# (larger lambda_reg pushes coefficients toward sparse integer values)
print('groundtruth weights should be 1, 2, -1...')
model = SLIMRegressor()
for lambda_reg in [0, 1e-2, 5e-2, 1e-1, 1, 2]:
    model.fit(X_sim, y_sim, lambda_reg)
    mse = np.mean(np.square(y_sim - model.predict(X_sim)))
    print(f'lambda: {lambda_reg}\tmse: {mse: 0.2f}\tweights: {model.model.coef_}')

# %%
| 30.246032 | 121 | 0.704802 |
795894b7ee70ebc43cb1d79d09b26c3a8e9c7f87 | 8,660 | py | Python | venv/Lib/site-packages/praw/models/reddit/multi.py | briehanrahan/subdiver | 92373f29dac2342e8a7e716a2966ce394f18fd1b | [
"CC0-1.0"
] | 38 | 2020-03-14T22:22:40.000Z | 2022-02-24T18:05:45.000Z | venv/Lib/site-packages/praw/models/reddit/multi.py | briehanrahan/subdiver | 92373f29dac2342e8a7e716a2966ce394f18fd1b | [
"CC0-1.0"
] | 3 | 2021-03-30T13:15:12.000Z | 2021-09-22T18:55:59.000Z | venv/Lib/site-packages/praw/models/reddit/multi.py | briehanrahan/subdiver | 92373f29dac2342e8a7e716a2966ce394f18fd1b | [
"CC0-1.0"
] | 9 | 2020-02-21T23:55:13.000Z | 2021-03-22T07:48:23.000Z | """Provide the Multireddit class."""
import re
from json import dumps
from typing import Any, Dict, List, Optional, TypeVar, Union
from ...const import API_PATH
from ...util.cache import cachedproperty
from ..listing.mixins import SubredditListingMixin
from .base import RedditBase
from .redditor import Redditor
from .subreddit import Subreddit, SubredditStream
_Multireddit = TypeVar("_Multireddit")
Reddit = TypeVar("Reddit")
class Multireddit(SubredditListingMixin, RedditBase):
    r"""A class for users' Multireddits.

    This is referred to as a Custom Feed on the Reddit UI.

    **Typical Attributes**

    This table describes attributes that typically belong to objects of this
    class. Since attributes are dynamically provided (see
    :ref:`determine-available-attributes-of-an-object`), there is not a
    guarantee that these attributes will always be present, nor is this list
    necessarily comprehensive.

    ======================= ===================================================
    Attribute               Description
    ======================= ===================================================
    ``can_edit``            A ``bool`` representing whether or not the
                            authenticated user may edit the multireddit.
    ``copied_from``         The multireddit that the multireddit was copied
                            from, if it exists, otherwise ``None``.
    ``created_utc``         When the multireddit was created, in `Unix Time`_.
    ``description_html``    The description of the multireddit, as HTML.
    ``description_md``      The description of the multireddit, as Markdown.
    ``display_name``        The display name of the multireddit.
    ``name``                The name of the multireddit.
    ``over_18``             A ``bool`` representing whether or not the
                            multireddit is restricted for users over 18.
    ``subreddits``          A ``list`` of :class:`.Subreddit`\ s that make up
                            the multireddit.
    ``visibility``          The visibility of the multireddit, either
                            ``private``, ``public``, or ``hidden``.
    ======================= ===================================================

    .. _Unix Time: https://en.wikipedia.org/wiki/Unix_time

    """

    # Attribute used by RedditBase.__str__
    STR_FIELD = "path"
    # Characters that are not allowed in a multireddit slug
    RE_INVALID = re.compile(r"[\W_]+", re.UNICODE)

    @staticmethod
    def sluggify(title: str):
        """Return a slug version of the title.

        :param title: The title to make a slug of.

        Adapted from reddit's utils.py.
        """
        title = Multireddit.RE_INVALID.sub("_", title).strip("_").lower()
        if len(title) > 21:  # truncate to nearest word
            title = title[:21]
            last_word = title.rfind("_")
            if last_word > 0:
                title = title[:last_word]
        return title or "_"

    @cachedproperty
    def stream(self) -> SubredditStream:
        """Provide an instance of :class:`.SubredditStream`.

        Streams can be used to indefinitely retrieve new comments made to a
        multireddit, like:

        .. code-block:: python

            for comment in reddit.multireddit('spez', 'fun').stream.comments():
                print(comment)

        Additionally, new submissions can be retrieved via the stream. In the
        following example all new submissions to the multireddit are fetched:

        .. code-block:: python

            for submission in reddit.multireddit('bboe',
                                                 'games').stream.submissions():
                print(submission)

        """
        return SubredditStream(self)

    def __init__(self, reddit: Reddit, _data: Dict[str, Any]):
        """Construct an instance of the Multireddit object."""
        self.path = None
        super().__init__(reddit, _data=_data)
        # path looks like "/user/<author>/m/<name>/"; element 2 is the author
        self._author = Redditor(reddit, self.path.split("/", 3)[2])
        self._path = API_PATH["multireddit"].format(
            multi=self.name, user=self._author
        )
        self.path = "/" + self._path[:-1]  # Prevent requests for path
        if "subreddits" in self.__dict__:
            # lazily received raw dicts; wrap each as a Subreddit instance
            self.subreddits = [
                Subreddit(reddit, x["name"]) for x in self.subreddits
            ]

    def _fetch_info(self):
        # (endpoint key, format fields, params) consumed by _fetch_data
        return (
            "multireddit_api",
            {"multi": self.name, "user": self._author.name},
            None,
        )

    def _fetch_data(self):
        name, fields, params = self._fetch_info()
        path = API_PATH[name].format(**fields)
        return self._reddit.request("GET", path, params)

    def _fetch(self):
        data = self._fetch_data()
        data = data["data"]
        # hydrate a fresh instance from the response and absorb its state
        other = type(self)(self._reddit, _data=data)
        self.__dict__.update(other.__dict__)
        self._fetched = True

    def add(self, subreddit: Subreddit):
        """Add a subreddit to this multireddit.

        :param subreddit: The subreddit to add to this multi.
        """
        url = API_PATH["multireddit_update"].format(
            multi=self.name, user=self._author, subreddit=subreddit
        )
        self._reddit.request(
            "PUT", url, data={"model": dumps({"name": str(subreddit)})}
        )
        # cached subreddit list is now stale; force a refetch on next access
        self._reset_attributes("subreddits")

    def copy(self, display_name: Optional[str] = None) -> _Multireddit:
        """Copy this multireddit and return the new multireddit.

        :param display_name: (optional) The display name for the copied
            multireddit. Reddit will generate the ``name`` field from this
            display name. When not provided the copy will use the same display
            name and name as this multireddit.
        """
        if display_name:
            name = self.sluggify(display_name)
        else:
            display_name = self.display_name
            name = self.name
        data = {
            "display_name": display_name,
            "from": self.path,
            "to": API_PATH["multireddit"].format(
                multi=name, user=self._reddit.user.me()
            ),
        }
        return self._reddit.post(API_PATH["multireddit_copy"], data=data)

    def delete(self):
        """Delete this multireddit."""
        path = API_PATH["multireddit_api"].format(
            multi=self.name, user=self._author.name
        )
        self._reddit.request("DELETE", path)

    def remove(self, subreddit: Subreddit):
        """Remove a subreddit from this multireddit.

        :param subreddit: The subreddit to remove from this multi.
        """
        url = API_PATH["multireddit_update"].format(
            multi=self.name, user=self._author, subreddit=subreddit
        )
        self._reddit.request(
            "DELETE", url, data={"model": dumps({"name": str(subreddit)})}
        )
        # cached subreddit list is now stale; force a refetch on next access
        self._reset_attributes("subreddits")

    def update(
        self, **updated_settings: Union[str, List[Union[str, Subreddit]]]
    ):
        """Update this multireddit.

        Keyword arguments are passed for settings that should be updated.
        They can be any of:

        :param display_name: The display name for this multireddit. Must be no
            longer than 50 characters.
        :param subreddits: Subreddits for this multireddit.
        :param description_md: Description for this multireddit, formatted in
            Markdown.
        :param icon_name: Can be one of: ``art and design``, ``ask``,
            ``books``, ``business``, ``cars``, ``comics``, ``cute animals``,
            ``diy``, ``entertainment``, ``food and drink``, ``funny``,
            ``games``, ``grooming``, ``health``, ``life advice``, ``military``,
            ``models pinup``, ``music``, ``news``, ``philosophy``, ``pictures
            and gifs``, ``science``, ``shopping``, ``sports``, ``style``,
            ``tech``, ``travel``, ``unusual stories``, ``video``, or ``None``.
        :param key_color: RGB hex color code of the form ``'#FFFFFF'``.
        :param visibility: Can be one of: ``hidden``, ``private``, ``public``.
        :param weighting_scheme: Can be one of: ``classic``, ``fresh``.
        """
        if "subreddits" in updated_settings:
            # the API expects [{"name": "<subreddit>"}, ...]
            updated_settings["subreddits"] = [
                {"name": str(sub)} for sub in updated_settings["subreddits"]
            ]
        path = API_PATH["multireddit_api"].format(
            multi=self.name, user=self._author.name
        )
        response = self._reddit.request(
            "PUT", path, data={"model": dumps(updated_settings)}
        )
        # refresh local state from the server's response
        new = Multireddit(self._reddit, response["data"])
        self.__dict__.update(new.__dict__)
| 38.488889 | 79 | 0.576097 |
79589543f0f09038ca80392bc3f1808942dc64cc | 5,695 | py | Python | estimateS3ensitivity_CoLA_Finetuned.py | m-hahn/fairseq | 8508699326640a6a7a83ed4de17ac986e6213bbe | [
"MIT"
] | null | null | null | estimateS3ensitivity_CoLA_Finetuned.py | m-hahn/fairseq | 8508699326640a6a7a83ed4de17ac986e6213bbe | [
"MIT"
] | null | null | null | estimateS3ensitivity_CoLA_Finetuned.py | m-hahn/fairseq | 8508699326640a6a7a83ed4de17ac986e6213bbe | [
"MIT"
] | null | null | null | import math
import sys
import torch
task = sys.argv[1]
assert task == "CoLA"
def mean(values):
    """Arithmetic mean of a non-empty sequence of numbers."""
    total = sum(values)
    return total / len(values)
# histogram of per-datapoint block sensitivities, bin width 0.5 (index = int(2*s))
sensitivityHistogram = [0 for _ in range(40)]
def variance(values):
    """Per-column variance of rescaled probabilities, summed over columns.

    `values` holds log-probabilities; they are exponentiated, mapped from
    [0, 1] to [-1, 1], and the variance is taken along dim 0.
    """
    probs = values.exp()
    scaled = 2 * probs - 1  # rescale probabilities to [-1, 1]
    centered = scaled - scaled.mean(dim=0)
    return float(centered.pow(2).mean(dim=0).sum())
from scipy.optimize import linprog
def getMaxOverPartitions(A, b, x_bounds, perSubsetSensitivities):
    """Maximize sum_i s_i * x_i subject to A x <= b and per-variable bounds.

    scipy's linprog minimizes, so the objective is negated in and the
    optimum negated back out; the maximizer corresponds to the most
    sensitive feasible partition.
    """
    objective = [-s for s in perSubsetSensitivities]
    solution = linprog(objective, A_ub=A, b_ub=b, bounds=x_bounds)
    return -solution.fun
from random import shuffle
# maps: variant sentence -> model prediction (log-probability, as a tensor)
alternatives_predictions_binary = {}
alternatives_predictions_float = {}
# running stats over predicted probabilities: [count, sum, sum of squares]
averageLabel = [0,0,0]
# predictions for the original dev datapoints, keyed by sentence text
with open(f"/u/scr/mhahn/PRETRAINED/GLUE/glue_data/CoLA/dev_datapoints_predictions_fairseq.tsv", "r") as inFile:
    itemsPredictions = dict([(x[0], x) for x in [x.split("\t") for x in inFile.read().strip().split("\n")]])
# predictions for the generated alternatives (TSV: sentence, log-prob[, discrete])
with open(f"/u/scr/mhahn/PRETRAINED/GLUE/glue_data/CoLA/dev_alternatives_c_finetuned_predictions_fairseq.tsv", "r") as inFile:
    for line in inFile:
        if len(line) < 5:
            continue
        line = line.strip().split("\t")
        if len(line) == 2:
            # pad a missing discrete-prediction column
            line.append("0.0")
        sentence, prediction_log, prediction_discrete = line
        alternatives_predictions_float[sentence.strip()] = torch.FloatTensor([float(prediction_log)])
        averageLabel[0]+=1
        averageLabel[1]+=math.exp(float(prediction_log))
        averageLabel[2]+=(math.exp(float(prediction_log)))**2
print(len(alternatives_predictions_float))
# label statistics after rescaling probabilities from [0,1] to [-1,1]
print("Average Label", 2*averageLabel[1]/averageLabel[0]-1)
print("Label Variance", 4*(averageLabel[2]/averageLabel[0] - (averageLabel[1]/averageLabel[0])**2))
#quit()
print(list(alternatives_predictions_float.items())[:10])
# alternatives file: blocks separated by "#####", one block per dev datapoint
with open(f"/u/scr/mhahn/PRETRAINED/GLUE/glue_data/CoLA/dev_alternatives_c_finetuned.tsv", "r") as inFile:
    alternatives = inFile.read().strip().split("#####\n")
print(len(alternatives))
sensitivities = []
with open(f"/u/scr/mhahn/sensitivity/sensitivities/s3ensitivities_{__file__}", "w") as outFile:
    print("Original", "\t", "BinaryS3ensitivity", file=outFile)
    for alternative in alternatives:
        if len(alternative) < 5:
            continue
        variants_set = set()
        # maps subset bitstring -> list of variant sentences for that subset
        variants_dict = {}
        # block layout: line 0 = original sentence, line 1 = tokenization,
        # lines 3+ = "<subset bitstring>\t<variant sentence>"
        alternative = alternative.split("\n")
        original = alternative[0].strip()
        print(original)
        print(original+"#")
        assert original in itemsPredictions
        entry = itemsPredictions[original]
        predictionForOriginal = float(entry[2])
        booleanPredictionForOriginal = int(entry[3])
        assert predictionForOriginal <= 0
        assert booleanPredictionForOriginal in [0,1]
        tokenized = alternative[1].split(" ")
        for variant in alternative[3:]:
            if len(variant) < 5:
                continue
            subset, sentence= variant.strip().split("\t")
            # undo sentencepiece segmentation: drop spaces, map "▁" to space
            sentence = "".join(sentence.strip().split(" "))
            sentence = sentence.replace("▁", " ").replace("</s>", "")
            sentence = sentence.strip()
            if sentence not in alternatives_predictions_float:
                print("DID NOT FIND", sentence)
                assert False
                continue  # unreachable after the assert; kept as-is
            assert sentence in alternatives_predictions_float, sentence
            variants_set.add(sentence)
            if subset not in variants_dict:
                variants_dict[subset] = []
            variants_dict[subset].append(sentence)
        print(len(variants_set), "variants")
        # look up the model's prediction for every variant sentence
        valuesPerVariant = {}
        for variant in variants_set:
            try:
                valuesPerVariant[variant] = alternatives_predictions_float[variant]
            except ValueError:
                print("VALUE ERROR", variant)
                valuesPerVariant[variant] = 0
            except AttributeError:
                print("VALUE ERROR", variant)
                valuesPerVariant[variant] = 0
        # per-subset sensitivity: squared deviation of the mean variant
        # probability from the original's probability, rescaled to [-1,1]
        # (hence the factor 4, bounding each value by 4)
        varianceBySubset = {}
        for subset in variants_dict:
            values = torch.stack([ valuesPerVariant[x] for x in variants_dict[subset]], dim=0)
            varianceBySubset[subset] = float(4*((values.exp().mean() - math.exp(predictionForOriginal)).pow(2)))
            assert varianceBySubset[subset] <= 4
        subsetsEnumeration = list(variants_dict)
        if len(subsetsEnumeration) == 0:
            continue
        # LP constraint matrix: A[inp][subset] = 1 iff input position `inp`
        # belongs to that subset; b = 1 enforces that the chosen subsets form
        # a (fractional) partition of the input positions
        N = len(subsetsEnumeration[0])
        A = [[0 for subset in range(len(subsetsEnumeration))] for inp in range(N)]
        for inp in range(N):
            for subset, bitstr in enumerate(subsetsEnumeration):
                assert len(bitstr) == N
                if bitstr[inp] == "1":
                    A[inp][subset] = 1
        b = [1 for _ in range(N)]
        x_bounds = [(0,1) for _ in range(len(subsetsEnumeration))]
        perSubsetSensitivities = [varianceBySubset[x] for x in subsetsEnumeration]
        sensitivity = getMaxOverPartitions(A, b, x_bounds, perSubsetSensitivities)
        print("OVERALL SENSITIVITY ON THIS DATAPOINT", sensitivity)
        sensitivityHistogram[int(2*sensitivity)] += 1
        sensitivities.append(sensitivity)
        print("Average block sensitivity of the model", sum(sensitivities)/len(sensitivities))
        print(original, "\t", sensitivity, file=outFile)
print("Average block sensitivity of the model", sum(sensitivities)/len(sensitivities))
print("Median block sensitivity of the model", sorted(sensitivities)[int(len(sensitivities)/2)])
import torch
# normalized histogram of sensitivities across all datapoints
sensitivityHistogram = torch.FloatTensor(sensitivityHistogram)
print(sensitivityHistogram/sensitivityHistogram.sum())
7958954fd7ce4f688604d0b06b4280cab76f0e48 | 5,354 | py | Python | aiochat/chat/views.py | comeonmike/aiohttp_chat | d1aad0dfcd87ebb29e348814beff1efed9e78faa | [
"MIT"
] | null | null | null | aiochat/chat/views.py | comeonmike/aiohttp_chat | d1aad0dfcd87ebb29e348814beff1efed9e78faa | [
"MIT"
] | null | null | null | aiochat/chat/views.py | comeonmike/aiohttp_chat | d1aad0dfcd87ebb29e348814beff1efed9e78faa | [
"MIT"
] | null | null | null | import re
import aiohttp_jinja2
from textwrap import dedent
from aiohttp import web, WSMsgType
from chat.models import Room, Message
from helpers.decorators import login_required
from helpers.tools import redirect, add_message, get_object_or_404
class CreateRoom(web.View):
    """View that renders the room list page and creates new chat rooms."""

    @login_required
    @aiohttp_jinja2.template('chat/rooms.html')
    async def get(self):
        # Render the template with an empty context; the commented-out line
        # shows how the room list used to be injected here.
        return {}
        # return {'chat_rooms': await Room.all_rooms(self.request.app.objects)}

    @login_required
    async def post(self):
        """Validate the submitted room name, ensure it is unique, create the room.

        NOTE(review): control flow assumes ``redirect`` raises an HTTP redirect
        exception (aiohttp style); if it merely returns, execution would fall
        through after a failed validation -- confirm against helpers.tools.
        """
        roomname = await self.is_valid()
        if not roomname:
            redirect(self.request, 'create_room')
        # ``**`` is peewee's case-insensitive LIKE operator: reject duplicates.
        if await self.request.app.objects.count(Room.select().where(Room.name ** roomname)):
            add_message(self.request, 'danger', f'Room with {roomname} already exists.')
            redirect(self.request, 'create_room')
        room = await self.request.app.objects.create(Room, name=roomname)
        redirect(self.request, 'room', parts=dict(slug=room.name))

    async def is_valid(self):
        """Return the lower-cased room name from POST data, or False if invalid.

        Valid names are 1..32 word characters and start with a letter.
        """
        data = await self.request.post()
        roomname = data.get('roomname', '').lower()
        if not re.match(r'^[a-z]\w{0,31}$', roomname):
            add_message(self.request, 'warning', (
                'Room name should be alphanumeric, with length [1 .. 32], startswith letter!'))
            return False
        return roomname
class ChatRoom(web.View):
    """Render a chat room located by its slug, together with its messages."""

    @login_required
    @aiohttp_jinja2.template('chat/chat.html')
    async def get(self):
        slug = self.request.match_info['slug'].lower()
        room = await get_object_or_404(self.request, Room, name=slug)
        objects = self.request.app.objects
        rooms = await Room.all_rooms(objects)
        messages = await room.all_messages(objects)
        return {'room': room, 'chat_rooms': rooms, 'room_messages': messages}
class WebSocket(web.View):
    """Handle a WebSocket connection for one chat room."""

    async def get(self):
        """Upgrade to a WebSocket and relay chat messages and commands."""
        self.room = await get_object_or_404(self.request, Room, name=self.request.match_info['slug'].lower())
        user = self.request.user
        app = self.request.app
        app.logger.debug('Prepare WS connection')
        ws = web.WebSocketResponse()
        await ws.prepare(self.request)
        if self.room.id not in app.ws_list:
            app.ws_list[self.room.id] = {}
        # Announce the newcomer to everyone already in the room.
        message = await app.objects.create(
            Message, room=self.room, user=None, text=f'@{user.username} join chat room')
        app.ws_list[self.room.id][user.username] = ws
        await self.broadcast(message)
        async for msg in ws:
            if msg.type == WSMsgType.text:
                if msg.data == 'close':
                    await ws.close()
                else:
                    text = msg.data.strip()
                    if text.startswith('/'):
                        # Chat command: answer goes only to the sender.
                        ans = await self.command(text)
                        if ans is not None:
                            await ws.send_json(ans)
                    else:
                        message = await app.objects.create(Message, room=self.room, user=user, text=text)
                        await self.broadcast(message)
            elif msg.type == WSMsgType.error:
                app.logger.debug(f'Connection closed with exception {ws.exception()}')
        await self.disconnect(user.username, ws)
        return ws

    async def command(self, cmd):
        """Run a chat command; return a JSON-serializable answer or None."""
        app = self.request.app
        app.logger.debug(f'Chat command {cmd}')
        if cmd.startswith('/kill'):
            # unconnect user from room
            try:
                target = cmd.split(' ')[1]
                peer = app.ws_list[self.room.id][target]
                await self.disconnect(target, peer, silent=True)
                app.logger.debug(f'User {target} killed')
            except KeyError:
                pass
        elif cmd == '/clear':
            # drop all room messages
            count = await app.objects.execute(Message.delete().where(Message.room == self.room))
            app.logger.debug(f'Removed {count} messages')
            for peer in app.ws_list[self.room.id].values():
                # BUGFIX: send_json is a coroutine (see broadcast) and was not awaited.
                await peer.send_json({'cmd': 'empty'})
        elif cmd == '/help':
            return {'text': dedent('''\
                - /help - display this msg
                - /kill {username} - remove user from room
                - /clear - empty all messages in room
                ''')}
        else:
            # BUGFIX: was a plain string literal, so {cmd} was never interpolated.
            return {'text': f'wrong cmd {cmd}'}

    async def broadcast(self, message):
        """Send a message to every peer connected to this room."""
        for peer in self.request.app.ws_list[self.room.id].values():
            await peer.send_json(message.as_dict())

    async def disconnect(self, username, socket, silent=False):
        """Close the connection and, unless silent, notify the room."""
        app = self.request.app
        # BUGFIX: the registry is keyed by room id first, then username;
        # popping the username from the top-level dict never removed anything.
        app.ws_list.get(self.room.id, {}).pop(username, None)
        if not socket.closed:
            await socket.close()
        if silent:
            return
        # left chat
        message = await app.objects.create(
            Message, room=self.room, user=None, text=f'@{username} left chat room')
        await self.broadcast(message)
795895fbc430cb698f214748f273bb4709a8ca62 | 2,261 | py | Python | idl2py/star/mphase.py | RapidLzj/idl2py | 193051cd8d01db0d125b8975713b885ad521a992 | [
"MIT"
] | null | null | null | idl2py/star/mphase.py | RapidLzj/idl2py | 193051cd8d01db0d125b8975713b885ad521a992 | [
"MIT"
] | null | null | null | idl2py/star/mphase.py | RapidLzj/idl2py | 193051cd8d01db0d125b8975713b885ad521a992 | [
"MIT"
] | null | null | null | """
By Dr Jie Zheng -Q, NAOC
v1 2019-04-27
"""
import numpy as np
from..util import *
def mphase():
    """Return the illuminated fraction of the Moon (port of IDL ``MPHASE``).

    NOTE(review): not yet implemented -- the IDL algorithm this should port
    (Meeus, "Astronomical Algorithms", ch. 46) is preserved in the comment
    block below; currently this is a no-op placeholder returning None.
    """
    pass
#pro mphase,jd, k
#;+
#; NAME:
#; MPHASE
#; PURPOSE:
#; Return the illuminated fraction of the Moon at given Julian date(s)
#;
#; CALLING SEQUENCE:
#; MPHASE, jd, k
#; INPUT:
#; JD - Julian date, scalar or vector, double precision recommended
#; OUTPUT:
#; k - illuminated fraction of Moon's disk (0.0 < k < 1.0), same number
#; of elements as jd. k = 0 indicates a new moon, while k = 1 for
#; a full moon.
#; EXAMPLE:
#; Plot the illuminated fraction of the moon for every day in July
#; 1996 at 0 TD (~Greenwich noon).
#;
#; IDL> jdcnv, 1996, 7, 1, 0, jd ;Get Julian date of July 1
#; IDL> mphase, jd+dindgen(31), k ;Moon phase for all 31 days
#; IDL> plot, indgen(31),k ;Plot phase vs. July day number
#;
#; METHOD:
#; Algorithm from Chapter 46 of "Astronomical Algorithms" by Jean Meeus
#; (Willmann-Bell, Richmond) 1991. SUNPOS and MOONPOS are used to get
#; positions of the Sun and the Moon (and the Moon distance). The
#; selenocentric elongation of the Earth from the Sun (phase angle)
#; is then computed, and used to determine the illuminated fraction.
#; PROCEDURES CALLED:
#; MOONPOS, SUNPOS
#; REVISION HISTORY:
#; Written W. Landsman Hughes STX June 1996
#; Converted to IDL V5.0 W. Landsman September 1997
#; Use /RADIAN keywords to MOONPOS, SUNPOS internally W. Landsman Aug 2000
#;-
# On_error,2
#
# if N_params() LT 2 then begin
# print,'Syntax - MPHASE, jd, k'
# return
# endif
# diss = 1.49598e8 ;Earth-Sun distance (1 AU)
#
# moonpos, jd, ram, decm, dism, /RADIAN
# sunpos, jd, ras, decs, /RADIAN
#
#; phi - geocentric elongation of the Moon from the Sun
#; inc - selenocentric (Moon centered) elongation of the Earth from the Sun
#
# phi = acos( sin(decs)*sin(decm) + cos(decs)*cos(decm)*cos(ras-ram) )
# inc = atan( diss * sin(phi), dism - diss*cos(phi) )
# k = (1 + cos(inc))/2.
#
# return
# end
| 31.84507 | 83 | 0.581601 |
7958974363b5453c81f2b61994973a951b0cb6c8 | 1,169 | py | Python | dataxHWSp2021/HW2_CoreConcepts/student/tests/q4b2.py | UCBerkeley-SCET/DataX-Berkeley | f912d22c838b511d3ada4ecfa3548afd80437b74 | [
"Apache-2.0"
] | 28 | 2020-06-15T23:53:36.000Z | 2022-03-19T09:27:02.000Z | dataxHWSp2021/HW2_CoreConcepts/student/tests/q4b2.py | UCBerkeley-SCET/DataX-Berkeley | f912d22c838b511d3ada4ecfa3548afd80437b74 | [
"Apache-2.0"
] | 4 | 2020-06-24T22:20:31.000Z | 2022-02-28T01:37:36.000Z | dataxHWSp2021/HW2_CoreConcepts/student/tests/q4b2.py | UCBerkeley-SCET/DataX-Berkeley | f912d22c838b511d3ada4ecfa3548afd80437b74 | [
"Apache-2.0"
] | 78 | 2020-06-19T09:41:01.000Z | 2022-02-05T00:13:29.000Z | test = { 'name': 'q4b2',
'points': 3,
'suites': [ { 'cases': [ { 'code': '>>> print '
"(clf_knn.named_steps['knn'].__class__ "
'if clf_knn.__class__==Pipeline '
'else clf_knn.__class__)\n'
'<class '
"'sklearn.neighbors._classification.KNeighborsClassifier'>\n",
'hidden': False,
'locked': False},
{ 'code': '>>> '
'np.mean(cross_val_score(clf_knn,X_train,y_train,cv=kf)) '
'> 0.96\n'
'True',
'hidden': False,
'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
| 55.666667 | 109 | 0.264328 |
795897a88af1ebe4cccbcb891d2257c35bf6a051 | 628 | py | Python | tests/test_getObjectRoutes.py | madibag/TikTok-Api | aef7cabdef6afe6e028d9382f7d5b1575354f6fa | [
"MIT"
] | null | null | null | tests/test_getObjectRoutes.py | madibag/TikTok-Api | aef7cabdef6afe6e028d9382f7d5b1575354f6fa | [
"MIT"
] | null | null | null | tests/test_getObjectRoutes.py | madibag/TikTok-Api | aef7cabdef6afe6e028d9382f7d5b1575354f6fa | [
"MIT"
] | null | null | null | from TikTokApi import TikTokApi
def test_tiktok_object():
    """Fetching a video by id and by URL should both return non-empty payloads."""
    api = TikTokApi()
    by_id = api.getTikTokById("6829267836783971589")
    assert len(by_id) > 0
    url = "https://www.tiktok.com/@therock/video/6829267836783971589"
    by_url = api.getTikTokByUrl(url)
    assert len(by_url) > 0
def test_user_object():
    """User lookup should return a non-empty payload."""
    user = TikTokApi().getUserObject("therock")
    assert len(user) > 0
def test_music_object():
    """Music lookup should return a non-empty payload."""
    music = TikTokApi().getMusicObject("6820695018429253633")
    assert len(music) > 0
def test_hashtag_object():
    """Hashtag lookup should return a non-empty payload."""
    hashtag = TikTokApi().getHashtagObject("funny")
    assert len(hashtag) > 0
| 20.933333 | 75 | 0.622611 |
795897c339d60d9471cbe25bc4141886a88efbc6 | 4,037 | py | Python | sunpy/tests/helpers.py | LaudateCorpus1/sunpy | f7bdf22e5229a577c5851c1e05502f0d68b4b369 | [
"BSD-2-Clause"
] | 1 | 2016-09-19T18:53:34.000Z | 2016-09-19T18:53:34.000Z | sunpy/tests/helpers.py | wtbarnes/sunpy | f7bdf22e5229a577c5851c1e05502f0d68b4b369 | [
"BSD-2-Clause"
] | null | null | null | sunpy/tests/helpers.py | wtbarnes/sunpy | f7bdf22e5229a577c5851c1e05502f0d68b4b369 | [
"BSD-2-Clause"
] | null | null | null | import sys
import platform
import warnings
from pathlib import Path
from functools import wraps
import matplotlib as mpl
import matplotlib.pyplot as plt
import pkg_resources
import pytest
import astropy
from astropy.wcs.wcs import FITSFixedWarning
import sunpy.map
__all__ = ['skip_windows', 'skip_glymur', 'skip_ana', 'skip_32bit',
'warnings_as_errors', 'asdf_entry_points']
# SunPy's JPEG2000 capabilities rely on the glymur library.
# First we check to make sure that glymur imports correctly before proceeding.
try:
    import glymur
except ImportError:
    SKIP_GLYMUR = True
else:
    # See if we have a C backend
    if glymur.lib.openjp2.OPENJP2:
        SKIP_GLYMUR = False
    else:
        SKIP_GLYMUR = True

# ANA support is provided by an optional compiled extension.
try:
    from sunpy.io import _pyana  # NOQA
except ImportError:
    SKIP_ANA = True
else:
    SKIP_ANA = False

# sys.maxsize exceeds 2**32 only on 64-bit interpreters.
if sys.maxsize > 2**32:
    SKIP_32 = False
else:
    SKIP_32 = True

# Reusable pytest skip markers for the conditions detected above.
skip_windows = pytest.mark.skipif(platform.system() == 'Windows', reason="Windows.")
skip_glymur = pytest.mark.skipif(SKIP_GLYMUR, reason="Glymur can not be imported.")
skip_ana = pytest.mark.skipif(SKIP_ANA, reason="ANA is not available.")
skip_32bit = pytest.mark.skipif(SKIP_32, reason="Fails on a 32 bit system.")

# Skip if the SunPy ASDF entry points are missing.
asdf_entry_points = pytest.mark.skipif(not list(pkg_resources.iter_entry_points('asdf_extensions', 'sunpy')),
                                       reason="No SunPy ASDF entry points.")
@pytest.fixture
def warnings_as_errors(request):
    """Turn all warnings into errors for the duration of the test."""
    warnings.simplefilter('error')

    def _restore_warning_filters(*_args):
        warnings.resetwarnings()

    request.addfinalizer(_restore_warning_filters)
new_hash_library = {}
def get_hash_library_name():
    """
    Build the figure-hash library filename for the current environment.

    The name encodes the matplotlib, freetype, astropy and mpl-animators
    versions so that each combination gets its own hash library.
    """
    import mpl_animators

    def _version_tag(version, dev_marker):
        # Development builds collapse to "dev"; releases just drop the dots.
        return "dev" if dev_marker in version else version.replace('.', '')

    animators_version = _version_tag(mpl_animators.__version__, "+")
    mpl_version = _version_tag(mpl.__version__, "+")
    astropy_version = _version_tag(astropy.__version__, "dev")
    ft2_version = mpl.ft2font.__freetype_version__.replace('.', '')
    return (f"figure_hashes_mpl_{mpl_version}_ft_{ft2_version}"
            f"_astropy_{astropy_version}_animators_{animators_version}.json")
def figure_test(test_function):
    """
    A decorator for a test that verifies the hash of the current figure or the
    returned figure, with the name of the test function as the hash identifier
    in the library. A PNG is also created in the 'result_image' directory,
    which is created on the current path.

    All such decorated tests are marked with `pytest.mark.mpl_image` for convenient filtering.

    Examples
    --------
    @figure_test
    def test_simple_plot():
        plt.plot([0,1])
    """
    # The hash library filename is environment specific (see get_hash_library_name)
    # and lives next to this module.
    hash_library_name = get_hash_library_name()
    hash_library_file = Path(__file__).parent / hash_library_name

    @pytest.mark.remote_data
    @pytest.mark.mpl_image_compare(hash_library=hash_library_file,
                                   # Strip the 'Software' PNG metadata so the
                                   # rendered bytes are reproducible.
                                   savefig_kwargs={'metadata': {'Software': None}},
                                   style='default')
    @wraps(test_function)
    def test_wrapper(*args, **kwargs):
        ret = test_function(*args, **kwargs)
        # If the test didn't return a figure, fall back to the current one.
        if ret is None:
            ret = plt.gcf()
        return ret
    return test_wrapper
def no_vso(f):
    """
    Disable the VSO client from returning results via Fido during this test.
    """
    from sunpy.net import Fido
    from sunpy.net.vso import VSOClient

    @wraps(f)
    def wrapper(*args, **kwargs):
        Fido.registry[VSOClient] = lambda *args: False
        try:
            return f(*args, **kwargs)
        finally:
            # BUGFIX: restore the real query handler even when the wrapped
            # test raises, so one failing test cannot disable VSO for the
            # rest of the session.
            Fido.registry[VSOClient] = VSOClient._can_handle_query

    return wrapper
def fix_map_wcs(smap):
    """Return *smap* rebuilt with a fixed WCS, silencing FITSFixedWarning."""
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', category=FITSFixedWarning)
        fixed_wcs = smap.wcs
        fixed_wcs.fix()
    return sunpy.map.Map(smap.data, fixed_wcs)
| 30.353383 | 123 | 0.689373 |
795898af1648ec960b85a7321dc963c3b2517679 | 462 | py | Python | api/anime.py | AbdifatahOsman2/Tofu-Animev2 | 8d1d047497a49a3679026fdbe443dd36dc19c973 | [
"MIT"
] | null | null | null | api/anime.py | AbdifatahOsman2/Tofu-Animev2 | 8d1d047497a49a3679026fdbe443dd36dc19c973 | [
"MIT"
] | null | null | null | api/anime.py | AbdifatahOsman2/Tofu-Animev2 | 8d1d047497a49a3679026fdbe443dd36dc19c973 | [
"MIT"
] | null | null | null | from peewee import *
import datetime
from pyparsing import Char
from db import DATABASE
from user import User
class Anime(Model):
    """Peewee model for an anime record entered by a user."""
    # Surrogate auto-incrementing primary key.
    id = AutoField()
    name = CharField()
    author = CharField()
    # NOTE(review): shadows the builtin ``type``; presumably the media
    # type/genre of the entry -- confirm against callers.
    type = CharField()
    rating = IntegerField()
    hot_take = CharField()
    # Cover image -- presumably a URL or path; TODO confirm.
    image = CharField()
    # Owning user; reverse accessor is ``user.animes``.
    user = ForeignKeyField(User, backref='animes')
    # Creation timestamp; ``datetime.now`` (no parens) is evaluated per row.
    created_at = DateTimeField(default=datetime.datetime.now)

    class Meta:
        # Bind the model to the shared application database.
        database = DATABASE
79589abee39a2b165c7a95f5fbe0549444d9ba14 | 189,170 | py | Python | src/azure-cli/azure/cli/command_modules/acs/custom.py | zackliu/azure-cli | 680f8339ac010a89d4063566fabc5991abc8a4c2 | [
"MIT"
] | 1 | 2021-04-22T09:20:56.000Z | 2021-04-22T09:20:56.000Z | src/azure-cli/azure/cli/command_modules/acs/custom.py | zackliu/azure-cli | 680f8339ac010a89d4063566fabc5991abc8a4c2 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/acs/custom.py | zackliu/azure-cli | 680f8339ac010a89d4063566fabc5991abc8a4c2 | [
"MIT"
] | 1 | 2021-04-16T18:14:41.000Z | 2021-04-16T18:14:41.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import random
import re
import shutil
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
from distutils.version import StrictVersion
from math import isnan
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
# pylint: disable=import-error
import yaml
import dateutil.parser
from dateutil.relativedelta import relativedelta
from knack.log import get_logger
from knack.util import CLIError
from knack.prompting import prompt_pass, NoTTYException, prompt_y_n
from msrestazure.azure_exceptions import CloudError
import requests
# pylint: disable=no-name-in-module,import-error
from azure.cli.command_modules.acs import acs_client, proxy
from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod
from azure.cli.core.api import get_config_dir
from azure.cli.core.azclierror import (ResourceNotFoundError,
ArgumentUsageError,
ClientRequestError,
InvalidArgumentValueError,
MutuallyExclusiveArgumentError,
ValidationError)
from azure.cli.core._profile import Profile
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac.models import (ApplicationCreateParameters,
ApplicationUpdateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters,
ResourceAccess, RequiredResourceAccess)
from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
from azure.mgmt.containerservice.v2021_02_01.models import ContainerServiceNetworkProfile
from azure.mgmt.containerservice.v2021_02_01.models import ContainerServiceLinuxProfile
from azure.mgmt.containerservice.v2021_02_01.models import ManagedClusterServicePrincipalProfile
from azure.mgmt.containerservice.v2021_02_01.models import ContainerServiceSshConfiguration
from azure.mgmt.containerservice.v2021_02_01.models import ContainerServiceSshPublicKey
from azure.mgmt.containerservice.v2021_02_01.models import ManagedCluster
from azure.mgmt.containerservice.v2021_02_01.models import ManagedClusterAADProfile
from azure.mgmt.containerservice.v2021_02_01.models import ManagedClusterAddonProfile
from azure.mgmt.containerservice.v2021_02_01.models import ManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2021_02_01.models import ManagedClusterIdentity
from azure.mgmt.containerservice.v2021_02_01.models import AgentPool
from azure.mgmt.containerservice.v2021_02_01.models import AgentPoolUpgradeSettings
from azure.mgmt.containerservice.v2021_02_01.models import ManagedClusterSKU
from azure.mgmt.containerservice.v2021_02_01.models import ManagedClusterWindowsProfile
from azure.mgmt.containerservice.v2021_02_01.models import ManagedClusterIdentityUserAssignedIdentitiesValue
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftAgentPoolProfileRole
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterIdentityProvider
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAADIdentityProvider
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedCluster
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftRouterProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAuthProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import NetworkProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterMonitorProfile
from ._client_factory import cf_container_services
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
from ._client_factory import cf_agent_pools
from ._client_factory import get_msi_client
from ._helpers import (_populate_api_server_access_profile, _set_vm_set_type, _set_outbound_type,
_parse_comma_separated_list)
from ._loadbalancer import (set_load_balancer_sku, is_load_balancer_profile_provided,
update_load_balancer_profile, create_load_balancer_profile)
from ._consts import CONST_SCALE_SET_PRIORITY_REGULAR, CONST_SCALE_SET_PRIORITY_SPOT, CONST_SPOT_EVICTION_POLICY_DELETE
from ._consts import CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME
from ._consts import CONST_MONITORING_ADDON_NAME
from ._consts import CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID
from ._consts import CONST_VIRTUAL_NODE_ADDON_NAME
from ._consts import CONST_VIRTUAL_NODE_SUBNET_NAME
from ._consts import CONST_KUBE_DASHBOARD_ADDON_NAME
from ._consts import CONST_AZURE_POLICY_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID, CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME
from ._consts import CONST_INGRESS_APPGW_SUBNET_CIDR, CONST_INGRESS_APPGW_SUBNET_ID
from ._consts import CONST_INGRESS_APPGW_WATCH_NAMESPACE
from ._consts import CONST_CONFCOM_ADDON_NAME, CONST_ACC_SGX_QUOTE_HELPER_ENABLED
from ._consts import ADDONS
from ._consts import CONST_CANIPULL_IMAGE
from ._consts import CONST_PRIVATE_DNS_ZONE_SYSTEM
logger = get_logger(__name__)
# pylint:disable=too-many-lines,unused-argument
def which(binary):
    """Return the full path of *binary* found on PATH, or None.

    On Windows the '.exe' suffix is appended before searching. A match must
    be a regular file that is executable by the current user.
    """
    # BUGFIX: os.getenv('PATH') can return None, which previously crashed on
    # .split(); also use os.pathsep instead of hard-coding ';' vs ':'.
    path_var = os.getenv('PATH') or ''
    if platform.system() == 'Windows':
        binary = binary + '.exe'
    for part in path_var.split(os.pathsep):
        bin_path = os.path.join(part, binary)
        # isfile implies exists, so the separate exists() check was redundant.
        if os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
            return bin_path
    return None
def wait_then_open(url):
    """
    Wait until *url* responds (useful for a proxy that is still starting up),
    then open it in a new browser tab. Gives up waiting after ~9 seconds and
    opens the tab anyway.
    """
    for _ in range(1, 10):
        try:
            urlopen(url, context=_ssl_context())
        except URLError:
            # Not up yet -- wait a second and retry.
            time.sleep(1)
        else:
            # BUGFIX: the break was previously outside the try/except, so the
            # loop always exited after a single attempt regardless of outcome.
            break
    webbrowser.open_new_tab(url)
def wait_then_open_async(url):
    """
    Spawn a daemon thread that waits for *url* to respond and then opens it
    in a browser tab (see ``wait_then_open``).
    """
    # BUGFIX: Thread args must be a sequence; ``({url})`` built a one-element
    # *set*, which only worked by accident of iterable unpacking.
    t = threading.Thread(target=wait_then_open, args=(url,))
    t.daemon = True
    t.start()
def acs_browse(cmd, client, resource_group_name, name, disable_browser=False, ssh_key_file=None):
    """
    Open a browser to the web interface for the cluster orchestrator.

    :param name: Name of the target Azure container service instance.
    :type name: String
    :param resource_group_name: Name of Azure container service's resource group.
    :type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy.
    :type disable_browser: bool
    :param ssh_key_file: If set, a path to an SSH key to use; only applies to DC/OS.
    :type ssh_key_file: string
    """
    acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file)
def _acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file):
    """Dispatch to the Kubernetes or DC/OS browse flow based on the orchestrator."""
    orchestrator_type = acs_info.orchestrator_profile.orchestrator_type  # pylint: disable=no-member

    # Kubernetes may be reported via the enum, a plain string, or a custom profile.
    if str(orchestrator_type).lower() == 'kubernetes' or \
       orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or \
       (acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'):  # pylint: disable=no-member
        return k8s_browse(cmd, client, name, resource_group_name, disable_browser, ssh_key_file=ssh_key_file)
    if str(orchestrator_type).lower() == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos:
        return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
    raise CLIError('Unsupported orchestrator type {} for browse'.format(orchestrator_type))
def k8s_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
    """
    Launch a local proxy and browse the Kubernetes web UI.

    :param disable_browser: If true, don't launch a web browser after establishing the proxy.
    :type disable_browser: bool
    """
    acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file)
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
    """Write temporary credentials, start ``kubectl proxy`` and open the dashboard."""
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')
    # Re-create a throwaway kubeconfig dedicated to browsing.
    browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
    if os.path.exists(browse_path):
        os.remove(browse_path)

    _k8s_get_credentials_internal(name, acs_info, browse_path, ssh_key_file, False)

    logger.warning('Proxy running on 127.0.0.1:8001/ui')
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async('http://127.0.0.1:8001/ui')
    # Blocks until the user interrupts; kubectl serves the dashboard locally.
    subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"])
def dcos_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
    """
    Create an SSH tunnel to the Azure container service and open the
    Mesosphere DC/OS dashboard in the browser.

    :param name: Name of the target Azure container service instance.
    :type name: String
    :param resource_group_name: Name of Azure container service's resource group.
    :type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy.
    :type disable_browser: bool
    :param ssh_key_file: Path to the SSH key to use.
    :type ssh_key_file: string
    """
    acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
    """SSH to the cluster, start the octarine proxy and tunnel it locally."""
    if not os.path.isfile(ssh_key_file):
        raise CLIError('Private key file {} does not exist'.format(ssh_key_file))

    acs = acs_client.ACSClient()
    if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
                       key_filename=ssh_key_file):
        raise CLIError('Error connecting to ACS: {}'.format(_get_host_name(acs_info)))

    # octarine is DC/OS's built-in HTTP proxy; we run one instance per session.
    octarine_bin = '/opt/mesosphere/bin/octarine'
    if not acs.file_exists(octarine_bin):
        raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))

    proxy_id = _rand_str(16)
    proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
    acs.run(proxy_cmd, background=True)

    # Parse the output to get the remote PORT
    proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
    stdout, _ = acs.run(proxy_client_cmd)
    remote_port = int(stdout.read().decode().strip())
    local_port = acs.get_available_local_port()

    # Set the proxy
    proxy.set_http_proxy('127.0.0.1', local_port)
    logger.warning('Proxy running on 127.0.0.1:%s', local_port)
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async('http://127.0.0.1')
    try:
        # Blocks until interrupted; forwards local_port to the remote proxy.
        acs.create_tunnel(
            remote_host='127.0.0.1',
            remote_port=remote_port,
            local_port=local_port)
    finally:
        # Always undo the system-wide HTTP proxy setting.
        proxy.disable_http_proxy()
def acs_install_cli(cmd, client, resource_group_name, name, install_location=None, client_version=None):
    """Install the CLI matching the cluster's orchestrator (kubectl or dcos).

    :param install_location: path where the downloaded binary is written.
    :param client_version: optional version override; installer defaults apply.
    :raises CLIError: when the orchestrator type has no known installer.
    """
    acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    orchestrator_type = acs_info.orchestrator_profile.orchestrator_type  # pylint: disable=no-member
    kwargs = {'install_location': install_location}
    if client_version:
        kwargs['client_version'] = client_version
    # BUGFIX: both installers take ``cmd`` as their first positional argument,
    # but it was previously omitted, so either branch raised TypeError.
    if orchestrator_type == 'kubernetes':
        return k8s_install_cli(cmd, **kwargs)
    if orchestrator_type == 'dcos':
        return dcos_install_cli(cmd, **kwargs)
    raise CLIError('Unsupported orchestrator type {} for install-cli'.format(orchestrator_type))
def _ssl_context():
    """Return an SSL context; a permissive one on old Pythons / Windows Cloud Shell."""
    needs_legacy_context = sys.version_info < (3, 4) or (
        in_cloud_console() and platform.system() == 'Windows')
    if not needs_legacy_context:
        return ssl.create_default_context()
    try:
        # ssl.PROTOCOL_TLS was added in python 2.7.13 and 3.6
        return ssl.SSLContext(ssl.PROTOCOL_TLS)
    except AttributeError:
        return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
def _urlretrieve(url, filename):
    """Download *url* and write the response body to *filename*."""
    response = urlopen(url, context=_ssl_context())
    with open(filename, "wb") as out_file:
        out_file.write(response.read())
def _unzip(src, dest):
    """Extract the zip archive *src* into the directory *dest*."""
    logger.debug('Extracting %s to %s.', src, dest)
    if platform.system() not in ('Linux', 'Darwin', 'Windows'):
        raise CLIError('The current system is not supported.')
    import zipfile
    with zipfile.ZipFile(src, 'r') as archive:
        archive.extractall(dest)
def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
    """
    Download the DC/OS command line client from Mesosphere.

    :param install_location: full path to write the binary to; required.
    :param client_version: DC/OS release train to fetch (default '1.8').
    :raises CLIError: when no install location is given, the platform is
        unsupported, or the download fails.
    """
    system = platform.system()

    if not install_location:
        raise CLIError(
            "No install location specified and it could not be determined from the current platform '{}'".format(
                system))
    base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
    if system == 'Windows':
        file_url = base_url.format('windows', client_version, 'dcos.exe')
    elif system == 'Linux':
        # TODO Support ARM CPU here
        file_url = base_url.format('linux', client_version, 'dcos')
    elif system == 'Darwin':
        file_url = base_url.format('darwin', client_version, 'dcos')
    else:
        # BUGFIX: previously raised "Proxy server (...) does not exist on the
        # cluster", an error message copy-pasted from the DC/OS tunnel code.
        raise CLIError("Unsupported system '{}'.".format(system))

    logger.warning('Downloading client to %s', install_location)
    try:
        _urlretrieve(file_url, install_location)
        # Mark the downloaded binary executable for user/group/other.
        os.chmod(install_location,
                 os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    except IOError as err:
        raise CLIError('Connection error while attempting to download client ({})'.format(err))
def k8s_install_cli(cmd, client_version='latest', install_location=None, base_src_url=None,
                    kubelogin_version='latest', kubelogin_install_location=None,
                    kubelogin_base_src_url=None):
    """Install both kubectl and kubelogin in one call."""
    k8s_install_kubectl(cmd, client_version=client_version,
                        install_location=install_location,
                        source_url=base_src_url)
    k8s_install_kubelogin(cmd, client_version=kubelogin_version,
                          install_location=kubelogin_install_location,
                          source_url=kubelogin_base_src_url)
def k8s_install_kubectl(cmd, client_version='latest', install_location=None, source_url=None):
    """
    Install kubectl, a command-line interface for Kubernetes clusters.

    :param client_version: release to fetch; 'latest' resolves via the
        channel's stable.txt file.
    :param install_location: full path (directory + filename) for the binary.
    :param source_url: alternate release mirror; defaults to the upstream
        Kubernetes release bucket (or the Azure China mirror on that cloud).
    """
    if not source_url:
        source_url = "https://storage.googleapis.com/kubernetes-release/release"
        cloud_name = cmd.cli_ctx.cloud.name
        if cloud_name.lower() == 'azurechinacloud':
            source_url = 'https://mirror.azure.cn/kubernetes/kubectl'

    if client_version == 'latest':
        context = _ssl_context()
        version = urlopen(source_url + '/stable.txt', context=context).read()
        client_version = version.decode('UTF-8').strip()
    else:
        client_version = "v%s" % client_version

    file_url = ''
    system = platform.system()
    base_url = source_url + '/{}/bin/{}/amd64/{}'

    # ensure installation directory exists
    install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location)
    if not os.path.exists(install_dir):
        os.makedirs(install_dir)

    if system == 'Windows':
        file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
    elif system == 'Linux':
        # TODO: Support ARM CPU here
        file_url = base_url.format(client_version, 'linux', 'kubectl')
    elif system == 'Darwin':
        file_url = base_url.format(client_version, 'darwin', 'kubectl')
    else:
        # BUGFIX: previously raised "Proxy server (...) does not exist on the
        # cluster", an error message copy-pasted from the DC/OS tunnel code.
        raise CLIError("Unsupported system '{}'.".format(system))

    logger.warning('Downloading client to "%s" from "%s"', install_location, file_url)
    try:
        _urlretrieve(file_url, install_location)
        os.chmod(install_location,
                 os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    except IOError as ex:
        raise CLIError('Connection error while attempting to download client ({})'.format(ex))

    if system == 'Windows':  # be verbose, as the install_location likely not in Windows's search PATHs
        env_paths = os.environ['PATH'].split(';')
        found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None)
        if not found:
            # pylint: disable=logging-format-interpolation
            logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
                           '    1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
                           'This is good for the current command session.\n'
                           '    2. Update system PATH environment variable by following '
                           '"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
                           'You only need to do it once'.format(install_dir, cli))
    else:
        logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
                       install_dir, cli)
def k8s_install_kubelogin(cmd, client_version='latest', install_location=None, source_url=None):
    """
    Install kubelogin, a client-go credential (exec) plugin implementing azure authentication.

    :param client_version: kubelogin release to install, or 'latest' to resolve the
        newest release tag from GitHub (or from the Azure China mirror).
    :param install_location: full destination path (directory + file name) for the
        binary; the directory is created if it does not exist.
    :param source_url: optional override for the download base URL.
    :raises CLIError: on an unsupported platform or a download failure.
    """
    cloud_name = cmd.cli_ctx.cloud.name
    if not source_url:
        source_url = 'https://github.com/Azure/kubelogin/releases/download'
        if cloud_name.lower() == 'azurechinacloud':
            source_url = 'https://mirror.azure.cn/kubernetes/kubelogin'

    if client_version == 'latest':
        # resolve the newest release tag from the GitHub API (or the mirror)
        context = _ssl_context()
        latest_release_url = 'https://api.github.com/repos/Azure/kubelogin/releases/latest'
        if cloud_name.lower() == 'azurechinacloud':
            latest_release_url = 'https://mirror.azure.cn/kubernetes/kubelogin/latest'
        latest_release = urlopen(latest_release_url, context=context).read()
        client_version = json.loads(latest_release)['tag_name'].strip()
    else:
        client_version = "v%s" % client_version

    base_url = source_url + '/{}/kubelogin.zip'
    file_url = base_url.format(client_version)

    # ensure installation directory exists
    install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location)
    if not os.path.exists(install_dir):
        os.makedirs(install_dir)

    system = platform.system()
    if system == 'Windows':
        sub_dir, binary_name = 'windows_amd64', 'kubelogin.exe'
    elif system == 'Linux':
        # TODO: Support ARM CPU here
        sub_dir, binary_name = 'linux_amd64', 'kubelogin'
    elif system == 'Darwin':
        sub_dir, binary_name = 'darwin_amd64', 'kubelogin'
    else:
        # fixed: the previous message ('Proxy server ({}) does not exist on the
        # cluster.') was copy-pasted from an unrelated command and did not
        # describe this failure at all
        raise CLIError('The {} platform is not currently supported.'.format(system))

    with tempfile.TemporaryDirectory() as tmp_dir:
        try:
            download_path = os.path.join(tmp_dir, 'kubelogin.zip')
            logger.warning('Downloading client to "%s" from "%s"', download_path, file_url)
            _urlretrieve(file_url, download_path)
        except IOError as ex:
            raise CLIError('Connection error while attempting to download client ({})'.format(ex))
        _unzip(download_path, tmp_dir)
        download_path = os.path.join(tmp_dir, 'bin', sub_dir, binary_name)
        shutil.move(download_path, install_location)
    # make the binary executable for everyone
    os.chmod(install_location, os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    if system == 'Windows':  # be verbose, as the install_location likely not in Windows's search PATHs
        env_paths = os.environ['PATH'].split(';')
        found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None)
        if not found:
            # pylint: disable=logging-format-interpolation
            logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
                           ' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
                           'This is good for the current command session.\n'
                           ' 2. Update system PATH environment variable by following '
                           '"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
                           'You only need to do it once'.format(install_dir, cli))
    else:
        logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
                       install_dir, cli)
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
    """Create an AAD application and its service principal, retrying around AAD replication lag.

    :return: tuple of (service principal app id, aad session key), or
        (False, aad session key) if the service principal could not be created
        after 10 attempts.
    """
    # use get_progress_controller
    hook = cli_ctx.get_progress_controller(True)
    # fixed: the keyword was misspelled 'messsage', so the progress message was
    # silently dropped (or rejected) instead of being displayed
    hook.add(message='Creating service principal', value=0, total_val=1.0)
    logger.info('Creating service principal')
    # always create application with 5 years expiration
    start_date = datetime.datetime.utcnow()
    end_date = start_date + relativedelta(years=5)
    result, aad_session_key = create_application(rbac_client.applications, name, url, [url], password=client_secret,
                                                 start_date=start_date, end_date=end_date)
    service_principal = result.app_id  # pylint: disable=no-member
    for x in range(0, 10):
        hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0)
        try:
            create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client)
            break
        # TODO figure out what exception AAD throws here sometimes.
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(ex)
            # back off progressively before the next attempt
            time.sleep(2 + 2 * x)
    else:
        # loop exhausted without a successful creation
        return False, aad_session_key
    hook.add(message='Finished service principal creation', value=1.0, total_val=1.0)
    logger.info('Finished service principal creation')
    return service_principal, aad_session_key
def _add_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None):
    """Assign *role* to a principal, retrying to ride out AAD propagation delays.

    Returns True once the assignment is created (or already exists), or False
    after exhausting all retry attempts.
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0)
    logger.info('Waiting for AAD role to propagate')
    assigned = False
    for attempt in range(10):
        hook.add(message='Waiting for AAD role to propagate', value=0.1 * attempt, total_val=1.0)
        try:
            # TODO: break this out into a shared utility library
            create_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal, scope=scope)
            assigned = True
            break
        except CloudError as ex:
            if ex.message == 'The role assignment already exists.':
                assigned = True
                break
            logger.info(ex.message)
        except:  # pylint: disable=bare-except
            pass
        # progressively longer back-off between attempts
        time.sleep(delay + delay * attempt)
    if not assigned:
        return False
    hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
    logger.info('AAD role propagation done')
    return True
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
                            scope=None, include_inherited=False, yes=None):
    """Delete role assignments, either by explicit ids or by assignee/role/scope filters.

    :param ids: explicit assignment ids; mutually exclusive with the filter args.
    :param yes: skip the interactive confirmation when no filters are given
        (i.e. when deleting everything under the subscription).
    :raises CLIError: when ids are combined with other filter parameters.
    """
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions
    ids = ids or []
    if ids:
        if assignee or role or resource_group_name or scope or include_inherited:
            raise CLIError('When assignment ids are used, other parameter values are not required')
        for i in ids:
            assignments_client.delete_by_id(i)
        return

    # With no filters at all, confirm before wiping every assignment under the
    # subscription.  (fixed: 'assignee' was listed twice in this check)
    if not any([ids, assignee, role, resource_group_name, scope, yes]):
        msg = 'This will delete all role assignments under the subscription. Are you sure?'
        if not prompt_y_n(msg, default="n"):
            return

    scope = _build_role_scope(resource_group_name, scope,
                              assignments_client.config.subscription_id)
    assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
                                           scope, assignee, role, include_inherited,
                                           include_groups=False)
    if assignments:
        for a in assignments:
            assignments_client.delete_by_id(a.id)
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
    """Delete a role assignment, retrying to ride out AAD propagation delays.

    Returns True on success, or False after exhausting all retry attempts.
    User-facing CLIErrors are re-raised immediately.
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
    logger.info('Waiting for AAD role to delete')
    deleted = False
    for attempt in range(10):
        hook.add(message='Waiting for AAD role to delete', value=0.1 * attempt, total_val=1.0)
        try:
            delete_role_assignments(cli_ctx,
                                    role=role,
                                    assignee=service_principal,
                                    scope=scope)
            deleted = True
            break
        except CLIError:
            # user-facing errors are not retryable
            raise
        except CloudError as ex:
            logger.info(ex)
            # progressively longer back-off between attempts
            time.sleep(delay + delay * attempt)
    if not deleted:
        return False
    hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
    logger.info('AAD role deletion done')
    return True
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
                             scope, assignee, role, include_inherited, include_groups):
    """List role assignments matching the given scope/assignee/role filters.

    :param include_inherited: also match assignments whose scope is a parent
        of *scope* (matched via regex, see note below).
    :param include_groups: when filtering by assignee, use the server-side
        "assignedTo" filter (which follows group membership) instead of a
        plain principalId equality filter.
    """
    assignee_object_id = None
    if assignee:
        assignee_object_id = _resolve_object_id(cli_ctx, assignee)

    # always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
    if scope:
        assignments = list(assignments_client.list_for_scope(
            scope=scope, filter='atScope()'))
    elif assignee_object_id:
        if include_groups:
            f = "assignedTo('{}')".format(assignee_object_id)
        else:
            f = "principalId eq '{}'".format(assignee_object_id)
        assignments = list(assignments_client.list(filter=f))
    else:
        assignments = list(assignments_client.list())

    if assignments:
        # keep assignments whose scope matches; precedence here is
        # (not scope) or (include_inherited and <prefix match>) or (<exact match>).
        # NOTE(review): the assignment's scope is used as the regex *pattern*
        # against the requested scope, so inherited (parent) scopes match as a
        # prefix — confirm before restructuring this expression.
        assignments = [a for a in assignments if (
            not scope or
            include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
            _get_role_property(a, 'scope').lower() == scope.lower()
        )]

        if role:
            role_id = _resolve_role_id(role, scope, definitions_client)
            assignments = [i for i in assignments if _get_role_property(
                i, 'role_definition_id') == role_id]

        if assignee_object_id:
            assignments = [i for i in assignments if _get_role_property(
                i, 'principal_id') == assignee_object_id]

    return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
def list_acs_locations(cmd, client):
    """List the Azure regions where ACS is available, grouped by rollout status."""
    locations = {}
    locations["productionRegions"] = regions_in_prod
    locations["previewRegions"] = regions_in_preview
    return locations
def _generate_windows_profile(windows, admin_username, admin_password):
if windows:
if not admin_password:
raise CLIError('--admin-password is required.')
if len(admin_password) < 6:
raise CLIError('--admin-password must be at least 6 characters')
windows_profile = {
"adminUsername": admin_username,
"adminPassword": admin_password,
}
return windows_profile
return None
def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
                                  master_vm_size, master_osdisk_size, master_vnet_subnet_id,
                                  master_first_consecutive_static_ip, master_storage_profile):
    """Build the masterProfile template section, layering user overrides on API-version defaults."""
    defaults = {
        "count": int(master_count),
        "dnsPrefix": dns_name_prefix + 'mgmt',
    }
    if api_version == "2017-07-01":
        # the newer API surface exposes additional master configuration knobs
        defaults = _update_dict(defaults, {
            "count": int(master_count),
            "dnsPrefix": dns_name_prefix + 'mgmt',
            "vmSize": master_vm_size,
            "osDiskSizeGB": int(master_osdisk_size),
            "vnetSubnetID": master_vnet_subnet_id,
            "firstConsecutiveStaticIP": master_first_consecutive_static_ip,
            "storageProfile": master_storage_profile,
        })
    if master_profile:
        # a user-supplied profile overrides the defaults key by key
        return _update_dict(defaults, master_profile)
    return defaults
def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
                                  agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
                                  agent_ports, agent_storage_profile):
    """Build the agentPoolProfiles template list, layering user overrides on API-version defaults."""
    defaults = {
        "count": int(agent_count),
        "vmSize": agent_vm_size,
        "osType": os_type,
        "dnsPrefix": dns_name_prefix + 'agent',
    }
    if api_version == "2017-07-01":
        # the newer API surface exposes additional agent configuration knobs
        defaults = _update_dict(defaults, {
            "count": int(agent_count),
            "vmSize": agent_vm_size,
            "osDiskSizeGB": int(agent_osdisk_size),
            "osType": os_type,
            "dnsPrefix": dns_name_prefix + 'agent',
            "vnetSubnetID": agent_vnet_subnet_id,
            "ports": agent_ports,
            "storageProfile": agent_storage_profile,
        })
    if agent_profiles is None:
        return [_update_dict(defaults, {"name": "agentpool0"})]
    # override agentPoolProfiles by using the passed in agent_profiles;
    # honor a user-specified dnsPrefix, otherwise derive one per pool index
    # to avoid duplicate dns names
    profiles = []
    for idx, overrides in enumerate(agent_profiles):
        seeded = _update_dict({"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, overrides)
        profiles.append(_update_dict(defaults, seeded))
    return profiles
def _generate_outputs(name, orchestrator_type, admin_username):
# define outputs
outputs = {
"masterFQDN": {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long
},
"sshMaster0": {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long
},
}
if orchestrator_type.lower() != "kubernetes":
outputs["agentFQDN"] = {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long
}
# override sshMaster0 for non-kubernetes scenarios
outputs["sshMaster0"] = {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long
}
return outputs
def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
properties = {
"orchestratorProfile": {
"orchestratorType": orchestrator_type,
},
"masterProfile": master_pool_profile,
"agentPoolProfiles": agent_pool_profiles,
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": ssh_key_value
}
]
},
"adminUsername": admin_username
},
}
if api_version == "2017-07-01":
properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version
if windows_profile is not None:
properties["windowsProfile"] = windows_profile
return properties
def _get_user_assigned_identity_client_id(cli_ctx, resource_id):
    """Resolve a user-assigned managed identity resource id to its AAD client id.

    :raises InvalidArgumentValueError: if *resource_id* does not look like a
        user-assigned identity resource id.
    :raises ResourceNotFoundError: if the identity does not exist.
    :raises ClientRequestError: on other service errors.
    """
    pattern = '/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.managedidentity/userassignedidentities/(.*)'  # pylint: disable=line-too-long
    resource_id = resource_id.lower()
    match = re.search(pattern, resource_id)
    if not match:
        raise InvalidArgumentValueError("Cannot parse identity name from provided resource id {}.".format(resource_id))
    subscription_id, resource_group_name, identity_name = match.groups()
    msi_client = get_msi_client(cli_ctx, subscription_id)
    try:
        identity = msi_client.user_assigned_identities.get(resource_group_name=resource_group_name,
                                                           resource_name=identity_name)
    except CloudError as ex:
        if 'was not found' in ex.message:
            raise ResourceNotFoundError("Identity {} not found.".format(resource_id))
        raise ClientRequestError(ex.message)
    return identity.client_id
# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
               location=None, admin_username="azureuser", api_version=None, master_profile=None,
               master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
               master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
               agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
               agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
               orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None,
               windows=False, admin_password="", generate_ssh_keys=False,  # pylint: disable=unused-argument
               validate=False, no_wait=False):
    """Create a new Acs.
    :param resource_group_name: The name of the resource group. The name
    is case insensitive.
    :type resource_group_name: str
    :param deployment_name: The name of the deployment.
    :type deployment_name: str
    :param dns_name_prefix: Sets the Domain name prefix for the cluster.
    The concatenation of the domain name and the regionalized DNS zone
    make up the fully qualified domain name associated with the public
    IP address.
    :type dns_name_prefix: str
    :param name: Resource name for the container service.
    :type name: str
    :param ssh_key_value: Configure all linux machines with the SSH RSA
    public key string.  Your key should include three parts, for example
    'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm
    :type ssh_key_value: str
    :param content_version: If included it must match the ContentVersion
    in the template.
    :type content_version: str
    :param admin_username: User name for the Linux Virtual Machines.
    :type admin_username: str
    :param api_version: ACS API version to use
    :type api_version: str
    :param master_profile: MasterProfile used to describe master pool
    :type master_profile: dict
    :param master_vm_size: The size of master pool Virtual Machine
    :type master_vm_size: str
    :param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine
    :type master_osdisk_size: int
    :param master_count: The number of masters for the cluster.
    :type master_count: int
    :param master_vnet_subnet_id: The vnet subnet id for master pool
    :type master_vnet_subnet_id: str
    :param master_storage_profile: The storage profile used for master pool.
    Possible value could be StorageAccount, ManagedDisk.
    :type master_storage_profile: str
    :param agent_profiles: AgentPoolProfiles used to describe agent pools
    :type agent_profiles: dict
    :param agent_vm_size: The size of the Virtual Machine.
    :type agent_vm_size: str
    :param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine
    :type agent_osdisk_size: int
    :param agent_vnet_subnet_id: The vnet subnet id for master pool
    :type agent_vnet_subnet_id: str
    :param agent_ports: the ports exposed on the agent pool
    :type agent_ports: list
    :param agent_storage_profile: The storage profile used for agent pool.
    Possible value could be StorageAccount, ManagedDisk.
    :type agent_storage_profile: str
    :param location: Location for VM resources.
    :type location: str
    :param orchestrator_type: The type of orchestrator used to manage the
    applications on the cluster.
    :type orchestrator_type: str or :class:`orchestratorType
    <Default.models.orchestratorType>`
    :param tags: Tags object.
    :type tags: object
    :param windows: If true, the cluster will be built for running Windows container.
    :type windows: bool
    :param admin_password: The adminstration password for Windows nodes. Only available if --windows=true
    :type admin_password: str
    :param bool raw: returns the direct response alongside the
    deserialized response
    :rtype:
    :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
    instance that returns :class:`DeploymentExtended
    <Default.models.DeploymentExtended>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
    if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # validate inputs before doing any remote work
    if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value):
        raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))

    subscription_id = get_subscription_id(cmd.cli_ctx)
    if not dns_name_prefix:
        # derive a unique default prefix from name/rg/subscription
        dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)

    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    if location is None:
        location = rg_location

    # if api-version is not specified, or specified in a version not supported
    # override based on location
    if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]:
        if location in regions_in_preview:
            api_version = "2017-07-01"  # 2017-07-01 supported in the preview locations
        else:
            api_version = "2017-01-31"  # 2017-01-31 applied to other locations

    if orchestrator_type.lower() == 'kubernetes':
        # Kubernetes needs a service principal; create or reuse one
        principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
                                                  dns_name_prefix, location, name)
        client_secret = principal_obj.get("client_secret")
        service_principal = principal_obj.get("service_principal")

    elif windows:
        raise CLIError('--windows is only supported for Kubernetes clusters')

    # set location if void
    if not location:
        location = '[resourceGroup().location]'

    # set os_type
    os_type = 'Linux'
    if windows:
        os_type = 'Windows'

    # set agent_ports if void
    if not agent_ports:
        agent_ports = []

    # get windows_profile
    windows_profile = _generate_windows_profile(windows, admin_username, admin_password)

    # The resources.properties fields should match with ContainerServices' api model
    master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
                                                        master_vm_size, master_osdisk_size, master_vnet_subnet_id,
                                                        master_first_consecutive_static_ip, master_storage_profile)

    agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
                                                        agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
                                                        agent_ports, agent_storage_profile)

    outputs = _generate_outputs(name, orchestrator_type, admin_username)

    properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
                                      agent_pool_profiles, ssh_key_value, admin_username, windows_profile)

    # assemble the ARM deployment template around the single ACS resource
    resource = {
        "apiVersion": api_version,
        "location": location,
        "type": "Microsoft.ContainerService/containerServices",
        "name": name,
        "tags": tags,
        "properties": properties,
    }
    template = {
        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
        "contentVersion": "1.0.0.0",
        "resources": [
            resource,
        ],
        "outputs": outputs,
    }
    params = {}
    if service_principal is not None and client_secret is not None:
        # pass the secret as a secureString template parameter so it never
        # appears in the template body itself
        properties["servicePrincipalProfile"] = {
            "clientId": service_principal,
            "secret": "[parameters('clientSecret')]",
        }
        template["parameters"] = {
            "clientSecret": {
                "type": "secureString",
                "metadata": {
                    "description": "The client secret for the service principal"
                }
            }
        }
        params = {
            "clientSecret": {
                "value": client_secret
            }
        }

    # Due to SPN replication latency, we do a few retries here
    max_retry = 30
    retry_exception = Exception(None)
    for _ in range(0, max_retry):
        try:
            return _invoke_deployment(cmd, resource_group_name, deployment_name,
                                      template, params, validate, no_wait)
        except CloudError as ex:
            retry_exception = ex
            # only retry on errors consistent with SPN propagation lag
            if 'is not valid according to the validation procedure' in ex.message or \
               'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
               'not found in Active Directory tenant' in ex.message:
                time.sleep(3)
            else:
                raise ex
    raise retry_exception
def store_acs_service_principal(subscription_id, client_secret, service_principal,
                                file_name='acsServicePrincipal.json'):
    """Persist the service principal and secret for *subscription_id* in the CLI config dir.

    The file is created (or truncated) with owner-only permissions (0600)
    since it contains a secret.
    """
    entry = {}
    if client_secret:
        entry['client_secret'] = client_secret
    if service_principal:
        entry['service_principal'] = service_principal

    config_path = os.path.join(get_config_dir(), file_name)
    all_principals = load_service_principals(config_path=config_path) or {}
    all_principals[subscription_id] = entry
    fd = os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600)
    with os.fdopen(fd, 'w+') as sp_file:
        json.dump(all_principals, sp_file)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
    """Return the stored service-principal entry for *subscription_id*, or None."""
    config_path = os.path.join(get_config_dir(), file_name)
    config = load_service_principals(config_path)
    return config.get(subscription_id) if config else None
def load_service_principals(config_path):
    """Parse the stored service-principal JSON file; return None if missing or unreadable."""
    if not os.path.exists(config_path):
        return None
    descriptor = os.open(config_path, os.O_RDONLY)
    try:
        with os.fdopen(descriptor) as config_file:
            return shell_safe_json_parse(config_file.read())
    except:  # pylint: disable=bare-except
        # best-effort: any read/parse failure is treated as "no stored principals"
        return None
def _invoke_deployment(cmd, resource_group_name, deployment_name, template, parameters, validate, no_wait,
                       subscription_id=None):
    """Validate or start an ARM deployment of *template* with *parameters*.

    Handles both the pre- and post-2019-10-01 resource SDK shapes: the newer
    SDK wraps DeploymentProperties in a Deployment model and validates via a
    long-running operation.

    :param validate: only validate the template instead of deploying it.
    :param no_wait: return immediately without polling the deployment.
    """
    from azure.cli.core.profiles import ResourceType
    DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
    smc = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
                                  subscription_id=subscription_id).deployments
    if validate:
        # dump the full template at info level to aid debugging validation failures
        logger.info('==== BEGIN TEMPLATE ====')
        logger.info(json.dumps(template, indent=2))
        logger.info('==== END TEMPLATE ====')

    if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
        # 2019-10-01+ SDKs take a Deployment wrapper and validate asynchronously
        Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
        deployment = Deployment(properties=properties)
        if validate:
            validation_poller = smc.validate(resource_group_name, deployment_name, deployment)
            return LongRunningOperation(cmd.cli_ctx)(validation_poller)
        return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, deployment)
    # older SDKs accept the properties object directly and validate synchronously
    if validate:
        return smc.validate(resource_group_name, deployment_name, properties)
    return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties)
def k8s_get_credentials(cmd, client, name, resource_group_name,
                        path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
                        ssh_key_file=None,
                        overwrite_existing=False):
    """Download and install kubectl credentials from the cluster master.

    :param name: The name of the cluster.
    :type name: str
    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param path: Where to install the kubectl config file
    :type path: str
    :param ssh_key_file: Path to an SSH key file to use
    :type ssh_key_file: str
    """
    cluster_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _k8s_get_credentials_internal(name, cluster_info, path, ssh_key_file, overwrite_existing)
def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing):
    """Fetch the kubeconfig from the first master over SSH and merge it into *path*."""
    if ssh_key_file is not None and not os.path.isfile(ssh_key_file):
        raise CLIError('Private key file {} does not exist'.format(ssh_key_file))

    dns_prefix = acs_info.master_profile.dns_prefix  # pylint: disable=no-member
    location = acs_info.location  # pylint: disable=no-member
    user = acs_info.linux_profile.admin_username  # pylint: disable=no-member
    _mkdir_p(os.path.dirname(path))

    # pick a non-clobbering destination when *path* already exists
    path_candidate = path
    suffix = 0
    while os.path.exists(path_candidate):
        suffix += 1
        path_candidate = '{}-{}-{}'.format(path, name, suffix)

    # TODO: this only works for public cloud, need other casing for national clouds
    acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location),
                           '.kube/config', path_candidate, key_filename=ssh_key_file)

    # merge into the existing kubeconfig when a temporary destination was used
    if path_candidate != path:
        try:
            merge_kubernetes_configurations(path, path_candidate, overwrite_existing)
        except yaml.YAMLError as exc:
            logger.warning('Failed to merge credentials to kube config file: %s', exc)
            logger.warning('The credentials have been saved to %s', path_candidate)
def _handle_merge(existing, addition, key, replace):
    # Merge addition[key] (a kubeconfig list such as 'clusters'/'users'/'contexts')
    # into existing[key], resolving same-name collisions:
    #   - if *replace* is set, or the entries are identical, the old entry is
    #     silently dropped in favor of the new one;
    #   - otherwise the user is prompted (when a TTY is available); declining,
    #     or having no TTY, raises CLIError.
    # NOTE(review): existing[key].remove(j) runs while iterating existing[key],
    # which appears to rely on names being unique within a kubeconfig section --
    # confirm before restructuring these loops.
    if not addition.get(key, False):
        return
    if not existing.get(key):
        # nothing to merge against; take the incoming list wholesale
        existing[key] = addition[key]
        return

    for i in addition[key]:
        for j in existing[key]:
            # entries without a 'name' cannot collide; skip them
            if not i.get('name', False) or not j.get('name', False):
                continue
            if i['name'] == j['name']:
                if replace or i == j:
                    existing[key].remove(j)
                else:
                    msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
                    overwrite = False
                    try:
                        overwrite = prompt_y_n(msg.format(i['name']))
                    except NoTTYException:
                        pass
                    if overwrite:
                        existing[key].remove(j)
                    else:
                        msg = 'A different object named {} already exists in {} in your kubeconfig file.'
                        raise CLIError(msg.format(i['name'], key))
        existing[key].append(i)
def load_kubernetes_configuration(filename):
    """Load a kubeconfig YAML file, translating common failures into CLIError."""
    try:
        with open(filename) as stream:
            return yaml.safe_load(stream)
    except (IOError, OSError) as ex:
        # a missing file gets a friendly message; other OS errors propagate
        if getattr(ex, 'errno', 0) != errno.ENOENT:
            raise
        raise CLIError('{} does not exist'.format(filename))
    except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
        raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
    """Merge the kubeconfig in *addition_file* into *existing_file*.

    :param replace: overwrite same-named clusters/users/contexts without prompting.
    :param context_name: optional name applied to the incoming context/cluster.
    :raises CLIError: if the addition file cannot be loaded, or a name
        conflict is left unresolved.
    """
    existing = load_kubernetes_configuration(existing_file)
    addition = load_kubernetes_configuration(addition_file)

    # fixed: validate *addition* before dereferencing it; previously a None
    # addition combined with context_name crashed with TypeError on
    # addition['contexts'] before this friendly error could be raised
    if addition is None:
        raise CLIError('failed to load additional configuration from {}'.format(addition_file))

    if context_name is not None:
        addition['contexts'][0]['name'] = context_name
        addition['contexts'][0]['context']['cluster'] = context_name
        addition['clusters'][0]['name'] = context_name
        addition['current-context'] = context_name

    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue

    if existing is None:
        existing = addition
    else:
        _handle_merge(existing, addition, 'clusters', replace)
        _handle_merge(existing, addition, 'users', replace)
        _handle_merge(existing, addition, 'contexts', replace)
        existing['current-context'] = addition['current-context']

    # check that ~/.kube/config is only read- and writable by its owner
    if platform.system() != 'Windows':
        existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode))
        if not existing_file_perms.endswith('600'):
            logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
                           existing_file, existing_file_perms)

    with open(existing_file, 'w+') as stream:
        yaml.safe_dump(existing, stream, default_flow_style=False)

    current_context = addition.get('current-context', 'UNKNOWN')
    msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
    print(msg)
def _get_host_name(acs_info):
"""
Gets the FQDN from the acs_info object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info is None:
raise CLIError('Missing acs_info')
if acs_info.master_profile is None:
raise CLIError('Missing master_profile')
if acs_info.master_profile.fqdn is None:
raise CLIError('Missing fqdn')
return acs_info.master_profile.fqdn
def _get_username(acs_info):
"""
Gets the admin user name from the Linux profile of the ContainerService object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info.linux_profile is not None:
return acs_info.linux_profile.admin_username
return None
def _get_acs_info(cli_ctx, name, resource_group_name):
    """
    Gets the ContainerService object from Azure REST API.

    :param name: ACS resource name
    :type name: String
    :param resource_group_name: Resource group name
    :type resource_group_name: String
    """
    services_client = cf_container_services(cli_ctx, None)
    return services_client.get(resource_group_name, name)
def _rand_str(n):
"""
Gets a random string
"""
choices = string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(choices) for _ in range(n))
def _mkdir_p(path):
# http://stackoverflow.com/a/600612
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
    """Scale the first agent pool of an existing container service to *new_agent_count*."""
    acs = client.get(resource_group_name, container_service_name)
    acs.agent_pool_profiles[0].count = new_agent_count  # pylint: disable=no-member

    # null out the service principal because otherwise validation complains
    if acs.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes:
        acs.service_principal_profile = None

    # null out the windows profile so that validation doesn't complain about not having the admin password
    acs.windows_profile = None

    return client.create_or_update(resource_group_name, container_service_name, acs)
def list_container_services(cmd, client, resource_group_name=None):
    ''' List Container Services. '''
    if resource_group_name:
        services = client.list_by_resource_group(resource_group_name=resource_group_name)
    else:
        services = client.list()
    return list(services)
def show_service_principal(client, identifier):
    """Resolve *identifier* (SPN or object id) and fetch the service principal."""
    resolved_object_id = _resolve_service_principal(client, identifier)
    return client.get(resolved_object_id)
def _resolve_service_principal(client, identifier):
# todo: confirm with graph team that a service principal name must be unique
result = list(client.list(filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier)))
if result:
return result[0].object_id
try:
uuid.UUID(identifier)
return identifier # assume an object id
except ValueError:
raise CLIError("service principal '{}' doesn't exist".format(identifier))
def create_application(client, display_name, homepage, identifier_uris,
                       available_to_other_tenants=False, password=None, reply_urls=None,
                       key_value=None, key_type=None, key_usage=None, start_date=None,
                       end_date=None, required_resource_accesses=None):
    """Create an AAD application registration.

    Returns a tuple of (application, aad session key); the session key is read
    from the 'ocp-aad-session-key' response header.

    :raises CLIError: when the signed-in user lacks directory write permission.
    """
    from azure.graphrbac.models import GraphErrorException
    # build password/key credential models from the raw inputs
    password_creds, key_creds = _build_application_creds(password, key_value, key_type,
                                                         key_usage, start_date, end_date)

    app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
                                                   display_name=display_name,
                                                   identifier_uris=identifier_uris,
                                                   homepage=homepage,
                                                   reply_urls=reply_urls,
                                                   key_credentials=key_creds,
                                                   password_credentials=password_creds,
                                                   required_resource_access=required_resource_accesses)
    try:
        # raw=True exposes the HTTP response so the session-key header can be read
        result = client.create(app_create_param, raw=True)
        return result.output, result.response.headers["ocp-aad-session-key"]
    except GraphErrorException as ex:
        # translate the common permissions failure into actionable guidance
        if 'insufficient privileges' in str(ex).lower():
            link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal'  # pylint: disable=line-too-long
            raise CLIError("Directory permission is needed for the current user to register the application. "
                           "For how to configure, please refer '{}'. Original error: {}".format(link, ex))
        raise
def update_application(client, object_id, display_name, homepage, identifier_uris,
                       available_to_other_tenants=False, password=None, reply_urls=None,
                       key_value=None, key_type=None, key_usage=None, start_date=None,
                       end_date=None, required_resource_accesses=None):
    """Update credentials and reply URLs on an existing AAD application.

    Only the credential sets and reply URLs that were actually supplied are
    patched; other parameters are accepted for signature parity but unused.
    """
    from azure.graphrbac.models import GraphErrorException
    password_creds, key_creds = _build_application_creds(password, key_value, key_type,
                                                         key_usage, start_date, end_date)
    try:
        if key_creds:
            client.update_key_credentials(object_id, key_creds)
        if password_creds:
            client.update_password_credentials(object_id, password_creds)
        if reply_urls:
            client.patch(object_id, ApplicationUpdateParameters(reply_urls=reply_urls))
        return
    except GraphErrorException as err:
        # A non-admin user typically lacks the directory permission to
        # modify applications; surface a pointer to the docs in that case.
        if 'insufficient privileges' in str(err).lower():
            link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal'  # pylint: disable=line-too-long
            raise CLIError("Directory permission is needed for the current user to register the application. "
                           "For how to configure, please refer '{}'. Original error: {}".format(link, err))
        raise
def _build_application_creds(password=None, key_value=None, key_type=None,
                             key_usage=None, start_date=None, end_date=None):
    """Build ``(password_credentials, key_credentials)`` for an AAD application.

    At most one of *password* / *key_value* may be given; string dates are
    parsed, and missing dates default to a one-year window starting now.
    """
    if password and key_value:
        raise CLIError('specify either --password or --key-value, but not both.')

    def _coerce_date(value, fallback):
        # Accept None (use fallback), ISO strings, or datetime objects.
        if not value:
            return fallback()
        if isinstance(value, str):
            return dateutil.parser.parse(value)
        return value

    start_date = _coerce_date(start_date, datetime.datetime.utcnow)
    end_date = _coerce_date(end_date, lambda: start_date + relativedelta(years=1))
    password_creds = None
    key_creds = None
    if password:
        password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
                                             key_id=str(uuid.uuid4()), value=password)]
    elif key_value:
        key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
                                   key_id=str(uuid.uuid4()), usage=key_usage or 'Verify',
                                   type=key_type or 'AsymmetricX509Cert')]
    return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
    """Create a service principal for *identifier*.

    *identifier* may be an application id (GUID), an identifier URI, or —
    when lookups come back empty — an application object id.
    """
    if rbac_client is None:
        rbac_client = get_graph_rbac_management_client(cli_ctx)
    app_id = identifier
    if resolve_app:
        try:
            uuid.UUID(identifier)
            query = "appId eq '{}'".format(identifier)
        except ValueError:
            query = "identifierUris/any(s:s eq '{}')".format(identifier)
        apps = list(rbac_client.applications.list(filter=query))
        if not apps:  # assume we got an object id
            apps = [rbac_client.applications.get(identifier)]
        app_id = apps[0].app_id
    return rbac_client.service_principals.create(
        ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, is_service_principal, resource_group_name=None, scope=None):
    """Create a role assignment, resolving *assignee* only for service principals."""
    return _create_role_assignment(
        cli_ctx, role, assignee,
        resource_group_name=resource_group_name,
        scope=scope,
        resolve_assignee=is_service_principal)
def _create_role_assignment(cli_ctx, role, assignee,
                            resource_group_name=None, scope=None, resolve_assignee=True):
    """Create a role assignment for *assignee* at the computed scope.

    :param role: role-definition GUID or role name (resolved via the
        definitions client).
    :param assignee: principal identifier; resolved through the graph API
        when *resolve_assignee* is True, otherwise used as the object id.
    :param resource_group_name: narrows the scope to a resource group when
        no explicit *scope* is given.
    :param scope: explicit ARM scope; mutually exclusive with
        *resource_group_name* (enforced by _build_role_scope).
    """
    from azure.cli.core.profiles import ResourceType, get_sdk
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions
    # Resolve the final scope first: the role-definition lookup below is
    # performed relative to it.
    scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id)
    role_id = _resolve_role_id(role, scope, definitions_client)
    # If the cluster has service principal resolve the service principal client id to get the object id,
    # if not use MSI object id.
    object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee
    # The parameters model is version-dependent, so fetch it through the SDK
    # profile machinery instead of a direct import.
    RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
                                             'RoleAssignmentCreateParameters', mod='models',
                                             operation_group='role_assignments')
    parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id)
    # Assignment names are arbitrary but must be unique GUIDs.
    assignment_name = uuid.uuid4()
    custom_headers = None
    return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
    """Resolve *assignee* (UPN, SPN, or object id) to a graph object id."""
    client = get_graph_rbac_management_client(cli_ctx)
    result = None
    if '@' in assignee:  # looks like a user principal name
        result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee)))
    if not result:
        result = list(client.service_principals.list(
            filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
    if not result:  # assume an object id, let us verify it
        result = _get_object_stubs(client, [assignee])
    # 2+ matches should never happen, so we only check 'no match' here
    if not result:
        raise CLIError("No matches in graph database for '{}'".format(assignee))
    return result[0].object_id
def _get_object_stubs(graph_client, assignees):
    """Fetch directory-object stubs for the given list of object ids."""
    query = GetObjectsParameters(include_directory_object_references=True,
                                 object_ids=assignees)
    return list(graph_client.objects.get_objects_by_object_ids(query))
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
def subnet_role_assignment_exists(cli_ctx, scope):
    """Return True if a Network Contributor assignment already exists at *scope*."""
    network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
    assignments_client = get_auth_management_client(cli_ctx, scope).role_assignments
    return any(
        a.scope == scope and a.role_definition_id.endswith(network_contributor_role_id)
        for a in assignments_client.list_for_scope(scope=scope, filter='atScope()'))
def aks_check_acr(cmd, client, resource_group_name, name, acr):
    """Check that the AKS cluster *name* can pull images from the registry *acr*.

    Fetches user credentials for the cluster, then runs the ``canipull``
    diagnostic pod on it via ``kubectl run`` and prints the pod's output.

    :raises ValidationError: kubectl is missing or its version can't be read.
    :raises CLIError: the canipull pod failed or produced no output.
    """
    if not which("kubectl"):
        raise ValidationError("Can not find kubectl executable in PATH")
    _, browse_path = tempfile.mkstemp()
    aks_get_credentials(
        cmd, client, resource_group_name, name, admin=False, path=browse_path
    )
    # Get kubectl minor version.
    # NOTE: use a dedicated local for the command string instead of shadowing
    # the `cmd` parameter (the original rebound `cmd` here).
    kubectl_minor_version = -1
    try:
        version_cmd = f"kubectl version -o json --kubeconfig {browse_path}"
        output = subprocess.Popen(version_cmd, shell=True, stdout=subprocess.PIPE)
        jsonS, _ = output.communicate()
        kubectl_version = json.loads(jsonS)
        kubectl_minor_version = int(kubectl_version["clientVersion"]["minor"])
        kubectl_server_minor_version = int(kubectl_version["serverVersion"]["minor"])
        kubectl_server_patch = int(kubectl_version["serverVersion"]["gitVersion"].split(".")[-1])
        if kubectl_server_minor_version < 17 or (kubectl_server_minor_version == 17 and kubectl_server_patch < 14):
            # BUG FIX: the adjacent string literals were missing a separating
            # space and rendered "...96355 formore information."
            logger.warning('There is a known issue for Kubernetes versions < 1.17.14 when connecting to '
                           'ACR using MSI. See https://github.com/kubernetes/kubernetes/pull/96355 for '
                           'more information.')
    except subprocess.CalledProcessError as err:
        raise ValidationError("Could not find kubectl minor version: {}".format(err))
    if kubectl_minor_version == -1:
        raise ValidationError("Failed to get kubectl version")
    podName = "canipull-" + str(uuid.uuid4())
    # Pod overrides: run canipull as root on the host network, with the node's
    # /etc/kubernetes (azure.json) and CA certificates mounted from the host.
    overrides = {
        "spec": {
            "restartPolicy": "Never",
            "hostNetwork": True,
            "containers": [
                {
                    "securityContext": {"runAsUser": 0},
                    "name": podName,
                    "image": CONST_CANIPULL_IMAGE,
                    "args": ["-v6", acr],
                    "stdin": True,
                    "stdinOnce": True,
                    "tty": True,
                    "volumeMounts": [
                        {"name": "azurejson", "mountPath": "/etc/kubernetes"},
                        {"name": "sslcerts", "mountPath": "/etc/ssl/certs"},
                    ],
                }
            ],
            "tolerations": [
                {"key": "CriticalAddonsOnly", "operator": "Exists"},
                {"effect": "NoExecute", "operator": "Exists"},
            ],
            "volumes": [
                {"name": "azurejson", "hostPath": {"path": "/etc/kubernetes"}},
                {"name": "sslcerts", "hostPath": {"path": "/etc/ssl/certs"}},
            ],
        }
    }
    try:
        run_cmd = [
            "kubectl",
            "run",
            "--kubeconfig",
            browse_path,
            "--rm",
            "--quiet",
            "--image",
            CONST_CANIPULL_IMAGE,
            "--overrides",
            json.dumps(overrides),
            "-it",
            podName,
        ]
        # Support kubectl versions < 1.18, which still require a generator.
        if kubectl_minor_version < 18:
            run_cmd += ["--generator=run-pod/v1"]
        output = subprocess.check_output(
            run_cmd,
            universal_newlines=True,
        )
    except subprocess.CalledProcessError as err:
        raise CLIError("Failed to check the ACR: {}".format(err))
    if output:
        print(output)
    else:
        raise CLIError("Failed to check the ACR.")
# pylint: disable=too-many-statements,too-many-branches
def aks_browse(cmd, client, resource_group_name, name, disable_browser=False,
               listen_address='127.0.0.1', listen_port='8001'):
    """Open the Kubernetes dashboard for a cluster, or the portal's resources view.

    On Kubernetes >= 1.19.0, or when the kube-dashboard addon is disabled, the
    Azure portal's Kubernetes resources view is opened instead. Otherwise the
    dashboard pod and port are discovered with kubectl and proxied locally via
    ``kubectl proxy`` on *listen_address*:*listen_port*.
    """
    # verify the kube-dashboard addon was not disabled
    instance = client.get(resource_group_name, name)
    addon_profiles = instance.addon_profiles or {}
    # addon name is case insensitive
    addon_profile = next((addon_profiles[k] for k in addon_profiles
                          if k.lower() == CONST_KUBE_DASHBOARD_ADDON_NAME.lower()),
                         ManagedClusterAddonProfile(enabled=False))
    # open portal view if addon is not enabled or k8s version >= 1.19.0
    if StrictVersion(instance.kubernetes_version) >= StrictVersion('1.19.0') or (not addon_profile.enabled):
        subscription_id = get_subscription_id(cmd.cli_ctx)
        dashboardURL = (
            cmd.cli_ctx.cloud.endpoints.portal +  # Azure Portal URL (https://portal.azure.com for public cloud)
            ('/#resource/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService'
             '/managedClusters/{2}/workloads').format(subscription_id, resource_group_name, name)
        )
        if in_cloud_console():
            logger.warning('To view the Kubernetes resources view, please open %s in a new tab', dashboardURL)
        else:
            logger.warning('Kubernetes resources view on %s', dashboardURL)
        if not disable_browser:
            webbrowser.open_new_tab(dashboardURL)
        return
    # otherwise open the kube-dashboard addon
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')
    _, browse_path = tempfile.mkstemp()
    aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
    # find the dashboard pod's name
    try:
        dashboard_pod = subprocess.check_output(
            ["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
             "--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
            universal_newlines=True)
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not find dashboard pod: {}'.format(err))
    if dashboard_pod:
        # remove any "pods/" or "pod/" prefix from the name
        dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
    else:
        raise CLIError("Couldn't find the Kubernetes dashboard pod.")
    # find the port
    try:
        dashboard_port = subprocess.check_output(
            ["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
             "--selector", "k8s-app=kubernetes-dashboard",
             "--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
        )
        # output format: b"'{port}'"
        dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not find dashboard port: {}'.format(err))
    # use https if dashboard container is using https
    if dashboard_port == 8443:
        protocol = 'https'
    else:
        protocol = 'http'
    proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
    dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url,
                                                                                                        protocol)
    # launch kubectl port-forward locally to access the remote dashboard
    if in_cloud_console():
        # TODO: better error handling here.
        response = requests.post('http://localhost:8888/openport/{0}'.format(listen_port))
        result = json.loads(response.text)
        dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(
            result['url'], protocol)
        term_id = os.environ.get('ACC_TERM_ID')
        if term_id:
            response = requests.post('http://localhost:8888/openLink/{0}'.format(term_id),
                                     json={"url": dashboardURL})
        logger.warning('To view the console, please open %s in a new tab', dashboardURL)
    else:
        logger.warning('Proxy running on %s', proxy_url)
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async(dashboardURL)
    try:
        try:
            subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "proxy", "--address",
                                     listen_address, "--port", listen_port], stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as err:
            # BUG FIX: bytes.find() returns -1 (truthy!) when the substring is
            # absent, so the original bare truthiness test re-ran the proxy for
            # almost every kubectl failure. Compare against -1 explicitly so the
            # no-'--address' fallback only runs when kubectl rejected the flag.
            if err.output.find(b'unknown flag: --address') != -1:
                if listen_address != '127.0.0.1':
                    logger.warning('"--address" is only supported in kubectl v1.13 and later.')
                    logger.warning('The "--listen-address" argument will be ignored.')
                subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy", "--port", listen_port])
    except KeyboardInterrupt:
        # Let command processing finish gracefully after the user presses [Ctrl+C]
        pass
    finally:
        if in_cloud_console():
            requests.post('http://localhost:8888/closeport/8001')
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def _validate_ssh_key(no_ssh_key, ssh_key_value):
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
def _add_monitoring_role_assignment(result, cluster_resource_id, cmd):
    """Grant 'Monitoring Metrics Publisher' on the cluster to the identity the
    monitoring (omsagent) addon runs as.

    Prefers the cluster's service principal when one exists (client_id is not
    the literal 'msi'); otherwise falls back to the addon's user-assigned MSI.
    Only warns (never raises) when no identity is found or the assignment
    cannot be created.
    """
    service_principal_msi_id = None
    # Check if service principal exists, if it does, assign permissions to service principal
    # Else, provide permissions to MSI
    if (
            hasattr(result, 'service_principal_profile') and
            hasattr(result.service_principal_profile, 'client_id') and
            result.service_principal_profile.client_id.lower() != 'msi'
    ):
        logger.info('valid service principal exists, using it')
        service_principal_msi_id = result.service_principal_profile.client_id
        is_service_principal = True
    elif (
            (hasattr(result, 'addon_profiles')) and
            (CONST_MONITORING_ADDON_NAME in result.addon_profiles) and
            (hasattr(result.addon_profiles[CONST_MONITORING_ADDON_NAME], 'identity')) and
            (hasattr(result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity, 'object_id'))
    ):
        logger.info('omsagent MSI exists, using it')
        service_principal_msi_id = result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity.object_id
        is_service_principal = False
    if service_principal_msi_id is not None:
        if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
                                    service_principal_msi_id, is_service_principal, scope=cluster_resource_id):
            logger.warning('Could not create a role assignment for Monitoring addon. '
                           'Are you an Owner on this subscription?')
    else:
        # BUG FIX: the adjacent string literals were missing a separating space
        # and rendered "...MSI for roleassignment".
        logger.warning('Could not find service principal or user assigned MSI for role '
                       'assignment')
def _add_ingress_appgw_addon_role_assignment(result, cmd):
    """Grant the ingress-appgw (AGIC) addon identity the role assignments its
    configuration requires: Contributor on an existing gateway's resource
    group, Network Contributor on a supplied subnet, or Contributor on the
    node vnet when only a subnet CIDR is configured.

    Only warns (never raises) when an assignment cannot be created.
    """
    service_principal_msi_id = None
    # Check if service principal exists, if it does, assign permissions to service principal
    # Else, provide permissions to MSI
    if (
            hasattr(result, 'service_principal_profile') and
            hasattr(result.service_principal_profile, 'client_id') and
            result.service_principal_profile.client_id != 'msi'
    ):
        service_principal_msi_id = result.service_principal_profile.client_id
        is_service_principal = True
    elif (
            (hasattr(result, 'addon_profiles')) and
            (CONST_INGRESS_APPGW_ADDON_NAME in result.addon_profiles) and
            (hasattr(result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME], 'identity')) and
            (hasattr(result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].identity, 'object_id'))
    ):
        service_principal_msi_id = result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].identity.object_id
        is_service_principal = False
    if service_principal_msi_id is not None:
        config = result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config
        from msrestazure.tools import parse_resource_id, resource_id
        # An existing Application Gateway id: grant Contributor on the
        # gateway's whole resource group.
        if CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID in config:
            appgw_id = config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID]
            parsed_appgw_id = parse_resource_id(appgw_id)
            appgw_group_id = resource_id(subscription=parsed_appgw_id["subscription"],
                                         resource_group=parsed_appgw_id["resource_group"])
            if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
                                        service_principal_msi_id, is_service_principal, scope=appgw_group_id):
                logger.warning('Could not create a role assignment for application gateway: %s '
                               'specified in %s addon. '
                               'Are you an Owner on this subscription?', appgw_id, CONST_INGRESS_APPGW_ADDON_NAME)
        # A pre-existing subnet id: grant Network Contributor on that subnet.
        if CONST_INGRESS_APPGW_SUBNET_ID in config:
            subnet_id = config[CONST_INGRESS_APPGW_SUBNET_ID]
            if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
                                        service_principal_msi_id, is_service_principal, scope=subnet_id):
                logger.warning('Could not create a role assignment for subnet: %s '
                               'specified in %s addon. '
                               'Are you an Owner on this subscription?', subnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
        # Only a subnet CIDR configured: grant Contributor on the vnet that
        # contains the first agent pool's subnet (presumably where the addon
        # will create the gateway subnet — TODO confirm against addon docs).
        if CONST_INGRESS_APPGW_SUBNET_CIDR in config:
            if result.agent_pool_profiles[0].vnet_subnet_id is not None:
                parsed_subnet_vnet_id = parse_resource_id(result.agent_pool_profiles[0].vnet_subnet_id)
                vnet_id = resource_id(subscription=parsed_subnet_vnet_id["subscription"],
                                      resource_group=parsed_subnet_vnet_id["resource_group"],
                                      namespace="Microsoft.Network",
                                      type="virtualNetworks",
                                      name=parsed_subnet_vnet_id["name"])
                if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
                                            service_principal_msi_id, is_service_principal, scope=vnet_id):
                    logger.warning('Could not create a role assignment for virtual network: %s '
                                   'specified in %s addon. '
                                   'Are you an Owner on this subscription?', vnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
def _add_virtual_node_role_assignment(cmd, result, vnet_subnet_id):
    """Grant Contributor on the node vnet to the identity the virtual-node
    addon runs as.

    Prefers the cluster's service principal when one exists (client_id is not
    the literal 'msi'); otherwise falls back to the addon's user-assigned MSI.
    Only warns (never raises) when no identity is found or the assignment
    cannot be created.
    """
    # Remove trailing "/subnets/<SUBNET_NAME>" to get the vnet id
    vnet_id = vnet_subnet_id.rpartition('/')[0]
    vnet_id = vnet_id.rpartition('/')[0]
    service_principal_msi_id = None
    is_service_principal = False
    os_type = 'Linux'
    addon_name = CONST_VIRTUAL_NODE_ADDON_NAME + os_type
    # Check if service principal exists, if it does, assign permissions to service principal
    # Else, provide permissions to MSI
    if (
            hasattr(result, 'service_principal_profile') and
            hasattr(result.service_principal_profile, 'client_id') and
            result.service_principal_profile.client_id.lower() != 'msi'
    ):
        logger.info('valid service principal exists, using it')
        service_principal_msi_id = result.service_principal_profile.client_id
        is_service_principal = True
    elif (
            (hasattr(result, 'addon_profiles')) and
            (addon_name in result.addon_profiles) and
            (hasattr(result.addon_profiles[addon_name], 'identity')) and
            (hasattr(result.addon_profiles[addon_name].identity, 'object_id'))
    ):
        logger.info('virtual node MSI exists, using it')
        service_principal_msi_id = result.addon_profiles[addon_name].identity.object_id
        is_service_principal = False
    if service_principal_msi_id is not None:
        if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
                                    service_principal_msi_id, is_service_principal, scope=vnet_id):
            logger.warning('Could not create a role assignment for virtual node addon. '
                           'Are you an Owner on this subscription?')
    else:
        # BUG FIX: the adjacent string literals were missing a separating space
        # and rendered "...MSI for roleassignment".
        logger.warning('Could not find service principal or user assigned MSI for role '
                       'assignment')
# pylint: disable=too-many-statements,too-many-branches
def aks_create(cmd, client, resource_group_name, name, ssh_key_value, # pylint: disable=too-many-locals
dns_name_prefix=None,
location=None,
admin_username="azureuser",
windows_admin_username=None,
windows_admin_password=None,
enable_ahub=False,
kubernetes_version='',
node_vm_size="Standard_DS2_v2",
node_osdisk_type=None,
node_osdisk_size=0,
node_osdisk_diskencryptionset_id=None,
node_count=3,
nodepool_name="nodepool1",
nodepool_tags=None,
nodepool_labels=None,
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
vm_set_type=None,
skip_subnet_role_assignment=False,
enable_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
network_plugin=None,
network_policy=None,
uptime_sla=False,
pod_cidr=None,
service_cidr=None,
dns_service_ip=None,
docker_bridge_address=None,
load_balancer_sku=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
outbound_type=None,
enable_addons=None,
workspace_resource_id=None,
vnet_subnet_id=None,
ppg=None,
max_pods=0,
min_count=None,
max_count=None,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
zones=None,
enable_node_public_ip=False,
node_public_ip_prefix_id=None,
generate_ssh_keys=False, # pylint: disable=unused-argument
api_server_authorized_ip_ranges=None,
enable_private_cluster=False,
private_dns_zone=None,
fqdn_subdomain=None,
enable_managed_identity=True,
assign_identity=None,
attach_acr=None,
enable_aad=False,
aad_admin_group_object_ids=None,
aci_subnet_name=None,
appgw_name=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
no_wait=False,
yes=False):
_validate_ssh_key(no_ssh_key, ssh_key_value)
subscription_id = get_subscription_id(cmd.cli_ctx)
if dns_name_prefix and fqdn_subdomain:
raise MutuallyExclusiveArgumentError('--dns-name-prefix and --fqdn-subdomain cannot be used at same time')
if not dns_name_prefix and not fqdn_subdomain:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
load_balancer_sku = set_load_balancer_sku(load_balancer_sku, kubernetes_version)
if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
raise CLIError('--api-server-authorized-ip-ranges can only be used with standard load balancer')
agent_pool_profile = ManagedClusterAgentPoolProfile(
name=_trim_nodepoolname(nodepool_name), # Must be 12 chars or less before ACS RP adds to it
tags=nodepool_tags,
node_labels=nodepool_labels,
count=int(node_count),
vm_size=node_vm_size,
os_type="Linux",
vnet_subnet_id=vnet_subnet_id,
proximity_placement_group_id=ppg,
availability_zones=zones,
enable_node_public_ip=enable_node_public_ip,
node_public_ip_prefix_id=node_public_ip_prefix_id,
max_pods=int(max_pods) if max_pods else None,
type=vm_set_type,
mode="System"
)
if node_osdisk_size:
agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
if node_osdisk_type:
agent_pool_profile.os_disk_type = node_osdisk_type
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)
linux_profile = None
# LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
if not no_ssh_key:
ssh_config = ContainerServiceSshConfiguration(
public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config)
windows_profile = None
if windows_admin_username or windows_admin_password:
# To avoid that windows_admin_password is set but windows_admin_username is not
if windows_admin_username is None:
try:
from knack.prompting import prompt
windows_admin_username = prompt('windows_admin_username: ')
# The validation for admin_username in ManagedClusterWindowsProfile will fail even if
# users still set windows_admin_username to empty here
except NoTTYException:
raise CLIError('Please specify username for Windows in non-interactive mode.')
if windows_admin_password is None:
try:
windows_admin_password = prompt_pass(
msg='windows-admin-password: ', confirm=True)
except NoTTYException:
raise CLIError(
'Please specify both username and password in non-interactive mode.')
windows_license_type = None
if enable_ahub:
windows_license_type = 'Windows_Server'
windows_profile = ManagedClusterWindowsProfile(
admin_username=windows_admin_username,
admin_password=windows_admin_password,
license_type=windows_license_type)
# If customer explicitly provide a service principal, disable managed identity.
if service_principal and client_secret:
enable_managed_identity = False
# Skip create service principal profile for the cluster if the cluster
# enables managed identity and customer doesn't explicitly provide a service principal.
service_principal_profile = None
principal_obj = None
if not(enable_managed_identity and not service_principal and not client_secret):
principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
service_principal=service_principal, client_secret=client_secret,
subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
fqdn_subdomain=fqdn_subdomain, location=location, name=name)
service_principal_profile = ManagedClusterServicePrincipalProfile(
client_id=principal_obj.get("service_principal"),
secret=principal_obj.get("client_secret"),
key_vault_secret_ref=None)
need_post_creation_vnet_permission_granting = False
if (vnet_subnet_id and not skip_subnet_role_assignment and
not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
# if service_principal_profile is None, then this cluster is an MSI cluster,
# and the service principal does not exist. Two cases:
# 1. For system assigned identity, we just tell user to grant the
# permission after the cluster is created to keep consistent with portal experience.
# 2. For user assigned identity, we can grant needed permission to
# user provided user assigned identity before creating managed cluster.
if service_principal_profile is None and not assign_identity:
msg = ('It is highly recommended to use USER assigned identity '
'(option --assign-identity) when you want to bring your own'
'subnet, which will have no latency for the role assignment to '
'take effect. When using SYSTEM assigned identity, '
'azure-cli will grant Network Contributor role to the '
'system assigned identity after the cluster is created, and '
'the role assignment will take some time to take effect, see '
'https://docs.microsoft.com/en-us/azure/aks/use-managed-identity, '
'proceed to create cluster with system assigned identity?')
if not yes and not prompt_y_n(msg, default="n"):
return None
need_post_creation_vnet_permission_granting = True
else:
scope = vnet_subnet_id
identity_client_id = ""
if assign_identity:
identity_client_id = _get_user_assigned_identity_client_id(cmd.cli_ctx, assign_identity)
else:
identity_client_id = service_principal_profile.client_id
if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
identity_client_id, scope=scope):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
load_balancer_profile = create_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
if attach_acr:
if enable_managed_identity:
if no_wait:
raise CLIError('When --attach-acr and --enable-managed-identity are both specified, '
'--no-wait is not allowed, please wait until the whole operation succeeds.')
# Attach acr operation will be handled after the cluster is created
else:
_ensure_aks_acr(cmd.cli_ctx,
client_id=service_principal_profile.client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
outbound_type = _set_outbound_type(outbound_type, vnet_subnet_id, load_balancer_sku, load_balancer_profile)
network_profile = None
if any([network_plugin, pod_cidr, service_cidr, dns_service_ip,
docker_bridge_address, network_policy]):
if not network_plugin:
raise CLIError('Please explicitly specify the network plugin type')
if pod_cidr and network_plugin == "azure":
raise CLIError('Please use kubenet as the network plugin type when pod_cidr is specified')
network_profile = ContainerServiceNetworkProfile(
network_plugin=network_plugin,
pod_cidr=pod_cidr,
service_cidr=service_cidr,
dns_service_ip=dns_service_ip,
docker_bridge_cidr=docker_bridge_address,
network_policy=network_policy,
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type
)
else:
if load_balancer_sku.lower() == "standard" or load_balancer_profile:
network_profile = ContainerServiceNetworkProfile(
network_plugin="kubenet",
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type,
)
if load_balancer_sku.lower() == "basic":
network_profile = ContainerServiceNetworkProfile(
load_balancer_sku=load_balancer_sku.lower(),
)
addon_profiles = _handle_addons_args(
cmd,
enable_addons,
subscription_id,
resource_group_name,
{},
workspace_resource_id,
aci_subnet_name,
vnet_subnet_id,
appgw_name,
appgw_subnet_cidr,
appgw_id,
appgw_subnet_id,
appgw_watch_namespace,
enable_sgxquotehelper
)
monitoring = False
if CONST_MONITORING_ADDON_NAME in addon_profiles:
monitoring = True
_ensure_container_insights_for_monitoring(cmd, addon_profiles[CONST_MONITORING_ADDON_NAME])
# addon is in the list and is enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in addon_profiles and \
addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
os_type = 'Linux'
enable_virtual_node = False
if CONST_VIRTUAL_NODE_ADDON_NAME + os_type in addon_profiles:
enable_virtual_node = True
aad_profile = None
if enable_aad:
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
raise CLIError('"--enable-aad" cannot be used together with '
'"--aad-client-app-id/--aad-server-app-id/--aad-server-app-secret"')
aad_profile = ManagedClusterAADProfile(
managed=True,
admin_group_object_ids=_parse_comma_separated_list(aad_admin_group_object_ids),
tenant_id=aad_tenant_id
)
else:
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
if aad_tenant_id is None:
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
aad_profile = ManagedClusterAADProfile(
client_app_id=aad_client_app_id,
server_app_id=aad_server_app_id,
server_app_secret=aad_server_app_secret,
tenant_id=aad_tenant_id
)
api_server_access_profile = None
if enable_private_cluster and load_balancer_sku.lower() != "standard":
raise CLIError("Please use standard load balancer for private cluster")
if api_server_authorized_ip_ranges or enable_private_cluster:
api_server_access_profile = _populate_api_server_access_profile(
api_server_authorized_ip_ranges,
enable_private_cluster=enable_private_cluster
)
# Check that both --disable-rbac and --enable-rbac weren't provided
if all([disable_rbac, enable_rbac]):
raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.')
identity = None
if not enable_managed_identity and assign_identity:
raise ArgumentUsageError('--assign-identity can only be specified when --enable-managed-identity is specified')
if enable_managed_identity and not assign_identity:
identity = ManagedClusterIdentity(
type="SystemAssigned"
)
elif enable_managed_identity and assign_identity:
user_assigned_identity = {
assign_identity: ManagedClusterIdentityUserAssignedIdentitiesValue()
}
identity = ManagedClusterIdentity(
type="UserAssigned",
user_assigned_identities=user_assigned_identity
)
mc = ManagedCluster(
location=location,
tags=tags,
dns_prefix=dns_name_prefix,
kubernetes_version=kubernetes_version,
enable_rbac=not disable_rbac,
agent_pool_profiles=[agent_pool_profile],
linux_profile=linux_profile,
windows_profile=windows_profile,
service_principal_profile=service_principal_profile,
network_profile=network_profile,
addon_profiles=addon_profiles,
aad_profile=aad_profile,
auto_scaler_profile=cluster_autoscaler_profile,
api_server_access_profile=api_server_access_profile,
identity=identity,
disk_encryption_set_id=node_osdisk_diskencryptionset_id
)
use_custom_private_dns_zone = False
if private_dns_zone:
if not enable_private_cluster:
raise InvalidArgumentValueError("Invalid private dns zone for public cluster. "
"It should always be empty for public cluster")
mc.api_server_access_profile.private_dns_zone = private_dns_zone
from msrestazure.tools import is_valid_resource_id
if private_dns_zone.lower() != CONST_PRIVATE_DNS_ZONE_SYSTEM:
if is_valid_resource_id(private_dns_zone):
use_custom_private_dns_zone = True
else:
raise InvalidArgumentValueError(private_dns_zone + " is not a valid Azure resource ID.")
if fqdn_subdomain:
if not use_custom_private_dns_zone:
raise ArgumentUsageError("--fqdn-subdomain should only be used for "
"private cluster with custom private dns zone")
mc.fqdn_subdomain = fqdn_subdomain
if uptime_sla:
mc.sku = ManagedClusterSKU(
name="Basic",
tier="Paid"
)
# Add AAD session key to header.
# If principal_obj is None, we will not add this header, this can happen
# when the cluster enables managed identity. In this case, the header is useless
# and that's OK to not add this header
custom_headers = None
if principal_obj:
custom_headers = {'Ocp-Aad-Session-Key': principal_obj.get("aad_session_key")}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
need_pull_for_result = (monitoring or
(enable_managed_identity and attach_acr) or
ingress_appgw_addon_enabled or
enable_virtual_node or
need_post_creation_vnet_permission_granting)
if need_pull_for_result:
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(
resource_group_name=resource_group_name,
resource_name=name,
parameters=mc))
else:
result = sdk_no_wait(no_wait,
client.create_or_update,
resource_group_name=resource_group_name,
resource_name=name,
parameters=mc,
custom_headers=custom_headers)
if monitoring:
cloud_name = cmd.cli_ctx.cloud.name
# add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
# mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
if cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(result, cluster_resource_id, cmd)
if enable_managed_identity and attach_acr:
if result.identity_profile is None or result.identity_profile["kubeletidentity"] is None:
logger.warning('Your cluster is successfully created, but we failed to attach acr to it, '
'you can manually grant permission to the identity named <ClUSTER_NAME>-agentpool '
'in MC_ resource group to give it permission to pull from ACR.')
else:
kubelet_identity_client_id = result.identity_profile["kubeletidentity"].client_id
_ensure_aks_acr(cmd.cli_ctx,
client_id=kubelet_identity_client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
if ingress_appgw_addon_enabled:
_add_ingress_appgw_addon_role_assignment(result, cmd)
if enable_virtual_node:
_add_virtual_node_role_assignment(cmd, result, vnet_subnet_id)
if need_post_creation_vnet_permission_granting:
if not _create_role_assignment(cmd.cli_ctx, 'Network Contributor',
result.identity.principal_id, scope=vnet_subnet_id,
resolve_assignee=False):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
return result
except CloudError as ex:
retry_exception = ex
if 'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
    """Disable the given comma-separated addons on a managed cluster.

    Fetches the current cluster model, flips the requested addon profiles
    off via _update_addons, and submits the modified model back to the
    service (optionally without waiting for the operation to finish).
    """
    sub_id = get_subscription_id(cmd.cli_ctx)
    cluster = client.get(resource_group_name, name)
    cluster = _update_addons(cmd, cluster, sub_id, resource_group_name, name,
                             addons, enable=False, no_wait=no_wait)
    # Persist the updated addon profiles on the managed cluster.
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, cluster)
def aks_enable_addons(cmd, client, resource_group_name, name, addons,
                      workspace_resource_id=None,
                      subnet_name=None,
                      appgw_name=None,
                      appgw_subnet_cidr=None,
                      appgw_id=None,
                      appgw_subnet_id=None,
                      appgw_watch_namespace=None,
                      enable_sgxquotehelper=False,
                      no_wait=False):
    """Enable the given comma-separated addons on a managed cluster.

    Updates the cluster's addon profiles via _update_addons and pushes the
    change. When an addon requires a client-side role assignment after the
    update (monitoring, ingress-appgw, virtual-node), the call waits for the
    result even if --no-wait was requested, because the role assignment
    needs the returned cluster object.
    """
    instance = client.get(resource_group_name, name)
    subscription_id = get_subscription_id(cmd.cli_ctx)
    instance = _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True,
                              workspace_resource_id=workspace_resource_id,
                              subnet_name=subnet_name,
                              appgw_name=appgw_name,
                              appgw_subnet_cidr=appgw_subnet_cidr,
                              appgw_id=appgw_id,
                              appgw_subnet_id=appgw_subnet_id,
                              appgw_watch_namespace=appgw_watch_namespace,
                              enable_sgxquotehelper=enable_sgxquotehelper,
                              no_wait=no_wait)
    # Detect which post-creation steps are needed from the updated profiles.
    enable_monitoring = CONST_MONITORING_ADDON_NAME in instance.addon_profiles \
        and instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled
    ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles \
        and instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
    # Virtual-node addon names carry an OS suffix; only Linux is used here.
    os_type = 'Linux'
    virtual_node_addon_name = CONST_VIRTUAL_NODE_ADDON_NAME + os_type
    enable_virtual_node = (virtual_node_addon_name in instance.addon_profiles and
                           instance.addon_profiles[virtual_node_addon_name].enabled)
    need_pull_for_result = enable_monitoring or ingress_appgw_addon_enabled or enable_virtual_node
    if need_pull_for_result:
        if enable_monitoring:
            _ensure_container_insights_for_monitoring(cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME])
        # adding a wait here since we rely on the result for role assignment
        result = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(resource_group_name, name, instance))
        if enable_monitoring:
            cloud_name = cmd.cli_ctx.cloud.name
            # mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
            if cloud_name.lower() == 'azurecloud':
                from msrestazure.tools import resource_id
                cluster_resource_id = resource_id(
                    subscription=subscription_id,
                    resource_group=resource_group_name,
                    namespace='Microsoft.ContainerService', type='managedClusters',
                    name=name
                )
                _add_monitoring_role_assignment(result, cluster_resource_id, cmd)
        if ingress_appgw_addon_enabled:
            _add_ingress_appgw_addon_role_assignment(result, cmd)
        if enable_virtual_node:
            # All agent pool will reside in the same vnet, we will grant vnet level Contributor role
            # in later function, so using a random agent pool here is OK
            random_agent_pool = result.agent_pool_profiles[0]
            if random_agent_pool.vnet_subnet_id != "":
                _add_virtual_node_role_assignment(cmd, result, random_agent_pool.vnet_subnet_id)
            # Else, the cluster is not using custom VNet, the permission is already granted in AKS RP,
            # we don't need to handle it in client side in this case.
    else:
        result = sdk_no_wait(no_wait, client.create_or_update,
                             resource_group_name, name, instance)
    return result
def aks_get_versions(cmd, client, location):
    """Return the orchestrator (Kubernetes) versions available for managed clusters in *location*."""
    versions = client.list_orchestrators(location, resource_type='managedClusters')
    return versions
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
                        path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
                        overwrite_existing=False, context_name=None):
    """Fetch cluster credentials and write/merge the kubeconfig at *path*.

    Uses the admin credential endpoint when *admin* is True, otherwise the
    user credential endpoint. Raises CLIError when no credentials come back
    or the returned payload has no usable kubeconfig.
    """
    # Pick the credential endpoint once, then call it.
    fetch = (client.list_cluster_admin_credentials if admin
             else client.list_cluster_user_credentials)
    credential_results = fetch(resource_group_name, name)
    if not credential_results:
        raise CLIError("No Kubernetes credentials found.")
    try:
        kubeconfig = credential_results.kubeconfigs[0].value.decode(encoding='UTF-8')
        _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name)
    except (IndexError, ValueError):
        raise CLIError("Fail to find kubeconfig file.")
def aks_list(cmd, client, resource_group_name=None):
    """List managed clusters (optionally scoped to one resource group) with null fields stripped."""
    clusters = (client.list_by_resource_group(resource_group_name)
                if resource_group_name else client.list())
    return _remove_nulls(list(clusters))
def aks_show(cmd, client, resource_group_name, name):
    """Return a single managed cluster with null-valued fields removed for display."""
    managed_cluster = client.get(resource_group_name, name)
    cleaned = _remove_nulls([managed_cluster])
    return cleaned[0]
def aks_update_credentials(cmd, client, resource_group_name, name,
                           reset_service_principal=False,
                           reset_aad=False,
                           service_principal=None,
                           client_secret=None,
                           aad_server_app_id=None,
                           aad_server_app_secret=None,
                           aad_client_app_id=None,
                           aad_tenant_id=None,
                           no_wait=False):
    """Reset either the service principal or the legacy AAD profile of a cluster.

    Exactly one of --reset-service-principal / --reset-aad must be chosen;
    each mode validates its own required arguments before calling the
    corresponding reset operation.
    """
    # Exactly one of the two reset modes must be selected.
    if bool(reset_service_principal) == bool(reset_aad):
        raise CLIError('usage error: --reset-service-principal | --reset-aad-profile')
    if reset_service_principal:
        if service_principal is None or client_secret is None:
            raise CLIError('usage error: --reset-service-principal --service-principal ID --client-secret SECRET')
        return sdk_no_wait(no_wait, client.reset_service_principal_profile,
                           resource_group_name, name, service_principal, client_secret)
    # --reset-aad path: all three legacy AAD app values are mandatory.
    if not all([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
        raise CLIError('usage error: --reset-aad --aad-client-app-id ID --aad-server-app-id ID '
                       '--aad-server-app-secret SECRET [--aad-tenant-id ID]')
    parameters = dict(
        clientAppID=aad_client_app_id,
        serverAppID=aad_server_app_id,
        serverAppSecret=aad_server_app_secret,
        tenantID=aad_tenant_id,
    )
    return sdk_no_wait(no_wait, client.reset_aad_profile,
                       resource_group_name, name, parameters)
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
    """Scale one node pool of a managed cluster to *node_count* nodes.

    A pool name is mandatory when the cluster has more than one pool;
    pools with the cluster autoscaler enabled cannot be scaled manually.
    """
    instance = client.get(resource_group_name, name)
    pools = instance.agent_pool_profiles
    if len(pools) > 1 and nodepool_name == "":
        raise CLIError('There are more than one node pool in the cluster. '
                       'Please specify nodepool name or use az aks nodepool command to scale node pool')
    for pool in pools:
        # The unnamed form is only valid for a single-pool cluster.
        if pool.name != nodepool_name and not (nodepool_name == "" and len(pools) == 1):
            continue
        if pool.enable_auto_scaling:
            raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
        pool.count = int(node_count)  # pylint: disable=no-member
        # null out the SP and AAD profile because otherwise validation complains
        instance.service_principal_profile = None
        instance.aad_profile = None
        return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
    raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
# pylint: disable=inconsistent-return-statements
def aks_update(cmd, client, resource_group_name, name,
               enable_cluster_autoscaler=False,
               disable_cluster_autoscaler=False,
               update_cluster_autoscaler=False,
               cluster_autoscaler_profile=None,
               min_count=None, max_count=None,
               uptime_sla=False,
               no_uptime_sla=False,
               load_balancer_managed_outbound_ip_count=None,
               load_balancer_outbound_ips=None,
               load_balancer_outbound_ip_prefixes=None,
               load_balancer_outbound_ports=None,
               load_balancer_idle_timeout=None,
               attach_acr=None,
               detach_acr=None,
               api_server_authorized_ip_ranges=None,
               enable_aad=False,
               aad_tenant_id=None,
               aad_admin_group_object_ids=None,
               enable_ahub=False,
               disable_ahub=False,
               no_wait=False):
    """Update mutable settings of an existing managed cluster.

    Supports autoscaler enable/disable/update and profile tuning, SLA tier,
    load-balancer profile, ACR attach/detach, authorized API server IP
    ranges, managed AAD, and Azure Hybrid User Benefits (AHUB). At least one
    option must be provided; otherwise a usage CLIError is raised.

    Fix over previous revision: the usage-error message concatenated
    adjacent string literals without separating spaces, producing e.g.
    'or"--load-balancer-outbound-ips"'.
    """
    # Booleans sum to the number of autoscaler operations requested.
    update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
    update_lb_profile = is_load_balancer_profile_provided(load_balancer_managed_outbound_ip_count,
                                                          load_balancer_outbound_ips,
                                                          load_balancer_outbound_ip_prefixes,
                                                          load_balancer_outbound_ports,
                                                          load_balancer_idle_timeout)
    update_aad_profile = not (aad_tenant_id is None and aad_admin_group_object_ids is None)
    # pylint: disable=too-many-boolean-expressions
    if (update_autoscaler != 1 and cluster_autoscaler_profile is None and
            not update_lb_profile and
            not attach_acr and
            not detach_acr and
            not uptime_sla and
            not no_uptime_sla and
            api_server_authorized_ip_ranges is None and
            not enable_aad and
            not update_aad_profile and
            not enable_ahub and
            not disable_ahub):
        raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler" or '
                       '"--cluster-autoscaler-profile" or '
                       '"--load-balancer-managed-outbound-ip-count" or '
                       '"--load-balancer-outbound-ips" or '
                       '"--load-balancer-outbound-ip-prefixes" or '
                       '"--load-balancer-outbound-ports" or '
                       '"--load-balancer-idle-timeout" or '
                       '"--attach-acr" or "--detach-acr" or '
                       '"--uptime-sla" or '
                       '"--no-uptime-sla" or '
                       '"--api-server-authorized-ip-ranges" or '
                       '"--enable-aad" or '
                       '"--aad-tenant-id" or '
                       '"--aad-admin-group-object-ids" or '
                       '"--enable-ahub" or '
                       '"--disable-ahub"')
    instance = client.get(resource_group_name, name)
    # For multi-agent pool, use the az aks nodepool command
    if update_autoscaler > 0 and len(instance.agent_pool_profiles) > 1:
        raise CLIError('There are more than one node pool in the cluster. Please use "az aks nodepool" command '
                       'to update per node pool auto scaler settings')
    _validate_autoscaler_update_counts(min_count, max_count, enable_cluster_autoscaler or
                                       update_cluster_autoscaler)
    if enable_cluster_autoscaler:
        if instance.agent_pool_profiles[0].enable_auto_scaling:
            logger.warning('Cluster autoscaler is already enabled for this node pool.\n'
                           'Please run "az aks --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
            return None
        instance.agent_pool_profiles[0].min_count = int(min_count)
        instance.agent_pool_profiles[0].max_count = int(max_count)
        instance.agent_pool_profiles[0].enable_auto_scaling = True
    if update_cluster_autoscaler:
        if not instance.agent_pool_profiles[0].enable_auto_scaling:
            raise CLIError('Cluster autoscaler is not enabled for this node pool.\n'
                           'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable cluster with min-count and max-count.')
        instance.agent_pool_profiles[0].min_count = int(min_count)
        instance.agent_pool_profiles[0].max_count = int(max_count)
    if disable_cluster_autoscaler:
        if not instance.agent_pool_profiles[0].enable_auto_scaling:
            logger.warning('Cluster autoscaler is already disabled for this node pool.')
            return None
        instance.agent_pool_profiles[0].enable_auto_scaling = False
        instance.agent_pool_profiles[0].min_count = None
        instance.agent_pool_profiles[0].max_count = None
    # if intention is to clear autoscaler profile
    if cluster_autoscaler_profile == {}:
        instance.auto_scaler_profile = {}
    # else profile is provided, update instance profile if it exists
    elif cluster_autoscaler_profile:
        instance.auto_scaler_profile = _update_dict(instance.auto_scaler_profile.__dict__,
                                                    dict((key.replace("-", "_"), value)
                                                         for (key, value) in cluster_autoscaler_profile.items())) \
            if instance.auto_scaler_profile else cluster_autoscaler_profile
    subscription_id = get_subscription_id(cmd.cli_ctx)
    client_id = ""
    # Determine the identity used for ACR role assignments: kubelet identity
    # for MSI clusters, otherwise the cluster service principal.
    if _is_msi_cluster(instance):
        if instance.identity_profile is None or instance.identity_profile["kubeletidentity"] is None:
            raise CLIError('Unexpected error getting kubelet\'s identity for the cluster. '
                           'Please do not set --attach-acr or --detach-acr. '
                           'You can manually grant or revoke permission to the identity named '
                           '<ClUSTER_NAME>-agentpool in MC_ resource group to access ACR.')
        client_id = instance.identity_profile["kubeletidentity"].client_id
    else:
        client_id = instance.service_principal_profile.client_id
    if not client_id:
        raise CLIError('Cannot get the AKS cluster\'s service principal.')
    if attach_acr:
        _ensure_aks_acr(cmd.cli_ctx,
                        client_id=client_id,
                        acr_name_or_id=attach_acr,
                        subscription_id=subscription_id)
    if detach_acr:
        _ensure_aks_acr(cmd.cli_ctx,
                        client_id=client_id,
                        acr_name_or_id=detach_acr,
                        subscription_id=subscription_id,
                        detach=True)
    if uptime_sla and no_uptime_sla:
        raise CLIError('Cannot specify "--uptime-sla" and "--no-uptime-sla" at the same time.')
    if uptime_sla:
        instance.sku = ManagedClusterSKU(
            name="Basic",
            tier="Paid"
        )
    if no_uptime_sla:
        instance.sku = ManagedClusterSKU(
            name="Basic",
            tier="Free"
        )
    if update_lb_profile:
        instance.network_profile.load_balancer_profile = update_load_balancer_profile(
            load_balancer_managed_outbound_ip_count,
            load_balancer_outbound_ips,
            load_balancer_outbound_ip_prefixes,
            load_balancer_outbound_ports,
            load_balancer_idle_timeout,
            instance.network_profile.load_balancer_profile)
    # empty string is valid as it disables ip whitelisting
    if api_server_authorized_ip_ranges is not None:
        instance.api_server_access_profile = \
            _populate_api_server_access_profile(api_server_authorized_ip_ranges, instance=instance)
    if enable_aad:
        if instance.aad_profile is not None and instance.aad_profile.managed:
            raise CLIError('Cannot specify "--enable-aad" if managed AAD is already enabled')
        instance.aad_profile = ManagedClusterAADProfile(
            managed=True
        )
    if update_aad_profile:
        if instance.aad_profile is None or not instance.aad_profile.managed:
            raise CLIError('Cannot specify "--aad-tenant-id/--aad-admin-group-object-ids"'
                           ' if managed AAD is not enabled')
        if aad_tenant_id is not None:
            instance.aad_profile.tenant_id = aad_tenant_id
        if aad_admin_group_object_ids is not None:
            instance.aad_profile.admin_group_object_ids = _parse_comma_separated_list(aad_admin_group_object_ids)
    if enable_ahub and disable_ahub:
        raise CLIError('Cannot specify "--enable-ahub" and "--disable-ahub" at the same time')
    if enable_ahub:
        instance.windows_profile.license_type = 'Windows_Server'
    if disable_ahub:
        instance.windows_profile.license_type = 'None'
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
# pylint: disable=unused-argument,inconsistent-return-statements,too-many-return-statements
def aks_upgrade(cmd,
                client,
                resource_group_name, name,
                kubernetes_version='',
                control_plane_only=False,
                node_image_only=False,
                no_wait=False,
                yes=False):
    """Upgrade a managed cluster's Kubernetes version and/or node images.

    With --node-image-only, every node pool's image is upgraded one pool at
    a time (VMSS clusters only) and the Kubernetes version is left alone.
    Otherwise the control plane is upgraded to *kubernetes_version*, and all
    node pools as well unless --control-plane-only is given (legacy / VMAS
    clusters always upgrade their pools with the control plane). Prompts
    for confirmation unless --yes is supplied.

    Fix over previous revision: the node-image confirmation prompt joined
    'cluster' and 'and' with no space ("clusterand might take a while").
    """
    msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?'
    if not yes and not prompt_y_n(msg, default="n"):
        return None
    instance = client.get(resource_group_name, name)
    # Availability-set clusters cannot do node-image-only upgrades and always
    # upgrade their pools together with the control plane.
    vmas_cluster = False
    for agent_profile in instance.agent_pool_profiles:
        if agent_profile.type.lower() == "availabilityset":
            vmas_cluster = True
            break
    if kubernetes_version != '' and node_image_only:
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
                       'If you only want to upgrade the node version please use the "--node-image-only" option only.')
    if node_image_only:
        msg = "This node image upgrade operation will run across every node pool in the cluster " \
              "and might take a while, do you wish to continue?"
        if not yes and not prompt_y_n(msg, default="n"):
            return None
        # This only provide convenience for customer at client side so they can run az aks upgrade to upgrade all
        # nodepools of a cluster. The SDK only support upgrade single nodepool at a time.
        for agent_pool_profile in instance.agent_pool_profiles:
            if vmas_cluster:
                raise CLIError('This cluster is not using VirtualMachineScaleSets. Node image upgrade only operation '
                               'can only be applied on VirtualMachineScaleSets cluster.')
            agent_pool_client = cf_agent_pools(cmd.cli_ctx)
            _upgrade_single_nodepool_image_version(True, agent_pool_client,
                                                   resource_group_name, name, agent_pool_profile.name)
        mc = client.get(resource_group_name, name)
        return _remove_nulls([mc])[0]
    if instance.kubernetes_version == kubernetes_version:
        if instance.provisioning_state == "Succeeded":
            logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
                           "will occur when upgrading to the same version if the cluster is not in a failed state.",
                           instance.kubernetes_version)
        elif instance.provisioning_state == "Failed":
            logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
                           "attempt resolution of failed cluster state.", instance.kubernetes_version)
    upgrade_all = False
    instance.kubernetes_version = kubernetes_version
    # for legacy clusters, we always upgrade node pools with CCP.
    if instance.max_agent_pools < 8 or vmas_cluster:
        if control_plane_only:
            msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
                   "upgraded to {} as well. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None
        upgrade_all = True
    else:
        if not control_plane_only:
            msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
                   "AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None
            upgrade_all = True
        else:
            msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
                   "Node pool will not change. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None
    if upgrade_all:
        for agent_profile in instance.agent_pool_profiles:
            agent_profile.orchestrator_version = kubernetes_version
    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name, cluster_name, nodepool_name):
    """Kick off a node-image-only upgrade for a single agent pool, optionally without waiting."""
    operation = client.upgrade_node_image_version
    return sdk_no_wait(no_wait, operation, resource_group_name, cluster_name, nodepool_name)
# Name and implementation module of the Azure Dev Spaces CLI extension,
# loaded on demand by aks_use_dev_spaces / aks_remove_dev_spaces below.
DEV_SPACES_EXTENSION_NAME = 'dev-spaces'
DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces.custom'
def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None,
                       endpoint_type='Public', prompt=False):
    """
    Use Azure Dev Spaces with a managed Kubernetes cluster.

    Ensures the 'dev-spaces' CLI extension is installed (updating it when
    requested), then delegates to the extension's ads_use_dev_spaces.

    :param name: Name of the managed cluster.
    :type name: String
    :param resource_group_name: Name of resource group. You can configure the default group. \
    Using 'az configure --defaults group=<name>'.
    :type resource_group_name: String
    :param update: Update to the latest Azure Dev Spaces client components.
    :type update: bool
    :param space_name: Name of the new or existing dev space to select. Defaults to an \
    interactive selection experience.
    :type space_name: String
    :param endpoint_type: The endpoint type to be used for a Azure Dev Spaces controller. \
    See https://aka.ms/azds-networking for more information.
    :type endpoint_type: String
    :param prompt: Do not prompt for confirmation. Requires --space.
    :type prompt: bool
    """
    # Bail out silently if the extension cannot be installed/updated.
    if not _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update):
        return
    azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
    try:
        azext_custom.ads_use_dev_spaces(name, resource_group_name, update, space_name, endpoint_type, prompt)
    except TypeError:
        # An older extension exposes a different signature.
        raise CLIError("Use '--update' option to get the latest Azure Dev Spaces client components.")
    except AttributeError as ae:
        raise CLIError(ae)
def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False):
    """
    Remove Azure Dev Spaces from a managed Kubernetes cluster.

    Ensures the 'dev-spaces' CLI extension is available, then delegates to
    the extension's ads_remove_dev_spaces.

    :param name: Name of the managed cluster.
    :type name: String
    :param resource_group_name: Name of resource group. You can configure the default group. \
    Using 'az configure --defaults group=<name>'.
    :type resource_group_name: String
    :param prompt: Do not prompt for confirmation.
    :type prompt: bool
    """
    # Bail out silently if the extension cannot be installed.
    if not _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE):
        return
    azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
    try:
        azext_custom.ads_remove_dev_spaces(name, resource_group_name, prompt)
    except AttributeError as ae:
        raise CLIError(ae)
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True):
    """Rotate the cluster's certificates (fire-and-forget by default: no_wait=True)."""
    rotate = client.rotate_cluster_certificates
    return sdk_no_wait(no_wait, rotate, resource_group_name, name)
def _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable,
                   workspace_resource_id=None,
                   subnet_name=None,
                   appgw_name=None,
                   appgw_subnet_cidr=None,
                   appgw_id=None,
                   appgw_subnet_id=None,
                   appgw_watch_namespace=None,
                   enable_sgxquotehelper=False,
                   no_wait=False):
    """Enable or disable addons on the in-memory managed-cluster model.

    Parses the comma-separated *addons* string, validates each addon name
    against ADDONS, applies per-addon config (monitoring workspace,
    virtual-node subnet, ingress-appgw settings, confcom SGX helper) when
    enabling, and clears config when disabling. Mutates and returns
    *instance*; also nulls out SP/AAD profiles so server-side validation
    of the subsequent create_or_update does not complain.

    Fixes over previous revision: the monitoring already-enabled error
    message lacked a space before 'before enabling it again', and the
    virtual-node message was not an f-string so it printed the literal
    text '{resource_group_name}' instead of the actual group name.
    """
    # parse the comma-separated addons argument
    addon_args = addons.split(',')
    addon_profiles = instance.addon_profiles or {}
    os_type = 'Linux'
    # for each addons argument
    for addon_arg in addon_args:
        if addon_arg not in ADDONS:
            raise CLIError("Invalid addon name: {}.".format(addon_arg))
        addon = ADDONS[addon_arg]
        if addon == CONST_VIRTUAL_NODE_ADDON_NAME:
            # only linux is supported for now, in the future this will be a user flag
            addon += os_type
        # honor addon names defined in Azure CLI
        for key in list(addon_profiles):
            if key.lower() == addon.lower() and key != addon:
                addon_profiles[addon] = addon_profiles.pop(key)
        if enable:
            # add new addons or update existing ones and enable them
            addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False))
            # special config handling for certain addons
            if addon == CONST_MONITORING_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
                                   'before enabling it again.')
                if not workspace_resource_id:
                    workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
                        cmd,
                        subscription_id,
                        resource_group_name)
                # Normalize to '/subscriptions/...' with no trailing slash.
                workspace_resource_id = workspace_resource_id.strip()
                if not workspace_resource_id.startswith('/'):
                    workspace_resource_id = '/' + workspace_resource_id
                if workspace_resource_id.endswith('/'):
                    workspace_resource_id = workspace_resource_id.rstrip('/')
                addon_profile.config = {CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id}
            elif addon == (CONST_VIRTUAL_NODE_ADDON_NAME + os_type):
                if addon_profile.enabled:
                    raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
                                   'To change virtual-node configuration, run '
                                   f'"az aks disable-addons -a virtual-node -g {resource_group_name}" '
                                   'before enabling it again.')
                if not subnet_name:
                    raise CLIError('The aci-connector addon requires setting a subnet name.')
                addon_profile.config = {CONST_VIRTUAL_NODE_SUBNET_NAME: subnet_name}
            elif addon == CONST_INGRESS_APPGW_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The ingress-appgw addon is already enabled for this managed cluster.\n'
                                   'To change ingress-appgw configuration, run '
                                   f'"az aks disable-addons -a ingress-appgw -n {name} -g {resource_group_name}" '
                                   'before enabling it again.')
                addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
                if appgw_name is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
                if appgw_subnet_cidr is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
                if appgw_id is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
                if appgw_subnet_id is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
                if appgw_watch_namespace is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
            elif addon == CONST_CONFCOM_ADDON_NAME:
                if addon_profile.enabled:
                    raise ValidationError('The confcom addon is already enabled for this managed cluster.',
                                          recommendation='To change confcom configuration, run '
                                          f'"az aks disable-addons -a confcom -n {name} -g {resource_group_name}" '
                                          'before enabling it again.')
                addon_profile = ManagedClusterAddonProfile(
                    enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
                if enable_sgxquotehelper:
                    addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
            addon_profiles[addon] = addon_profile
        else:
            if addon not in addon_profiles:
                # kube-dashboard may be implicitly present even when not listed.
                if addon == CONST_KUBE_DASHBOARD_ADDON_NAME:
                    addon_profiles[addon] = ManagedClusterAddonProfile(enabled=False)
                else:
                    raise CLIError("The addon {} is not installed.".format(addon))
            addon_profiles[addon].config = None
        addon_profiles[addon].enabled = enable
    instance.addon_profiles = addon_profiles
    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None
    return instance
def _get_azext_module(extension_name, module_name):
    """Import and return the implementation module of an installed CLI extension.

    Puts the extension's install location on the import path first; wraps
    any ImportError in a CLIError.
    """
    try:
        # Make the installed extension importable, then load its module.
        from azure.cli.core.extension.operations import add_extension_to_path
        from importlib import import_module
        add_extension_to_path(extension_name)
        return import_module(module_name)
    except ImportError as ie:
        raise CLIError(ie)
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
                        workspace_resource_id=None,
                        aci_subnet_name=None,
                        vnet_subnet_id=None,
                        appgw_name=None,
                        appgw_subnet_cidr=None,
                        appgw_id=None,
                        appgw_subnet_id=None,
                        appgw_watch_namespace=None,
                        enable_sgxquotehelper=False):
    """Build the addon-profile dict for cluster creation from --enable-addons.

    Consumes each recognized addon name out of the comma-separated
    *addons_str*, creating its ManagedClusterAddonProfile (with addon-
    specific config for monitoring, virtual-node, ingress-appgw, confcom).
    Raises CLIError for any addon name left unconsumed, or when an
    addon-specific argument is supplied without its addon.
    """
    if not addon_profiles:
        addon_profiles = {}
    addons = addons_str.split(',') if addons_str else []
    if 'http_application_routing' in addons:
        addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True)
        addons.remove('http_application_routing')
    if 'kube-dashboard' in addons:
        addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True)
        addons.remove('kube-dashboard')
    # TODO: can we help the user find a workspace resource ID?
    if 'monitoring' in addons:
        if not workspace_resource_id:
            # use default workspace if exists else create default workspace
            workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
                cmd, subscription_id, resource_group_name)
        # Normalize to '/subscriptions/...' with no trailing slash.
        workspace_resource_id = workspace_resource_id.strip()
        if not workspace_resource_id.startswith('/'):
            workspace_resource_id = '/' + workspace_resource_id
        if workspace_resource_id.endswith('/'):
            workspace_resource_id = workspace_resource_id.rstrip('/')
        addon_profiles[CONST_MONITORING_ADDON_NAME] = ManagedClusterAddonProfile(
            enabled=True, config={CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id})
        addons.remove('monitoring')
    # error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
    elif workspace_resource_id:
        raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".')
    if 'azure-policy' in addons:
        addon_profiles[CONST_AZURE_POLICY_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True)
        addons.remove('azure-policy')
    if 'virtual-node' in addons:
        if not aci_subnet_name or not vnet_subnet_id:
            raise CLIError('"--enable-addons virtual-node" requires "--aci-subnet-name" and "--vnet-subnet-id".')
        # TODO: how about aciConnectorwindows, what is its addon name?
        os_type = 'Linux'
        addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + os_type] = ManagedClusterAddonProfile(
            enabled=True,
            config={CONST_VIRTUAL_NODE_SUBNET_NAME: aci_subnet_name}
        )
        addons.remove('virtual-node')
    if 'ingress-appgw' in addons:
        # Only the appgw_* arguments that were actually provided go into config.
        addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
        if appgw_name is not None:
            addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
        if appgw_subnet_cidr is not None:
            addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
        if appgw_id is not None:
            addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
        if appgw_subnet_id is not None:
            addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
        if appgw_watch_namespace is not None:
            addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
        addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME] = addon_profile
        addons.remove('ingress-appgw')
    if 'confcom' in addons:
        addon_profile = ManagedClusterAddonProfile(enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
        if enable_sgxquotehelper:
            addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
        addon_profiles[CONST_CONFCOM_ADDON_NAME] = addon_profile
        addons.remove('confcom')
    # error out if any (unrecognized) addons remain
    if addons:
        raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
            ",".join(addons), "are" if len(addons) > 1 else "is"))
    return addon_profiles
def _install_dev_spaces_extension(cmd, extension_name):
    """Install the named CLI extension; return True on success, False on any failure."""
    try:
        from azure.cli.core.extension import operations
        operations.add_extension(cmd=cmd, extension_name=extension_name)
    except Exception:  # pylint: disable=broad-except
        # Best-effort install: any failure is reported via the return value.
        return False
    else:
        return True
def _update_dev_spaces_extension(cmd, extension_name, extension_module):
    """Update and reload the named CLI extension.

    Returns True when the update succeeds (a CLIError is only logged),
    False when the extension is not installed or its module fails to load.
    """
    from azure.cli.core.extension import ExtensionNotInstalledException
    try:
        from azure.cli.core.extension import operations
        operations.update_extension(cmd=cmd, extension_name=extension_name)
        operations.reload_extension(extension_name=extension_name)
    except CLIError as exc:
        # Non-fatal: keep going with the currently installed version.
        logger.info(exc)
    except ExtensionNotInstalledException as exc:
        logger.debug(exc)
        return False
    except ModuleNotFoundError as exc:
        logger.debug(exc)
        logger.error("Error occurred attempting to load the extension module. Use --debug for more information.")
        return False
    return True
def _get_or_add_extension(cmd, extension_name, extension_module, update=False):
    """Ensure a CLI extension is available, installing it when absent.

    When the extension is already installed and ``update`` is truthy, it is
    updated and reloaded instead. Returns the boolean outcome of whichever
    operation ran (True when the extension was simply found).
    """
    from azure.cli.core.extension import (ExtensionNotInstalledException, get_extension)
    try:
        get_extension(extension_name)
    except ExtensionNotInstalledException:
        return _install_dev_spaces_extension(cmd, extension_name)
    if update:
        return _update_dev_spaces_extension(cmd, extension_name, extension_module)
    return True
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
    """Return the resource ID of the subscription's default Log Analytics workspace,
    creating the workspace (and its 'DefaultResourceGroup-<code>' resource group)
    when it does not exist yet.

    The workspace region is derived from the cluster resource group's region via
    the per-cloud mapping tables below; unmapped regions fall back to the region
    itself. A cross-cloud sanity check rejects regions that belong to a different
    cloud than the one currently configured.
    """
    # mapping for azure public cloud
    # log analytics workspaces cannot be created in WCUS region due to capacity limits
    # so mapped to EUS per discussion with log analytics team
    AzureCloudLocationToOmsRegionCodeMap = {
        "australiasoutheast": "ASE",
        "australiaeast": "EAU",
        "australiacentral": "CAU",
        "canadacentral": "CCA",
        "centralindia": "CIN",
        "centralus": "CUS",
        "eastasia": "EA",
        "eastus": "EUS",
        "eastus2": "EUS2",
        "eastus2euap": "EAP",
        "francecentral": "PAR",
        "japaneast": "EJP",
        "koreacentral": "SE",
        "northeurope": "NEU",
        "southcentralus": "SCUS",
        "southeastasia": "SEA",
        "uksouth": "SUK",
        "usgovvirginia": "USGV",
        "westcentralus": "EUS",
        "westeurope": "WEU",
        "westus": "WUS",
        "westus2": "WUS2",
        "brazilsouth": "CQ",
        "brazilsoutheast": "BRSE",
        "norwayeast": "NOE",
        "southafricanorth": "JNB",
        "northcentralus": "NCUS",
        "uaenorth": "DXB",
        "germanywestcentral": "DEWC",
        "ukwest": "WUK",
        "switzerlandnorth": "CHN",
        "switzerlandwest": "CHW",
        "uaecentral": "AUH"
    }
    AzureCloudRegionToOmsRegionMap = {
        "australiacentral": "australiacentral",
        "australiacentral2": "australiacentral",
        "australiaeast": "australiaeast",
        "australiasoutheast": "australiasoutheast",
        "brazilsouth": "brazilsouth",
        "canadacentral": "canadacentral",
        "canadaeast": "canadacentral",
        "centralus": "centralus",
        "centralindia": "centralindia",
        "eastasia": "eastasia",
        "eastus": "eastus",
        "eastus2": "eastus2",
        "francecentral": "francecentral",
        "francesouth": "francecentral",
        "japaneast": "japaneast",
        "japanwest": "japaneast",
        "koreacentral": "koreacentral",
        "koreasouth": "koreacentral",
        "northcentralus": "northcentralus",
        "northeurope": "northeurope",
        "southafricanorth": "southafricanorth",
        "southafricawest": "southafricanorth",
        "southcentralus": "southcentralus",
        "southeastasia": "southeastasia",
        "southindia": "centralindia",
        "uksouth": "uksouth",
        "ukwest": "ukwest",
        "westcentralus": "eastus",
        "westeurope": "westeurope",
        "westindia": "centralindia",
        "westus": "westus",
        "westus2": "westus2",
        "norwayeast": "norwayeast",
        "norwaywest": "norwayeast",
        "switzerlandnorth": "switzerlandnorth",
        "switzerlandwest": "switzerlandwest",
        "uaenorth": "uaenorth",
        "germanywestcentral": "germanywestcentral",
        "germanynorth": "germanywestcentral",
        "uaecentral": "uaecentral",
        "eastus2euap": "eastus2euap",
        "brazilsoutheast": "brazilsoutheast"
    }
    # mapping for azure china cloud
    # currently log analytics supported only China East 2 region
    AzureChinaLocationToOmsRegionCodeMap = {
        "chinaeast": "EAST2",
        "chinaeast2": "EAST2",
        "chinanorth": "EAST2",
        "chinanorth2": "EAST2"
    }
    AzureChinaRegionToOmsRegionMap = {
        "chinaeast": "chinaeast2",
        "chinaeast2": "chinaeast2",
        "chinanorth": "chinaeast2",
        "chinanorth2": "chinaeast2"
    }
    # mapping for azure us government cloud
    AzureFairfaxLocationToOmsRegionCodeMap = {
        "usgovvirginia": "USGV",
        "usgovarizona": "PHX"
    }
    AzureFairfaxRegionToOmsRegionMap = {
        "usgovvirginia": "usgovvirginia",
        "usgovtexas": "usgovvirginia",
        "usgovarizona": "usgovarizona"
    }
    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    cloud_name = cmd.cli_ctx.cloud.name
    # Defaults used when the region is absent from the cloud-specific maps.
    workspace_region = "eastus"
    workspace_region_code = "EUS"
    # sanity check that locations and clouds match.
    if ((cloud_name.lower() == 'azurecloud' and AzureChinaRegionToOmsRegionMap.get(rg_location, False)) or
            (cloud_name.lower() == 'azurecloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
        raise CLIError('Wrong cloud (azurecloud) setting for region {}, please use "az cloud set ..."'
                       .format(rg_location))
    if ((cloud_name.lower() == 'azurechinacloud' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
            (cloud_name.lower() == 'azurechinacloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
        raise CLIError('Wrong cloud (azurechinacloud) setting for region {}, please use "az cloud set ..."'
                       .format(rg_location))
    if ((cloud_name.lower() == 'azureusgovernment' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
            (cloud_name.lower() == 'azureusgovernment' and AzureChinaRegionToOmsRegionMap.get(rg_location, False))):
        raise CLIError('Wrong cloud (azureusgovernment) setting for region {}, please use "az cloud set ..."'
                       .format(rg_location))
    if cloud_name.lower() == 'azurecloud':
        workspace_region = AzureCloudRegionToOmsRegionMap.get(rg_location, "eastus")
        workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(workspace_region, "EUS")
    elif cloud_name.lower() == 'azurechinacloud':
        workspace_region = AzureChinaRegionToOmsRegionMap.get(rg_location, "chinaeast2")
        workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(workspace_region, "EAST2")
    elif cloud_name.lower() == 'azureusgovernment':
        workspace_region = AzureFairfaxRegionToOmsRegionMap.get(rg_location, "usgovvirginia")
        workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(workspace_region, "USGV")
    else:
        # Unknown cloud: use the RG's own region verbatim.
        workspace_region = rg_location
        workspace_region_code = rg_location.upper()
    default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
    default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code)
    default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
        '/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
    resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
    resources = cf_resources(cmd.cli_ctx, subscription_id)
    # check if default RG exists
    if resource_groups.check_existence(default_workspace_resource_group):
        try:
            # If the workspace already exists, reuse it as-is.
            resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview')
            return resource.id
        except CloudError as ex:
            # 404 means "RG exists but workspace doesn't" — fall through and create it.
            if ex.status_code != 404:
                raise ex
    else:
        resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region})
    default_workspace_params = {
        'location': workspace_region,
        'properties': {
            'sku': {
                'name': 'standalone'
            }
        }
    }
    async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
                                                    default_workspace_params)
    ws_resource_id = ''
    # Poll in 15-second slices until the workspace deployment completes.
    while True:
        result = async_poller.result(15)
        if async_poller.done():
            ws_resource_id = result.id
            break
    return ws_resource_id
def _ensure_container_insights_for_monitoring(cmd, addon):
    """Deploy the ContainerInsights solution into the Log Analytics workspace
    referenced by the monitoring addon profile.

    Normalizes the workspace resource ID stored in the addon config, resolves
    the workspace's location, then runs an ARM deployment that creates the
    'ContainerInsights(<workspace>)' OperationsManagement solution in the
    workspace's subscription/resource group. Returns the deployment result.
    """
    # Workaround for this addon key which has been seen lowercased in the wild.
    for key in list(addon.config):
        if (key.lower() == CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID.lower() and
                key != CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID):
            addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID] = addon.config.pop(key)
    workspace_resource_id = addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID]
    # Normalize: trim whitespace, ensure leading '/', drop trailing '/'.
    workspace_resource_id = workspace_resource_id.strip()
    if not workspace_resource_id.startswith('/'):
        workspace_resource_id = '/' + workspace_resource_id
    if workspace_resource_id.endswith('/'):
        workspace_resource_id = workspace_resource_id.rstrip('/')
    # extract subscription ID and resource group from workspace_resource_id URL
    try:
        subscription_id = workspace_resource_id.split('/')[2]
        resource_group = workspace_resource_id.split('/')[4]
    except IndexError:
        raise CLIError('Could not locate resource group in workspace-resource-id URL.')
    # region of workspace can be different from region of RG so find the location of the workspace_resource_id
    resources = cf_resources(cmd.cli_ctx, subscription_id)
    try:
        resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview')
        location = resource.location
    except CloudError as ex:
        raise ex
    # Millisecond timestamp keeps deployment names unique per invocation.
    unix_time_in_millis = int(
        (datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
    solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis)
    # pylint: disable=line-too-long
    template = {
        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
        "contentVersion": "1.0.0.0",
        "parameters": {
            "workspaceResourceId": {
                "type": "string",
                "metadata": {
                    "description": "Azure Monitor Log Analytics Resource ID"
                }
            },
            "workspaceRegion": {
                "type": "string",
                "metadata": {
                    "description": "Azure Monitor Log Analytics workspace region"
                }
            },
            "solutionDeploymentName": {
                "type": "string",
                "metadata": {
                    "description": "Name of the solution deployment"
                }
            }
        },
        "resources": [
            {
                "type": "Microsoft.Resources/deployments",
                "name": "[parameters('solutionDeploymentName')]",
                "apiVersion": "2017-05-10",
                "subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
                "resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
                "properties": {
                    "mode": "Incremental",
                    "template": {
                        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
                        "contentVersion": "1.0.0.0",
                        "parameters": {},
                        "variables": {},
                        "resources": [
                            {
                                "apiVersion": "2015-11-01-preview",
                                "type": "Microsoft.OperationsManagement/solutions",
                                "location": "[parameters('workspaceRegion')]",
                                "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
                                "properties": {
                                    "workspaceResourceId": "[parameters('workspaceResourceId')]"
                                },
                                "plan": {
                                    "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
                                    "product": "[Concat('OMSGallery/', 'ContainerInsights')]",
                                    "promotionCode": "",
                                    "publisher": "Microsoft"
                                }
                            }
                        ]
                    },
                    "parameters": {}
                }
            }
        ]
    }
    params = {
        "workspaceResourceId": {
            "value": workspace_resource_id
        },
        "workspaceRegion": {
            "value": location
        },
        "solutionDeploymentName": {
            "value": solution_deployment_name
        }
    }
    deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
    # publish the Container Insights solution to the Log Analytics workspace
    return _invoke_deployment(cmd, resource_group, deployment_name, template, params,
                              validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_acr(cli_ctx,
                    client_id,
                    acr_name_or_id,
                    subscription_id,
                    detach=False):
    """Grant (or revoke, when detach=True) the cluster identity's 'acrpull' role on an ACR.

    Accepts either a full ACR resource ID or a bare registry name; a bare name
    is resolved across all resource groups in the subscription.
    """
    from msrestazure.tools import is_valid_resource_id, parse_resource_id
    if is_valid_resource_id(acr_name_or_id):
        # Resolve the registry straight from its resource ID.
        try:
            parsed = parse_resource_id(acr_name_or_id)
            acr_client = cf_container_registry_service(cli_ctx, subscription_id=parsed['subscription'])
            registry = acr_client.registries.get(parsed['resource_group'], parsed['name'])
        except CloudError as ex:
            raise CLIError(ex.message)
    else:
        # Fall back to a subscription-wide lookup by registry name.
        try:
            registry = get_resource_by_name(cli_ctx, acr_name_or_id, 'Microsoft.ContainerRegistry/registries')
        except CloudError as ex:
            if 'was not found' in ex.message:
                raise CLIError("ACR {} not found. Have you provided the right ACR name?".format(acr_name_or_id))
            raise CLIError(ex.message)
    _ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
def aks_agentpool_show(cmd, client, resource_group_name, cluster_name, nodepool_name):
    """Fetch a single agent pool of a managed cluster."""
    return client.get(resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_list(cmd, client, resource_group_name, cluster_name):
    """List every agent pool of a managed cluster."""
    pools = client.list(resource_group_name, cluster_name)
    return pools
def aks_agentpool_add(cmd, client, resource_group_name, cluster_name, nodepool_name,
                      kubernetes_version=None,
                      zones=None,
                      enable_node_public_ip=False,
                      node_public_ip_prefix_id=None,
                      node_vm_size=None,
                      node_osdisk_type=None,
                      node_osdisk_size=0,
                      node_count=3,
                      vnet_subnet_id=None,
                      ppg=None,
                      max_pods=0,
                      os_type="Linux",
                      min_count=None,
                      max_count=None,
                      enable_cluster_autoscaler=False,
                      node_taints=None,
                      priority=CONST_SCALE_SET_PRIORITY_REGULAR,
                      eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
                      spot_max_price=float('nan'),
                      tags=None,
                      labels=None,
                      max_surge=None,
                      mode="User",
                      no_wait=False):
    """Add a new node pool to a managed cluster.

    Rejects duplicate pool names, applies an OS-appropriate default VM size,
    parses comma-separated taints, and wires up spot/autoscaler/OS-disk
    settings before submitting the create request.

    Raises CLIError when the pool name already exists on the cluster.
    """
    instances = client.list(resource_group_name, cluster_name)
    for agentpool_profile in instances:
        if agentpool_profile.name == nodepool_name:
            raise CLIError("Node pool {} already exists, please try a different name, "
                           "use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
    upgradeSettings = AgentPoolUpgradeSettings()
    # Split comma-separated taints. str.strip() cannot raise, so the old
    # `except ValueError` handler here was unreachable dead code and has
    # been removed; server-side validation still rejects malformed taints.
    taints_array = []
    if node_taints is not None:
        taints_array = [taint.strip() for taint in node_taints.split(',')]
    if node_vm_size is None:
        # Default VM size differs per OS: Windows nodes need a larger SKU.
        if os_type.lower() == "windows":
            node_vm_size = "Standard_D2s_v3"
        else:
            node_vm_size = "Standard_DS2_v2"
    if max_surge:
        upgradeSettings.max_surge = max_surge
    agent_pool = AgentPool(
        name=nodepool_name,
        tags=tags,
        node_labels=labels,
        count=int(node_count),
        vm_size=node_vm_size,
        os_type=os_type,
        vnet_subnet_id=vnet_subnet_id,
        proximity_placement_group_id=ppg,
        agent_pool_type="VirtualMachineScaleSets",
        max_pods=int(max_pods) if max_pods else None,
        orchestrator_version=kubernetes_version,
        availability_zones=zones,
        scale_set_priority=priority,
        enable_node_public_ip=enable_node_public_ip,
        node_public_ip_prefix_id=node_public_ip_prefix_id,
        node_taints=taints_array,
        upgrade_settings=upgradeSettings,
        mode=mode
    )
    if priority == CONST_SCALE_SET_PRIORITY_SPOT:
        agent_pool.scale_set_eviction_policy = eviction_policy
        if isnan(spot_max_price):
            # NaN sentinel means "pay up to the on-demand price" (-1 to the service).
            spot_max_price = -1
        agent_pool.spot_max_price = spot_max_price
    _check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
    if node_osdisk_size:
        agent_pool.os_disk_size_gb = int(node_osdisk_size)
    if node_osdisk_type:
        agent_pool.os_disk_type = node_osdisk_type
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool)
def aks_agentpool_scale(cmd, client, resource_group_name, cluster_name,
                        nodepool_name,
                        node_count=3,
                        no_wait=False):
    """Resize a node pool to an explicit node count.

    Refused when the cluster autoscaler owns the pool size or when the
    requested count equals the current one.
    """
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    target_count = int(node_count)
    if instance.enable_auto_scaling:
        raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
    if target_count == instance.count:
        raise CLIError("The new node count is the same as the current node count.")
    instance.count = target_count  # pylint: disable=no-member
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_upgrade(cmd, client, resource_group_name, cluster_name,
                          nodepool_name,
                          kubernetes_version='',
                          node_image_only=False,
                          max_surge=None,
                          no_wait=False):
    """Upgrade a node pool's Kubernetes version, or only its node image.

    --kubernetes-version and --node-image-only conflict because a Kubernetes
    upgrade always refreshes the node image too.
    """
    if kubernetes_version != '' and node_image_only:
        # Fixed: the two adjacent string literals previously joined without a
        # space, producing "...node image version.If you only want..." in the
        # user-facing error message.
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
                       'If you only want to upgrade the node version please use the "--node-image-only" option only.')
    if node_image_only:
        return _upgrade_single_nodepool_image_version(no_wait,
                                                      client,
                                                      resource_group_name,
                                                      cluster_name,
                                                      nodepool_name)
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    instance.orchestrator_version = kubernetes_version
    if not instance.upgrade_settings:
        instance.upgrade_settings = AgentPoolUpgradeSettings()
    if max_surge:
        instance.upgrade_settings.max_surge = max_surge
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_update(cmd, client, resource_group_name, cluster_name, nodepool_name,
                         enable_cluster_autoscaler=False,
                         disable_cluster_autoscaler=False,
                         update_cluster_autoscaler=False,
                         min_count=None, max_count=None,
                         tags=None,
                         max_surge=None,
                         mode=None,
                         no_wait=False):
    """Update autoscaler settings, tags, mode, or max-surge on an existing node pool.

    Exactly one of the three autoscaler flags may be set; at least one of the
    autoscaler flags, --tags, --mode, or --max-surge must be given. Returns the
    create_or_update result (or None when the request is a no-op warning case).
    """
    # Booleans sum to the number of autoscaler flags supplied.
    update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
    if update_autoscaler > 1:
        raise CLIError('Please specify one of "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler"')
    if (update_autoscaler == 0 and not tags and not mode and not max_surge):
        raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler" or '
                       '"--tags" or "--mode" or "--max-surge"')
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    # min/max counts are mandatory when enabling or updating the autoscaler.
    _validate_autoscaler_update_counts(min_count, max_count, enable_cluster_autoscaler or
                                       update_cluster_autoscaler)
    if enable_cluster_autoscaler:
        if instance.enable_auto_scaling:
            # Already enabled: warn instead of erroring, and skip the request.
            logger.warning('Autoscaler is already enabled for this node pool.\n'
                           'Please run "az aks nodepool update --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
            return None
        instance.min_count = int(min_count)
        instance.max_count = int(max_count)
        instance.enable_auto_scaling = True
    if update_cluster_autoscaler:
        if not instance.enable_auto_scaling:
            raise CLIError('Autoscaler is not enabled for this node pool.\n'
                           'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable cluster with min-count and max-count.')
        instance.min_count = int(min_count)
        instance.max_count = int(max_count)
    if not instance.upgrade_settings:
        instance.upgrade_settings = AgentPoolUpgradeSettings()
    if max_surge:
        instance.upgrade_settings.max_surge = max_surge
    if disable_cluster_autoscaler:
        if not instance.enable_auto_scaling:
            logger.warning('Autoscaler is already disabled for this node pool.')
            return None
        instance.enable_auto_scaling = False
        instance.min_count = None
        instance.max_count = None
    # NOTE(review): tags is applied unconditionally, so an update invoked
    # without --tags sends tags=None — confirm the service treats that as
    # "clear tags" intentionally rather than "leave unchanged".
    instance.tags = tags
    if mode is not None:
        instance.mode = mode
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_delete(cmd, client, resource_group_name, cluster_name,
                         nodepool_name,
                         no_wait=False):
    """Delete the named node pool after confirming it exists on the cluster.

    Raises CLIError when the pool is absent so the user gets a clear message
    instead of a raw service error. Pool-name comparison is case-insensitive.
    """
    instances = client.list(resource_group_name, cluster_name)
    agentpool_exists = any(
        agentpool_profile.name.lower() == nodepool_name.lower()
        for agentpool_profile in instances
    )
    if not agentpool_exists:
        # Fixed typo in the user-facing message ("doesnt" -> "doesn't").
        raise CLIError("Node pool {} doesn't exist, "
                       "use 'aks nodepool list' to get current node pool list".format(nodepool_name))
    return sdk_no_wait(no_wait, client.delete, resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_get_upgrade_profile(cmd, client, resource_group_name, cluster_name, nodepool_name):
    """Return the upgrade profile (available versions) for one node pool."""
    profile = client.get_upgrade_profile(resource_group_name, cluster_name, nodepool_name)
    return profile
def _ensure_aks_acr_role_assignment(cli_ctx,
                                    client_id,
                                    registry_id,
                                    detach=False):
    """Add (or, when detach=True, remove) the 'acrpull' role for client_id on an ACR scope."""
    if detach:
        deleted = _delete_role_assignments(cli_ctx,
                                           'acrpull',
                                           client_id,
                                           scope=registry_id)
        if not deleted:
            raise CLIError('Could not delete role assignments for ACR. '
                           'Are you an Owner on this subscription?')
    else:
        added = _add_role_assignment(cli_ctx,
                                     'acrpull',
                                     client_id,
                                     scope=registry_id)
        if not added:
            raise CLIError('Could not create a role assignment for ACR. '
                           'Are you an Owner on this subscription?')
def _ensure_aks_service_principal(cli_ctx,
                                  service_principal=None,
                                  client_secret=None,
                                  subscription_id=None,
                                  dns_name_prefix=None,
                                  fqdn_subdomain=None,
                                  location=None,
                                  name=None):
    """Return service-principal credentials for the cluster, creating a new SP
    (with a generated secret and a salted cloudapp URL) when none is supplied.
    """
    aad_session_key = None
    # TODO: This really needs to be unit tested.
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if service_principal:
        # --service-principal specified; --client-secret must accompany it.
        if not client_secret:
            raise CLIError('--client-secret is required if --service-principal is specified')
    else:
        # No SP supplied: mint a secret and build a new SP.
        if not client_secret:
            client_secret = _create_client_secret()
        salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
        subdomain = dns_name_prefix if dns_name_prefix else fqdn_subdomain
        url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, subdomain, location)
        service_principal, aad_session_key = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
        if not service_principal:
            raise CLIError('Could not create a service principal with the right permissions. '
                           'Are you an Owner on this project?')
        logger.info('Created a service principal: %s', service_principal)
        # No role assignment is needed for this freshly created SPN.
    return {
        'client_secret': client_secret,
        'service_principal': service_principal,
        'aad_session_key': aad_session_key,
    }
def _ensure_osa_aad(cli_ctx,
                    aad_client_app_id=None,
                    aad_client_app_secret=None,
                    aad_tenant_id=None,
                    identifier=None,
                    name=None, create=False,
                    customer_admin_group_id=None):
    """Build the OpenShift AAD identity provider, optionally creating or updating
    the backing AAD application.

    With create=True, an AAD application named after the cluster is created (or,
    if one with the same identifier URI exists, updated with the cluster's real
    OAuth reply URL). Returns an OpenShiftManagedClusterAADIdentityProvider.
    """
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if create:
        # This reply_url is temporary set since Azure need one to create the AAD.
        app_id_name = 'https://{}'.format(name)
        if not aad_client_app_secret:
            aad_client_app_secret = _create_client_secret()
        # Delegate Sign In and Read User Profile permissions on Windows Azure Active Directory API
        resource_access = ResourceAccess(id="311a71cc-e848-46a1-bdf8-97ff7156d8e6",
                                         additional_properties=None, type="Scope")
        # Read directory permissions on Windows Azure Active Directory API
        directory_access = ResourceAccess(id="5778995a-e1bf-45b8-affa-663a9f3f4d04",
                                          additional_properties=None, type="Role")
        required_osa_aad_access = RequiredResourceAccess(resource_access=[resource_access, directory_access],
                                                         additional_properties=None,
                                                         resource_app_id="00000002-0000-0000-c000-000000000000")
        # Look for an existing app registered under the same identifier URI.
        list_aad_filtered = list(rbac_client.applications.list(filter="identifierUris/any(s:s eq '{}')"
                                                               .format(app_id_name)))
        if list_aad_filtered:
            aad_client_app_id = list_aad_filtered[0].app_id
            # Updating reply_url with the correct FQDN information returned by the RP
            reply_url = 'https://{}/oauth2callback/Azure%20AD'.format(identifier)
            update_application(client=rbac_client.applications,
                               object_id=list_aad_filtered[0].object_id,
                               display_name=name,
                               identifier_uris=[app_id_name],
                               reply_urls=[reply_url],
                               homepage=app_id_name,
                               password=aad_client_app_secret,
                               required_resource_accesses=[required_osa_aad_access])
            logger.info('Updated AAD: %s', aad_client_app_id)
        else:
            result, _aad_session_key = create_application(client=rbac_client.applications,
                                                          display_name=name,
                                                          identifier_uris=[app_id_name],
                                                          homepage=app_id_name,
                                                          password=aad_client_app_secret,
                                                          required_resource_accesses=[required_osa_aad_access])
            aad_client_app_id = result.app_id
            logger.info('Created an AAD: %s', aad_client_app_id)
        # Get the TenantID
        if aad_tenant_id is None:
            # Fall back to the tenant of the currently logged-in profile.
            profile = Profile(cli_ctx=cli_ctx)
            _, _, aad_tenant_id = profile.get_login_credentials()
    return OpenShiftManagedClusterAADIdentityProvider(
        client_id=aad_client_app_id,
        secret=aad_client_app_secret,
        tenant_id=aad_tenant_id,
        kind='AADIdentityProvider',
        customer_admin_group_id=customer_admin_group_id)
def _ensure_service_principal(cli_ctx,
                              service_principal=None,
                              client_secret=None,
                              subscription_id=None,
                              dns_name_prefix=None,
                              location=None,
                              name=None):
    """Return service-principal credentials, creating a new SP and assigning it
    the Contributor role when none is supplied.
    """
    # TODO: This really needs to be unit tested.
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if service_principal:
        # --service-principal specified; validate --client-secret came with it.
        if not client_secret:
            raise CLIError('--client-secret is required if --service-principal is specified')
    else:
        # No SP supplied: mint a secret and build a new SP.
        if not client_secret:
            client_secret = _create_client_secret()
        salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
        url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
        service_principal, _aad_session_key = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
        if not service_principal:
            raise CLIError('Could not create a service principal with the right permissions. '
                           'Are you an Owner on this project?')
        logger.info('Created a service principal: %s', service_principal)
        # add role first before save it
        if not _add_role_assignment(cli_ctx, 'Contributor', service_principal):
            logger.warning('Could not create a service principal with the right permissions. '
                           'Are you an Owner on this project?')
    return {
        'client_secret': client_secret,
        'service_principal': service_principal,
    }
def _create_client_secret():
# Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8') + special_char
return client_secret
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
    """Return the Azure location of a resource group.

    The GET also doubles as an existence check: it errors out when the
    group does not exist.
    """
    groups_client = cf_resource_groups(ctx, subscription_id=subscription_id)
    return groups_client.get(resource_group_name).location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler enabled')
if int(min_count) > int(max_count):
raise CLIError('Value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError('node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
raise CLIError('min-count and max-count are required for --enable-cluster-autoscaler, please use the flag')
def _validate_autoscaler_update_counts(min_count, max_count, is_enable_or_update):
"""
Validates the min, max, and node count when performing an update
"""
if min_count is None or max_count is None:
if is_enable_or_update:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
'--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError('Value of min-count should be less than or equal to value of max-count.')
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
    """Merge an unencrypted kubeconfig into the file at the specified path, or print it to
    stdout if the path is "-".

    :param path: destination kubeconfig path, or "-" for stdout.
    :param kubeconfig: the kubeconfig content to merge, as a string.
    :param overwrite_existing: forwarded to merge_kubernetes_configurations.
    :param context_name: forwarded to merge_kubernetes_configurations.
    """
    # Special case for printing to stdout
    if path == "-":
        print(kubeconfig)
        return
    # ensure that at least an empty ~/.kube/config exists
    directory = os.path.dirname(path)
    if directory and not os.path.exists(directory):
        try:
            os.makedirs(directory)
        except OSError as ex:
            # Another process may have created it between the check and here.
            if ex.errno != errno.EEXIST:
                raise
    if not os.path.exists(path):
        # Create the file with owner-only permissions (0600) since it holds credentials.
        with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
            pass
    # merge the new kubeconfig into the existing one
    fd, temp_path = tempfile.mkstemp()
    additional_file = os.fdopen(fd, 'w+t')
    try:
        # Write + flush so merge_kubernetes_configurations sees the full content
        # when it re-opens temp_path by name.
        additional_file.write(kubeconfig)
        additional_file.flush()
        merge_kubernetes_configurations(path, temp_path, overwrite_existing, context_name)
    except yaml.YAMLError as ex:
        logger.warning('Failed to merge credentials to kube config file: %s', ex)
    finally:
        additional_file.close()
        os.remove(temp_path)
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def _remove_osa_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of OpenShift ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags', 'plan', 'type', 'id']
ap_master_attrs = ['name', 'os_type']
net_attrs = ['peer_vnet_id']
for managed_cluster in managed_clusters:
for attr in attrs:
if hasattr(managed_cluster, attr) and getattr(managed_cluster, attr) is None:
delattr(managed_cluster, attr)
for attr in ap_master_attrs:
if getattr(managed_cluster.master_pool_profile, attr, None) is None:
delattr(managed_cluster.master_pool_profile, attr)
for attr in net_attrs:
if getattr(managed_cluster.network_profile, attr, None) is None:
delattr(managed_cluster.network_profile, attr)
return managed_clusters
def _validate_aci_location(norm_location):
"""
Validate the Azure Container Instance location
"""
aci_locations = [
"australiaeast",
"canadacentral",
"centralindia",
"centralus",
"eastasia",
"eastus",
"eastus2",
"eastus2euap",
"japaneast",
"northcentralus",
"northeurope",
"southcentralus",
"southeastasia",
"southindia",
"uksouth",
"westcentralus",
"westus",
"westus2",
"westeurope"
]
if norm_location not in aci_locations:
raise CLIError('Azure Container Instance is not available at location "{}".'.format(norm_location) +
' The available locations are "{}"'.format(','.join(aci_locations)))
def osa_list(cmd, client, resource_group_name=None):
    """List OpenShift managed clusters, subscription-wide or within one resource group,
    with often-null fields stripped from the output."""
    clusters = client.list_by_resource_group(resource_group_name) if resource_group_name else client.list()
    return _remove_osa_nulls(list(clusters))
def _format_workspace_id(workspace_id):
workspace_id = workspace_id.strip()
if not workspace_id.startswith('/'):
workspace_id = '/' + workspace_id
if workspace_id.endswith('/'):
workspace_id = workspace_id.rstrip('/')
return workspace_id
def openshift_create(cmd, client, resource_group_name, name,  # pylint: disable=too-many-locals
                     location=None,
                     compute_vm_size="Standard_D4s_v3",
                     compute_count=3,
                     aad_client_app_id=None,
                     aad_client_app_secret=None,
                     aad_tenant_id=None,
                     vnet_prefix="10.0.0.0/8",
                     subnet_prefix="10.0.0.0/24",
                     vnet_peer=None,
                     tags=None,
                     no_wait=False,
                     workspace_id=None,
                     customer_admin_group_id=None):
    """Create an OpenShift (ARO 3.11) managed cluster.

    Builds compute/infra/master pool profiles, sets up (or creates) the AAD
    identity provider, optionally configures vnet peering and monitoring, then
    submits the cluster and finally updates the AAD app's reply URL with the
    cluster's real public hostname.
    """
    logger.warning('Support for the creation of ARO 3.11 clusters ends 30 Nov 2020. Please see aka.ms/aro/4 for information on switching to ARO 4.')  # pylint: disable=line-too-long
    if location is None:
        location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    agent_pool_profiles = []
    agent_node_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
        name='compute',  # Must be 12 chars or less before ACS RP adds to it
        count=int(compute_count),
        vm_size=compute_vm_size,
        os_type="Linux",
        role=OpenShiftAgentPoolProfileRole.compute,
        subnet_cidr=subnet_prefix
    )
    agent_infra_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
        name='infra',  # Must be 12 chars or less before ACS RP adds to it
        count=int(3),
        vm_size="Standard_D4s_v3",
        os_type="Linux",
        role=OpenShiftAgentPoolProfileRole.infra,
        subnet_cidr=subnet_prefix
    )
    agent_pool_profiles.append(agent_node_pool_profile)
    agent_pool_profiles.append(agent_infra_pool_profile)
    agent_master_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
        name='master',  # Must be 12 chars or less before ACS RP adds to it
        count=int(3),
        vm_size="Standard_D4s_v3",
        os_type="Linux",
        subnet_cidr=subnet_prefix
    )
    identity_providers = []
    create_aad = False
    # Validating if the cluster is not existing since we are not supporting the AAD rotation on OSA for now
    # NOTE(review): osa_aad_identity is only assigned inside this except
    # branch; if the cluster already exists (no CloudError), the reference to
    # osa_aad_identity further below would raise NameError — confirm whether
    # an existing cluster is expected to fail earlier in create_or_update.
    try:
        client.get(resource_group_name, name)
    except CloudError:
        # Validating if aad_client_app_id aad_client_app_secret aad_tenant_id are set
        if aad_client_app_id is None and aad_client_app_secret is None and aad_tenant_id is None:
            create_aad = True
        osa_aad_identity = _ensure_osa_aad(cmd.cli_ctx,
                                           aad_client_app_id=aad_client_app_id,
                                           aad_client_app_secret=aad_client_app_secret,
                                           aad_tenant_id=aad_tenant_id, identifier=None,
                                           name=name, create=create_aad,
                                           customer_admin_group_id=customer_admin_group_id)
        identity_providers.append(
            OpenShiftManagedClusterIdentityProvider(
                name='Azure AD',
                provider=osa_aad_identity
            )
        )
    auth_profile = OpenShiftManagedClusterAuthProfile(identity_providers=identity_providers)
    default_router_profile = OpenShiftRouterProfile(name='default')
    if vnet_peer is not None:
        from msrestazure.tools import is_valid_resource_id, resource_id
        # A bare vnet name is expanded to a full resource ID in this RG.
        if not is_valid_resource_id(vnet_peer):
            vnet_peer = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=resource_group_name,
                namespace='Microsoft.Network', type='virtualNetwork',
                name=vnet_peer
            )
    if workspace_id is not None:
        workspace_id = _format_workspace_id(workspace_id)
        monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=True, workspace_resource_id=workspace_id)  # pylint: disable=line-too-long
    else:
        monitor_profile = None
    network_profile = NetworkProfile(vnet_cidr=vnet_prefix, peer_vnet_id=vnet_peer)
    osamc = OpenShiftManagedCluster(
        location=location, tags=tags,
        open_shift_version="v3.11",
        network_profile=network_profile,
        auth_profile=auth_profile,
        agent_pool_profiles=agent_pool_profiles,
        master_pool_profile=agent_master_pool_profile,
        router_profiles=[default_router_profile],
        monitor_profile=monitor_profile)
    try:
        # long_running_operation_timeout=300
        result = sdk_no_wait(no_wait, client.create_or_update,
                             resource_group_name=resource_group_name, resource_name=name, parameters=osamc)
        result = LongRunningOperation(cmd.cli_ctx)(result)
        instance = client.get(resource_group_name, name)
        # Second pass: patch the AAD app's reply URL with the cluster's real
        # public hostname, which is only known after creation.
        _ensure_osa_aad(cmd.cli_ctx,
                        aad_client_app_id=osa_aad_identity.client_id,
                        aad_client_app_secret=osa_aad_identity.secret,
                        aad_tenant_id=osa_aad_identity.tenant_id, identifier=instance.public_hostname,
                        name=name, create=create_aad)
    except CloudError as ex:
        if "The resource type could not be found in the namespace 'Microsoft.ContainerService" in ex.message:
            raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed')  # pylint: disable=line-too-long
        if "No registered resource provider found for location" in ex.message:
            raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed')  # pylint: disable=line-too-long
        raise ex
def openshift_show(cmd, client, resource_group_name, name):
    """Fetch a managed OpenShift cluster and return it with null fields stripped."""
    logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.')  # pylint: disable=line-too-long
    managed_cluster = client.get(resource_group_name, name)
    # _remove_osa_nulls works on a list; wrap, clean, and unwrap.
    return _remove_osa_nulls([managed_cluster])[0]
def openshift_scale(cmd, client, resource_group_name, name, compute_count, no_wait=False):
    """Scale the 'compute' agent pool of an OSA cluster to ``compute_count`` nodes.

    Fetches the cluster, updates the compute pool's node count in place, and
    submits the modified model back via create_or_update.
    """
    logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.')  # pylint: disable=line-too-long
    instance = client.get(resource_group_name, name)
    # TODO: change this approach when we support multiple agent pools.
    # NOTE(review): if no pool is named "compute", this silently scales pool 0
    # (original behavior preserved) -- confirm that fallback is intended.
    idx = 0
    for i, pool in enumerate(instance.agent_pool_profiles):
        if pool.name.lower() == "compute":
            idx = i
            break
    instance.agent_pool_profiles[idx].count = int(compute_count)  # pylint: disable=no-member
    # null out the AAD profile and add manually the masterAP name because otherwise validation complains
    instance.master_pool_profile.name = "master"
    instance.auth_profile = None
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def openshift_monitor_enable(cmd, client, resource_group_name, name, workspace_id, no_wait=False):
    """Enable Log Analytics monitoring on an OSA cluster, pointing it at ``workspace_id``."""
    logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.')  # pylint: disable=line-too-long
    instance = client.get(resource_group_name, name)
    instance.monitor_profile = OpenShiftManagedClusterMonitorProfile(
        enabled=True,
        workspace_resource_id=_format_workspace_id(workspace_id))
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def openshift_monitor_disable(cmd, client, resource_group_name, name, no_wait=False):
    """Disable Log Analytics monitoring on an OSA cluster."""
    logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.')  # pylint: disable=line-too-long
    instance = client.get(resource_group_name, name)
    instance.monitor_profile = OpenShiftManagedClusterMonitorProfile(
        enabled=False,
        workspace_resource_id=None)
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def _is_msi_cluster(managed_cluster):
    """Truthy when the cluster uses a managed identity (system- or user-assigned)."""
    return (managed_cluster and managed_cluster.identity and
            managed_cluster.identity.type.casefold() in ("systemassigned", "userassigned"))
| 46.026764 | 222 | 0.649448 |
79589b94c3e9a4a42740f98c0f04552e9b6f26c4 | 1,300 | py | Python | align_data/blogs/__init__.py | igor0/alignment-research-dataset | 4a0291a31dd57ce3f7c5fa322980e140be4eccdc | [
"MIT"
] | null | null | null | align_data/blogs/__init__.py | igor0/alignment-research-dataset | 4a0291a31dd57ce3f7c5fa322980e140be4eccdc | [
"MIT"
] | null | null | null | align_data/blogs/__init__.py | igor0/alignment-research-dataset | 4a0291a31dd57ce3f7c5fa322980e140be4eccdc | [
"MIT"
] | null | null | null | from pprint import pprint
from .wp_blog import WordpressBlog
from .medium_blog import MediumBlog
# Registry of every blog scraper to run.  Each WordpressBlog takes the site's
# root URL; the optional second argument is a list of regex patterns --
# presumably boilerplate lines to strip from scraped posts (TODO: confirm
# against WordpressBlog's signature).  Each MediumBlog takes the publication
# URL plus a "YYYY-MM-DD" date -- looks like the earliest post date to fetch;
# verify against MediumBlog.
BLOG_REGISTRY = [
    WordpressBlog("https://aiimpacts.org"),
    WordpressBlog("https://aipulse.org"),
    WordpressBlog("https://aisafety.camp"),
    WordpressBlog("https://casparoesterheld.com"),
    WordpressBlog("https://futureoflife.org"),
    WordpressBlog("https://intelligence.org"),
    WordpressBlog("https://jsteinhardt.wordpress.com"),
    WordpressBlog("https://longtermrisk.org/blog"),
    WordpressBlog("https://qualiacomputing.com", ["^by [^\n].*\n"]),
    WordpressBlog("https://slatestarcodex.com"),
    WordpressBlog("https://unstableontology.com"),
    WordpressBlog("https://vkrakovna.wordpress.com"),
    WordpressBlog("https://www.econlib.org"),
    WordpressBlog("https://www.ibm.com/blogs/policy"),
    WordpressBlog("https://www.microsoft.com/en-us/research/blog"),
    WordpressBlog("https://www.yudkowsky.net", ["^\s*Download as PDF\n"]),
    MediumBlog("https://ai-alignment.com", "2012-01-01"),
    MediumBlog("https://towardsdatascience.com", "2010-11-21"),
    MediumBlog("https://deepmindsafetyresearch.medium.com/", "2018-09-27"),
    MediumBlog("https://medium.com/@lucarade", "2018-04-28"),
    MediumBlog("https://medium.com/partnership-on-ai", "2020-04-15"),
]
| 43.333333 | 75 | 0.695385 |
79589d56ab6f6445d7ce6cc0cb8c013c585bfc86 | 105 | py | Python | app/auth/__init__.py | merRen22/PyTask | ec4591e244270914b9e85eb6637f7053a3b8e0b4 | [
"MIT"
] | null | null | null | app/auth/__init__.py | merRen22/PyTask | ec4591e244270914b9e85eb6637f7053a3b8e0b4 | [
"MIT"
] | null | null | null | app/auth/__init__.py | merRen22/PyTask | ec4591e244270914b9e85eb6637f7053a3b8e0b4 | [
"MIT"
] | null | null | null | from flask import Blueprint
# Flask blueprint for the authentication module; all routes mount under /auth.
auth = Blueprint('auth', __name__, url_prefix='/auth')
# Imported at the bottom (not the top) to avoid a circular import:
# the views module itself imports `auth` to register its routes.
from . import views
| 17.5 | 54 | 0.742857 |
79589d93b8082fed8af8d66055d0aa3b7ba431b1 | 1,457 | py | Python | asimo/train.py | selonsy/MachineLearning | 4e1be16aeab6a312511206751e9c168963d31839 | [
"MIT"
] | 1 | 2018-12-27T01:31:59.000Z | 2018-12-27T01:31:59.000Z | asimo/train.py | shenjl/MachineLearning | 4e1be16aeab6a312511206751e9c168963d31839 | [
"MIT"
] | 2 | 2019-11-10T10:29:10.000Z | 2019-11-10T10:29:10.000Z | asimo/train.py | selonsy/MachineLearning | 4e1be16aeab6a312511206751e9c168963d31839 | [
"MIT"
] | null | null | null | from train.run_Train_SiamFPN import train
if __name__ == "__main__":
    # NOTE: dataset/platform selection is done by (un)commenting one of the
    # path triplets below.  Assignments run top-to-bottom, so the LAST active
    # triplet wins -- currently "Windows OTB".
    # data_dir = "/home/hfan/Dataset/ILSVRC2015_crops/Data/VID/train"
    # train_imdb = "/home/hfan/Desktop/PyTorch-SiamFC/ILSVRC15-curation/imdb_video_train.json"
    # val_imdb = "/home/hfan/Desktop/PyTorch-SiamFC/ILSVRC15-curation/imdb_video_val.json"
    # Windows ILSVRC15 (overridden by the Windows OTB triplet below)
    data_dir = r"D:\workspace\MachineLearning\asimo\ILSVRC_crops\Data\VID\train"
    train_imdb = r"D:\workspace\MachineLearning\asimo\train\ILSVRC15-curation\imdb_video_train.json"
    val_imdb = r"D:\workspace\MachineLearning\asimo\train\ILSVRC15-curation\imdb_video_val.json"
    # Windows OTB
    data_dir = r"D:\workspace\MachineLearning\asimo\OTB_train_crops\img"
    train_imdb = r"D:\workspace\MachineLearning\asimo\imdb_video_train_otb.json"
    val_imdb = r"D:\workspace\MachineLearning\asimo\imdb_video_val_otb.json"
    # Linux
    # data_dir = r"/home/sjl/vot/SiamFPN/ILSVRC_crops/Data/VID/train"
    # train_imdb = r"/home/sjl/vot/SiamFPN/train/ILSVRC15-curation/imdb_video_train.json"
    # val_imdb = r"/home/sjl/vot/SiamFPN/train/ILSVRC15-curation/imdb_video_val.json"
    # Linux OTB
    # data_dir = r"/home/sjl/vot/SiamFPN/OTB_train_crops/img"
    # train_imdb = r"/home/sjl/vot/SiamFPN/imdb_video_train_otb.json"
    # val_imdb = r"/home/sjl/vot/SiamFPN/imdb_video_val_otb.json"
    # training SiamFC network, using GPU by default
    train(data_dir, train_imdb, val_imdb)
| 47 | 100 | 0.748799 |
79589f10b68d379a9611d6e6d8c0e4a681d596ba | 110 | py | Python | src/drugbank_downloader/__init__.py | cthoyt/drugbank_downloader | 1bfac2b7d900abf03788c9441882a739df313d9e | [
"MIT"
] | 11 | 2020-12-20T18:18:27.000Z | 2022-03-27T13:26:12.000Z | src/drugbank_downloader/__init__.py | cthoyt/drugbank_downloader | 1bfac2b7d900abf03788c9441882a739df313d9e | [
"MIT"
] | 1 | 2021-08-31T01:25:53.000Z | 2021-08-31T08:44:32.000Z | src/drugbank_downloader/__init__.py | cthoyt/drugbank_downloader | 1bfac2b7d900abf03788c9441882a739df313d9e | [
"MIT"
] | 1 | 2021-12-07T08:23:43.000Z | 2021-12-07T08:23:43.000Z | # -*- coding: utf-8 -*-
from .api import download_drugbank, get_drugbank_root, open_drugbank, parse_drugbank
| 27.5 | 84 | 0.763636 |
79589fca4ca277f0994845ffa398866d57e81855 | 1,650 | py | Python | simpleml/utils/errors.py | ptoman/SimpleML | a829ee05da01a75b64982d91a012e9274b6f7c6e | [
"BSD-3-Clause"
] | 15 | 2018-08-19T19:36:23.000Z | 2021-11-09T17:47:18.000Z | simpleml/utils/errors.py | ptoman/SimpleML | a829ee05da01a75b64982d91a012e9274b6f7c6e | [
"BSD-3-Clause"
] | 75 | 2020-10-11T17:58:59.000Z | 2022-03-29T22:34:54.000Z | simpleml/utils/errors.py | ptoman/SimpleML | a829ee05da01a75b64982d91a012e9274b6f7c6e | [
"BSD-3-Clause"
] | 4 | 2018-04-30T23:09:42.000Z | 2022-01-19T08:03:18.000Z | '''
Error classes
'''
__author__ = 'Elisha Yadgaran'
class SimpleMLError(Exception):
    """
    Base class for all SimpleML errors.

    Subclasses only need to set :attr:`prefix`; it is prepended to the first
    positional argument to build the human-readable ``message`` attribute.
    The base class itself (empty prefix) falls back to ``args`` in
    :meth:`__str__`.
    """
    # Prefix prepended to the error text; subclasses override this.
    prefix = ''

    def __init__(self, *args, **kwargs):
        super(SimpleMLError, self).__init__(*args, **kwargs)
        # Only build a message when there is both a prefix and some text.
        # (The original subclasses crashed with IndexError when constructed
        # with no arguments; this guard fixes that.)
        if self.prefix and self.args:
            self.message = self.prefix + self.args[0]

    def __str__(self):
        if hasattr(self, 'message'):
            return self.message
        if self.args:
            return self.args[0]
        # No message and no args: fall back to the class name rather than
        # raising IndexError.
        return self.__class__.__name__


class DatasetError(SimpleMLError):
    """Raised for dataset-related failures."""
    prefix = 'SimpleML Dataset Error: '


class PipelineError(SimpleMLError):
    """Raised for pipeline-related failures."""
    prefix = 'SimpleML Pipeline Error: '


class ModelError(SimpleMLError):
    """Raised for model-related failures."""
    prefix = 'SimpleML Model Error: '


class MetricError(SimpleMLError):
    """Raised for metric-related failures."""
    prefix = 'SimpleML Metric Error: '


class TrainingError(SimpleMLError):
    """Raised for failures during training."""
    prefix = 'SimpleML Training Error: '


class ScoringError(SimpleMLError):
    """Raised for failures during scoring."""
    prefix = 'SimpleML Scoring Error: '
| 30.555556 | 60 | 0.658182 |
7958a22dc5651b53160092dd56aaa17d3ce790cb | 4,323 | py | Python | envs/GTAV/models/densenet.py | Sindy98/spc2 | dbc1e7b937d80a90d16e69445e14ebcffc05fc68 | [
"BSD-3-Clause"
] | 14 | 2021-01-11T19:22:31.000Z | 2022-01-05T14:07:24.000Z | inplace_abn/models/densenet.py | calincru/inplace_abn | 5c82984ddbe2ef170f978c6273e7f3fbceb8a150 | [
"BSD-3-Clause"
] | 7 | 2021-04-20T08:55:28.000Z | 2021-09-25T07:26:31.000Z | inplace_abn/models/densenet.py | calincru/inplace_abn | 5c82984ddbe2ef170f978c6273e7f3fbceb8a150 | [
"BSD-3-Clause"
] | 4 | 2021-01-19T01:50:04.000Z | 2022-03-03T08:30:02.000Z | import sys
from collections import OrderedDict
from functools import partial
import torch.nn as nn
from ..modules import ABN, GlobalAvgPool2d, DenseModule
from ._util import try_index
class DenseNet(nn.Module):
    """DenseNet backbone built from four dense blocks separated by transition
    modules, with an optional global-average-pool + linear classifier head.

    NOTE(review): child modules are registered under fixed names
    (``mod1``..``mod5``, ``tra2``..``tra4``) -- presumably pretrained
    checkpoints rely on these exact names; confirm before renaming anything.
    """
    def __init__(self,
                 structure,
                 norm_act=ABN,
                 input_3x3=False,
                 growth=32,
                 theta=0.5,
                 classes=0,
                 dilation=1):
        """DenseNet

        Parameters
        ----------
        structure : list of int
            Number of layers in each of the four dense blocks of the network.
        norm_act : callable
            Function to create normalization / activation Module.
        input_3x3 : bool
            If `True` use three `3x3` convolutions in the input module instead of a single `7x7` one.
        growth : int
            Number of channels in each layer, i.e. the "growth" factor of the DenseNet.
        theta : float
            Reduction factor for the transition blocks.
        classes : int
            If not `0` also include global average pooling and a fully-connected layer with `classes` outputs at the end
            of the network.
        dilation : int or list of int
            List of dilation factors, or `1` to ignore dilation. If the dilation factor for a module is greater than `1`
            skip the pooling in the transition block right before it.
        """
        super(DenseNet, self).__init__()
        self.structure = structure
        if len(structure) != 4:
            raise ValueError("Expected a structure with four values")
        # Initial layers ("stem"): either three 3x3 convs or one 7x7 conv,
        # both followed by a stride-2 max pool; output has growth*2 channels.
        if input_3x3:
            layers = [
                ("conv1", nn.Conv2d(3, growth * 2, 3, stride=2, padding=1, bias=False)),
                ("bn1", norm_act(growth * 2)),
                ("conv2", nn.Conv2d(growth * 2, growth * 2, 3, stride=1, padding=1, bias=False)),
                ("bn2", norm_act(growth * 2)),
                ("conv3", nn.Conv2d(growth * 2, growth * 2, 3, stride=1, padding=1, bias=False)),
                ("pool", nn.MaxPool2d(3, stride=2, padding=1))
            ]
        else:
            layers = [
                ("conv1", nn.Conv2d(3, growth * 2, 7, stride=2, padding=3, bias=False)),
                ("pool", nn.MaxPool2d(3, stride=2, padding=1))
            ]
        self.mod1 = nn.Sequential(OrderedDict(layers))
        in_channels = growth * 2
        for mod_id in range(4):
            # d: per-stage dilation (scalar `dilation` applies to all stages).
            d = try_index(dilation, mod_id)
            # s: stride for this stage's transition pooling; 1 when dilation
            # replaces downsampling (d > 1) or for the first stage.
            s = 2 if d == 1 and mod_id > 0 else 1
            # Create transition module (BN + 1x1 conv that shrinks channels by
            # `theta`, plus an AvgPool when downsampling).  No transition
            # before the first dense block.
            if mod_id > 0:
                out_channels = int(in_channels * theta)
                layers = [
                    ("bn", norm_act(in_channels)),
                    ("conv", nn.Conv2d(in_channels, out_channels, 1, bias=False))
                ]
                if s == 2:
                    layers.append(("pool", nn.AvgPool2d(2, 2)))
                self.add_module("tra%d" % (mod_id + 1), nn.Sequential(OrderedDict(layers)))
                in_channels = out_channels
            # Create dense module
            mod = DenseModule(in_channels, growth, structure[mod_id], norm_act=norm_act, dilation=d)
            self.add_module("mod%d" % (mod_id + 2), mod)
            in_channels = mod.out_channels
        # Pooling and predictor
        self.bn_out = norm_act(in_channels)
        if classes != 0:
            self.classifier = nn.Sequential(OrderedDict([
                ("avg_pool", GlobalAvgPool2d()),
                ("fc", nn.Linear(in_channels, classes))
            ]))

    def forward(self, x):
        """Run the network.

        Returns class logits when ``classes`` was nonzero at construction
        time, otherwise the final normalized feature map.
        """
        x = self.mod1(x)
        x = self.mod2(x)
        x = self.tra2(x)
        x = self.mod3(x)
        x = self.tra3(x)
        x = self.mod4(x)
        x = self.tra4(x)
        x = self.mod5(x)
        x = self.bn_out(x)
        if hasattr(self, "classifier"):
            x = self.classifier(x)
        return x
# Block-structure presets for the standard DenseNet depths.
_NETS = {
    "121": {"structure": [6, 12, 24, 16]},
    "169": {"structure": [6, 12, 32, 32]},
    "201": {"structure": [6, 12, 48, 32]},
    "264": {"structure": [6, 12, 64, 48]},
}


def _register(name, params):
    """Expose a pre-configured DenseNet constructor as a module attribute
    (e.g. ``net_densenet121``) and return its name for ``__all__``."""
    net_name = "net_densenet" + name
    setattr(sys.modules[__name__], net_name, partial(DenseNet, **params))
    return net_name


__all__ = [_register(name, params) for name, params in _NETS.items()]
7958a4204655ee9cb168ab3e596cdc678be64b1f | 11,548 | py | Python | lane_finder.py | roshea6/CarND-LaneLines | ec4911a679ecd26fa9dcb954ae08c6a18bf17a09 | [
"MIT"
] | null | null | null | lane_finder.py | roshea6/CarND-LaneLines | ec4911a679ecd26fa9dcb954ae08c6a18bf17a09 | [
"MIT"
] | null | null | null | lane_finder.py | roshea6/CarND-LaneLines | ec4911a679ecd26fa9dcb954ae08c6a18bf17a09 | [
"MIT"
] | null | null | null | import cv2
import math
import numpy as np
def grayscale(img):
    """Return the single-channel grayscale version of an RGB image.

    To display the result with matplotlib, use
    ``plt.imshow(gray, cmap='gray')``.  For images loaded via
    ``cv2.imread`` (which are BGR), use ``cv2.COLOR_BGR2GRAY`` instead.
    """
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
def canny(img, low_threshold, high_threshold):
    """Return the Canny edge map of ``img`` using the given hysteresis thresholds."""
    edges = cv2.Canny(img, low_threshold, high_threshold)
    return edges
def gaussian_blur(img, kernel_size):
    """Smooth ``img`` with a square Gaussian kernel of side ``kernel_size``
    (sigma 0 lets OpenCV derive it from the kernel size)."""
    ksize = (kernel_size, kernel_size)
    return cv2.GaussianBlur(img, ksize, 0)
def region_of_interest(img, vertices):
    """
    Black out everything in ``img`` outside the polygon given by ``vertices``
    (a numpy array of integer points) and return the masked copy.
    """
    mask = np.zeros_like(img)
    # The fill colour must match the channel count of the input image:
    # a tuple of 255s for colour images, plain 255 for single-channel ones.
    if len(img.shape) > 2:
        fill_color = (255,) * img.shape[2]
    else:
        fill_color = 255
    cv2.fillPoly(mask, vertices, fill_color)
    # Keep only the pixels where the mask is nonzero.
    return cv2.bitwise_and(img, mask)
def draw_lines(img, lines, color=(0, 0, 255), thickness=2, vertices=None):
    """
    Draw extrapolated left/right lane lines onto ``img`` (mutated in place).

    Each Hough segment in ``lines`` is assigned to the left or right lane by
    which horizontal half of the image its first endpoint lies in; segments
    close to horizontal (|slope| <= 0.5) are discarded as noise.  For each
    lane, the average midpoint of its surviving segments is joined to that
    lane's two region-of-interest corners in ``vertices`` (the same array
    passed to ``region_of_interest``).

    Fixes vs. the original exploratory version:
      * ``lines`` may be None (``cv2.HoughLinesP`` returns None when nothing
        is found) -- previously this raised TypeError.
      * Perfectly vertical segments (x1 == x2) no longer raise
        ZeroDivisionError; they are treated as maximally steep.
      * Mutable default arguments replaced; missing ``vertices`` now skips
        drawing instead of raising IndexError.

    NOTE(review): ``color`` and ``thickness`` are currently unused -- the
    final lane lines are drawn with hard-coded [0, 0, 255] / width 5, as in
    the original.  Confirm whether the parameters should take effect.
    """
    if lines is None or vertices is None or len(vertices) == 0:
        return

    width = img.shape[1]
    left_midpoints = []   # midpoints of segments assigned to the left lane
    right_midpoints = []  # midpoints of segments assigned to the right lane

    for line in lines:
        for x1, y1, x2, y2 in line:
            if x2 == x1:
                slope = float("inf")  # vertical segment: maximally steep
            else:
                slope = (y2 - y1) / (x2 - x1)
            if abs(slope) <= 0.5:
                continue  # near-horizontal: not a lane edge
            midpoint = ((x1 + x2) / 2, (y1 + y2) / 2)
            # TODO: find a more robust left/right split than the image midline.
            if x1 < width / 2:
                left_midpoints.append(midpoint)
            else:
                right_midpoints.append(midpoint)

    # The 1.05 / 0.95 factors below reproduce the original tuning --
    # presumably to nudge the drawn line slightly inside the ROI; confirm.
    if left_midpoints:
        n = len(left_midpoints)
        avg = (int(sum(p[0] for p in left_midpoints) / n),
               int(sum(p[1] for p in left_midpoints) / n))
        cv2.line(img, avg, (int(vertices[0][1][0] * 1.05), vertices[0][1][1]),
                 [0, 0, 255], 5)
        cv2.line(img, avg, (int(vertices[0][0][0] * 1.05), vertices[0][0][1]),
                 [0, 0, 255], 5)

    if right_midpoints:
        n = len(right_midpoints)
        avg = (int(sum(p[0] for p in right_midpoints) / n),
               int(sum(p[1] for p in right_midpoints) / n))
        cv2.line(img, avg, (int(vertices[0][2][0] * .95), vertices[0][2][1]),
                 [0, 0, 255], 5)
        cv2.line(img, avg, (int(vertices[0][3][0] * .95), vertices[0][3][1]),
                 [0, 0, 255], 5)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap, vertices):
    """
    Run the probabilistic Hough transform on a Canny edge image and return a
    new black colour image with the detected lane lines drawn on it.
    """
    detected = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]),
                               minLineLength=min_line_len,
                               maxLineGap=max_line_gap)
    canvas = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    draw_lines(canvas, detected, vertices=vertices)
    return canvas
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
    """
    Blend an annotation layer onto the original image.

    ``img`` is the (mostly black) image with lines drawn on it; the result is
    ``initial_img * α + img * β + γ``.  Both images must have the same shape.
    """
    blended = cv2.addWeighted(initial_img, α, img, β, γ)
    return blended
if __name__ == "__main__":
    # Lane-detection demo: step through a test video frame by frame, showing
    # each processing stage.  Press any key to advance past each waitKey(0).
    vid = cv2.VideoCapture("test_videos/challenge.mp4")

    while True:
        ret, img = vid.read()
        if not ret:
            # End of stream or failed read: vid.read() returns (False, None)
            # and every downstream cv2 call would crash on the None frame
            # (the original looped forever and crashed here).
            break

        height = img.shape[0]
        width = img.shape[1]
        cv2.imshow("Original", img)
        cv2.waitKey(0)

        # Edge detection: grayscale -> Gaussian smoothing -> Canny,
        # using the standard 1:3 low:high threshold ratio.
        gray_img = grayscale(img)
        blurred_img = gaussian_blur(gray_img, 5)
        edge_img = canny(blurred_img, 50, 150)

        # Keep only the trapezoidal road region ahead of the car.
        vertices = np.array([[[width * .15, height],
                              [width * .45, height * .60],
                              [width * .55, height * .60],
                              [width * .93, height]]], dtype=np.int32)
        roi_img = region_of_interest(edge_img, vertices)
        cv2.imshow("ROI", roi_img)

        # Hough voting for the strongest line segments, drawn on a blank canvas.
        strongest_lines = hough_lines(roi_img, 1, np.pi / 180, 20, 20, 15, vertices)
        cv2.imshow("Hough lines", strongest_lines)

        # Overlay the detected lane lines on the original frame.
        hough_img = weighted_img(strongest_lines, img)
        cv2.imshow("Hough image", hough_img)
        cv2.waitKey(0)

    # Release the capture and close the display windows on exit.
    vid.release()
    cv2.destroyAllWindows()
7958a485934130f8ae26d571fc12a72d5ad42e8c | 15,293 | py | Python | cardinal_pythonlib/tools/pdf_to_booklet.py | bopopescu/pythonlib | 9c2187d6092ba133342ca3374eb7c86f9d296c30 | [
"Apache-2.0"
] | null | null | null | cardinal_pythonlib/tools/pdf_to_booklet.py | bopopescu/pythonlib | 9c2187d6092ba133342ca3374eb7c86f9d296c30 | [
"Apache-2.0"
] | null | null | null | cardinal_pythonlib/tools/pdf_to_booklet.py | bopopescu/pythonlib | 9c2187d6092ba133342ca3374eb7c86f9d296c30 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# cardinal_pythonlib/tools/pdf_to_booklet.py
"""
===============================================================================
Original code copyright (C) 2009-2020 Rudolf Cardinal (rudolf@pobox.com).
This file is part of cardinal_pythonlib.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================================================
**Command-line tool to make booklets from PDFs.**
RNC, 18 Nov 2017.
PURPOSE:
Take a PDF created with pdfnup or similar, with A4 sheets and two pages per
sheet, like this:
.. code-block:: none
PDF page 1 +-----+-----+
| | |
| 1 | 2 |
| | |
+-----+-----+
PDF page 2 +-----+-----+
| | |
| 3 | 4 |
| | |
+-----+-----+
PDF page 3 +-----+-----+
| | |
| 5 | 6 |
| | |
+-----+-----+
and create a similar PDF but like this:
.. code-block:: none
PDF page 1 +-----+-----+
| | |
| 6 | 1 |
| | |
+-----+-----+
PDF page 2 +-----+-----+
| | |
| 1 | 2 |
| | |
+-----+-----+
PDF page 3 +-----+-----+
| | |
| 1 | 2 |
| | |
+-----+-----+
so it can be printed double-sided and folded into an A5 booklet.
DEFINITIONS
- page = one side of a piece of paper BUT HERE, IN A BOOK CONTEXT, half that,
i.e. what ends up as a book "page"
- pair = two pages, making up one side of a sheet/leaf
- sheet = one piece of paper (= leaf) (= 4 pages, here)
PRINTING
It's our job here to make pairs from pages, and to create a PDF where each
PDF page is a pair.
It's the printer's job to make sheets from pages. When printing in duplex,
you will need to use SHORT-EDGE BINDING (if you use long-edge binding, the
reverse sides will be inverted).
FURTHER THOUGHT 19 Nov 2017
We can, of course, support LONG-EDGE binding as well; that just requires
an extra step of rotating all the even-numbered pages from the preceding
step. Supported, as below.
"""
import argparse
import logging
import math
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
from typing import List, Tuple
import unittest
from cardinal_pythonlib.logs import BraceStyleAdapter, main_only_quicksetup_rootlogger # noqa
log = BraceStyleAdapter(logging.getLogger(__name__))
# Names of the external command-line tools this script shells out to.
CONVERT = "convert"
MUTOOL = "mutool"
PDFJAM = "pdfjam"
# PDFNUP = "pdfnup"  # interface to pdfjam, but too restrictive
PDFTK = "pdftk"
# Hints shown (via require()) when a tool is missing from the PATH.
HELP_MISSING_IMAGEMAGICK = "Try 'sudo apt install imagemagick'"
HELP_MISSING_MUTOOL = "Try 'sudo apt install mupdf-tools'"
HELP_MISSING_PDFJAM = "Try 'sudo apt install pdfjam'"
HELP_MISSING_PDFTK = "Try 'sudo apt install pdftk'"
# LaTeX paper-size name passed through to pdfjam.
LATEX_PAPER_SIZE_A4 = "a4paper"
# Process exit codes.
EXIT_SUCCESS = 0
EXIT_FAILURE = 1
# =============================================================================
# Calculate page sequence
# =============================================================================
def calc_n_sheets(n_pages: int) -> int:
    """
    How many sheets does this number of pages need, on the basis of 2 pages
    per sheet?

    Implemented as ``(n_pages + 1) // 2``, which equals ``ceil(n_pages / 2)``
    for every integer but stays in exact integer arithmetic -- avoiding the
    float round-trip of ``math.ceil(n_pages / 2)`` (inexact above 2**53) and
    the type-checker confusion it caused in the original.
    """
    return (n_pages + 1) // 2
def calc_n_virtual_pages(n_sheets: int) -> int:
    """
    Convert a sheet count to a page count, rounding an odd sheet count up so
    the total is a multiple of 4 (each physical sheet carries 4 booklet pages).
    """
    return n_sheets * 2 if n_sheets % 2 == 0 else (n_sheets + 1) * 2
def page_sequence(n_sheets: int, one_based: bool = True) -> List[int]:
    """
    Generate the booklet page ordering for the given number of sheets.

    The result lists pages (0-based, or 1-based if ``one_based``) in the
    order they must be placed, two per PDF page, so that printing duplex
    (short-edge) and folding the stack yields a correctly ordered booklet.
    """
    n_pages = calc_n_virtual_pages(n_sheets)
    assert n_pages % 4 == 0
    half_n_pages = n_pages // 2
    # Seen from the top of the unfolded stack: right-hand pages count up from
    # 0, left-hand pages count down from the end.
    rights = range(half_n_pages)
    lefts = reversed(range(half_n_pages, n_pages))
    sequence = []  # type: List[int]
    for pair_index, (left, right) in enumerate(zip(lefts, rights)):
        # Every second pair is the reverse (verso) side of a sheet, so its
        # two pages are mirrored.
        if pair_index % 2 == 1:
            left, right = right, left
        sequence += [left, right]
    if one_based:
        sequence = [x + 1 for x in sequence]
    log.debug("{} sheets => page sequence {!r}", n_sheets, sequence)
    return sequence
# =============================================================================
# PDF processor
# =============================================================================
def require(executable: str, explanation: str = "") -> None:
    """
    Ensure that an external tool is available on the PATH.

    Raises :exc:`AssertionError` (with ``explanation`` appended, if given)
    when the executable cannot be found.  Unlike the original bare
    ``assert``, this check is NOT stripped when Python runs with ``-O``.
    """
    if shutil.which(executable):
        return
    raise AssertionError("Need {!r} on the PATH.{}".format(
        executable, "\n" + explanation if explanation else ""))
def run(args: List[str],
        get_output: bool = False,
        encoding: str = sys.getdefaultencoding()) -> Tuple[str, str]:
    """
    Run an external command, optionally capturing its output.

    Args:
        args: command and arguments, already split (no shell involved).
        get_output: capture and return stdout/stderr?
        encoding: text encoding used to decode captured output.
            NOTE: the default is evaluated once at import time, which is
            fine for a process-wide constant like getdefaultencoding().

    Returns:
        a ``(stdout, stderr)`` tuple; both are blank strings if the output
        wasn't wanted.

    Raises:
        subprocess.CalledProcessError: if the command exits non-zero
            (both branches below check the return code).
    """
    # Shell-quoted, newline-escaped rendition of the command, for logging only.
    printable = " ".join(shlex.quote(x) for x in args).replace("\n", r"\n")
    log.debug("Running external command: {}", printable)
    if get_output:
        p = subprocess.run(args, stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE, check=True)
        stdout, stderr = p.stdout.decode(encoding), p.stderr.decode(encoding)
    else:
        # No capture: child inherits our stdout/stderr.
        subprocess.check_call(args)
        stdout, stderr = "", ""
    return stdout, stderr
def get_page_count(filename: str) -> int:
    """
    Return the number of pages in a PDF, as reported by ``pdftk dump_data``.

    Raises:
        ValueError: if pdftk's output contains no ``NumberOfPages:`` line.
    """
    log.debug("Getting page count for {!r}", filename)
    require(PDFTK, HELP_MISSING_PDFTK)
    stdout, _ = run([PDFTK, filename, "dump_data"], get_output=True)
    # pdftk emits one "NumberOfPages: <n>" line in its data dump.
    regex = re.compile(r"^NumberOfPages: (\d+)$", re.MULTILINE)
    m = regex.search(stdout)
    if m:
        return int(m.group(1))
    raise ValueError(f"Can't get PDF page count for: {filename!r}")
def make_blank_pdf(filename: str, paper: str = "A4") -> None:
    """
    NOT USED.
    Makes a blank single-page PDF, using ImageMagick's ``convert``.

    Args:
        filename: output PDF path.
        paper: ImageMagick page size name (e.g. "A4").
    """
    # https://unix.stackexchange.com/questions/277892/how-do-i-create-a-blank-pdf-from-the-command-line # noqa
    # "xc:none" is ImageMagick's empty canvas pseudo-image.
    require(CONVERT, HELP_MISSING_IMAGEMAGICK)
    run([CONVERT, "xc:none", "-page", paper, filename])
def slice_pdf(input_filename: str, output_filename: str,
              slice_horiz: int, slice_vert: int) -> str:
    """
    Slice each page of the original into a ``slice_horiz`` x ``slice_vert``
    grid, converting to "one real page per PDF page".

    Args:
        input_filename: source PDF.
        output_filename: destination PDF (unused if no slicing is needed).
        slice_horiz: number of horizontal pieces per page.
        slice_vert: number of vertical pieces per page.

    Returns:
        the filename holding the result: ``output_filename``, or
        ``input_filename`` unchanged when 1 x 1 slicing was requested.
    """
    if slice_horiz == 1 and slice_vert == 1:
        log.debug("No slicing required")
        return input_filename  # nothing to do
    # (Log message fixed: a stray "mv" had crept into the wording.)
    log.info("Slicing each source page into {} horizontally x {} vertically",
             slice_horiz, slice_vert)
    log.debug("... from {!r} to {!r}", input_filename, output_filename)
    require(MUTOOL, HELP_MISSING_MUTOOL)
    # "mutool poster" divides each page into an x-by-y grid of new pages.
    run([
        MUTOOL,
        "poster",
        "-x", str(slice_horiz),
        "-y", str(slice_vert),
        input_filename,
        output_filename
    ])
    return output_filename
def booklet_nup_pdf(input_filename: str, output_filename: str,
                    latex_paper_size: str = LATEX_PAPER_SIZE_A4) -> str:
    """
    Takes a PDF (e.g. A4) and makes a 2x1 booklet (e.g. 2xA5 per A4) using
    ``pdfjam``. The booklet can be folded like a book and the final pages
    will be in order.

    Args:
        input_filename: source PDF.
        output_filename: destination PDF.
        latex_paper_size: LaTeX paper-size name passed to pdfjam.

    Returns:
        the output filename.
    """
    log.info("Creating booklet")
    log.debug("... {!r} -> {!r}", input_filename, output_filename)
    require(PDFJAM, HELP_MISSING_PDFJAM)
    n_pages = get_page_count(input_filename)
    n_sheets = calc_n_sheets(n_pages)
    log.debug("{} pages => {} sheets", n_pages, n_sheets)
    # Booklet page ordering; may reference virtual pages beyond n_pages.
    pagenums = page_sequence(n_sheets, one_based=True)
    pagespeclist = [str(p) if p <= n_pages else "{}"
                    for p in pagenums]
    # ... switches empty pages to "{}", which is pdfjam notation for
    # an empty page.
    pagespec = ",".join(pagespeclist)
    pdfjam_tidy = True  # clean up after yourself?
    args = [
        PDFJAM,
        "--paper", latex_paper_size,
        "--landscape",
        "--nup", "2x1",
        "--keepinfo",  # e.g. author information
        "--outfile", output_filename,
        "--tidy" if pdfjam_tidy else "--no-tidy",
        "--",  # "no more options"
        input_filename, pagespec
    ]
    run(args)
    return output_filename
def rotate_even_pages_180(input_filename: str, output_filename: str) -> str:
    """
    Rotates even-numbered pages 180 degrees, via ``pdftk shuffle``.
    This is needed for long-edge duplex printing of landscape booklets.

    Returns:
        the output filename.
    """
    log.info("Rotating even-numbered pages 180 degrees for long-edge "
             "duplex printing")
    log.debug("... {!r} -> {!r}", input_filename, output_filename)
    require(PDFTK, HELP_MISSING_PDFTK)
    args = [
        PDFTK,
        "A=" + input_filename,  # give it handle 'A'
        # handles are one or more UPPER CASE letters
        "shuffle",
        "Aoddnorth",  # for 'A', keep odd pages as they are
        "Aevensouth",  # for 'A', rotate even pages 180 degrees
        "output", output_filename,
    ]
    run(args)
    return output_filename
def convert_to_foldable(input_filename: str,
                        output_filename: str,
                        slice_horiz: int,
                        slice_vert: int,
                        overwrite: bool = False,
                        longedge: bool = False,
                        latex_paper_size: str = LATEX_PAPER_SIZE_A4) -> bool:
    """
    Runs a chain of tasks to convert a PDF to a useful booklet PDF:
    slice -> 2x1 n-up -> (optionally) rotate even pages for long-edge duplex.

    Args:
        input_filename: source PDF (left untouched).
        output_filename: destination PDF.
        slice_horiz: horizontal pieces per source page (1 = no slicing).
        slice_vert: vertical pieces per source page (1 = no slicing).
        overwrite: allow an existing output file to be replaced?
        longedge: prepare for long-edge (rather than short-edge) duplex?
        latex_paper_size: LaTeX paper-size name for the n-up step.

    Returns:
        True on success; False if the input is missing or the output
        exists and ``overwrite`` wasn't given.
    """
    if not os.path.isfile(input_filename):
        log.warning("Input file does not exist or is not a file")
        return False
    if not overwrite and os.path.isfile(output_filename):
        log.error("Output file exists; not authorized to overwrite (use "
                  "--overwrite if you are sure)")
        return False
    log.info("Processing {!r}", input_filename)
    with tempfile.TemporaryDirectory() as tmpdir:
        log.debug("Using temporary directory {!r}", tmpdir)
        intermediate_num = 0

        def make_intermediate() -> str:
            # Fresh numbered scratch filename inside the temp directory.
            nonlocal intermediate_num
            intermediate_num += 1
            return os.path.join(tmpdir,
                                f"intermediate_{intermediate_num}.pdf")

        # Run this as a chain, rewriting input_filename at each step:
        # Slice, if necessary.
        input_filename = slice_pdf(
            input_filename=input_filename,
            output_filename=make_intermediate(),
            slice_horiz=slice_horiz,
            slice_vert=slice_vert
        )
        # Make the final n-up
        input_filename = booklet_nup_pdf(
            input_filename=input_filename,
            output_filename=make_intermediate(),
            latex_paper_size=latex_paper_size
        )
        # Rotate?
        if longedge:
            input_filename = rotate_even_pages_180(
                input_filename=input_filename,
                output_filename=make_intermediate(),
            )
        # Done. The move must happen INSIDE the context manager, while the
        # last intermediate file still exists.
        log.info("Writing to {!r}", output_filename)
        shutil.move(input_filename, output_filename)
    return True
# =============================================================================
# Unit testing
# =============================================================================
class TestPdfToBooklet(unittest.TestCase):
    """
    Unit tests.
    """
    def test_sequence(self) -> None:
        # Smoke test only: logs the sequences for 1..8 sheets but makes no
        # assertions about their contents.
        for n_sheets in range(1, 8 + 1):
            log.info("{!r}", page_sequence(n_sheets=n_sheets, one_based=True))
# =============================================================================
# main
# =============================================================================
def main() -> None:
    """
    Command-line processor. See ``--help`` for details.

    Parses arguments, optionally runs the unit tests instead, then performs
    the conversion and exits with EXIT_SUCCESS/EXIT_FAILURE.
    NOTE(review): latex_paper_size is not exposed on the command line, so
    the A4 default is always used.
    """
    main_only_quicksetup_rootlogger(level=logging.DEBUG)
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "input_file",
        help="Input PDF (which is not modified by this program)")
    parser.add_argument(
        "output_file",
        help="Output PDF")
    parser.add_argument(
        "--slice_horiz", type=int, default=1,
        help="Slice the input PDF first into this many parts horizontally")
    parser.add_argument(
        "--slice_vert", type=int, default=1,
        help="Slice the input PDF first into this many parts vertically")
    parser.add_argument(
        "--longedge", action="store_true",
        help="Create PDF for long-edge duplex printing, not short edge")
    parser.add_argument(
        "--overwrite", action="store_true",
        help="Allow overwriting of an existing output file")
    parser.add_argument(
        "--unittest", action="store_true",
        help="Run unit tests and exit (you must pass dummy values for "
             "input/output files to use these tests)")
    # ... because requiring dummy input/output filenames for unit testing
    # is less confusing for the majority of users than showing syntax in
    # which they are optional!
    args = parser.parse_args()
    if args.unittest:
        log.warning("Performing unit tests")
        # unittest.main() doesn't play nicely with argparse; they both
        # use sys.argv by default (and we end up with what looks like garbage
        # from the argparse help facility); but this works:
        unittest.main(argv=[sys.argv[0]])
        sys.exit(EXIT_SUCCESS)
    success = convert_to_foldable(
        input_filename=os.path.abspath(args.input_file),
        output_filename=os.path.abspath(args.output_file),
        slice_horiz=args.slice_horiz,
        slice_vert=args.slice_vert,
        overwrite=args.overwrite,
        longedge=args.longedge
    )
    sys.exit(EXIT_SUCCESS if success else EXIT_FAILURE)
# Script entry point.
if __name__ == "__main__":
    main()
| 33.759382 | 111 | 0.573334 |
7958a5480f85a73ed366ced63af731f79aed5849 | 1,567 | py | Python | pythonforandroid/recipes/gevent/__init__.py | syrykh/python-for-android | dea23e8c6d11cfad0554f3e846cbe245578781fd | [
"MIT"
] | 6,278 | 2015-01-02T16:34:05.000Z | 2022-03-31T10:24:45.000Z | pythonforandroid/recipes/gevent/__init__.py | ganeshgandhiTN/python-for-android | 690dd18bcf9bbd3ff5a245ac1e7296cc266e5543 | [
"MIT"
] | 1,877 | 2015-01-01T16:16:10.000Z | 2022-03-27T17:34:34.000Z | pythonforandroid/recipes/gevent/__init__.py | ganeshgandhiTN/python-for-android | 690dd18bcf9bbd3ff5a245ac1e7296cc266e5543 | [
"MIT"
] | 1,565 | 2015-01-02T19:35:37.000Z | 2022-03-31T15:37:06.000Z | import re
from pythonforandroid.logger import info
from pythonforandroid.recipe import CythonRecipe
class GeventRecipe(CythonRecipe):
    """python-for-android build recipe for gevent 1.4.0."""
    version = '1.4.0'
    url = 'https://pypi.python.org/packages/source/g/gevent/gevent-{version}.tar.gz'
    depends = ['librt', 'setuptools']
    patches = ["cross_compiling.patch"]
    def get_recipe_env(self, arch=None, with_flags_in_cc=True):
        """
        - Moves all -I<inc> -D<macro> from CFLAGS to CPPFLAGS environment.
        - Moves all -l<lib> from LDFLAGS to LIBS environment.
        - Copies all -l<lib> from LDLIBS to LIBS environment.
        - Fixes linker name (use cross compiler) and flags (appends LIBS)
        """
        env = super().get_recipe_env(arch, with_flags_in_cc)
        # CFLAGS may only be used to specify C compiler flags, for macro definitions use CPPFLAGS
        # Pattern: a whitespace-or-start boundary followed by -I<path> or -D<macro>.
        regex = re.compile(r'(?:\s|^)-[DI][\S]+')
        env['CPPFLAGS'] = ''.join(re.findall(regex, env['CFLAGS'])).strip()
        env['CFLAGS'] = re.sub(regex, '', env['CFLAGS'])
        info('Moved "{}" from CFLAGS to CPPFLAGS.'.format(env['CPPFLAGS']))
        # LDFLAGS may only be used to specify linker flags, for libraries use LIBS
        # Pattern: -l<libname>; dots allowed (e.g. versioned library names).
        regex = re.compile(r'(?:\s|^)-l[\w\.]+')
        env['LIBS'] = ''.join(re.findall(regex, env['LDFLAGS'])).strip()
        env['LIBS'] += ' {}'.format(''.join(re.findall(regex, env['LDLIBS'])).strip())
        env['LDFLAGS'] = re.sub(regex, '', env['LDFLAGS'])
        info('Moved "{}" from LDFLAGS to LIBS.'.format(env['LIBS']))
        return env
# NOTE: p4a recipes conventionally expose a module-level `recipe` instance.
recipe = GeventRecipe()
| 44.771429 | 97 | 0.620294 |
7958a5c6865272342eaa73429f89db757ba44560 | 5,581 | py | Python | python/pls.py | mwalton/em-machineLearning | efd76961fa3b78e042ca481733152a683074d15c | [
"MIT"
] | null | null | null | python/pls.py | mwalton/em-machineLearning | efd76961fa3b78e042ca481733152a683074d15c | [
"MIT"
] | null | null | null | python/pls.py | mwalton/em-machineLearning | efd76961fa3b78e042ca481733152a683074d15c | [
"MIT"
] | null | null | null | import numpy as np
from sklearn.cross_decomposition import PLSRegression
import matplotlib.pyplot as plt
from math import sqrt
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
def loadData(XPath, yPath):
    """Load a feature matrix and target matrix from two CSV files."""
    features = np.genfromtxt(XPath, delimiter=",", dtype="float32")
    targets = np.genfromtxt(yPath, delimiter=",", dtype="float32")
    return features, targets
def scale(label):
    """Compress concentration values by taking their fourth root."""
    # Alternatives tried previously (kept for reference):
    # label[label<1e-10]=1e-10
    # return np.log10(label)
    return label ** 0.25
def standardize(featureVector):
    """Scale each column to zero mean and unit variance."""
    return StandardScaler().fit_transform(featureVector)
r = 0
"""
xtrainpath="/Users/michaelwalton/Dropbox/Evolved Machines 2014/Machine Learning/datasets/kaggle/paul_medC_BG2/train/sensorActivation.csv"
ytrainpath="/Users/michaelwalton/Dropbox/Evolved Machines 2014/Machine Learning/datasets/kaggle/paul_medC_BG2/train/concentration.csv"
xtestpath="/Users/michaelwalton/Dropbox/Evolved Machines 2014/Machine Learning/datasets/kaggle/paul_highC_BG1/test/sensorActivation.csv"
ytestpath="/Users/michaelwalton/Dropbox/Evolved Machines 2014/Machine Learning/datasets/kaggle/paul_highC_BG1/test/concentration.csv"
"""
"""
xtrainpath="/Users/michaelwalton/Dropbox/Evolved Machines 2014/Machine Learning/datasets/BGtest/BG1/0.01train/sensorActivation.csv"
ytrainpath="/Users/michaelwalton/Dropbox/Evolved Machines 2014/Machine Learning/datasets/BGtest/BG1/0.01train/concentration.csv"
xtestpath="/Users/michaelwalton/Dropbox/Evolved Machines 2014/Machine Learning/datasets/BGtest/BG2/0.19test/sensorActivation.csv"
ytestpath="/Users/michaelwalton/Dropbox/Evolved Machines 2014/Machine Learning/datasets/BGtest/BG2/0.19test/concentration.csv"
"""
# Dataset layout: <rootPath>/<prefix>{train,test}/{sensorActivation,concentration}.csv
rootPath='/Users/michaelwalton/Dropbox/Evolved Machines 2014/Machine Learning/datasets/compSig'
prefix='t5'
xtrainpath=("%s/%strain/sensorActivation.csv" % (rootPath, prefix))
ytrainpath=("%s/%strain/concentration.csv" % (rootPath, prefix))
xtestpath=("%s/%stest/sensorActivation.csv" % (rootPath, prefix))
ytestpath=("%s/%stest/concentration.csv" % (rootPath, prefix))
(Xtrain, ytrain) = loadData(xtrainpath, ytrainpath)
(Xtest, ytest) = loadData(xtestpath, ytestpath)
#trim off background and scale
ytrain=ytrain[:,1:]
#ytrain=scale(ytrain)
Xtrain=standardize(Xtrain)
#trim off background and scale
ytest = ytest[:,1:]
#ytest = scale(ytest)
Xtest = standardize(Xtest)
# Fit partial least squares regression and predict test concentrations.
pls = PLSRegression(n_components=10)
pls.fit(Xtrain, ytrain)
y_pls = pls.predict(Xtest)
# Was a Python-2-only `print` statement; parenthesised so the script also
# runs under Python 3 (identical output for a single expression in Python 2).
print(1 + pls.score(Xtest, ytest))
# Per-channel RMSE (columns: RED, GREEN, BLUE, YELLOW); the four
# copy-pasted appends are collapsed into one comprehension.
pls_rmse = [sqrt(mean_squared_error(ytest[:, i], y_pls[:, i]))
            for i in range(4)]
fig = plt.figure(figsize=(20,10))
ax1 = fig.add_subplot(241)
ax1.plot(y_pls[:,0], c='r', label='PLS Fit')
ax1.plot(ytest[:,0], c='grey', label='Target')
ax1.set_xlabel('Time')
ax1.set_ylabel('[c]')
#ax1.set_yscale('log')
ax1.set_title('RED')
ax1.legend()
ax2 = fig.add_subplot(242)
ax2.plot(y_pls[:,1], c='g', label='PLS Fit')
ax2.plot(ytest[:,1], c='grey', label='Target')
ax2.set_xlabel('Time')
ax2.set_title('GREEN')
ax2.legend()
ax3 = fig.add_subplot(243)
ax3.plot(y_pls[:,2], c='b', label='PLS Fit')
#ax3.plot(y_lin[2], c='r', label='Linear Fit')
#ax3.plot(y_poly[2], c='b', label='Poly Fit')
ax3.plot(ytest[:,2], c='grey', label='Target')
ax3.set_xlabel('Time')
#ax3.set_ylabel('log[c]')
ax3.set_title('BLUE')
ax3.legend()
ax4 = fig.add_subplot(244)
ax4.plot(y_pls[:,3], c='y', label='PLS Fit')
#ax4.plot(y_lin[3], c='r', label='Linear Fit')
#ax4.plot(y_poly[3], c='b', label='Poly Fit')
ax4.plot(ytest[:,3], c='grey', label='Target')
ax4.set_xlabel('Time')
#ax4.set_ylabel('log[c]')
ax4.set_title('YELLOW')
ax4.legend()
ax5 = fig.add_subplot(245)
ax5.scatter(ytest[:,0], y_pls[:,0], c='r', label=('PLS RMSE=%0.2f' % pls_rmse[0]))
#ax5.scatter(y[:,0], y_lin[0], c='r', label=('Linear RMSE=%0.2f' % lin_rmse[0]))
#ax5.scatter(y[:,0], y_poly[0], c='b', label=('Polynomial RMSE=%0.2f' % poly_rmse[0]))
ax5.plot(ytest[:,0],ytest[:,0],c='grey')
ax5.set_xlim(np.min(ytest[:,0]), np.max(ytest[:,0]))
ax5.set_xlabel('Prediction')
ax5.set_ylabel('Actual')
ax5.legend()
ax6 = fig.add_subplot(246)
ax6.scatter(ytest[:,1], y_pls[:,1], c='g', label=('PLS RMSE=%0.2f' % pls_rmse[1]))
#ax6.scatter(y[:,1], y_lin[1], c='r', label=('Linear RMSE=%0.2f' % lin_rmse[1]))
#ax6.scatter(y[:,1], y_poly[1], c='b', label=('Polynomial RMSE=%0.2f' % poly_rmse[1]))
ax6.plot(ytest[:,1],ytest[:,1],c='grey')
ax6.set_xlim(np.min(ytest[:,1]), np.max(ytest[:,1]))
ax6.set_xlabel('Prediction')
#ax6.set_ylabel('Actual')
ax6.legend()
ax7 = fig.add_subplot(247)
ax7.scatter(ytest[:,2], y_pls[:,2], c='b', label=('PLS RMSE=%0.2f' % pls_rmse[2]))
#ax7.scatter(y[:,2], y_lin[2], c='r', label=('Linear RMSE=%0.2f' % lin_rmse[2]))
#ax7.scatter(y[:,2], y_poly[2], c='b', label=('Polynomial RMSE=%0.2f' % poly_rmse[2]))
ax7.plot(ytest[:,2],ytest[:,2],c='grey')
ax7.set_xlim(np.min(ytest[:,2]), np.max(ytest[:,2]))
ax7.set_xlabel('Prediction')
#ax7.set_ylabel('Actual')
ax7.legend()
ax8 = fig.add_subplot(248)
ax8.scatter(ytest[:,3], y_pls[:,3], c='y', label=('PLS RMSE=%0.2f' % pls_rmse[3]))
#ax8.scatter(y[:,3], y_lin[3], c='r', label=('Linear RMSE=%0.2f' % lin_rmse[3]))
#ax8.scatter(y[:,3], y_poly[3], c='b', label=('Polynomial RMSE=%0.2f' % poly_rmse[3]))
ax8.plot(ytest[:,3],ytest[:,3],c='grey')
ax8.set_xlim(np.min(ytest[:,3]), np.max(ytest[:,3]))
ax8.set_xlabel('Prediction')
#ax8.set_ylabel('Actual')
ax8.legend()
plt.show() | 37.456376 | 137 | 0.708296 |
7958a5fa5e113f8bc2698658c23481eee58b2359 | 10,338 | py | Python | ansible/venv/lib/python2.7/site-packages/ansible/modules/network/netvisor/_pn_vrouterlbif.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 17 | 2017-06-07T23:15:01.000Z | 2021-08-30T14:32:36.000Z | ansible/ansible/modules/network/netvisor/_pn_vrouterlbif.py | SergeyCherepanov/ansible | 875711cd2fd6b783c812241c2ed7a954bf6f670f | [
"MIT"
] | 9 | 2017-06-25T03:31:52.000Z | 2021-05-17T23:43:12.000Z | ansible/ansible/modules/network/netvisor/_pn_vrouterlbif.py | SergeyCherepanov/ansible | 875711cd2fd6b783c812241c2ed7a954bf6f670f | [
"MIT"
] | 3 | 2018-05-26T21:31:22.000Z | 2019-09-28T17:00:45.000Z | #!/usr/bin/python
""" PN CLI vrouter-loopback-interface-add/remove """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_vrouterlbif
author: "Pluribus Networks (@amitsi)"
version_added: "2.2"
short_description: CLI command to add/remove vrouter-loopback-interface.
deprecated:
removed_in: '2.12'
why: Doesn't support latest Pluribus Networks netvisor
alternative: Latest modules will be pushed in Ansible future versions.
description:
- Execute vrouter-loopback-interface-add, vrouter-loopback-interface-remove
commands.
- Each fabric, cluster, standalone switch, or virtual network (VNET) can
provide its tenants with a virtual router (vRouter) service that forwards
traffic between networks and implements Layer 3 protocols.
options:
pn_cliusername:
description:
- Provide login username if user is not root.
required: False
pn_clipassword:
description:
- Provide login password if user is not root.
required: False
pn_cliswitch:
description:
- Target switch(es) to run the cli on.
required: False
default: 'local'
state:
description:
- State the action to perform. Use 'present' to add vrouter loopback
interface and 'absent' to remove vrouter loopback interface.
required: True
choices: ['present', 'absent']
pn_vrouter_name:
description:
- Specify the name of the vRouter.
required: True
pn_index:
description:
- Specify the interface index from 1 to 255.
pn_interface_ip:
description:
- Specify the IP address.
required: True
"""
EXAMPLES = """
- name: add vrouter-loopback-interface
pn_vrouterlbif:
state: 'present'
pn_vrouter_name: 'ansible-vrouter'
pn_interface_ip: '104.104.104.1'
- name: remove vrouter-loopback-interface
pn_vrouterlbif:
state: 'absent'
pn_vrouter_name: 'ansible-vrouter'
pn_interface_ip: '104.104.104.1'
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
returned: always
type: str
stdout:
description: The set of responses from the vrouterlb command.
returned: always
type: list
stderr:
description: The set of error responses from the vrouterlb command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
import shlex
# Ansible boiler-plate
from ansible.module_utils.basic import AnsibleModule
VROUTER_EXISTS = None
LB_INTERFACE_EXISTS = None
# Index range
MIN_INDEX = 1
MAX_INDEX = 255
def pn_cli(module):
    """
    Build the base Netvisor CLI invocation string.

    Credentials are taken from ``pn_cliusername``/``pn_clipassword`` (omitted
    when not supplied, i.e. running as root); the target is either the local
    switch or the switch named by ``pn_cliswitch``.

    :param module: The Ansible module to fetch username, password and switch
    :return: the cli string for further processing
    """
    params = module.params
    user = params['pn_cliusername']
    pwd = params['pn_clipassword']
    target = params['pn_cliswitch']
    if user and pwd:
        cli = '/usr/bin/cli --quiet --user %s:%s ' % (user, pwd)
    else:
        cli = '/usr/bin/cli --quiet '
    if target == 'local':
        cli += ' switch-local '
    else:
        cli += ' switch ' + target
    return cli
def check_cli(module, cli):
    """
    Check whether the vRouter and the loopback interface exist on the target
    node, setting the module-level flags VROUTER_EXISTS and
    LB_INTERFACE_EXISTS as a side effect (idempotency check via
    vrouter-show / vrouter-loopback-interface-show).

    :param module: The Ansible module to fetch input parameters
    :param cli: The CLI string
    """
    vrouter_name = module.params['pn_vrouter_name']
    interface_ip = module.params['pn_interface_ip']
    # Global flags
    global VROUTER_EXISTS, LB_INTERFACE_EXISTS
    # Check for vRouter: list all vRouter names and look for ours.
    check_vrouter = cli + ' vrouter-show format name no-show-headers '
    check_vrouter = shlex.split(check_vrouter)
    out = module.run_command(check_vrouter)[1]
    out = out.split()
    if vrouter_name in out:
        VROUTER_EXISTS = True
    else:
        VROUTER_EXISTS = False
    # Check for loopback interface: list this vRouter's loopback IPs.
    show = (cli + ' vrouter-loopback-interface-show vrouter-name %s format ip '
            'no-show-headers' % vrouter_name)
    show = shlex.split(show)
    out = module.run_command(show)[1]
    out = out.split()
    if interface_ip in out:
        LB_INTERFACE_EXISTS = True
    else:
        LB_INTERFACE_EXISTS = False
def run_cli(module, cli):
    """
    Execute the cli command on the target node(s) and exit the module with
    a JSON result (failed / changed) based on the command's outcome.

    :param cli: the complete cli string to be executed on the target node(s).
    :param module: The Ansible module to fetch command
    """
    cliswitch = module.params['pn_cliswitch']
    state = module.params['state']
    command = get_command_from_state(state)
    cmd = shlex.split(cli)
    # 'out' contains the output
    # 'err' contains the error messages
    result, out, err = module.run_command(cmd)
    # Report only the portion of the command after the switch name.
    print_cli = cli.split(cliswitch)[1]
    # Response in JSON format
    if result != 0:
        # Non-zero exit code: report failure (exit_json terminates here).
        module.exit_json(
            command=print_cli,
            stderr=err.strip(),
            msg="%s operation failed" % command,
            changed=False
        )
    if out:
        module.exit_json(
            command=print_cli,
            stdout=out.strip(),
            msg="%s operation completed" % command,
            changed=True
        )
    else:
        module.exit_json(
            command=print_cli,
            msg="%s operation completed" % command,
            changed=True
        )
def get_command_from_state(state):
    """
    Map the module ``state`` value to the matching Netvisor command name.

    :param state: The state for which the respective command name is required.
    :return: the command name, or None for any unrecognised state.
    """
    commands = {
        'present': 'vrouter-loopback-interface-add',
        'absent': 'vrouter-loopback-interface-remove',
    }
    return commands.get(state)
def main():
    """
    Entry point: parse module arguments, check idempotency via check_cli(),
    build the add/remove command string and execute it via run_cli().
    """
    module = AnsibleModule(
        argument_spec=dict(
            pn_cliusername=dict(required=False, type='str'),
            pn_clipassword=dict(required=False, type='str', no_log=True),
            pn_cliswitch=dict(required=False, type='str', default='local'),
            state=dict(required=True, type='str',
                       choices=['present', 'absent']),
            pn_vrouter_name=dict(required=True, type='str'),
            pn_interface_ip=dict(type='str'),
            pn_index=dict(type='int')
        ),
        required_if=(
            ["state", "present",
             ["pn_vrouter_name", "pn_interface_ip"]],
            ["state", "absent",
             ["pn_vrouter_name", "pn_interface_ip"]]
        )
    )
    # Accessing the arguments
    state = module.params['state']
    vrouter_name = module.params['pn_vrouter_name']
    interface_ip = module.params['pn_interface_ip']
    index = module.params['pn_index']
    command = get_command_from_state(state)
    # Building the CLI command string
    cli = pn_cli(module)
    if index:
        # Validate the optional interface index before using it.
        if not MIN_INDEX <= index <= MAX_INDEX:
            module.exit_json(
                msg="Index must be between 1 and 255",
                changed=False
            )
        index = str(index)
    if command == 'vrouter-loopback-interface-remove':
        check_cli(module, cli)
        if VROUTER_EXISTS is False:
            module.exit_json(
                skipped=True,
                msg='vRouter %s does not exist' % vrouter_name
            )
        if LB_INTERFACE_EXISTS is False:
            # Nothing to remove: already absent (idempotent skip).
            module.exit_json(
                skipped=True,
                msg=('Loopback interface with IP %s does not exist on %s'
                     % (interface_ip, vrouter_name))
            )
        if not index:
            # To remove loopback interface, we need the index.
            # If index is not specified, get the Loopback interface index
            # using the given interface ip.
            get_index = cli
            get_index += (' vrouter-loopback-interface-show vrouter-name %s ip '
                          '%s ' % (vrouter_name, interface_ip))
            get_index += 'format index no-show-headers'
            get_index = shlex.split(get_index)
            out = module.run_command(get_index)[1]
            index = out.split()[1]
        cli += ' %s vrouter-name %s index %s' % (command, vrouter_name, index)
    if command == 'vrouter-loopback-interface-add':
        check_cli(module, cli)
        if VROUTER_EXISTS is False:
            module.exit_json(
                skipped=True,
                msg=('vRouter %s does not exist' % vrouter_name)
            )
        if LB_INTERFACE_EXISTS is True:
            # Already present: idempotent skip.
            module.exit_json(
                skipped=True,
                msg=('Loopback interface with IP %s already exists on %s'
                     % (interface_ip, vrouter_name))
            )
        cli += (' %s vrouter-name %s ip %s'
                % (command, vrouter_name, interface_ip))
        if index:
            cli += ' index %s ' % index
    run_cli(module, cli)
if __name__ == '__main__':
main()
| 30.859701 | 80 | 0.639776 |
7958a6ccf00825ceb9787548ef8a585cc8b76663 | 4,126 | py | Python | growth/too/tool.py | deepchatterjeeligo/growth-too-marshal | cefd6a4549cdd34895bd8067833273baaf891341 | [
"MIT"
] | 14 | 2019-04-16T19:31:30.000Z | 2020-08-18T21:34:54.000Z | growth/too/tool.py | deepchatterjeeligo/growth-too-marshal | cefd6a4549cdd34895bd8067833273baaf891341 | [
"MIT"
] | 86 | 2019-04-16T20:43:04.000Z | 2021-04-23T16:16:39.000Z | growth/too/tool.py | deepchatterjeeligo/growth-too-marshal | cefd6a4549cdd34895bd8067833273baaf891341 | [
"MIT"
] | 11 | 2019-04-16T18:41:53.000Z | 2020-04-21T13:22:13.000Z | from getpass import getpass
import os
import click
from flask.cli import FlaskGroup
import lxml.etree
from passlib.apache import HtpasswdFile
from tqdm import tqdm
from .flask import app
from . import models, tasks
from . import views, twilio # noqa: F401
# FlaskGroup gives every subcommand an application context for `app`.
@click.group(cls=FlaskGroup, create_app=lambda *args, **kwargs: app)
def main():
    """Command line management console for the GROWTH ToO Marshal"""
# allow_extra_args/ignore_unknown_options: pass everything after `celery`
# through untouched to the Celery command-line machinery.
@app.cli.command(context_settings=dict(allow_extra_args=True,
                                       ignore_unknown_options=True))
@click.pass_context
def celery(ctx):
    """Manage Celery cluster."""
    tasks.celery.start(['celery'] + ctx.args)
@app.cli.command()
def gcn():
    """Listen for GCN Notices."""
    # Imported lazily inside the command — presumably to defer the cost or
    # side effects of the gcn module until this command runs; confirm.
    from .gcn import listen
    listen()
@app.cli.command()
def iers():
    """Update IERS data for precise positional astronomy.
    The IERS Bulletin A data set is used for precise time conversions and
    positional astronomy. To initialize Astroplan, you need to download it.
    According to https://astroplan.readthedocs.io/en/latest/faq/iers.html, you
    need to run this command::
        python -c 'from astroplan import download_IERS_A; download_IERS_A()'
    Unfortunately, the USNO server that provides the data file is extremely
    flaky. This tool attempts to work around that by retrying the download
    several times.
    """
    from retry.api import retry_call
    from astroplan import download_IERS_A
    from urllib.error import URLError
    # Up to 5 attempts with exponential backoff (1s, 2s, 4s, ...).
    retry_call(
        download_IERS_A, exceptions=(IndexError, URLError, ValueError),
        tries=5, delay=1, backoff=2)
@app.cli.command()
@click.argument('username', required=False)
def passwd(username):
    """Set the password for a user."""
    # Prompt interactively for anything not given on the command line.
    if username is None:
        username = input('Username: ')
    password = getpass()
    path = os.path.join(app.instance_path, 'htpasswd')
    os.makedirs(app.instance_path, exist_ok=True)
    try:
        htpasswd = HtpasswdFile(path)
    except FileNotFoundError:
        # No htpasswd file yet: start with an empty one in memory.
        htpasswd = HtpasswdFile()
    htpasswd.set_password(username, password)
    htpasswd.save(path)
# Parent group for the create/drop/recreate subcommands below.
@app.cli.group()
def db():
    """Manage the PostgreSQL database."""
@db.command()
@click.option('--sample', is_flag=True, help="Populate with sample data.")
def create(sample):
    """Create all tables from SQLAlchemy models"""
    models.create_all()
    models.db.session.commit()
    if sample:
        from .gcn import handle
        # Don't rely on Celery to be functional.
        tasks.celery.conf['task_always_eager'] = True
        models.db.session.merge(models.User(name='fritz'))
        models.db.session.commit()
        # Sample GCN notices shipped with the test data.
        filenames = ['GRB180116A_Fermi_GBM_Alert.xml',
                     'GRB180116A_Fermi_GBM_Flt_Pos.xml',
                     'GRB180116A_Fermi_GBM_Gnd_Pos.xml',
                     'GRB180116A_Fermi_GBM_Fin_Pos.xml',
                     'MS181101ab-1-Preliminary.xml',
                     'MS181101ab-4-Retraction.xml',
                     'AMON_151101ab.xml'.replace('151101ab', '151115')]
        with tqdm(filenames) as progress:
            for filename in progress:
                progress.set_description(
                    'processing GCN {}'.format(filename))
                with app.open_resource(
                        os.path.join('tests/data', filename)) as f:
                    payload = f.read()
                # Feed each notice through the normal GCN handler.
                handle(payload, lxml.etree.fromstring(payload))
        tasks.ztf_client.ztf_obs()
@db.command()
@click.option('--preserve', help='Preserve the named table.', multiple=True)
def drop(preserve):
    """Drop all tables from SQLAlchemy models"""
    # Reflect first so that tables not defined in the models are seen too.
    models.db.reflect(bind=None)
    models.db.metadata.drop_all(
        bind=models.db.get_engine(app, bind=None),
        # Drop everything except tables named via --preserve.
        tables=[value for key, value in models.db.metadata.tables.items()
                if key not in preserve])
    models.db.session.commit()
@db.command()
@click.option('--sample', is_flag=True, help="Populate with sample data.")
@click.pass_context
def recreate(ctx, sample):
    """Drop and recreate all tables from SQLAlchemy models"""
    # invoke() runs `drop` with its default options; forward() passes our
    # own options (--sample) through to `create`.
    ctx.invoke(drop)
    ctx.forward(create)
| 30.116788 | 78 | 0.656083 |
7958a763d07146962cade9309d57c019fd1cdd25 | 3,877 | py | Python | sparse/repos/jflamant/bispy/setup.py | yuvipanda/mybinder.org-analytics | 7b654e3e21dea790505c626d688aa15640ea5808 | [
"BSD-3-Clause"
] | 1 | 2021-03-18T23:33:35.000Z | 2021-03-18T23:33:35.000Z | sparse/repos/jflamant/bispy/setup.py | yuvipanda/mybinder.org-analytics | 7b654e3e21dea790505c626d688aa15640ea5808 | [
"BSD-3-Clause"
] | 17 | 2020-01-28T22:33:27.000Z | 2021-06-10T21:05:49.000Z | sparse/repos/jflamant/bispy/setup.py | yuvipanda/mybinder.org-analytics | 7b654e3e21dea790505c626d688aa15640ea5808 | [
"BSD-3-Clause"
] | 1 | 2021-07-17T12:55:22.000Z | 2021-07-17T12:55:22.000Z | """A setuptools based setup module.
See:
https://packaging.python.org/tutorials/distributing-packages/#configuring-your-project
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
# Directory containing this setup.py; README.md is resolved relative to it.
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
setup(
name='bispy',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.9.dev',
description='An open-source python framework for processing \
bivariate signals.',
long_description=long_description,
# The project's main homepage.
url='https://github.com/jflamant/bispy',
# Author details
author='Julien Flamant',
author_email='julien.flamant@phd.centralelille.fr',
# Choose your license
license='CeCIll',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Researchers',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='signal processing',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['numpy', 'scipy', 'matplotlib', 'numpy-quaternion'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
#package_data={
# 'sample': ['package_data.dat'],
#},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
#},
)
| 35.568807 | 94 | 0.679649 |
7958a7b2421c3e50f8aae1dafb8f614323f38329 | 2,763 | py | Python | userprofile/api_userprofile.py | FiniteElementries/barebone_server | 3713b7d384aa9501741eeebdc49398d917deabb3 | [
"MIT"
] | null | null | null | userprofile/api_userprofile.py | FiniteElementries/barebone_server | 3713b7d384aa9501741eeebdc49398d917deabb3 | [
"MIT"
] | null | null | null | userprofile/api_userprofile.py | FiniteElementries/barebone_server | 3713b7d384aa9501741eeebdc49398d917deabb3 | [
"MIT"
] | null | null | null |
from django.contrib.auth.models import User
from django.http import HttpResponse
import json
import ast
import sys
from userprofile.models import UserProfile
import account.api_account
from helper.http_handler import package_handle
def get_userprofile_detail(request):
    """
    Return profile details of ``target_username`` as seen by the requesting
    user's profile.

    :param request: Authenticated POST request carrying 'target_username'.
    :return: Response produced by :func:`package_handle`; on success the
        payload contains the target profile's info dict.
    """
    response = dict()
    response['success'] = False
    user = request.user
    my_profile = user.profile
    try:
        target_username = request.POST['target_username']
    except KeyError:
        # Bug fix: the error response was built but never returned, so
        # execution fell through with target_username undefined (NameError).
        return account.api_account.error_response("provide target_username")
    try:
        target_userprofile = UserProfile.objects.get(user__username=target_username)
    except UserProfile.DoesNotExist:
        # Bug fix: return the error response instead of falling through.
        return account.api_account.error_response("target_username does not exist")
    package = target_userprofile.get_userprofile_info(my_profile)
    response['success'] = True
    response['message'] = "success"
    # Bug fix: dict.iteritems() is Python 2 only and crashes on Python 3.
    # A shallow copy of the package is equivalent.
    response['package'] = dict(package)
    return package_handle(response)
def friend_action(request):
    """
    Apply a relationship action from the requesting user to a target user.

    Supported actions: "follow", "block", "unfollow", "unblock".

    :param request: Authenticated POST request carrying 'action' and
        'target_username'.
    :return: Response produced by :func:`package_handle`.
    """
    user = request.user
    try:
        action = request.POST['action']
        target_username = request.POST['target_username']
    except KeyError:
        return account.api_account.error_response("user POST method to include 'action' and 'target_username'")
    try:
        target_userprofile = UserProfile.objects.get(user__username=target_username)
    except UserProfile.DoesNotExist:
        # Bug fix: the lookup is on UserProfile, which raises
        # UserProfile.DoesNotExist. Catching User.DoesNotExist never matched,
        # so an unknown username produced an unhandled 500 error.
        return account.api_account.error_response("target username does not exist")
    response = dict()
    if action == "follow":
        user.profile.rs_follow(target_userprofile)
        # TODO: send friend request notification
    elif action == "block":
        user.profile.rs_block(target_userprofile)
    elif action == "unfollow" or action == "unblock":
        user.profile.rs_reset(target_userprofile)
    response['success'] = True
    response['message'] = "success"
    return package_handle(response)
def get_friend_list(request):
    """
    Return the requesting user's followers.

    :param request: Authenticated request.
    :return: Response whose package is a comma-separated string of usernames.
    """
    follower_names = request.user.profile.get_follower()
    response = {
        'success': True,
        'message': "success",
        'package': ",".join(follower_names),
    }
    return package_handle(response)
def change_userprofile_info(request):
    """
    Apply the key/value profile updates sent in the POST 'package' field.

    The 'package' field is a Python-literal dict string parsed with
    ast.literal_eval; each entry is forwarded to
    ``profile.change_userprofile_info``.
    """
    profile = request.user.profile
    updates = ast.literal_eval(request.POST['package'])
    for field_name, new_value in updates.items():
        profile.change_userprofile_info(field_name, new_value)
    return package_handle({'success': True, 'message': "success"})
| 22.647541 | 111 | 0.70105 |
7958a7d83648ae174cbd8bab35caaa84732942f0 | 3,221 | py | Python | viz_estimate_nolabel_dataset.py | duncangoudie/LaneATT | 263df500addf38e0912e3da7b4e878ca289391c3 | [
"MIT"
] | null | null | null | viz_estimate_nolabel_dataset.py | duncangoudie/LaneATT | 263df500addf38e0912e3da7b4e878ca289391c3 | [
"MIT"
] | null | null | null | viz_estimate_nolabel_dataset.py | duncangoudie/LaneATT | 263df500addf38e0912e3da7b4e878ca289391c3 | [
"MIT"
] | null | null | null | """
Given an unlabelled dataset (such as KITTI), visualise the result of images being run through the LaneATT model.
"""
import argparse
import cv2
import torch
import random
import numpy as np
from tqdm import tqdm, trange
from lib.config import Config
import os
import sys
PACKAGE_PARENT = '../'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
from lib.datasets.lane_dataset import LaneDataset
from lib.datasets.nolabel_dataset import NoLabelDataset
class Visualiser():
    """Runs images from an unlabelled folder through a LaneATT model and
    displays the predicted lanes with OpenCV."""

    def __init__(self, model_path: str, image_folder: str):
        """
        @param model_path: eg "/path/to/models/model_0015.pt"
        @param image_folder: folder containing the images to run inference on
        """
        # config file and its details (evaluation config is hard-coded here)
        self._cfg = Config("cfgs/eval/laneatt_culane_resnet18_evaluation.yml")
        self._test_parameters = self._cfg.get_test_parameters()
        # Prefer CUDA when available, otherwise fall back to CPU.
        self._device = torch.device('cpu') if not torch.cuda.is_available() else torch.device('cuda')
        self._model_path = model_path
        # Build the model from config and load the checkpoint weights.
        self._model = self._cfg.get_model()
        self._model.load_state_dict(torch.load(self._model_path)['model'])
        self._model = self._model.to(self._device)
        # Alternative input sizes kept for reference (KITTI 1226x370,
        # CULane 1640x590) — presumably dataset-dependent; confirm before reuse.
        #img_w = 1226
        #img_h = 370
        #img_w = 1640
        #img_h = 590
        img_w = 640
        img_h = 360
        self._dataset_nolabel = LaneDataset(dataset='nolabel_dataset', img_size=(img_h,img_w), root=image_folder)
        #self._dataset_nolabel = NoLabelDataset(root=image_folder)
        # Single-image batches, in order, so display index matches dataset index.
        self._test_dataloader = torch.utils.data.DataLoader(dataset=self._dataset_nolabel,
                                                            batch_size=1,
                                                            shuffle=False,
                                                            num_workers=1,)

    def visualise_results(self):
        """
        Run inference over the whole dataloader and show each annotated frame;
        blocks on a key press (cv2.waitKey) between frames.

        @return: None
        """
        predictions = []
        with torch.no_grad():
            for idx, (images, _, _) in enumerate(tqdm(self._test_dataloader)):
                images = images.to(self._device)
                output = self._model(images, **self._test_parameters)
                # Decode raw network output into lane objects.
                prediction = self._model.decode(output, as_lanes=True)
                print("PREDICTION: ", prediction)
                predictions.extend(prediction)
                # CHW float tensor -> HWC uint8 image for OpenCV display.
                img = (images[0].cpu().permute(1, 2, 0).numpy() * 255).astype(np.uint8)
                img, fp, fn = self._test_dataloader.dataset.draw_annotation(idx, img=img, pred=prediction[0])
                cv2.imshow('pred', img)
                cv2.waitKey(0)
def main():
    """Parse command-line arguments and run the lane visualisation.

    Expects ``-i`` (image folder) and ``-m`` (model checkpoint path).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-i",
                        help="filepath to folder of images",
                        action="store")
    parser.add_argument("-m",
                        help="filepath to model",
                        action="store")
    args = parser.parse_args()
    # Guard clause: bail out early when no image folder was supplied.
    if args.i is None:
        print('Incorrect arguments...')
        return
    Visualiser(model_path=args.m, image_folder=args.i).visualise_results()


if __name__== "__main__":
    main()
7958a84d59a6ca21d5db56b11a7eefd76b4b2c0d | 8,630 | py | Python | PhloxAR/math3/objects/quaternion.py | jardinier/phlox | f312569ec983b5f27c75846b34debc04fe7bdf98 | [
"Apache-2.0"
] | 1 | 2016-05-22T00:12:14.000Z | 2016-05-22T00:12:14.000Z | PhloxAR/math3/objects/quaternion.py | jardinier/phlox | f312569ec983b5f27c75846b34debc04fe7bdf98 | [
"Apache-2.0"
] | null | null | null | PhloxAR/math3/objects/quaternion.py | jardinier/phlox | f312569ec983b5f27c75846b34debc04fe7bdf98 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""Represents a Quaternion rotation.
The Quaternion class provides a number of convenient functions and
conversions.
::
import numpy as np
from math3 import Quaternion, Matrix33, Matrix44, Vector3, Vector4
q = Quaternion()
# explicit creation
q = Quaternion.from_x_rotation(np.pi / 2.0)
q = Quaternion.from_matrix(Matrix33.identity())
q = Quaternion.from_matrix(Matrix44.identity())
# inferred conversions
q = Quaternion(Quaternion())
q = Quaternion(Matrix33.identity())
q = Quaternion(Matrix44.identity())
# apply one quaternion to another
q1 = Quaternion.from_y_rotation(np.pi / 2.0)
q2 = Quaternion.from_x_rotation(np.pi / 2.0)
q3 = q1 * q2
# extract a matrix from the quaternion
m33 = q3.matrix33
m44 = q3.matrix44
# convert from matrix back to quaternion
q4 = Quaternion(m44)
# rotate a quaternion by a matrix
q = Quaternion() * Matrix33.identity()
q = Quaternion() * Matrix44.identity()
# apply quaternion to a vector
v3 = Quaternion() * Vector3()
v4 = Quaternion() * Vector4()
# undo a rotation
q = Quaternion.from_x_rotation(np.pi / 2.0)
v = q * Vector3([1.,1.,1.])
# ~q is the same as q.conjugate
original = ~q * v
assert np.allclose(original, v)
# get the dot product of 2 Quaternions
dot = Quaternion() | Quaternion.from_x_rotation(np.pi / 2.0)
"""
from __future__ import absolute_import
import numpy as np
from multipledispatch import dispatch
from .base import BaseObject, BaseQuaternion, BaseMatrix, BaseVector, NpProxy
from .. import quaternion
class Quaternion(BaseQuaternion):
    """A rotation quaternion stored as a length-4 numpy array (x, y, z, w).

    Operator overloads are selected per operand type via ``multipledispatch``:
    the ``BaseObject`` overloads reject unsupported combinations, while more
    specific overloads (quaternion, matrix, vector) implement the operation.
    """
    # Module implementing the free-function quaternion math this class wraps.
    _module = quaternion
    # Underlying array shape: four components.
    _shape = (4,)
    #: The X value of this Quaternion.
    x = NpProxy(0)
    #: The Y value of this Quaternion.
    y = NpProxy(1)
    #: The Z value of this Quaternion.
    z = NpProxy(2)
    #: The W value of this Quaternion.
    w = NpProxy(3)
    #: The X,Y value of this Quaternion as a numpy.ndarray.
    xy = NpProxy([0,1])
    #: The X,Y,Z value of this Quaternion as a numpy.ndarray.
    xyz = NpProxy([0,1,2])
    #: The X,Y,Z,W value of this Quaternion as a numpy.ndarray.
    xyzw = NpProxy([0,1,2,3])
    #: The X,Z value of this Quaternion as a numpy.ndarray.
    xz = NpProxy([0,2])
    #: The X,Z,W value of this Quaternion as a numpy.ndarray.
    xzw = NpProxy([0,2,3])
    #: The X,Y,W value of this Quaternion as a numpy.ndarray.
    xyw = NpProxy([0,1,3])
    #: The X,W value of this Quaternion as a numpy.ndarray.
    xw = NpProxy([0,3])

    ########################
    # Creation
    @classmethod
    def from_x_rotation(cls, theta, dtype=None):
        """Creates a new Quaternion with a rotation around the X-axis.
        """
        return cls(quaternion.create_from_x_rotation(theta, dtype))

    @classmethod
    def from_y_rotation(cls, theta, dtype=None):
        """Creates a new Quaternion with a rotation around the Y-axis.
        """
        return cls(quaternion.create_from_y_rotation(theta, dtype))

    @classmethod
    def from_z_rotation(cls, theta, dtype=None):
        """Creates a new Quaternion with a rotation around the Z-axis.
        """
        return cls(quaternion.create_from_z_rotation(theta, dtype))

    @classmethod
    def from_axis_rotation(cls, axis, theta, dtype=None):
        """Creates a new Quaternion with a rotation around the specified axis.
        """
        return cls(quaternion.create_from_axis_rotation(axis, theta, dtype))

    @classmethod
    def from_matrix(cls, matrix, dtype=None):
        """Creates a Quaternion from the specified Matrix (Matrix33 or Matrix44).
        """
        return cls(quaternion.create_from_matrix(matrix, dtype))

    @classmethod
    def from_eulers(cls, eulers, dtype=None):
        """Creates a Quaternion from the specified Euler angles.
        """
        return cls(quaternion.create_from_eulers(eulers, dtype))

    @classmethod
    def from_inverse_of_eulers(cls, eulers, dtype=None):
        """Creates a Quaternion from the inverse of the specified Euler angles.
        """
        return cls(quaternion.create_from_inverse_of_eulers(eulers, dtype))

    def __new__(cls, value=None, dtype=None):
        # With no value, build the identity quaternion; 3x3 / 4x4 shaped
        # input is interpreted as a rotation matrix and converted.
        if value is not None:
            obj = value
            if not isinstance(value, np.ndarray):
                obj = np.array(value, dtype=dtype)
            # matrix33, matrix44
            if obj.shape in ((4,4,), (3,3,)) or isinstance(obj, (Matrix33, Matrix44)):
                obj = quaternion.create_from_matrix(obj, dtype=dtype)
        else:
            obj = quaternion.create(dtype=dtype)
        obj = obj.view(cls)
        return super(Quaternion, cls).__new__(cls, obj)

    ########################
    # Basic Operators
    # Catch-all overloads: any BaseObject combination not handled by a more
    # specific dispatch below is rejected with a TypeError-style message.
    @dispatch(BaseObject)
    def __add__(self, other):
        self._unsupported_type('add', other)

    @dispatch(BaseObject)
    def __sub__(self, other):
        self._unsupported_type('subtract', other)

    @dispatch(BaseObject)
    def __mul__(self, other):
        self._unsupported_type('multiply', other)

    @dispatch(BaseObject)
    def __truediv__(self, other):
        self._unsupported_type('divide', other)

    @dispatch(BaseObject)
    def __div__(self, other):
        self._unsupported_type('divide', other)

    ########################
    # Quaternions
    @dispatch((BaseQuaternion, np.ndarray, list))
    def __sub__(self, other):
        # Component-wise subtraction via the ndarray base class.
        return Quaternion(super(Quaternion, self).__sub__(other))

    @dispatch((BaseQuaternion, list))
    def __mul__(self, other):
        # Quaternion * quaternion combines rotations (Hamilton product).
        return self.cross(other)

    @dispatch((BaseQuaternion, list))
    def __or__(self, other):
        # q1 | q2 is the dot product.
        return self.dot(other)

    def __invert__(self):
        # ~q yields the conjugate (the opposite rotation).
        return self.conjugate

    ########################
    # Matrices
    @dispatch(BaseMatrix)
    def __mul__(self, other):
        # Rotate by a matrix: convert the matrix to a quaternion first.
        return self * Quaternion(other)

    ########################
    # Vectors
    @dispatch(BaseVector)
    def __mul__(self, other):
        # Apply this rotation to a vector; result keeps the vector's type.
        return type(other)(quaternion.apply_to_vector(self, other))

    ########################
    # Methods and Properties
    @property
    def length(self):
        """Returns the length of this Quaternion.
        """
        return quaternion.length(self)

    def normalise(self):
        """Normalises this Quaternion in-place.
        """
        self[:] = quaternion.normalise(self)

    @property
    def normalised(self):
        """Returns a normalised version of this Quaternion as a new Quaternion.
        """
        return Quaternion(quaternion.normalise(self))

    @property
    def angle(self):
        """Returns the angle around the axis of rotation of this Quaternion as a float.
        """
        return quaternion.rotation_angle(self)

    @property
    def axis(self):
        """Returns the axis of rotation of this Quaternion as a Vector3.
        """
        return Vector3(quaternion.rotation_axis(self))

    def cross(self, other):
        """Returns the cross of this Quaternion and another.

        This is the equivalent of combining Quaternion rotations (like Matrix multiplication).
        """
        return Quaternion(quaternion.cross(self, other))

    def dot(self, other):
        """Returns the dot of this Quaternion and another.
        """
        return quaternion.dot(self, other)

    @property
    def conjugate(self):
        """Returns the conjugate of this Quaternion.

        This is a Quaternion with the opposite rotation.
        """
        return Quaternion(quaternion.conjugate(self))

    @property
    def inverse(self):
        """Returns the inverse of this quaternion.
        """
        return Quaternion(quaternion.inverse(self))

    def power(self, exponent):
        """Returns a new Quaternion representing this Quaternion to the power of the exponent.
        """
        return Quaternion(quaternion.power(self, exponent))

    @property
    def negative(self):
        """Returns the negative of the Quaternion.
        """
        return Quaternion(quaternion.negate(self))

    @property
    def is_identity(self):
        """Returns True if the Quaternion has no rotation (0.,0.,0.,1.).
        """
        return quaternion.is_identity(self)

    @property
    def matrix44(self):
        """Returns a Matrix44 representation of this Quaternion.
        """
        return Matrix44.from_quaternion(self)

    @property
    def matrix33(self):
        """Returns a Matrix33 representation of this Quaternion.
        """
        return Matrix33.from_quaternion(self)
from .vector3 import Vector3
from .matrix33 import Matrix33
from .matrix44 import Matrix44
| 30.280702 | 94 | 0.631518 |
7958a86575fc366fbcc46e811337400e5e010555 | 1,237 | py | Python | models/__init__.py | pszyu/irr | 43511b4d85f04d62f132bd2e76da31d052000731 | [
"Apache-2.0"
] | null | null | null | models/__init__.py | pszyu/irr | 43511b4d85f04d62f132bd2e76da31d052000731 | [
"Apache-2.0"
] | null | null | null | models/__init__.py | pszyu/irr | 43511b4d85f04d62f132bd2e76da31d052000731 | [
"Apache-2.0"
] | null | null | null | from . import flownet1s
from . import flownet1s_irr
from . import flownet1s_irr_bi
from . import flownet1s_irr_occ
from . import flownet1s_irr_occ_bi
from . import IRR_FlowNet
from . import pwcnet
from . import pwcnet_bi
from . import pwcnet_occ
from . import pwcnet_occ_bi
from . import pwcnet_irr
from . import pwcnet_irr_bi
from . import pwcnet_irr_occ
from . import pwcnet_irr_occ_bi
from . import IRR_PWC
from . import IRR_PWC_FEE_OEE
from . import IRR_PWC_FED_OEE
FlowNet1S = flownet1s.FlowNet1S
FlowNet1S_irr = flownet1s_irr.FlowNet1S
FlowNet1S_irr_bi = flownet1s_irr_bi.FlowNet1S
FlowNet1S_irr_occ = flownet1s_irr_occ.FlowNet1S
FlowNet1S_irr_occ_bi = flownet1s_irr_occ_bi.FlowNet1S
PWCNet = pwcnet.PWCNet
PWCNet_bi = pwcnet_bi.PWCNet
PWCNet_occ = pwcnet_occ.PWCNet
PWCNet_occ_bi = pwcnet_occ_bi.PWCNet
PWCNet_irr = pwcnet_irr.PWCNet
PWCNet_irr_bi = pwcnet_irr_bi.PWCNet
PWCNet_irr_occ = pwcnet_irr_occ.PWCNet
PWCNet_irr_occ_bi = pwcnet_irr_occ_bi.PWCNet
IRR_FlowNet = IRR_FlowNet.FlowNet1S
IRR_PWC = IRR_PWC.PWCNet
IRR_PWC_FEE_OEE = IRR_PWC_FEE_OEE.PWCNet
IRR_PWC_FED_OEE = IRR_PWC_FED_OEE.PWCNet
| 29.452381 | 53 | 0.755861 |
7958a8d3d0de125943542b2bdfdd5b515cd31807 | 691 | py | Python | morad/models.py | MoradAlkhatib/djangoX | 98759b6ad7931f7af78c892725ea147eeee0a529 | [
"MIT"
] | null | null | null | morad/models.py | MoradAlkhatib/djangoX | 98759b6ad7931f7af78c892725ea147eeee0a529 | [
"MIT"
] | null | null | null | morad/models.py | MoradAlkhatib/djangoX | 98759b6ad7931f7af78c892725ea147eeee0a529 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth import get_user_model
from django.urls import reverse
class Car(models.Model):
    """A car listing created and owned by a site user."""
    name = models.CharField(max_length=64)
    color = models.CharField(max_length=32)
    type_car = models.CharField(max_length=64)
    # NOTE(review): max_length has no effect on FloatField — confirm and drop
    # in a future migration.
    price = models.FloatField(max_length=32 ,null=False ,blank=False , default=0)
    model_car = models.FloatField(max_length=32)
    description = models.TextField()
    # NOTE(review): "honer" looks like a typo for "owner"; renaming the field
    # would require a schema migration, so it is only flagged here. The ''
    # default for a ForeignKey also looks suspect — verify.
    honer = models.ForeignKey( get_user_model() , on_delete = models.CASCADE ,null=False,blank=False ,default='')

    def __str__(self) :
        # Display the car's name in admin/list views.
        return self.name

    def get_absolute_url(self):
        # URL of this car's detail view, resolved by primary key.
        return reverse('detail-car' , args=[str(self.pk)])
7958a98a5d6177ff9cb381d9d4d7c417df5fb786 | 443 | py | Python | ch02-python/mini_project/components/selecter.py | skforest/intro_ds | 478a6b236c2e33c4baffec8aafa8e0a8ed68dca8 | [
"Apache-2.0"
] | null | null | null | ch02-python/mini_project/components/selecter.py | skforest/intro_ds | 478a6b236c2e33c4baffec8aafa8e0a8ed68dca8 | [
"Apache-2.0"
] | null | null | null | ch02-python/mini_project/components/selecter.py | skforest/intro_ds | 478a6b236c2e33c4baffec8aafa8e0a8ed68dca8 | [
"Apache-2.0"
] | null | null | null | # -*- coding: UTF-8 -*-
"""
此脚本用于得到出现次数最多的元素
"""
from mini_project.components.counter import word_count
def get_frequent_item(data):
    """
    Return the element(s) that occur most frequently in the given list.

    Parameters
    ----------
    data : list
        Raw data list.

    Returns
    -------
    list
        Elements with the highest occurrence count; an empty list when
        ``data`` is empty.
    """
    # Guard: max() over an empty dict's values would raise ValueError.
    if not data:
        return []
    counts = word_count(data)
    max_num = max(counts.values())
    # list(filter(...)) keeps Python 2/3 compatibility (filter is lazy on 3).
    return list(filter(lambda key: counts[key] == max_num, counts))
| 17.038462 | 65 | 0.629797 |
7958abed719329e6f485b63e0b929b30c9d23e76 | 6,056 | py | Python | tests/ut/python/pipeline/parse/test_operator.py | GuoSuiming/mindspore | 48afc4cfa53d970c0b20eedfb46e039db2a133d5 | [
"Apache-2.0"
] | 55 | 2020-12-17T10:26:06.000Z | 2022-03-28T07:18:26.000Z | tests/ut/python/pipeline/parse/test_operator.py | forwhat461/mindspore | 59a277756eb4faad9ac9afcc7fd526e8277d4994 | [
"Apache-2.0"
] | 1 | 2020-12-29T06:46:38.000Z | 2020-12-29T06:46:38.000Z | tests/ut/python/pipeline/parse/test_operator.py | forwhat461/mindspore | 59a277756eb4faad9ac9afcc7fd526e8277d4994 | [
"Apache-2.0"
] | 14 | 2021-01-29T02:39:47.000Z | 2022-03-23T05:00:26.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test_operator """
import numpy as np
from mindspore import Tensor, Model, context
from mindspore.nn import Cell
from mindspore.nn import ReLU
from mindspore.ops import operations as P
from ...ut_filter import non_graph_engine
class arithmetic_Net(Cell):
    """Test network exercising parser support for arithmetic operators.

    ``symbol`` selects which arithmetic operation is performed on the
    ``loop_count`` pair; the result controls how often ReLU is applied by
    iterating over a tuple built from the operands.
    """
    def __init__(self, symbol, loop_count=(1, 3)):
        super().__init__()
        # Integer code (1..8) selecting the operator branch in construct().
        self.symbol = symbol
        # Pair of operands (a, b) used by the arithmetic expressions.
        self.loop_count = loop_count
        self.relu = ReLU()
    def construct(self, x):
        a, b = self.loop_count
        y = self.symbol
        if y == 1:
            # in-place add (+=)
            a += b
            for _ in (b, a):
                x = self.relu(x)
        elif y == 2:
            # in-place subtract (-=)
            b -= a
            for _ in (a, b):
                x = self.relu(x)
        elif y == 3:
            # binary add
            z = a + b
            for _ in (b, z):
                x = self.relu(x)
        elif y == 4:
            # binary subtract
            z = b - a
            for _ in (z, b):
                x = self.relu(x)
        elif y == 5:
            # multiply
            z = a * b
            for _ in (a, z):
                x = self.relu(x)
        elif y == 6:
            # divide
            z = b / a
            for _ in (a, z):
                x = self.relu(x)
        elif y == 7:
            # modulo (+1 keeps the tuple element non-zero)
            z = b % a + 1
            for _ in (a, z):
                x = self.relu(x)
        else:
            # logical not on an integer operand
            if not a:
                x = self.relu(x)
        return x
class logical_Net(Cell):
    """Test network exercising parser support for logical operators.

    ``symbol`` == 1 tests ``and``; any other value tests ``or``. Depending on
    the truthiness of the ``loop_count`` pair, either ReLU or Flatten is
    applied to the input.
    """
    def __init__(self, symbol, loop_count=(1, 3)):
        super().__init__()
        # Selector: 1 -> "and" branch, otherwise -> "or" branch.
        self.symbol = symbol
        # Pair of operands (a, b) used as boolean values.
        self.loop_count = loop_count
        self.fla = P.Flatten()
        self.relu = ReLU()
    def construct(self, x):
        a, b = self.loop_count
        y = self.symbol
        if y == 1:
            if b and a:
                x = self.relu(x)
            else:
                x = self.fla(x)
        else:
            if b or a:
                x = self.relu(x)
            else:
                x = self.fla(x)
        return x
def arithmetic_operator_base(symbol):
    """Build an arithmetic_Net for *symbol* and run one prediction in
    graph mode on random input."""
    data = np.random.randn(2, 3, 4, 5).astype(np.float32)
    tensor = Tensor(data)
    symbol_ids = {"++": 1, "--": 2, "+": 3, "-": 4, "*": 5, "/": 6, "%": 7, "not": 8}
    net = arithmetic_Net(symbol_ids[symbol])
    context.set_context(mode=context.GRAPH_MODE)
    Model(net).predict(tensor)
def logical_operator_base(symbol):
    """Build a logical_Net for *symbol* ("and"/"or") and run one prediction
    in graph mode on random input."""
    data = np.random.randn(2, 3, 4, 5).astype(np.float32)
    tensor = Tensor(data)
    symbol_ids = {"and": 1, "or": 2}
    net = logical_Net(symbol_ids[symbol])
    context.set_context(mode=context.GRAPH_MODE)
    Model(net).predict(tensor)
@non_graph_engine
def test_ME_arithmetic_operator_0080():
    """Smoke-test the 'not' operator branch of arithmetic_Net."""
    arithmetic_operator_base('not')
@non_graph_engine
def test_ME_arithmetic_operator_0070():
    """Smoke-test the 'and' operator branch of logical_Net."""
    # NOTE(review): the name says "arithmetic" but this exercises the logical
    # 'and' path — confirm whether the test name or the call is intended.
    logical_operator_base('and')
@non_graph_engine
def test_ME_logical_operator_0020():
    """Smoke-test the 'or' operator branch of logical_Net."""
    logical_operator_base('or')
def test_ops():
    """Exercise //, **, % and nested if-conditions inside a graph-mode Cell."""
    class OpsNet(Cell):
        """ OpsNet definition """
        def __init__(self, x, y):
            super(OpsNet, self).__init__()
            # Scalar operands used alongside the tensor inputs.
            self.x = x
            self.y = y
            # Attributes deliberately named after builtins to test the parser.
            self.int = 4
            self.float = 3.2
            self.str_a = "hello"
            self.str_b = "world"
        def construct(self, x, y):
            # Tensor-tensor operator results.
            h = x // y
            m = x ** y
            n = x % y
            # Scalar-scalar operator results (with x=9, y=2: r=4, s=81, t=1).
            r = self.x // self.y
            s = self.x ** self.y
            t = self.x % self.y
            p = h + m + n
            q = r + s + t
            ret_pow = p ** q + q ** p
            ret_mod = p % q + q % p
            ret_floor = p // q + q // p
            ret = ret_pow + ret_mod + ret_floor
            # All four conditions hold for OpsNet(9, 2): 4 > 3.2, the list is
            # not None, "hello"+"world" matches, and q == 4 + 81 + 1 == 86.
            if self.int > self.float:
                if [1, 2, 3] is not None:
                    if self.str_a + self.str_b == "helloworld":
                        if q == 86:
                            print("hello world")
                            return ret
            return x
    net = OpsNet(9, 2)
    x = Tensor(np.random.randint(low=1, high=10, size=(2, 3, 4), dtype=np.int32))
    y = Tensor(np.random.randint(low=10, high=20, size=(2, 3, 4), dtype=np.int32))
    context.set_context(mode=context.GRAPH_MODE)
    net(x, y)
def test_in_dict():
    """Exercise 'in' / 'not in' dict membership tests inside a graph-mode Cell."""
    class InDictNet(Cell):
        """ InDictNet definition """
        def __init__(self, key_in, key_not_in):
            super(InDictNet, self).__init__()
            # Key expected to be present in the dict built in construct().
            self.key_in = key_in
            # Key expected to be absent from that dict.
            self.key_not_in = key_not_in
        def construct(self, x, y, z):
            d = {"a": x, "b": y}
            ret_in = 1
            ret_not_in = 2
            # "a" is in d, so ret_in becomes x.
            if self.key_in in d:
                ret_in = d[self.key_in]
            # "c" is not in d, so ret_not_in becomes z.
            if self.key_not_in not in d:
                ret_not_in = z
            ret = ret_in + ret_not_in
            return ret
    net = InDictNet("a", "c")
    x = Tensor(np.random.randint(low=1, high=10, size=(2, 3, 4), dtype=np.int32))
    y = Tensor(np.random.randint(low=10, high=20, size=(2, 3, 4), dtype=np.int32))
    z = Tensor(np.random.randint(low=20, high=30, size=(2, 3, 4), dtype=np.int32))
    context.set_context(mode=context.GRAPH_MODE)
    net(x, y, z)
| 29.398058 | 91 | 0.521301 |
7958ac011df18df3b5013ca807d5d502d373f8a2 | 46,411 | py | Python | segno/__init__.py | eduardomazolini/segno | 9bf6f74237485d082d9251be8e0d0e463fd4ffea | [
"BSD-3-Clause"
] | null | null | null | segno/__init__.py | eduardomazolini/segno | 9bf6f74237485d082d9251be8e0d0e463fd4ffea | [
"BSD-3-Clause"
] | null | null | null | segno/__init__.py | eduardomazolini/segno | 9bf6f74237485d082d9251be8e0d0e463fd4ffea | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - 2020 -- Lars Heuer
# All rights reserved.
#
# License: BSD License
#
"""\
QR Code and Micro QR Code implementation.
"QR Code" and "Micro QR Code" are registered trademarks of DENSO WAVE INCORPORATED.
"""
from __future__ import absolute_import, unicode_literals
import sys
import io
from . import encoder
from .encoder import DataOverflowError
from . import writers, utils
try: # pragma: no cover
str_type = basestring # noqa: F821
except NameError: # pragma: no cover
str_type = str
__version__ = '1.3.2.dev'
__all__ = ('make', 'make_qr', 'make_micro', 'make_sequence', 'QRCode',
'QRCodeSequence', 'DataOverflowError')
# <https://wiki.python.org/moin/PortingToPy3k/BilingualQuickRef#New_Style_Classes>
__metaclass__ = type
def make(content, error=None, version=None, mode=None, mask=None, encoding=None,
         eci=False, micro=None, boost_error=True):
    """\
    Create a (Micro) QR Code for *content*.

    This is the main entry point. All parameters besides *content* are
    optional; by default an optimal (minimal) (Micro) QR code with a maximal
    error correction level is generated.

    :param content: Data to encode. Either a Unicode string, an integer or
            bytes (use *encoding* to specify the encoding of bytes).
    :type content: str, int, bytes
    :param error: Error correction level: "L" (recovers 7% of data, the
            default), "M" (15%), "Q" (25%) or "H" (30%), case insensitive.
            ``None`` allows generation of a M1 code, which has no error
            correction; "H" is unavailable for Micro QR codes. Higher levels
            may require a larger version. See also *boost_error*.
    :type error: str or None
    :param version: 1 .. 40 for QR codes, "M1" .. "M4" (case insensitive)
            for Micro QR codes, or ``None`` (default) to choose the minimal
            version which fits the input.
    :type version: int, str or None
    :param mode: "numeric", "alphanumeric", "byte", "kanji" or "hanzi"
            (case insensitive), or ``None`` (default) for automatic
            detection. Micro QR versions support only a subset of the modes;
            an unsupported combination raises :py:exc:`ValueError`.
            "hanzi" is not part of ISO/IEC 18004:2015(E) and must always be
            requested explicitly.
    :type mode: str or None
    :param mask: Data mask index, or ``None`` (default) for automatic
            choice. An invalid mask raises :py:exc:`ValueError`.
    :type mask: int or None
    :param encoding: Encoding used in "byte" mode (case insensitive).
            By default ISO/IEC 8859-1 is tried first, falling back to UTF-8.
            No ECI mode indicator is inserted unless *eci* is set.
    :type encoding: str or None
    :param bool eci: If ``True``, insert an ECI header when the encoding
            differs from ISO/IEC 8859-1. Disabled by default because many
            readers lack ECI support; not available for Micro QR codes.
    :param micro: ``True`` to enforce a Micro QR code, ``False`` to forbid
            one, ``None`` (default) to allow one when applicable (only
            relevant when *version* is ``None``).
    :type micro: bool or None
    :param bool boost_error: If ``True`` (default), *error* is treated as a
            minimum and may be raised when that does not change the version.
    :raises: :py:exc:`ValueError` or :py:exc:`DataOverflowError` if the data
            does not fit into a (Micro) QR code of the requested version.
    :rtype: QRCode
    """
    qr_code = encoder.encode(content, error, version, mode, mask, encoding,
                             eci, micro, boost_error=boost_error)
    return QRCode(qr_code)
def make_qr(content, error=None, version=None, mode=None, mask=None,
            encoding=None, eci=False, boost_error=True):
    """\
    Create a QR code; a Micro QR code is never produced.

    All parameters are documented at :py:func:`make`.

    :rtype: QRCode
    """
    return make(content, error=error, version=version, mode=mode,
                mask=mask, encoding=encoding, eci=eci,
                boost_error=boost_error, micro=False)
def make_micro(content, error=None, version=None, mode=None, mask=None,
               encoding=None, boost_error=True):
    """\
    Create a Micro QR code.

    All parameters are documented at :py:func:`make`.

    Error correction level "H" does not exist for Micro QR codes; requesting
    it raises a :py:class:`segno.ErrorLevelError`.

    :rtype: QRCode
    """
    return make(content, error=error, version=version, mode=mode,
                mask=mask, encoding=encoding, boost_error=boost_error,
                micro=True)
def make_sequence(content, error=None, version=None, mode=None, mask=None,
                  encoding=None, boost_error=True, symbol_count=None):
    """\
    Create a sequence of QR codes in Structured Append mode.

    The Structured Append mode splits the content over up to 16 QR codes
    (Micro QR codes do not support it, so only QR codes are returned).
    If the content fits into a single code and neither *version* nor
    *symbol_count* is given, the sequence may contain one QR code which does
    not use Structured Append. The number of symbols is derived from the
    *version* or *symbol_count* parameter.

    The result is iterable:

    .. code-block:: python

        for i, qrcode in enumerate(segno.make_sequence(data, symbol_count=2)):
            qrcode.save('seq-%d.svg' % i, scale=10, color='darkblue')

    All other parameters are documented at :py:func:`make`.

    :param int symbol_count: Number of symbols.
    :rtype: QRCodeSequence
    """
    codes = encoder.encode_sequence(content, error=error, version=version,
                                    mode=mode, mask=mask, encoding=encoding,
                                    boost_error=boost_error,
                                    symbol_count=symbol_count)
    return QRCodeSequence([QRCode(code) for code in codes])
class QRCode:
    """\
    Represents a (Micro) QR Code.
    """
    # Fixed attribute set: keeps per-instance memory low and prevents typos
    # from silently creating new attributes.
    __slots__ = ('matrix', 'mask', '_version', '_error', '_mode')
    def __init__(self, code):
        """\
        Initializes the QR Code object.
        :param code: An object with a ``matrix``, ``version``, ``error``,
            ``mask`` and ``segments`` attribute.
        """
        self.matrix = code.matrix
        """Returns the matrix.
        :rtype: tuple of :py:class:`bytearray` instances.
        """
        self.mask = code.mask
        """Returns the data mask pattern reference
        :rtype: int
        """
        self._version = code.version
        self._error = code.error
        # The mode is unambiguous only if the code consists of a single segment.
        self._mode = code.segments[0].mode if len(code.segments) == 1 else None
    @property
    def version(self):
        """\
        (Micro) QR Code version. Either a string ("M1", "M2", "M3", "M4") or
        an integer in the range of 1 .. 40.
        :rtype: str or int
        """
        # Translate the internal version number into its public name.
        return encoder.get_version_name(self._version)
@property
def error(self):
"""\
Error correction level; either a string ("L", "M", "Q", "H") or ``None``
if the QR code provides no error correction (Micro QR Code version M1)
:rtype: str
"""
if self._error is None:
return None
return encoder.get_error_name(self._error)
@property
def mode(self):
"""\
String indicating the mode ("numeric", "alphanumeric", "byte", "kanji",
or "hanzi").
May be ``None`` if multiple modes are used.
:rtype: str or None
"""
if self._mode is not None:
return encoder.get_mode_name(self._mode)
return None
@property
def designator(self):
"""\
Returns the version and error correction level as string `V-E` where
`V` represents the version number and `E` the error level.
:rtype: str
"""
version = str(self.version)
return '-'.join((version, self.error) if self.error else (version,))
    @property
    def default_border_size(self):
        """\
        Indicates the default border size aka quiet zone.
        QR Codes have a quiet zone of four light modules, while Micro QR Codes
        have a quiet zone of two light modules.
        :rtype: int
        """
        # The quiet zone width depends on whether this is a Micro QR code.
        return utils.get_default_border_size(self._version)
@property
def is_micro(self):
"""\
Indicates if this QR code is a Micro QR code
:rtype: bool
"""
return self._version < 1
def __eq__(self, other):
return self.__class__ == other.__class__ and self.matrix == other.matrix
__hash__ = None
    def symbol_size(self, scale=1, border=None):
        """\
        Returns the symbol size (width x height) with the provided border and
        scaling factor.
        :param scale: Indicates the size of a single module (default: 1).
                The size of a module depends on the used output format; i.e.
                in a PNG context, a scaling factor of 2 indicates that a module
                has a size of 2 x 2 pixel. Some outputs (i.e. SVG) accept
                floating point values.
        :type scale: int or float
        :param int border: The border size or ``None`` to specify the
                default quiet zone (4 for QR Codes, 2 for Micro QR Codes).
        :rtype: tuple (width, height)
        """
        # Size computation lives in utils; it knows per-version module counts.
        return utils.get_symbol_size(self._version, scale=scale, border=border)
    def matrix_iter(self, scale=1, border=None, verbose=False):
        """\
        Returns an iterator over the matrix which includes the border.
        The border is returned as sequence of light modules.
        Dark modules are reported as ``0x1``, light modules have the value
        ``0x0``.
        The following example converts the QR code matrix into a list of
        lists which use boolean values for the modules (True = dark module,
        False = light module)::
            >>> import segno
            >>> qr = segno.make('The Beatles')
            >>> width, height = qr.symbol_size(scale=2)
            >>> res = []
            >>> # Scaling factor 2, default border
            >>> for row in qr.matrix_iter(scale=2):
            >>>     res.append([col == 0x1 for col in row])
            >>> width == len(res[0])
            True
            >>> height == len(res)
            True
        If `verbose` is ``True``, the iterator returns integer constants which
        indicate the type of the module, i.e. ``segno.consts.TYPE_FINDER_PATTERN_DARK``,
        ``segno.consts.TYPE_FINDER_PATTERN_LIGHT``, ``segno.consts.TYPE_QUIET_ZONE`` etc.
        To check if the returned module type is dark or light, use::
            if mt >> 8:
                print('dark module')
            if not mt >> 8:
                print('light module')
        :param int scale: The scaling factor (default: ``1``).
        :param int border: The size of border / quiet zone or ``None`` to
                indicate the default border.
        :param bool verbose: Indicates if the type of the module should be returned
                instead of ``0x1`` and ``0x0`` values.
                See :py:mod:`segno.consts` for the return values.
                This feature is currently in EXPERIMENTAL state.
        :raises: :py:exc:`ValueError` if the scaling factor or the border is
                invalid (i.e. negative).
        """
        # Pick the verbose (module-type) or the plain (0x0 / 0x1) iterator.
        iterfn = utils.matrix_iter_verbose if verbose else utils.matrix_iter
        return iterfn(self.matrix, self._version, scale, border)
    def show(self, delete_after=20, scale=10, border=None, dark='#000',
             light='#fff'):  # pragma: no cover
        """\
        Displays this QR code.
        This method is mainly intended for debugging purposes.
        This method saves the QR code as an image (by default with a scaling
        factor of 10) to a temporary file and opens it with the standard PNG
        viewer application or within the standard webbrowser.
        The temporary file is deleted afterwards (unless
        :paramref:`delete_after <segno.QRCode.show.delete_after>` is set to ``None``).
        If this method does not show any result, try to increase the
        :paramref:`delete_after <segno.QRCode.show.delete_after>` value or set
        it to ``None``
        :param delete_after: Time in seconds to wait till the temporary file is
                deleted.
        :type delete_after: int or None
        :param int scale: Integer indicating the size of a single module.
        :param border: Integer indicating the size of the quiet zone.
                If set to ``None`` (default), the recommended border size
                will be used.
        :type border: int or None
        :param dark: The color of the dark modules (default: black).
        :param light: The color of the light modules (default: white).
        """
        # Imports are local: this debugging helper is rarely used and should
        # not slow down regular module import.
        import os
        import time
        import tempfile
        import webbrowser
        import threading
        try:  # Python 3
            from urllib.parse import urljoin
            from urllib.request import pathname2url
        except ImportError:  # Python 2
            from urlparse import urljoin  # noqa
            from urllib import pathname2url  # noqa
        def delete_file(name):
            # Runs in a background thread; give the viewer time to open the file.
            time.sleep(delete_after)
            try:
                os.unlink(name)
            except OSError:
                pass
        # delete=False: the file must survive closing so the viewer can read it.
        f = tempfile.NamedTemporaryFile('wb', suffix='.png', delete=False)
        try:
            self.save(f, scale=scale, dark=dark, light=light, border=border)
        except:  # noqa: E722
            # Clean up the temporary file on any failure, then re-raise.
            f.close()
            os.unlink(f.name)
            raise
        f.close()
        webbrowser.open_new_tab(urljoin('file:', pathname2url(f.name)))
        if delete_after is not None:
            t = threading.Thread(target=delete_file, args=(f.name,))
            t.start()
    def svg_data_uri(self, xmldecl=False, encode_minimal=False,
                     omit_charset=False, nl=False, **kw):
        """\
        Converts the QR code into a SVG data URI.
        The XML declaration is omitted by default (set
        :paramref:`xmldecl <segno.QRCode.svg_data_uri.xmldecl>` to ``True``
        to enable it), further the newline is omitted by default (set ``nl`` to
        ``True`` to enable it).
        Aside from the missing `out` parameter, the different `xmldecl` and
        `nl` default values, and the additional parameters
        :paramref:`encode_minimal <segno.QRCode.svg_data_uri.encode_minimal>`
        and :paramref:`omit_charset <segno.QRCode.svg_data_uri.omit_charset>`,
        this method uses the same parameters as the usual SVG serializer, see
        :py:func:`save` and the available `SVG parameters <#svg>`_
        .. note::
            In order to embed a SVG image in HTML without generating a file, the
            :py:func:`svg_inline` method could serve better results, as it
            usually produces a smaller output.
        :param bool xmldecl: Indicates if the XML declaration should be
                serialized (default: ``False``)
        :param bool encode_minimal: Indicates if the resulting data URI should
                use minimal percent encoding (disabled by default).
        :param bool omit_charset: Indicates if the ``;charset=...`` should be omitted
                (disabled by default)
        :param bool nl: Indicates if the document should have a trailing newline
                (default: ``False``)
        :rtype: str
        """
        # Delegate to the writers module which implements the serialization.
        return writers.as_svg_data_uri(self.matrix, self._version,
                                       xmldecl=xmldecl, nl=nl,
                                       encode_minimal=encode_minimal,
                                       omit_charset=omit_charset, **kw)
def svg_inline(self, **kw):
"""\
Returns a SVG representation which is embeddable into HTML5 contexts.
Due to the fact that HTML5 directly supports SVG, various elements of
a SVG document can or should be suppressed (i.e. the XML declaration and
the SVG namespace).
This method returns a string that can be used in an HTML context.
This method uses the same parameters as the usual SVG serializer, see
:py:func:`save` and the available `SVG parameters <#svg>`_
The returned string can be used directly in Jinja / Django templates,
provided the ``safe`` filter is used::
<div>{{ qr.svg_inline(dark='#228b22', scale=3) | safe }}</div>
:rtype: str
"""
buff = io.BytesIO()
self.save(buff, kind='svg', xmldecl=False, svgns=False, nl=False, **kw)
return buff.getvalue().decode(kw.get('encoding', 'utf-8'))
    def png_data_uri(self, **kw):
        """\
        Converts the QR code into a PNG data URI.
        Uses the same keyword parameters as the usual PNG serializer,
        see :py:func:`save` and the available `PNG parameters <#png>`_
        :rtype: str
        """
        # Delegate to the writers module which implements the serialization.
        return writers.as_png_data_uri(self.matrix, self._version, **kw)
    def terminal(self, out=None, border=None):
        """\
        Serializes the matrix as ANSI escape code.
        Under Windows, no ANSI escape sequence is generated but the Windows
        API is used *unless* :paramref:`out <segno.QRCode.terminal.out>`
        is a writable object or using WinAPI fails.
        :param out: Filename or a file-like object supporting to write text.
            If ``None`` (default), the matrix is written to :py:class:`sys.stdout`.
        :param int border: Integer indicating the size of the quiet zone.
                If set to ``None`` (default), the recommended border size
                will be used (``4`` for QR Codes, ``2`` for Micro QR Codes).
        """
        if out is None and sys.platform == 'win32':  # pragma: no cover
            # Windows < 10 does not support ANSI escape sequences, try to
            # call a Windows specific terminal output which uses the
            # Windows API.
            try:
                writers.write_terminal_win(self.matrix, self._version, border)
            except OSError:
                # Use the standard output even if it may print garbage
                writers.write_terminal(self.matrix, self._version, sys.stdout,
                                       border)
        else:
            writers.write_terminal(self.matrix, self._version, out or sys.stdout,
                                   border)
    def save(self, out, kind=None, **kw):
        """\
        Serializes the QR code in one of the supported formats.
        The serialization format depends on the filename extension.
        .. _common_keywords:
        **Common keywords**
        ========== ==============================================================
        Name       Description
        ========== ==============================================================
        scale      Integer or float indicating the size of a single module.
                   Default: 1. The interpretation of the scaling factor depends
                   on the serializer. For pixel-based output (like :ref:`PNG <png>`)
                   the scaling factor is interpreted as pixel-size (1 = 1 pixel).
                   :ref:`EPS <eps>` interprets ``1`` as 1 point (1/72 inch) per
                   module.
                   Some serializers (like :ref:`SVG <svg>`) accept float values.
                   If the serializer does not accept float values, the value will be
                   converted to an integer value (note: int(1.6) == 1).
        border     Integer indicating the size of the quiet zone.
                   If set to ``None`` (default), the recommended border size
                   will be used (``4`` for QR codes, ``2`` for a Micro QR codes).
                   A value of ``0`` indicates that border should be omitted.
        dark       A string or tuple representing a color value for the dark
                   modules. The default value is "black". The color can be
                   provided as ``(R, G, B)`` tuple, as web color name
                   (like "red") or in hexadecimal format (``#RGB`` or
                   ``#RRGGBB``). Some serializers (i.e. :ref:`SVG <svg>` and
                   :ref:`PNG <png>`) accept an alpha transparency value like
                   ``#RRGGBBAA``.
        light      A string or tuple representing a color for the light modules.
                   See `dark` for valid values.
                   The default value depends on the serializer. :ref:`SVG <svg>`
                   uses no color (``None``) for light modules by default, other
                   serializers, like :ref:`PNG <png>`, use "white" as default
                   light color.
        ========== ==============================================================
        .. _module_colors:
        **Module Colors**
        =============== =======================================================
        Name            Description
        =============== =======================================================
        finder_dark     Color of the dark modules of the finder patterns
                        Default: undefined, use value of "dark"
        finder_light    Color of the light modules of the finder patterns
                        Default: undefined, use value of "light"
        data_dark       Color of the dark data modules
                        Default: undefined, use value of "dark"
        data_light      Color of the light data modules.
                        Default: undefined, use value of "light".
        version_dark    Color of the dark modules of the version information.
                        Default: undefined, use value of "dark".
        version_light   Color of the light modules of the version information,
                        Default: undefined, use value of "light".
        format_dark     Color of the dark modules of the format information.
                        Default: undefined, use value of "dark".
        format_light    Color of the light modules of the format information.
                        Default: undefined, use value of "light".
        alignment_dark  Color of the dark modules of the alignment patterns.
                        Default: undefined, use value of "dark".
        alignment_light Color of the light modules of the alignment patterns.
                        Default: undefined, use value of "light".
        timing_dark     Color of the dark modules of the timing patterns.
                        Default: undefined, use value of "dark".
        timing_light    Color of the light modules of the timing patterns.
                        Default: undefined, use value of "light".
        separator       Color of the separator.
                        Default: undefined, use value of "light".
        dark_module     Color of the dark module (a single dark module which
                        occurs in all QR Codes but not in Micro QR Codes.
                        Default: undefined, use value of "dark".
        quiet_zone      Color of the quiet zone / border.
                        Default: undefined, use value of "light".
        =============== =======================================================
        .. _svg:
        **Scalable Vector Graphics (SVG)**
        All :ref:`common keywords <common_keywords>` and :ref:`module colors <module_colors>`
        are supported.
        ================ ==============================================================
        Name             Description
        ================ ==============================================================
        out              Filename or :py:class:`io.BytesIO`
        kind             "svg" or "svgz" (to create a gzip compressed SVG)
        scale            integer or float
        dark             Default: "#000" (black)
                         ``None`` is a valid value. If set to ``None``, the resulting
                         path won't have a "stroke" attribute. The "stroke" attribute
                         may be defined via CSS (external).
                         If an alpha channel is defined, the output depends of the
                         used SVG version. For SVG versions >= 2.0, the "stroke"
                         attribute will have a value like "rgba(R, G, B, A)", otherwise
                         the path gets another attribute "stroke-opacity" to emulate
                         the alpha channel.
                         To minimize the document size, the SVG serializer uses
                         automatically the shortest color representation: If
                         a value like "#000000" is provided, the resulting
                         document will have a color value of "#000". If the color
                         is "#FF0000", the resulting color is not "#F00", but
                         the web color name "red".
        light            Default value ``None``. If this parameter is set to another
                         value, the resulting image will have another path which
                         is used to define the color of the light modules.
                         If an alpha channel is used, the resulting path may
                         have a "fill-opacity" attribute (for SVG version < 2.0)
                         or the "fill" attribute has a "rgba(R, G, B, A)" value.
        xmldecl          Boolean value (default: ``True``) indicating whether the
                         document should have an XML declaration header.
                         Set to ``False`` to omit the header.
        svgns            Boolean value (default: ``True``) indicating whether the
                         document should have an explicit SVG namespace declaration.
                         Set to ``False`` to omit the namespace declaration.
                         The latter might be useful if the document should be
                         embedded into a HTML 5 document where the SVG namespace
                         is implicitly defined.
        title            String (default: ``None``) Optional title of the generated
                         SVG document.
        desc             String (default: ``None``) Optional description of the
                         generated SVG document.
        svgid            A string indicating the ID of the SVG document
                         (if set to ``None`` (default), the SVG element won't have
                         an ID).
        svgclass         Default: "segno". The CSS class of the SVG document
                         (if set to ``None``, the SVG element won't have a class).
        lineclass        Default: "qrline". The CSS class of the path element
                         (which draws the dark modules (if set to ``None``, the path
                         won't have a class).
        omitsize         Indicates if width and height attributes should be
                         omitted (default: ``False``). If these attributes are
                         omitted, a ``viewBox`` attribute will be added to the
                         document.
        unit             Default: ``None``
                         Indicates the unit for width / height and other coordinates.
                         By default, the unit is unspecified and all values are
                         in the user space.
                         Valid values: em, ex, px, pt, pc, cm, mm, in, and percentages
                         (any string is accepted, this parameter is not validated
                         by the serializer)
        encoding         Encoding of the XML document. "utf-8" by default.
        svgversion       SVG version (default: ``None``). If specified (a float),
                         the resulting document has an explicit "version" attribute.
                         If set to ``None``, the document won't have a "version"
                         attribute. This parameter is not validated.
        compresslevel    Default: 9. This parameter is only valid, if a compressed
                         SVG document should be created (file extension "svgz").
                         1 is fastest and produces the least compression, 9 is slowest
                         and produces the most. 0 is no compression.
        draw_transparent Indicates if transparent SVG paths should be
                         added to the graphic (default: ``False``)
        nl               Indicates if the document should have a trailing newline
                         (default: ``True``)
        ================ ==============================================================
        .. _png:
        **Portable Network Graphics (PNG)**
        This writes either a grayscale (maybe with transparency) PNG (color type 0)
        or a palette-based (maybe with transparency) image (color type 3).
        If the dark / light values are ``None``, white or black, the serializer
        chooses the more compact grayscale mode, in all other cases a palette-based
        image is written.
        All :ref:`common keywords <common_keywords>` and :ref:`module colors <module_colors>`
        are supported.
        =============== ==============================================================
        Name            Description
        =============== ==============================================================
        out             Filename or :py:class:`io.BytesIO`
        kind            "png"
        scale           integer
        dark            Default: "#000" (black)
                        ``None`` is a valid value iff light is not ``None``.
                        If set to ``None``, the dark modules become transparent.
        light           Default value "#fff" (white)
                        See keyword "dark" for further details.
        compresslevel   Default: 9. Integer indicating the compression level
                        for the ``IDAT`` (data) chunk.
                        1 is fastest and produces the least compression, 9 is slowest
                        and produces the most. 0 is no compression.
        dpi             Default: ``None``. Specifies the DPI value for the image.
                        By default, the DPI value is unspecified. Please note
                        that the DPI value is converted into meters (maybe with
                        rounding errors) since PNG does not support the unit
                        "dots per inch".
        =============== ==============================================================
        .. _eps:
        **Encapsulated PostScript (EPS)**
        All :ref:`common keywords <common_keywords>` are supported.
        ============= ==============================================================
        Name          Description
        ============= ==============================================================
        out           Filename or :py:class:`io.StringIO`
        kind          "eps"
        scale         integer or float
        dark          Default: "#000" (black)
        light         Default value: ``None`` (transparent light modules)
        ============= ==============================================================
        .. _pdf:
        **Portable Document Format (PDF)**
        All :ref:`common keywords <common_keywords>` are supported.
        ============= ==============================================================
        Name          Description
        ============= ==============================================================
        out           Filename or :py:class:`io.BytesIO`
        kind          "pdf"
        scale         integer or float
        dark          Default: "#000" (black)
        light         Default value: ``None`` (transparent light modules)
        compresslevel Default: 9. Integer indicating the compression level.
                      1 is fastest and produces the least compression, 9 is slowest
                      and produces the most. 0 is no compression.
        ============= ==============================================================
        .. _txt:
        **Text (TXT)**
        Aside of "scale", all :ref:`common keywords <common_keywords>` are supported.
        ============= ==============================================================
        Name          Description
        ============= ==============================================================
        out           Filename or :py:class:`io.StringIO`
        kind          "txt"
        dark          Default: "1"
        light         Default: "0"
        ============= ==============================================================
        .. _ansi:
        **ANSI escape code**
        Supports the "border" keyword, only!
        ============= ==============================================================
        Name          Description
        ============= ==============================================================
        out           Filename or :py:class:`io.StringIO`
        kind          "ans"
        ============= ==============================================================
        .. _pbm:
        **Portable Bitmap (PBM)**
        All :ref:`common keywords <common_keywords>` are supported.
        ============= ==============================================================
        Name          Description
        ============= ==============================================================
        out           Filename or :py:class:`io.BytesIO`
        kind          "pbm"
        scale         integer
        plain         Default: False. Boolean to switch between the P4 and P1 format.
                      If set to ``True``, the (outdated) P1 serialization format is
                      used.
        ============= ==============================================================
        .. _pam:
        **Portable Arbitrary Map (PAM)**
        All :ref:`common keywords <common_keywords>` are supported.
        ============= ==============================================================
        Name          Description
        ============= ==============================================================
        out           Filename or :py:class:`io.BytesIO`
        kind          "pam"
        scale         integer
        dark          Default: "#000" (black).
        light         Default value "#fff" (white). Use ``None`` for transparent
                      light modules.
        ============= ==============================================================
        .. _ppm:
        **Portable Pixmap (PPM)**
        All :ref:`common keywords <common_keywords>` and :ref:`module colors <module_colors>`
        are supported.
        ============= ==============================================================
        Name          Description
        ============= ==============================================================
        out           Filename or :py:class:`io.BytesIO`
        kind          "ppm"
        scale         integer
        dark          Default: "#000" (black).
        light         Default value "#fff" (white).
        ============= ==============================================================
        .. _latex:
        **LaTeX / PGF/TikZ**
        To use the output of this serializer, the ``PGF/TikZ`` (and optionally
        ``hyperref``) package is required in the LaTeX environment. The
        serializer itself does not depend on any external packages.
        All :ref:`common keywords <common_keywords>` are supported.
        ============= ==============================================================
        Name          Description
        ============= ==============================================================
        out           Filename or :py:class:`io.StringIO`
        kind          "tex"
        scale         integer or float
        dark          LaTeX color name (default: "black"). The color is written
                      "at it is", please ensure that the color is a standard color
                      or it has been defined in the enclosing LaTeX document.
        url           Default: ``None``. Optional URL where the QR code should
                      point to. Requires the ``hyperref`` package in the LaTeX
                      environment.
        ============= ==============================================================
        .. _xbm:
        **X BitMap (XBM)**
        All :ref:`common keywords <common_keywords>` are supported.
        ============= ==============================================================
        Name          Description
        ============= ==============================================================
        out           Filename or :py:class:`io.StringIO`
        kind          "xbm"
        scale         integer
        name          Name of the variable (default: "img")
        ============= ==============================================================
        .. _xpm:
        **X PixMap (XPM)**
        All :ref:`common keywords <common_keywords>` are supported.
        ============= ==============================================================
        Name          Description
        ============= ==============================================================
        out           Filename or :py:class:`io.StringIO`
        kind          "xpm"
        scale         integer
        dark          Default: "#000" (black).
        light         Default value "#fff" (white)
                      ``None`` indicates transparent light modules.
        name          Name of the variable (default: "img")
        ============= ==============================================================
        :param out: A filename or a writable file-like object with a
                ``name`` attribute. Use the :paramref:`kind <segno.QRCode.save.kind>`
                parameter if `out` is a :py:class:`io.BytesIO` or
                :py:class:`io.StringIO` stream which don't have a ``name``
                attribute.
        :param str kind: Default ``None``.
                If the desired output format cannot be determined from
                the :paramref:`out <segno.QRCode.save.out>` parameter, this
                parameter can be used to indicate the serialization format
                (i.e. "svg" to enforce SVG output). The value is case
                insensitive.
        :param kw: Any of the supported keywords by the specific serializer.
        """
        # All serialization logic (format detection included) lives in writers.
        writers.save(self.matrix, self._version, out, kind, **kw)
    def __getattr__(self, name):
        """\
        This is used to plug-in external serializers.
        When a "to_<name>" method is invoked, this method tries to find
        a ``segno.plugin.converter`` plugin with the provided ``<name>``.
        If such a plugin exists, a callable function is returned. The result
        of invoking the function depends on the plugin.
        """
        if name.startswith('to_'):
            # NOTE(review): pkg_resources is deprecated in favor of
            # importlib.metadata / importlib_metadata; switching would change
            # runtime dependencies — confirm before migrating.
            from pkg_resources import iter_entry_points
            from functools import partial
            for ep in iter_entry_points(group='segno.plugin.converter',
                                        name=name[3:]):
                plugin = ep.load()
                # Bind this QR code as first argument of the plugin callable.
                return partial(plugin, self)
        raise AttributeError('{0} object has no attribute {1}'
                             .format(self.__class__, name))
class QRCodeSequence(tuple):
    """\
    Represents a sequence of 1 .. n (max. n = 16) :py:class:`QRCode` instances.
    Iff this sequence contains only one item, it behaves like :py:class:`QRCode`.
    """
    __slots__ = ()
    def __new__(cls, qrcodes):
        # Plain tuple construction; the sequence itself is immutable.
        return super(QRCodeSequence, cls).__new__(cls, qrcodes)
    def terminal(self, out=None, border=None):
        """\
        Serializes the sequence of QR codes as ANSI escape code.
        See :py:meth:`QRCode.terminal()` for details.
        """
        for code in self:
            code.terminal(out=out, border=border)
    def save(self, out, kind=None, **kw):
        """\
        Saves the sequence of QR codes to `out`.
        If `out` is a filename, this method modifies the filename and adds
        ``<Number of QR codes>-<Current QR code>`` to it.
        ``structured-append.svg`` becomes (if the sequence contains two QR codes):
        ``structured-append-02-01.svg`` and ``structured-append-02-02.svg``
        Please note that using a file or file-like object may result into an
        invalid serialization format since all QR codes are written to the same
        output.
        See :py:meth:`QRCode.save()` for a detailed enumeration of options.
        """
        total = len(self)
        numbered = False
        if total > 1 and isinstance(out, str_type):
            dot_idx = out.rfind('.')
            if dot_idx > -1:
                # Inject a "-<total>-<index>" template right before the suffix.
                out = out[:dot_idx] + '-{0:02d}-{1:02d}' + out[dot_idx:]
                numbered = True
        for number, qrcode in enumerate(self, start=1):
            target = out.format(total, number) if numbered else out
            qrcode.save(target, kind=kind, **kw)
    def __getattr__(self, item):
        """\
        Behaves like :py:class:`QRCode` iff this sequence contains a single item.
        """
        # Delegate unknown attribute access to the sole QR code, if any.
        if len(self) == 1:
            return getattr(self[0], item)
        raise AttributeError("{0} object has no attribute '{1}'"
                             .format(self.__class__, item))
| 45.590373 | 97 | 0.515783 |
7958adc648efafbbb98f9745c4e3b5ac657479ae | 37,780 | py | Python | src/pykeen/nn/modules.py | sunny1401/pykeen | ad449ecc753eb603670de67cfa5f49020c61db12 | [
"MIT"
] | null | null | null | src/pykeen/nn/modules.py | sunny1401/pykeen | ad449ecc753eb603670de67cfa5f49020c61db12 | [
"MIT"
] | null | null | null | src/pykeen/nn/modules.py | sunny1401/pykeen | ad449ecc753eb603670de67cfa5f49020c61db12 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Stateful interaction functions."""
from __future__ import annotations
import logging
import math
from abc import ABC, abstractmethod
from typing import (
Any, Callable, Generic, Mapping, MutableMapping, Optional, Sequence, Tuple, Union,
cast,
)
import torch
from torch import FloatTensor, nn
from . import functional as pkf
from ..typing import HeadRepresentation, RelationRepresentation, TailRepresentation
from ..utils import CANONICAL_DIMENSIONS, convert_to_canonical_shape, ensure_tuple, upgrade_to_sequence
__all__ = [
# Base Classes
'Interaction',
'FunctionalInteraction',
'TranslationalInteraction',
# Concrete Classes
'ComplExInteraction',
'ConvEInteraction',
'ConvKBInteraction',
'DistMultInteraction',
'ERMLPInteraction',
'ERMLPEInteraction',
'HolEInteraction',
'KG2EInteraction',
'NTNInteraction',
'ProjEInteraction',
'RESCALInteraction',
'RotatEInteraction',
'SimplEInteraction',
'StructuredEmbeddingInteraction',
'TransDInteraction',
'TransEInteraction',
'TransHInteraction',
'TransRInteraction',
'TuckerInteraction',
'UnstructuredModelInteraction',
]
logger = logging.getLogger(__name__)
def _get_batches(z, slice_size):
for batch in zip(*(hh.split(slice_size, dim=1) for hh in ensure_tuple(z)[0])):
if len(batch) == 1:
batch = batch[0]
yield batch
class Interaction(nn.Module, Generic[HeadRepresentation, RelationRepresentation, TailRepresentation], ABC):
"""Base class for interaction functions."""
#: The symbolic shapes for entity representations
entity_shape: Sequence[str] = ("d",)
#: The symbolic shapes for entity representations for tail entities, if different. This is ony relevant for ConvE.
tail_entity_shape: Optional[Sequence[str]] = None
#: The symbolic shapes for relation representations
relation_shape: Sequence[str] = ("d",)
    @abstractmethod
    def forward(
        self,
        h: HeadRepresentation,
        r: RelationRepresentation,
        t: TailRepresentation,
    ) -> torch.FloatTensor:
        """Compute broadcasted triple scores given broadcasted representations for head, relation and tails.

        Concrete subclasses implement the actual interaction computation.
        :param h: shape: (batch_size, num_heads, 1, 1, ``*``)
            The head representations.
        :param r: shape: (batch_size, 1, num_relations, 1, ``*``)
            The relation representations.
        :param t: shape: (batch_size, 1, 1, num_tails, ``*``)
            The tail representations.
        :return: shape: (batch_size, num_heads, num_relations, num_tails)
            The scores.
        """
    def score(
        self,
        h: HeadRepresentation,
        r: RelationRepresentation,
        t: TailRepresentation,
        slice_size: Optional[int] = None,
        slice_dim: Optional[str] = None,
    ) -> torch.FloatTensor:
        """Compute broadcasted triple scores with optional slicing.

        .. note ::
            At most one of the slice sizes may be not None.
        :param h: shape: (batch_size, num_heads, 1, 1, ``*``)
            The head representations.
        :param r: shape: (batch_size, 1, num_relations, 1, ``*``)
            The relation representations.
        :param t: shape: (batch_size, 1, 1, num_tails, ``*``)
            The tail representations.
        :param slice_size:
            The slice size.
        :param slice_dim:
            The dimension along which to slice. From {"h", "r", "t"}
        :return: shape: (batch_size, num_heads, num_relations, num_tails)
            The scores.
        """
        # Slicing logic (including "no slicing") lives in the shared wrapper.
        return self._forward_slicing_wrapper(h=h, r=r, t=t, slice_size=slice_size, slice_dim=slice_dim)
    def _score(
        self,
        h: HeadRepresentation,
        r: RelationRepresentation,
        t: TailRepresentation,
        slice_size: Optional[int] = None,
        slice_dim: Optional[str] = None,
    ) -> torch.FloatTensor:
        """Compute scores for the score_* methods outside of models.

        TODO: merge this with the Model utilities?
        :param h: shape: (b, h, *)
        :param r: shape: (b, r, *)
        :param t: shape: (b, t, *)
        :param slice_size:
            The slice size.
        :param slice_dim:
            The dimension along which to slice. From {"h", "r", "t"}
        :return: shape: (b, h, r, t)
        """
        args = []
        for key, x in zip("hrt", (h, r, t)):
            value = []
            for xx in upgrade_to_sequence(x):  # type: torch.FloatTensor
                # bring to (b, n, *); the sliced dimension gets the batch axis
                xx = xx.unsqueeze(dim=1 if key != slice_dim else 0)
                # bring to (b, h, r, t, *)
                xx = convert_to_canonical_shape(
                    x=xx,
                    dim=key,
                    num=xx.shape[1],
                    batch_size=xx.shape[0],
                    suffix_shape=xx.shape[2:],
                )
                value.append(xx)
            # unpack singleton so single representations stay plain tensors
            if len(value) == 1:
                value = value[0]
            args.append(value)
        h, r, t = cast(Tuple[HeadRepresentation, RelationRepresentation, TailRepresentation], args)
        return self._forward_slicing_wrapper(h=h, r=r, t=t, slice_dim=slice_dim, slice_size=slice_size)
def _forward_slicing_wrapper(
self,
h: Union[torch.FloatTensor, Tuple[torch.FloatTensor, ...]],
r: Union[torch.FloatTensor, Tuple[torch.FloatTensor, ...]],
t: Union[torch.FloatTensor, Tuple[torch.FloatTensor, ...]],
slice_size: Optional[int],
slice_dim: Optional[str],
) -> torch.FloatTensor:
"""Compute broadcasted triple scores with optional slicing for representations in canonical shape.
.. note ::
Depending on the interaction function, there may be more than one representation for h/r/t. In that case,
a tuple of at least two tensors is passed.
:param h: shape: (batch_size, num_heads, 1, 1, ``*``)
The head representations.
:param r: shape: (batch_size, 1, num_relations, 1, ``*``)
The relation representations.
:param t: shape: (batch_size, 1, 1, num_tails, ``*``)
The tail representations.
:param slice_size:
The slice size.
:param slice_dim:
The dimension along which to slice. From {"h", "r", "t"}
:return: shape: (batch_size, num_heads, num_relations, num_tails)
The scores.
:raises ValueError:
If slice_dim is invalid.
"""
if slice_size is None:
scores = self(h=h, r=r, t=t)
elif slice_dim == "h":
scores = torch.cat([
self(h=h_batch, r=r, t=t)
for h_batch in _get_batches(h, slice_size)
], dim=CANONICAL_DIMENSIONS[slice_dim])
elif slice_dim == "r":
scores = torch.cat([
self(h=h, r=r_batch, t=t)
for r_batch in _get_batches(r, slice_size)
], dim=CANONICAL_DIMENSIONS[slice_dim])
elif slice_dim == "t":
scores = torch.cat([
self(h=h, r=r, t=t_batch)
for t_batch in _get_batches(t, slice_size)
], dim=CANONICAL_DIMENSIONS[slice_dim])
else:
raise ValueError(f'Invalid slice_dim: {slice_dim}')
return scores
    def score_hrt(
        self,
        h: HeadRepresentation,
        r: RelationRepresentation,
        t: TailRepresentation,
    ) -> torch.FloatTensor:
        """Score a batch of triples.

        :param h: shape: (batch_size, d_e)
            The head representations.
        :param r: shape: (batch_size, d_r)
            The relation representations.
        :param t: shape: (batch_size, d_e)
            The tail representations.
        :return: shape: (batch_size, 1)
            The scores.
        """
        # The canonical score tensor is (b, h, r, t); for aligned triples only
        # the single [b, 0, 0, 0] entry per batch element is relevant. The
        # trailing ``None`` index restores a (batch_size, 1) shape.
        return self._score(h=h, r=r, t=t)[:, 0, 0, 0, None]
    def score_h(
        self,
        all_entities: HeadRepresentation,
        r: RelationRepresentation,
        t: TailRepresentation,
        slice_size: Optional[int] = None,
    ) -> torch.FloatTensor:
        """Score all head entities.

        :param all_entities: shape: (num_entities, d_e)
            The head representations.
        :param r: shape: (batch_size, d_r)
            The relation representations.
        :param t: shape: (batch_size, d_e)
            The tail representations.
        :param slice_size:
            The slice size.
        :return: shape: (batch_size, num_entities)
            The scores.
        """
        # Keep the full head dimension; relation and tail are an aligned batch,
        # so only their first (and only) canonical entry is selected.
        return self._score(h=all_entities, r=r, t=t, slice_dim="h", slice_size=slice_size)[:, :, 0, 0]
    def score_r(
        self,
        h: HeadRepresentation,
        all_relations: RelationRepresentation,
        t: TailRepresentation,
        slice_size: Optional[int] = None,
    ) -> torch.FloatTensor:
        """Score all relations.

        :param h: shape: (batch_size, d_e)
            The head representations.
        :param all_relations: shape: (num_relations, d_r)
            The relation representations.
        :param t: shape: (batch_size, d_e)
            The tail representations.
        :param slice_size:
            The slice size.
        :return: shape: (batch_size, num_relations)
            The scores.
        """
        # Keep the full relation dimension; head and tail are an aligned batch,
        # so only their first (and only) canonical entry is selected.
        return self._score(h=h, r=all_relations, t=t, slice_dim="r", slice_size=slice_size)[:, 0, :, 0]
    def score_t(
        self,
        h: HeadRepresentation,
        r: RelationRepresentation,
        all_entities: TailRepresentation,
        slice_size: Optional[int] = None,
    ) -> torch.FloatTensor:
        """Score all tail entities.

        :param h: shape: (batch_size, d_e)
            The head representations.
        :param r: shape: (batch_size, d_r)
            The relation representations.
        :param all_entities: shape: (num_entities, d_e)
            The tail representations.
        :param slice_size:
            The slice size.
        :return: shape: (batch_size, num_entities)
            The scores.
        """
        # Keep the full tail dimension; head and relation are an aligned batch,
        # so only their first (and only) canonical entry is selected.
        return self._score(h=h, r=r, t=all_entities, slice_dim="t", slice_size=slice_size)[:, 0, 0, :]
def reset_parameters(self):
"""Reset parameters the interaction function may have."""
for mod in self.modules():
if mod is self:
continue
if hasattr(mod, 'reset_parameters'):
mod.reset_parameters()
class FunctionalInteraction(Interaction, Generic[HeadRepresentation, RelationRepresentation, TailRepresentation]):
    """Base class for interaction functions that delegate to a stateless functional form.

    Subclasses assign :attr:`func` and may override the ``_prepare_*`` hooks to
    adapt their representations and module state to that function's keyword
    arguments.
    """
    #: The functional interaction form
    func: Callable[..., torch.FloatTensor]
    def forward(
        self,
        h: HeadRepresentation,
        r: RelationRepresentation,
        t: TailRepresentation,
    ) -> torch.FloatTensor:
        """Compute broadcasted triple scores given broadcasted representations for head, relation and tails.

        :param h: shape: (batch_size, num_heads, 1, 1, ``*``)
            The head representations.
        :param r: shape: (batch_size, 1, num_relations, 1, ``*``)
            The relation representations.
        :param t: shape: (batch_size, 1, 1, num_tails, ``*``)
            The tail representations.
        :return: shape: (batch_size, num_heads, num_relations, num_tails)
            The scores.
        """
        # ``func`` is looked up on the class, not the instance: accessing a
        # plain function through the instance would bind it as a method and
        # implicitly pass ``self``.
        return self.__class__.func(**self._prepare_for_functional(h=h, r=r, t=t))
    def _prepare_for_functional(
        self,
        h: HeadRepresentation,
        r: RelationRepresentation,
        t: TailRepresentation,
    ) -> Mapping[str, torch.FloatTensor]:
        """Conversion utility to prepare the arguments for the functional form."""
        # Combine the representation kwargs with any module state (dropout
        # layers, learned globals, ...) the functional form needs.
        kwargs = self._prepare_hrt_for_functional(h=h, r=r, t=t)
        kwargs.update(self._prepare_state_for_functional())
        return kwargs
    @staticmethod
    def _prepare_hrt_for_functional(
        h: HeadRepresentation,
        r: RelationRepresentation,
        t: TailRepresentation,
    ) -> MutableMapping[str, torch.FloatTensor]:
        """Conversion utility to prepare the h/r/t representations for the functional form.

        The default implementation only supports single-tensor representations;
        subclasses with tuple representations override this to unpack them.
        """
        assert all(torch.is_tensor(x) for x in (h, r, t))
        return dict(h=h, r=r, t=t)
    def _prepare_state_for_functional(self) -> MutableMapping[str, Any]:
        """Conversion utility to prepare the state to be passed to the functional form.

        The default is stateless; stateful subclasses override this.
        """
        return dict()
class TranslationalInteraction(
    FunctionalInteraction,
    Generic[HeadRepresentation, RelationRepresentation, TailRepresentation],
    ABC,
):
    """The translational interaction function shared by the TransE, TransR, TransH, and other Trans<X> models."""
    def __init__(self, p: int, power_norm: bool = False):
        """Initialize the translational interaction function.

        :param p:
            The norm used with :func:`torch.norm`. Typically is 1 or 2.
        :param power_norm:
            Whether to use the p-th power of the L_p norm. It has the advantage of being differentiable around 0,
            and numerically more stable.
        """
        super().__init__()
        # Both values are forwarded verbatim to the functional form via
        # _prepare_state_for_functional.
        self.p = p
        self.power_norm = power_norm
    def _prepare_state_for_functional(self) -> MutableMapping[str, Any]:  # noqa: D102
        return dict(p=self.p, power_norm=self.power_norm)
class TransEInteraction(TranslationalInteraction[FloatTensor, FloatTensor, FloatTensor]):
    """A stateful module for the TransE interaction function.

    .. seealso:: :func:`pykeen.nn.functional.transe_interaction`
    """
    #: The stateless functional form; ``p``/``power_norm`` are supplied by the
    #: base class state.
    func = pkf.transe_interaction
class ComplExInteraction(FunctionalInteraction[FloatTensor, FloatTensor, FloatTensor]):
    """A module wrapper for the stateless ComplEx interaction function.

    .. seealso:: :func:`pykeen.nn.functional.complex_interaction`
    """
    #: The stateless functional form implementing the actual scoring.
    func = pkf.complex_interaction
def _calculate_missing_shape_information(
embedding_dim: int,
input_channels: Optional[int] = None,
width: Optional[int] = None,
height: Optional[int] = None,
) -> Tuple[int, int, int]:
"""Automatically calculates missing dimensions for ConvE.
:param embedding_dim:
The embedding dimension.
:param input_channels:
The number of input channels for the convolution.
:param width:
The width of the embedding "image".
:param height:
The height of the embedding "image".
:return: (input_channels, width, height), such that
`embedding_dim = input_channels * width * height`
:raises ValueError:
If no factorization could be found.
"""
# Store initial input for error message
original = (input_channels, width, height)
# All are None -> try and make closest to square
if input_channels is None and width is None and height is None:
input_channels = 1
result_sqrt = math.floor(math.sqrt(embedding_dim))
height = max(factor for factor in range(1, result_sqrt + 1) if embedding_dim % factor == 0)
width = embedding_dim // height
# Only input channels is None
elif input_channels is None and width is not None and height is not None:
input_channels = embedding_dim // (width * height)
# Only width is None
elif input_channels is not None and width is None and height is not None:
width = embedding_dim // (height * input_channels)
# Only height is none
elif height is None and width is not None and input_channels is not None:
height = embedding_dim // (width * input_channels)
# Width and input_channels are None -> set input_channels to 1 and calculage height
elif input_channels is None and height is None and width is not None:
input_channels = 1
height = embedding_dim // width
# Width and input channels are None -> set input channels to 1 and calculate width
elif input_channels is None and height is not None and width is None:
input_channels = 1
width = embedding_dim // height
if input_channels * width * height != embedding_dim: # type: ignore
raise ValueError(f'Could not resolve {original} to a valid factorization of {embedding_dim}.')
return input_channels, width, height # type: ignore
class ConvEInteraction(
    FunctionalInteraction[torch.FloatTensor, torch.FloatTensor, Tuple[torch.FloatTensor, torch.FloatTensor]],
):
    """A stateful module for the ConvE interaction function.

    .. seealso:: :func:`pykeen.nn.functional.conve_interaction`
    """
    tail_entity_shape = ("d", "k")  # with k=1
    #: The head-relation encoder operating on 2D "images"
    hr2d: nn.Module
    #: The head-relation encoder operating on the 1D flattened version
    hr1d: nn.Module
    #: The interaction function
    func = pkf.conve_interaction
    def __init__(
        self,
        input_channels: Optional[int] = None,
        output_channels: int = 32,
        embedding_height: Optional[int] = None,
        embedding_width: Optional[int] = None,
        kernel_height: int = 3,
        kernel_width: int = 3,
        input_dropout: float = 0.2,
        output_dropout: float = 0.3,
        feature_map_dropout: float = 0.2,
        embedding_dim: int = 200,
        apply_batch_normalization: bool = True,
    ):
        """Initialize the ConvE encoders.

        :param input_channels: number of convolution input channels; derived if None
        :param output_channels: number of convolution filters
        :param embedding_height: height of the reshaped embedding "image"; derived if None
        :param embedding_width: width of the reshaped embedding "image"; derived if None
        :param kernel_height: convolution kernel height
        :param kernel_width: convolution kernel width
        :param input_dropout: dropout rate before the convolution
        :param output_dropout: dropout rate after the linear layer
        :param feature_map_dropout: 2D dropout rate on the convolution output
        :param embedding_dim: entity embedding dimension
        :param apply_batch_normalization: whether to insert BatchNorm layers
        """
        super().__init__()
        # Automatic calculation of remaining dimensions
        logger.info(f'Resolving {input_channels} * {embedding_width} * {embedding_height} = {embedding_dim}.')
        # NOTE(review): if embedding_dim is None while any of the three factors
        # is also None, this multiplication raises TypeError — confirm callers
        # always pass all three factors in that case.
        if embedding_dim is None:
            embedding_dim = input_channels * embedding_width * embedding_height
        # Parameters need to fulfil:
        #   input_channels * embedding_height * embedding_width = embedding_dim
        input_channels, embedding_width, embedding_height = _calculate_missing_shape_information(
            embedding_dim=embedding_dim,
            input_channels=input_channels,
            width=embedding_width,
            height=embedding_height,
        )
        logger.info(f'Resolved to {input_channels} * {embedding_width} * {embedding_height} = {embedding_dim}.')
        if input_channels * embedding_height * embedding_width != embedding_dim:
            raise ValueError(
                f'Product of input channels ({input_channels}), height ({embedding_height}), and width '
                f'({embedding_width}) does not equal target embedding dimension ({embedding_dim})',
            )
        # encoders
        # 1: 2D encoder: BN?, DO, Conv, BN?, Act, DO
        hr2d_layers = [
            nn.BatchNorm2d(input_channels) if apply_batch_normalization else None,
            nn.Dropout(input_dropout),
            nn.Conv2d(
                in_channels=input_channels,
                out_channels=output_channels,
                kernel_size=(kernel_height, kernel_width),
                stride=1,
                padding=0,
                bias=True,
            ),
            nn.BatchNorm2d(output_channels) if apply_batch_normalization else None,
            nn.ReLU(),
            nn.Dropout2d(feature_map_dropout),
        ]
        # Drop the BatchNorm placeholders when batch normalization is disabled.
        self.hr2d = nn.Sequential(*(layer for layer in hr2d_layers if layer is not None))
        # 2: 1D encoder: FC, DO, BN?, Act
        # NOTE(review): the factor of 2 on the height presumably reflects head
        # and relation "images" being stacked vertically by the functional
        # form — confirm against pkf.conve_interaction.
        num_in_features = (
            output_channels
            * (2 * embedding_height - kernel_height + 1)
            * (embedding_width - kernel_width + 1)
        )
        hr1d_layers = [
            nn.Linear(num_in_features, embedding_dim),
            nn.Dropout(output_dropout),
            nn.BatchNorm1d(embedding_dim) if apply_batch_normalization else None,
            nn.ReLU(),
        ]
        self.hr1d = nn.Sequential(*(layer for layer in hr1d_layers if layer is not None))
        # store reshaping dimensions
        self.embedding_height = embedding_height
        self.embedding_width = embedding_width
        self.input_channels = input_channels
    @staticmethod
    def _prepare_hrt_for_functional(
        h: HeadRepresentation,
        r: RelationRepresentation,
        t: TailRepresentation,
    ) -> MutableMapping[str, torch.FloatTensor]:  # noqa: D102
        # The tail representation is a (embedding, bias) pair.
        return dict(h=h, r=r, t=t[0], t_bias=t[1])
    def _prepare_state_for_functional(self) -> MutableMapping[str, Any]:  # noqa: D102
        return dict(
            input_channels=self.input_channels,
            embedding_height=self.embedding_height,
            embedding_width=self.embedding_width,
            hr2d=self.hr2d,
            hr1d=self.hr1d,
        )
class ConvKBInteraction(FunctionalInteraction[FloatTensor, FloatTensor, FloatTensor]):
    """A stateful module for the ConvKB interaction function.

    .. seealso:: :func:`pykeen.nn.functional.convkb_interaction``
    """
    func = pkf.convkb_interaction
    def __init__(
        self,
        hidden_dropout_rate: float = 0.,
        embedding_dim: int = 200,
        num_filters: int = 400,
    ):
        """Initialize the ConvKB layers.

        :param hidden_dropout_rate: dropout rate applied after the convolution
        :param embedding_dim: entity/relation embedding dimension
        :param num_filters: number of (1, 3) convolution filters
        """
        super().__init__()
        self.embedding_dim = embedding_dim
        self.num_filters = num_filters
        # The interaction model
        # A (1, 3) kernel spans the h/r/t triple at each embedding position.
        self.conv = nn.Conv2d(in_channels=1, out_channels=num_filters, kernel_size=(1, 3), bias=True)
        self.activation = nn.ReLU()
        self.hidden_dropout = nn.Dropout(p=hidden_dropout_rate)
        self.linear = nn.Linear(embedding_dim * num_filters, 1, bias=True)
    def reset_parameters(self):  # noqa: D102
        # Use Xavier initialization for weight; bias to zero
        nn.init.xavier_uniform_(self.linear.weight, gain=nn.init.calculate_gain('relu'))
        nn.init.zeros_(self.linear.bias)
        # Initialize all filters to [0.1, 0.1, -0.1],
        # c.f. https://github.com/daiquocnguyen/ConvKB/blob/master/model.py#L34-L36
        nn.init.constant_(self.conv.weight[..., :2], 0.1)
        nn.init.constant_(self.conv.weight[..., 2], -0.1)
        nn.init.zeros_(self.conv.bias)
    def _prepare_state_for_functional(self) -> MutableMapping[str, Any]:  # noqa: D102
        return dict(
            conv=self.conv,
            activation=self.activation,
            hidden_dropout=self.hidden_dropout,
            linear=self.linear,
        )
class DistMultInteraction(FunctionalInteraction[FloatTensor, FloatTensor, FloatTensor]):
    """A module wrapper for the stateless DistMult interaction function.

    .. seealso:: :func:`pykeen.nn.functional.distmult_interaction`
    """
    #: The stateless functional form implementing the actual scoring.
    func = pkf.distmult_interaction
class ERMLPInteraction(FunctionalInteraction[FloatTensor, FloatTensor, FloatTensor]):
    """A stateful module for the ER-MLP interaction.

    The score is produced by a two-layer perceptron over the concatenated
    head, relation, and tail representations:

    .. math ::
        f(h, r, t) = W_2 ReLU(W_1 cat(h, r, t) + b_1) + b_2

    .. seealso:: :func:`pykeen.nn.functional.ermlp_interaction`
    """
    func = pkf.ermlp_interaction
    def __init__(
        self,
        embedding_dim: int,
        hidden_dim: int,
    ):
        """Initialize the interaction function.

        :param embedding_dim:
            The embedding vector dimension.
        :param hidden_dim:
            The hidden dimension of the MLP.
        """
        super().__init__()
        # The multi-layer perceptron consists of an input layer with
        # 3 * embedding_dim neurons (the concatenated head, relation, and tail
        # embeddings), a hidden layer with hidden_dim neurons, and an output
        # layer with a single neuron producing the score.
        # (This used to be a free-floating string literal, i.e. a no-op
        # expression statement; it is now a proper comment.)
        self.hidden = nn.Linear(in_features=3 * embedding_dim, out_features=hidden_dim, bias=True)
        self.activation = nn.ReLU()
        self.hidden_to_score = nn.Linear(in_features=hidden_dim, out_features=1, bias=True)
    def _prepare_state_for_functional(self) -> MutableMapping[str, Any]:  # noqa: D102
        return dict(
            hidden=self.hidden,
            activation=self.activation,
            final=self.hidden_to_score,
        )
    def reset_parameters(self):  # noqa: D102
        # Initialize biases with zero
        nn.init.zeros_(self.hidden.bias)
        nn.init.zeros_(self.hidden_to_score.bias)
        # Xavier/Glorot initialization for the weights; the output layer's gain
        # is matched to the preceding activation (ReLU).
        nn.init.xavier_uniform_(self.hidden.weight)
        nn.init.xavier_uniform_(
            self.hidden_to_score.weight,
            gain=nn.init.calculate_gain(self.activation.__class__.__name__.lower()),
        )
class ERMLPEInteraction(FunctionalInteraction[FloatTensor, FloatTensor, FloatTensor]):
    """A stateful module for the ER-MLP (E) interaction function.

    .. seealso:: :func:`pykeen.nn.functional.ermlpe_interaction`
    """
    func = pkf.ermlpe_interaction
    def __init__(
        self,
        hidden_dim: int = 300,
        input_dropout: float = 0.2,
        hidden_dropout: float = 0.3,
        embedding_dim: int = 200,
    ):
        """Initialize the MLP.

        :param hidden_dim: hidden layer dimension
        :param input_dropout: dropout rate on the MLP input
        :param hidden_dropout: dropout rate after each linear layer
        :param embedding_dim: embedding dimension
        """
        super().__init__()
        # The first linear layer takes 2 * embedding_dim features — presumably
        # the concatenated head/relation pair; the tail is consumed by the
        # functional form (cf. pkf.ermlpe_interaction).
        self.mlp = nn.Sequential(
            nn.Dropout(input_dropout),
            nn.Linear(2 * embedding_dim, hidden_dim),
            nn.Dropout(hidden_dropout),
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, embedding_dim),
            nn.Dropout(hidden_dropout),
            nn.BatchNorm1d(embedding_dim),
            nn.ReLU(),
        )
    def _prepare_state_for_functional(self) -> MutableMapping[str, Any]:  # noqa: D102
        return dict(mlp=self.mlp)
class TransRInteraction(
    TranslationalInteraction[
        torch.FloatTensor,
        Tuple[torch.FloatTensor, torch.FloatTensor],
        torch.FloatTensor,
    ],
):
    """A stateful module for the TransR interaction function.

    .. seealso:: :func:`pykeen.nn.functional.transr_interaction`
    """
    #: Two relation representations: the relation vector ("e") and a
    #: projection with shape key "de".
    relation_shape = ("e", "de")
    func = pkf.transr_interaction
    def __init__(self, p: int, power_norm: bool = True):
        """Initialize with power-norm enabled by default (unlike the base class)."""
        super().__init__(p=p, power_norm=power_norm)
    @staticmethod
    def _prepare_hrt_for_functional(
        h: HeadRepresentation,
        r: RelationRepresentation,
        t: TailRepresentation,
    ) -> MutableMapping[str, torch.FloatTensor]:  # noqa: D102
        # Unpack the relation pair into the relation vector and the projection
        # matrix ``m_r`` expected by the functional form.
        return dict(h=h, r=r[0], t=t, m_r=r[1])
class RotatEInteraction(FunctionalInteraction[FloatTensor, FloatTensor, FloatTensor]):
    """A module wrapper for the stateless RotatE interaction function.

    .. seealso:: :func:`pykeen.nn.functional.rotate_interaction`
    """
    #: The stateless functional form implementing the actual scoring.
    func = pkf.rotate_interaction
class HolEInteraction(FunctionalInteraction[FloatTensor, FloatTensor, FloatTensor]):
    """A module wrapper for the stateless HolE interaction function.

    .. seealso:: :func:`pykeen.nn.functional.hole_interaction`
    """
    #: The stateless functional form implementing the actual scoring.
    func = pkf.hole_interaction
class ProjEInteraction(FunctionalInteraction[FloatTensor, FloatTensor, FloatTensor]):
    """A stateful module for the ProjE interaction function.

    .. seealso:: :func:`pykeen.nn.functional.proje_interaction`
    """
    func = pkf.proje_interaction
    def __init__(
        self,
        embedding_dim: int = 50,
        inner_non_linearity: Optional[nn.Module] = None,
    ):
        """Initialize the global ProjE parameters.

        :param embedding_dim: the embedding dimension
        :param inner_non_linearity: inner activation; defaults to ``nn.Tanh()``
        """
        super().__init__()
        # Global entity projection
        self.d_e = nn.Parameter(torch.empty(embedding_dim), requires_grad=True)
        # Global relation projection
        self.d_r = nn.Parameter(torch.empty(embedding_dim), requires_grad=True)
        # Global combination bias
        self.b_c = nn.Parameter(torch.empty(embedding_dim), requires_grad=True)
        # Global projection bias (scalar)
        self.b_p = nn.Parameter(torch.empty(1), requires_grad=True)
        if inner_non_linearity is None:
            inner_non_linearity = nn.Tanh()
        self.inner_non_linearity = inner_non_linearity
    def reset_parameters(self):  # noqa: D102
        embedding_dim = self.d_e.shape[0]
        # NOTE(review): bound is sqrt(6)/dim, not sqrt(6)/sqrt(dim) as in
        # standard Glorot initialization — confirm this matches the paper.
        bound = math.sqrt(6) / embedding_dim
        for p in self.parameters():
            nn.init.uniform_(p, a=-bound, b=bound)
    def _prepare_state_for_functional(self) -> MutableMapping[str, Any]:
        """Expose the global projections, biases, and activation to the functional form."""
        return dict(d_e=self.d_e, d_r=self.d_r, b_c=self.b_c, b_p=self.b_p, activation=self.inner_non_linearity)
class RESCALInteraction(FunctionalInteraction[FloatTensor, FloatTensor, FloatTensor]):
    """A module wrapper for the stateless RESCAL interaction function.

    .. seealso:: :func:`pykeen.nn.functional.rescal_interaction`
    """
    #: A single relation representation with shape key "dd" (matrix-shaped).
    relation_shape = ("dd",)
    func = pkf.rescal_interaction
class StructuredEmbeddingInteraction(
    TranslationalInteraction[
        torch.FloatTensor,
        Tuple[torch.FloatTensor, torch.FloatTensor],
        torch.FloatTensor,
    ],
):
    """A stateful module for the Structured Embedding (SE) interaction function.

    .. seealso:: :func:`pykeen.nn.functional.structured_embedding_interaction`
    """
    #: Two matrix-shaped relation representations ("dd"): one projecting the
    #: head, one projecting the tail.
    relation_shape = ("dd", "dd")
    func = pkf.structured_embedding_interaction
    @staticmethod
    def _prepare_hrt_for_functional(
        h: HeadRepresentation,
        r: RelationRepresentation,
        t: TailRepresentation,
    ) -> MutableMapping[str, torch.FloatTensor]:  # noqa: D102
        # Unpack the relation pair into head-side and tail-side projections.
        return dict(h=h, t=t, r_h=r[0], r_t=r[1])
class TuckerInteraction(FunctionalInteraction[FloatTensor, FloatTensor, FloatTensor]):
    """A stateful module for the stateless Tucker interaction function.

    .. seealso:: :func:`pykeen.nn.functional.tucker_interaction`
    """
    func = pkf.tucker_interaction
    def __init__(
        self,
        embedding_dim: int = 200,
        relation_dim: Optional[int] = None,
        head_dropout: float = 0.3,
        relation_dropout: float = 0.4,
        head_relation_dropout: float = 0.5,
        apply_batch_normalization: bool = True,
    ):
        """Initialize the Tucker interaction function.

        :param embedding_dim:
            The entity embedding dimension.
        :param relation_dim:
            The relation embedding dimension. Defaults to ``embedding_dim``.
        :param head_dropout:
            The dropout rate applied to the head representations.
        :param relation_dropout:
            The dropout rate applied to the relation representations.
        :param head_relation_dropout:
            The dropout rate applied to the combined head and relation representations.
        :param apply_batch_normalization:
            Whether to use batch normalization on head representations and the combination of head and relation.
        """
        super().__init__()
        if relation_dim is None:
            relation_dim = embedding_dim
        # Core tensor
        # Note: we use a different dimension permutation as in the official implementation to match the paper.
        self.core_tensor = nn.Parameter(
            torch.empty(embedding_dim, relation_dim, embedding_dim),
            requires_grad=True,
        )
        # Dropout
        self.head_dropout = nn.Dropout(head_dropout)
        self.relation_dropout = nn.Dropout(relation_dropout)
        self.head_relation_dropout = nn.Dropout(head_relation_dropout)
        if apply_batch_normalization:
            self.head_batch_norm = nn.BatchNorm1d(embedding_dim)
            self.head_relation_batch_norm = nn.BatchNorm1d(embedding_dim)
        else:
            # Both norms disabled: the functional form receives None for them.
            self.head_batch_norm = self.head_relation_batch_norm = None
    def reset_parameters(self):  # noqa:D102
        # Initialize core tensor, cf. https://github.com/ibalazevic/TuckER/blob/master/model.py#L12
        nn.init.uniform_(self.core_tensor, -1., 1.)
        # batch norm gets reset automatically, since it defines reset_parameters
    def _prepare_state_for_functional(self) -> MutableMapping[str, Any]:
        """Map the module state onto the functional form's abbreviated keywords (do_* = dropout, bn_* = batch norm)."""
        return dict(
            core_tensor=self.core_tensor,
            do_h=self.head_dropout,
            do_r=self.relation_dropout,
            do_hr=self.head_relation_dropout,
            bn_h=self.head_batch_norm,
            bn_hr=self.head_relation_batch_norm,
        )
class UnstructuredModelInteraction(
    TranslationalInteraction[torch.FloatTensor, None, torch.FloatTensor],
):
    """A stateful module for the UnstructuredModel interaction function.

    .. seealso:: :func:`pykeen.nn.functional.unstructured_model_interaction`
    """
    # shapes
    #: UM uses no relation representations at all.
    relation_shape: Sequence[str] = tuple()
    func = pkf.unstructured_model_interaction
    def __init__(self, p: int, power_norm: bool = True):
        """Initialize with power-norm enabled by default (unlike the base class)."""
        super().__init__(p=p, power_norm=power_norm)
    @staticmethod
    def _prepare_hrt_for_functional(
        h: HeadRepresentation,
        r: RelationRepresentation,
        t: TailRepresentation,
    ) -> MutableMapping[str, torch.FloatTensor]:  # noqa: D102
        # The relation argument is ignored — UM scores depend on h and t only.
        return dict(h=h, t=t)
class TransDInteraction(
    TranslationalInteraction[
        Tuple[torch.FloatTensor, torch.FloatTensor],
        Tuple[torch.FloatTensor, torch.FloatTensor],
        Tuple[torch.FloatTensor, torch.FloatTensor],
    ],
):
    """A stateful module for the TransD interaction function.

    .. seealso:: :func:`pykeen.nn.functional.transd_interaction`
    """
    #: Each entity/relation slot holds an (embedding, projection) pair.
    entity_shape = ("d", "d")
    relation_shape = ("e", "e")
    func = pkf.transd_interaction
    def __init__(self, p: int = 2, power_norm: bool = True):
        """Initialize with p=2 and power-norm enabled by default."""
        super().__init__(p=p, power_norm=power_norm)
    @staticmethod
    def _prepare_hrt_for_functional(
        h: Tuple[torch.FloatTensor, torch.FloatTensor],
        r: Tuple[torch.FloatTensor, torch.FloatTensor],
        t: Tuple[torch.FloatTensor, torch.FloatTensor],
    ) -> MutableMapping[str, torch.FloatTensor]:  # noqa: D102
        # Split each pair into its embedding and its projection (*_p) part.
        h, h_p = h
        r, r_p = r
        t, t_p = t
        return dict(h=h, r=r, t=t, h_p=h_p, r_p=r_p, t_p=t_p)
class NTNInteraction(
    FunctionalInteraction[
        torch.FloatTensor,
        Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor],
        torch.FloatTensor,
    ],
):
    """A stateful module for the NTN interaction function.

    .. seealso:: :func:`pykeen.nn.functional.ntn_interaction`
    """
    #: Five relation representations: bilinear tensor w ("kdd"), the two
    #: linear parts vh/vt ("kd"), and the bias/output vectors b/u ("k").
    relation_shape = ("kdd", "kd", "kd", "k", "k")
    func = pkf.ntn_interaction
    def __init__(self, non_linearity: Optional[nn.Module] = None):
        """Initialize the interaction with the given inner activation (defaults to ``nn.Tanh()``)."""
        super().__init__()
        if non_linearity is None:
            non_linearity = nn.Tanh()
        self.non_linearity = non_linearity
    @staticmethod
    def _prepare_hrt_for_functional(
        h: torch.FloatTensor,
        r: Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor],
        t: torch.FloatTensor,
    ) -> MutableMapping[str, torch.FloatTensor]:  # noqa: D102
        # Unpack the five relation components in declaration order.
        w, vh, vt, b, u = r
        return dict(h=h, t=t, w=w, b=b, u=u, vh=vh, vt=vt)
    def _prepare_state_for_functional(self) -> MutableMapping[str, Any]:  # noqa: D102
        return dict(activation=self.non_linearity)
class KG2EInteraction(
    FunctionalInteraction[
        Tuple[torch.FloatTensor, torch.FloatTensor],
        Tuple[torch.FloatTensor, torch.FloatTensor],
        Tuple[torch.FloatTensor, torch.FloatTensor],
    ],
):
    """A stateful module for the KG2E interaction function.

    .. seealso:: :func:`pykeen.nn.functional.kg2e_interaction`
    """
    #: Each entity/relation is a (mean, variance) pair of d-dimensional vectors.
    entity_shape = ("d", "d")
    relation_shape = ("d", "d")
    # The similarity measure name forwarded to the functional form.
    similarity: str
    # Whether to compute the similarity exactly (no constant terms dropped).
    exact: bool
    func = pkf.kg2e_interaction
    def __init__(self, similarity: Optional[str] = None, exact: bool = True):
        """Initialize the interaction.

        :param similarity: similarity measure name; defaults to 'KL'
        :param exact: whether to compute the similarity exactly
        """
        super().__init__()
        if similarity is None:
            similarity = 'KL'
        self.similarity = similarity
        self.exact = exact
    @staticmethod
    def _prepare_hrt_for_functional(
        h: Tuple[torch.FloatTensor, torch.FloatTensor],
        r: Tuple[torch.FloatTensor, torch.FloatTensor],
        t: Tuple[torch.FloatTensor, torch.FloatTensor],
    ) -> MutableMapping[str, torch.FloatTensor]:
        """Split each (mean, variance) pair into the functional form's keyword arguments."""
        h_mean, h_var = h
        r_mean, r_var = r
        t_mean, t_var = t
        return dict(
            h_mean=h_mean,
            h_var=h_var,
            r_mean=r_mean,
            r_var=r_var,
            t_mean=t_mean,
            t_var=t_var,
        )
    def _prepare_state_for_functional(self) -> MutableMapping[str, Any]:
        """Forward the configured similarity settings."""
        return dict(
            similarity=self.similarity,
            exact=self.exact,
        )
class TransHInteraction(TranslationalInteraction[FloatTensor, Tuple[FloatTensor, FloatTensor], FloatTensor]):
    """A stateful module for the TransH interaction function.

    .. seealso:: :func:`pykeen.nn.functional.transh_interaction`
    """
    #: Two relation vectors, mapped onto the functional form's w_r and d_r.
    relation_shape = ("d", "d")
    func = pkf.transh_interaction
    @staticmethod
    def _prepare_hrt_for_functional(
        h: HeadRepresentation,
        r: RelationRepresentation,
        t: TailRepresentation,
    ) -> MutableMapping[str, torch.FloatTensor]:  # noqa: D102
        # Unpack the relation pair into the functional form's w_r/d_r arguments.
        return dict(h=h, w_r=r[0], d_r=r[1], t=t)
class SimplEInteraction(
    FunctionalInteraction[
        Tuple[torch.FloatTensor, torch.FloatTensor],
        Tuple[torch.FloatTensor, torch.FloatTensor],
        Tuple[torch.FloatTensor, torch.FloatTensor],
    ],
):
    """A module wrapper for the SimplE interaction function.

    .. seealso:: :func:`pykeen.nn.functional.simple_interaction`
    """
    func = pkf.simple_interaction
    #: Each entity/relation holds a forward and an inverse representation.
    entity_shape = ("d", "d")
    relation_shape = ("d", "d")
    def __init__(self, clamp_score: Union[None, float, Tuple[float, float]] = None):
        """Initialize the interaction.

        :param clamp_score: optional clamp bounds for the score; a single float
            ``c`` is expanded to the symmetric interval ``(-c, c)``.
        """
        super().__init__()
        if isinstance(clamp_score, float):
            clamp_score = (-clamp_score, clamp_score)
        self.clamp_score = clamp_score
    def _prepare_state_for_functional(self) -> MutableMapping[str, Any]:  # noqa: D102
        return dict(clamp=self.clamp_score)
    @staticmethod
    def _prepare_hrt_for_functional(
        h: HeadRepresentation,
        r: RelationRepresentation,
        t: TailRepresentation,
    ) -> MutableMapping[str, torch.FloatTensor]:  # noqa: D102
        # Split each pair into its forward and inverse (_inv) representation.
        return dict(h=h[0], h_inv=h[1], r=r[0], r_inv=r[1], t=t[0], t_inv=t[1])
| 34.565416 | 119 | 0.635733 |
7958adf3b828ffe6412a76280c707cbcef3a7cf5 | 25,068 | py | Python | lib/DashboardService/DashboardServiceServer.py | eapearson/kbase-sdk-module-dashboard-service | c45562bcb78e3944e70d16ef55dfaa25932ec094 | [
"MIT"
] | null | null | null | lib/DashboardService/DashboardServiceServer.py | eapearson/kbase-sdk-module-dashboard-service | c45562bcb78e3944e70d16ef55dfaa25932ec094 | [
"MIT"
] | null | null | null | lib/DashboardService/DashboardServiceServer.py | eapearson/kbase-sdk-module-dashboard-service | c45562bcb78e3944e70d16ef55dfaa25932ec094 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from DashboardService.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
# Environment variable naming the deployment config file.
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
# Environment variable naming the service (and its config section).
SERVICE = 'KB_SERVICE_NAME'
# Config key expected to hold the auth service URL.
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
    """Return the deployment config file path from the environment, or None if unset."""
    return environ.get(DEPLOY)
def get_service_name():
    """Return the configured service name from the environment, or None if unset."""
    return environ.get(SERVICE)
def get_config():
    """Load this service's section of the deployment config as a plain dict.

    Returns None when no deployment config file is configured in the
    environment; falls back to the 'DashboardService' section when no
    service name is set.
    """
    if not get_config_file():
        return None
    parser = ConfigParser()
    parser.read(get_config_file())
    section = get_service_name() or 'DashboardService'
    return {name: value for name, value in parser.items(section)}
# Load the deployment configuration once at import time.
config = get_config()
# NOTE(review): imported mid-file, after config parsing — presumably so any
# import-time work in the Impl module runs with the config machinery in place.
from DashboardService.DashboardServiceImpl import DashboardService  # noqa @IgnorePep8
# Single shared implementation instance used to serve all requests.
impl_DashboardService = DashboardService(config)
class JSONObjectEncoder(json.JSONEncoder):
    """JSON encoder that also handles sets, frozensets, and objects exposing ``toJSONable()``."""
    def default(self, obj):
        # Sets have no JSON representation; serialize them as lists.
        if isinstance(obj, (set, frozenset)):
            return list(obj)
        # Objects may opt in to serialization by providing toJSONable().
        if hasattr(obj, 'toJSONable'):
            return obj.toJSONable()
        # Anything else: defer to the base class (which raises TypeError).
        return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
    """JSON-RPC dispatcher that threads a per-request context (``ctx``) into every method call.

    Extends :class:`jsonrpcbase.JSONRPCService`; relies on its internals
    (``method_data``, ``_man_args``, ``_fill_request``, ...).
    """
    def call(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in a JSON
        string or None if there is none.
        Arguments:
        jsondata -- remote method call in jsonrpc format
        """
        result = self.call_py(ctx, jsondata)
        if result is not None:
            # Custom encoder so sets and toJSONable-aware objects serialize.
            return json.dumps(result, cls=JSONObjectEncoder)
        return None
    def _call_method(self, ctx, request):
        """Calls given method with given params and returns it value."""
        method = self.method_data[request['method']]['method']
        params = request['params']
        result = None
        try:
            if isinstance(params, list):
                # Does it have enough arguments?
                # The "- 1" offsets account for ctx, which is injected as the
                # first positional argument and not part of the JSON params.
                if len(params) < self._man_args(method) - 1:
                    raise InvalidParamsError('not enough arguments')
                # Does it have too many arguments?
                if(not self._vargs(method) and len(params) >
                        self._max_args(method) - 1):
                    raise InvalidParamsError('too many arguments')
                result = method(ctx, *params)
            elif isinstance(params, dict):
                # Do not accept keyword arguments if the jsonrpc version is
                # not >=1.1.
                if request['jsonrpc'] < 11:
                    raise KeywordError
                result = method(ctx, **params)
            else:  # No params
                result = method(ctx)
        except JSONRPCError:
            # JSON-RPC protocol errors propagate unchanged.
            raise
        except Exception as e:
            # log.exception('method %s threw an exception' % request['method'])
            # Exception was raised inside the method: wrap it in a server
            # error carrying the traceback and the original arguments.
            newerr = JSONServerError()
            newerr.trace = traceback.format_exc()
            if len(e.args) == 1:
                newerr.data = repr(e.args[0])
            else:
                newerr.data = repr(e.args)
            raise newerr
        return result
    def call_py(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in python
        object format or None if there is none.
        This method is same as call() except the return value is a python
        object instead of JSON string. This method is mainly only useful for
        debugging purposes.
        """
        rdata = jsondata
        # we already deserialize the json string earlier in the server code, no
        # need to do it again
        # try:
        #     rdata = json.loads(jsondata)
        # except ValueError:
        #     raise ParseError
        # set some default values for error handling
        request = self._get_default_vals()
        if isinstance(rdata, dict) and rdata:
            # It's a single request.
            self._fill_request(request, rdata)
            respond = self._handle_request(ctx, request)
            # Don't respond to notifications
            if respond is None:
                return None
            return respond
        elif isinstance(rdata, list) and rdata:
            # It's a batch.
            requests = []
            responds = []
            for rdata_ in rdata:
                # set some default values for error handling
                request_ = self._get_default_vals()
                self._fill_request(request_, rdata_)
                requests.append(request_)
            for request_ in requests:
                respond = self._handle_request(ctx, request_)
                # Don't respond to notifications
                if respond is not None:
                    responds.append(respond)
            if responds:
                return responds
            # Nothing to respond.
            return None
        else:
            # empty dict, list or wrong type
            raise InvalidRequestError
    def _handle_request(self, ctx, request):
        """Handles given request and returns its response."""
        # Optional parameter type validation, if the method registered types.
        if 'types' in self.method_data[request['method']]:
            self._validate_params_types(request['method'], request['params'])
        result = self._call_method(ctx, request)
        # Do not respond to notifications.
        if request['id'] is None:
            return None
        respond = {}
        self._fill_ver(request['jsonrpc'], respond)
        respond['result'] = result
        respond['id'] = request['id']
        return respond
class MethodContext(dict):
    """Per-call context passed as the first argument to every RPC method.

    Behaves as a plain dict holding request metadata (client IP, user id,
    token, module/method names, call id, provenance, ...) and additionally
    exposes logging helpers that forward to the server-side logger.
    """

    def __init__(self, logger):
        # Every context slot starts out unset; the WSGI handler fills
        # them in as the request is processed.
        for slot in ('client_ip', 'user_id', 'authenticated', 'token',
                     'module', 'method', 'call_id', 'rpc_context',
                     'provenance'):
            self[slot] = None
        # Debug levels that log_debug forwards verbatim without remapping.
        self._debug_levels = {7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'}
        self._logger = logger

    def log_err(self, message):
        """Log *message* at error level."""
        self._log(log.ERR, message)

    def log_info(self, message):
        """Log *message* at info level."""
        self._log(log.INFO, message)

    def log_debug(self, message, level=1):
        """Log a debug message.

        *level* may be a raw debug level (7-9 or 'DEBUG'..'DEBUG3'),
        forwarded unchanged, or an integer 1-3 that is mapped onto the
        syslog-style levels 7-9. Anything else raises ValueError.
        """
        if level not in self._debug_levels:
            level = int(level)
            if level < 1 or level > 3:
                raise ValueError("Illegal log level: " + str(level))
            level = level + 6
        self._log(level, message)

    def set_log_level(self, level):
        self._logger.set_log_level(level)

    def get_log_level(self):
        return self._logger.get_log_level()

    def clear_log_level(self):
        self._logger.clear_user_log_level()

    def _log(self, level, message):
        # All call metadata recorded in the context rides along with
        # every log line.
        self._logger.log_message(level, message, self['client_ip'],
                                 self['user_id'], self['module'],
                                 self['method'], self['call_id'])

    def provenance(self):
        """Return provenance, preferring the SDK callback server if set.

        Falls back to the locally recorded 'provenance' entry when no
        callback server URL is present in the environment.
        """
        callback_url = os.environ.get('SDK_CALLBACK_URL')
        if not callback_url:
            return self.get('provenance')
        # OK, there's a callback server from which we can get provenance.
        payload = json.dumps({'method': 'CallbackServer.get_provenance',
                              'params': [],
                              'version': '1.1',
                              'id': str(_random.random())[2:]})
        response = _requests.post(callback_url, data=payload,
                                  timeout=60)
        response.encoding = 'utf-8'
        if response.status_code == 500:
            # A JSON body with an 'error' member is a structured server
            # error; anything else is reported as an unknown failure.
            if ('content-type' in response.headers and
                    response.headers['content-type'] ==
                    'application/json'):
                err = response.json()
                if 'error' in err:
                    raise ServerError(**err['error'])
                else:
                    raise ServerError('Unknown', 0, response.text)
            else:
                raise ServerError('Unknown', 0, response.text)
        if not response.ok:
            response.raise_for_status()
        resp = response.json()
        if 'result' not in resp:
            raise ServerError('Unknown', 0,
                              'An unknown server error occurred')
        return resp['result'][0]
class ServerError(Exception):
    """Raised when an RPC call returned an error.

    Fields:
    name - the name of the error.
    code - the error code.
    message - a human readable error message.
    data - the server side stacktrace.
    """

    def __init__(self, name, code, message, data=None, error=None):
        Exception.__init__(self, message)
        self.name = name
        self.code = code
        self.message = message if message else ''
        # data = JSON RPC 2.0, error = 1.1
        self.data = data or error or ''

    def __str__(self):
        return '{}: {}. {}\n{}'.format(
            self.name, self.code, self.message, self.data)
def getIPAddress(environ):
    """Best-effort client IP address extracted from the WSGI environ.

    Prefers the X-Forwarded-For / X-Real-IP headers unless the service
    config explicitly sets 'dont_trust_x_ip_headers' to the string
    'true'; falls back to the socket-level REMOTE_ADDR.
    """
    forwarded_for = environ.get('HTTP_X_FORWARDED_FOR')
    real_ip = environ.get('HTTP_X_REAL_IP')
    # X-* headers are client-controlled, so they are only honoured when
    # the config does not explicitly disable trusting them.
    trust_x_headers = config is None or \
        config.get('dont_trust_x_ip_headers') != 'true'

    if trust_x_headers:
        if forwarded_for:
            # First entry in the comma-separated chain is the origin.
            return forwarded_for.split(',')[0].strip()
        if real_ip:
            return real_ip.strip()
    return environ.get('REMOTE_ADDR')
class Application(object):
    # Wrap the wsgi handler in a class definition so that we can
    # do some initialization and avoid regenerating stuff over
    # and over

    def logcallback(self):
        # Keep the server log pointed at the same file as the user log
        # whenever the user log's file changes.
        self.serverlog.set_log_file(self.userlog.get_log_file())

    def log(self, level, context, message):
        # Emit a server-log line enriched with the call metadata held in
        # the MethodContext.
        self.serverlog.log_message(level, message, context['client_ip'],
                                   context['user_id'], context['module'],
                                   context['method'], context['call_id'])

    def __init__(self):
        submod = get_service_name() or 'DashboardService'
        self.userlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, changecallback=self.logcallback,
            config=get_config_file())
        self.serverlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, logfile=self.userlog.get_log_file())
        self.serverlog.set_log_level(6)
        self.rpc_service = JSONRPCServiceCustom()
        # Per-method authentication requirement: 'required', 'optional'
        # or absent (treated as 'none') -- checked in __call__.
        self.method_authentication = dict()
        self.rpc_service.add(impl_DashboardService.list_all_narratives,
                             name='DashboardService.list_all_narratives',
                             types=[dict])
        self.method_authentication['DashboardService.list_all_narratives'] = 'optional'  # noqa
        self.rpc_service.add(impl_DashboardService.create_narrative,
                             name='DashboardService.create_narrative',
                             types=[dict])
        self.method_authentication['DashboardService.create_narrative'] = 'required'  # noqa
        self.rpc_service.add(impl_DashboardService.delete_narrative,
                             name='DashboardService.delete_narrative',
                             types=[dict])
        self.method_authentication['DashboardService.delete_narrative'] = 'required'  # noqa
        self.rpc_service.add(impl_DashboardService.share_narrative,
                             name='DashboardService.share_narrative',
                             types=[dict])
        self.method_authentication['DashboardService.share_narrative'] = 'required'  # noqa
        self.rpc_service.add(impl_DashboardService.unshare_narrative,
                             name='DashboardService.unshare_narrative',
                             types=[dict])
        self.method_authentication['DashboardService.unshare_narrative'] = 'required'  # noqa
        self.rpc_service.add(impl_DashboardService.share_narrative_global,
                             name='DashboardService.share_narrative_global',
                             types=[dict])
        self.method_authentication['DashboardService.share_narrative_global'] = 'required'  # noqa
        self.rpc_service.add(impl_DashboardService.unshare_narrative_global,
                             name='DashboardService.unshare_narrative_global',
                             types=[dict])
        self.method_authentication['DashboardService.unshare_narrative_global'] = 'required'  # noqa
        self.rpc_service.add(impl_DashboardService.status,
                             name='DashboardService.status',
                             types=[dict])
        authurl = config.get(AUTH) if config else None
        self.auth_client = _KBaseAuth(authurl)

    def __call__(self, environ, start_response):
        # WSGI entry point: parse the JSON-RPC body, enforce per-method
        # authentication, dispatch, and serialize the response.
        # Context object, equivalent to the perl impl CallContext
        ctx = MethodContext(self.userlog)
        ctx['client_ip'] = getIPAddress(environ)
        status = '500 Internal Server Error'

        try:
            body_size = int(environ.get('CONTENT_LENGTH', 0))
        except (ValueError):
            body_size = 0
        if environ['REQUEST_METHOD'] == 'OPTIONS':
            # we basically do nothing and just return headers (CORS
            # preflight support).
            status = '200 OK'
            rpc_result = ""
        else:
            request_body = environ['wsgi.input'].read(body_size)
            try:
                req = json.loads(request_body)
            except ValueError as ve:
                err = {'error': {'code': -32700,
                                 'name': "Parse error",
                                 'message': str(ve),
                                 }
                       }
                rpc_result = self.process_error(err, ctx, {'version': '1.1'})
            else:
                ctx['module'], ctx['method'] = req['method'].split('.')
                ctx['call_id'] = req['id']
                ctx['rpc_context'] = {
                    'call_stack': [{'time': self.now_in_utc(),
                                    'method': req['method']}
                                   ]
                }
                prov_action = {'service': ctx['module'],
                               'method': ctx['method'],
                               'method_params': req['params']
                               }
                ctx['provenance'] = [prov_action]
                try:
                    token = environ.get('HTTP_AUTHORIZATION')
                    # parse out the method being requested and check if it
                    # has an authentication requirement
                    method_name = req['method']
                    auth_req = self.method_authentication.get(
                        method_name, 'none')
                    if auth_req != 'none':
                        if token is None and auth_req == 'required':
                            err = JSONServerError()
                            err.data = (
                                'Authentication required for ' +
                                'DashboardService ' +
                                'but no authentication header was passed')
                            raise err
                        elif token is None and auth_req == 'optional':
                            # Optional auth without a token: proceed
                            # anonymously.
                            pass
                        else:
                            try:
                                user = self.auth_client.get_user(token)
                                ctx['user_id'] = user
                                ctx['authenticated'] = 1
                                ctx['token'] = token
                            except Exception as e:
                                # A bad token only aborts the call when
                                # authentication is required.
                                if auth_req == 'required':
                                    err = JSONServerError()
                                    err.data = \
                                        "Token validation failed: %s" % e
                                    raise err
                    if (environ.get('HTTP_X_FORWARDED_FOR')):
                        self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
                                 environ.get('HTTP_X_FORWARDED_FOR'))
                    self.log(log.INFO, ctx, 'start method')
                    rpc_result = self.rpc_service.call(ctx, req)
                    self.log(log.INFO, ctx, 'end method')
                    status = '200 OK'
                except JSONRPCError as jre:
                    err = {'error': {'code': jre.code,
                                     'name': jre.message,
                                     'message': jre.data
                                     }
                           }
                    trace = jre.trace if hasattr(jre, 'trace') else None
                    rpc_result = self.process_error(err, ctx, req, trace)
                except Exception:
                    err = {'error': {'code': 0,
                                     'name': 'Unexpected Server Error',
                                     'message': 'An unexpected server error ' +
                                                'occurred',
                                     }
                           }
                    rpc_result = self.process_error(err, ctx, req,
                                                    traceback.format_exc())

        # print('Request method was %s\n' % environ['REQUEST_METHOD'])
        # print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
        # print('Request body was: %s' % request_body)
        # print('Result from the method call is:\n%s\n' % \
        #     pprint.pformat(rpc_result))

        if rpc_result:
            response_body = rpc_result
        else:
            response_body = ''

        response_headers = [
            ('Access-Control-Allow-Origin', '*'),
            ('Access-Control-Allow-Headers', environ.get(
                'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
            ('content-type', 'application/json'),
            ('content-length', str(len(response_body)))]
        start_response(status, response_headers)
        return [response_body.encode('utf8')]

    def process_error(self, error, context, request, trace=None):
        # Serialize an error response, adapting the shape to the JSON-RPC
        # version implied by the request ('version' = 1.1, 'jsonrpc' = 2.0,
        # neither = 1.0).
        if trace:
            self.log(log.ERR, context, trace.split('\n')[0:-1])
        if 'id' in request:
            error['id'] = request['id']
        if 'version' in request:
            error['version'] = request['version']
            e = error['error'].get('error')
            if not e:
                error['error']['error'] = trace
        elif 'jsonrpc' in request:
            error['jsonrpc'] = request['jsonrpc']
            error['error']['data'] = trace
        else:
            error['version'] = '1.0'
            error['error']['error'] = trace
        return json.dumps(error)

    def now_in_utc(self):
        # noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
        # Local time in ISO format plus the local UTC offset (+hh:mm).
        dtnow = datetime.datetime.now()
        dtutcnow = datetime.datetime.utcnow()
        delta = dtnow - dtutcnow
        hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
                        60)
        return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
# Single WSGI application instance shared by uwsgi, start_server() and
# process_async_cli().
application = Application()

# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
    import uwsgi
    # Before we do anything with the application, see if the
    # configs specify patching all std routines to be asynch
    # *ONLY* use this if you are going to wrap the service in
    # a wsgi container that has enabled gevent, such as
    # uwsgi with the --gevent option
    if config is not None and config.get('gevent_monkeypatch_all', False):
        print("Monkeypatching std libraries for async")
        from gevent import monkey
        monkey.patch_all()
    uwsgi.applications = {'': application}
except ImportError:
    # Not available outside of wsgi, ignore
    pass

# Handle to the child server process created by
# start_server(newprocess=True); None while no server is running.
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
    """Start the WSGI server.

    With the defaults the server runs in the current thread on a system
    assigned port, and this call blocks in the serve loop until it is
    interrupted. Pass newprocess=True to run the server in a daemonized
    child process instead: that makes stop_server() usable and lets this
    call return the bound port number.
    """
    global _proc
    if _proc:
        raise RuntimeError('server is already running')
    server = make_server(host, port, application)
    # Port 0 means "system assigned"; read back the real port.
    port = server.server_address[1]
    print("Listening on port %s" % port)
    if not newprocess:
        server.serve_forever()
    else:
        _proc = Process(target=server.serve_forever)
        _proc.daemon = True
        _proc.start()
    return port
def stop_server():
    """Terminate the server process started by start_server(newprocess=True).

    Raises:
        RuntimeError: if no server process is currently running.
    """
    global _proc
    if _proc is None:
        # Guard mirrors start_server()'s check: without it a double stop
        # (or a stop before any start) failed with an opaque
        # AttributeError on None.
        raise RuntimeError('server is not running')
    _proc.terminate()
    _proc = None
def process_async_cli(input_file_path, output_file_path, token):
    """Run a single JSON-RPC request from a file and write the response.

    Reads the request from input_file_path, executes it against the
    module-level `application`, and writes the JSON response to
    output_file_path. Returns 0 on success or 500 when the response
    carries an error.
    """
    exit_code = 0
    with open(input_file_path) as data_file:
        req = json.load(data_file)
    # Normalize optional JSON-RPC envelope fields.
    if 'version' not in req:
        req['version'] = '1.1'
    if 'id' not in req:
        req['id'] = str(_random.random())[2:]
    ctx = MethodContext(application.userlog)
    if token:
        user = application.auth_client.get_user(token)
        ctx['user_id'] = user
        ctx['authenticated'] = 1
        ctx['token'] = token
    if 'context' in req:
        ctx['rpc_context'] = req['context']
    # Mark this call as coming from the async CLI path.
    ctx['CLI'] = 1
    ctx['module'], ctx['method'] = req['method'].split('.')
    prov_action = {'service': ctx['module'], 'method': ctx['method'],
                   'method_params': req['params']}
    ctx['provenance'] = [prov_action]
    resp = None
    try:
        resp = application.rpc_service.call_py(ctx, req)
    except JSONRPCError as jre:
        trace = jre.trace if hasattr(jre, 'trace') else None
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': jre.code,
                          'name': jre.message,
                          'message': jre.data,
                          'error': trace}
                }
    except Exception:
        # Any non-JSONRPC failure becomes a generic server error with the
        # full traceback attached.
        trace = traceback.format_exc()
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': 0,
                          'name': 'Unexpected Server Error',
                          'message': 'An unexpected server error occurred',
                          'error': trace}
                }
    if 'error' in resp:
        exit_code = 500
    with open(output_file_path, "w") as f:
        f.write(json.dumps(resp, cls=JSONObjectEncoder))
    return exit_code
if __name__ == "__main__":
    # Async CLI mode: <input-json-file> <output-file> [token-or-token-file]
    if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
            os.path.isfile(sys.argv[1])):
        token = None
        if len(sys.argv) == 4:
            # The third argument is either a file containing the token or
            # the token string itself.
            if os.path.isfile(sys.argv[3]):
                with open(sys.argv[3]) as token_file:
                    token = token_file.read()
            else:
                token = sys.argv[3]
        sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
    # HTTP server mode: optional --port/--host flags.
    try:
        opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
    except GetoptError as err:
        # print help information and exit:
        print(str(err))  # will print something like "option -a not recognized"
        sys.exit(2)
    port = 9999
    host = 'localhost'
    for o, a in opts:
        if o == '--port':
            port = int(a)
        elif o == '--host':
            host = a
            print("Host set to %s" % host)
        else:
            assert False, "unhandled option"

    start_server(host=host, port=port)
    # print("Listening on port %s" % port)
    # httpd = make_server( host, port, application)
    #
    # httpd.serve_forever()
| 38.097264 | 151 | 0.549705 |
7958af96652dc8b4cf01f7ae1d58396f49e55dfe | 3,019 | py | Python | graph_clustering/cluster_stats.py | ryota-sugimoto/virome_scripts | 161f9a71f6a1ea78c7028a29d0422bac50b167f5 | [
"MIT"
] | 1 | 2020-09-06T19:42:02.000Z | 2020-09-06T19:42:02.000Z | graph_clustering/cluster_stats.py | ryota-sugimoto/virome_scripts | 161f9a71f6a1ea78c7028a29d0422bac50b167f5 | [
"MIT"
] | null | null | null | graph_clustering/cluster_stats.py | ryota-sugimoto/virome_scripts | 161f9a71f6a1ea78c7028a29d0422bac50b167f5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('protospacers_bedfile',
                    type=argparse.FileType('r'))
parser.add_argument('cluster_file',
                    type=argparse.FileType('r'))
parser.add_argument('-t', '--min_co_occurance',
                    type=int,
                    default=3)
parser.add_argument('-d', '--min_dice_coefficient',
                    type=float,
                    default=0.3)
parser.add_argument('-m', '--min_cluster_size',
                    type=int,
                    default=10)
# NOTE(review): parsed but never used below; kept for CLI compatibility.
parser.add_argument('-s', '--min_keyspacer_score',
                    type=float,
                    default=0.7)
args = parser.parse_args()

# One cluster per line, whitespace-separated spacer names.
clusters = []
for line in args.cluster_file:
    clusters.append(line.strip().split())

# Map contig -> set of spacers with a protospacer hit on that contig.
# BED columns used: 0 = contig name, 3 = spacer name.
d = {}
for line in args.protospacers_bedfile:
    fields = line.strip().split()
    contig = fields[0]
    spacer = fields[3]
    d.setdefault(contig, set()).add(spacer)

from itertools import chain
import numpy as np
np.set_printoptions(precision=3)

# Stable integer ids for spacers and contigs (sorted for determinism).
spacers = np.array(sorted(set(chain.from_iterable(d.values()))))
spacers_id = {key: i for i, key in enumerate(spacers)}
contigs = np.array(sorted(d.keys()))
contigs_id = {key: i for i, key in enumerate(contigs)}

# Sparse incidence matrix (contigs x spacers): 1 where the spacer hits.
row = []
col = []
for contig in contigs:
    for spacer in d[contig]:
        row.append(contigs_id[contig])
        col.append(spacers_id[spacer])
data = np.ones(len(row))

from scipy.sparse import csr_matrix, find
contig_spacer_mat = csr_matrix((data, (row, col)),
                               shape=(len(contigs), len(spacers)))
# Spacer-by-spacer co-occurrence counts; the diagonal holds per-spacer
# totals used below for the Dice coefficient.
spacer_cooccur_mat = contig_spacer_mat.T * contig_spacer_mat

# Keep only edges whose Dice coefficient 2|A&B|/(|A|+|B|) and raw
# co-occurrence count both pass the thresholds.
i, j, v = find(spacer_cooccur_mat)
diag = spacer_cooccur_mat.diagonal()
w = np.where(np.logical_and(2 * v / (diag[i] + diag[j]) >= args.min_dice_coefficient,
                            v >= args.min_co_occurance), v, 0)
spacer_cooccur_mat_ = csr_matrix((w, (i, j)),
                                 shape=spacer_cooccur_mat.shape)
spacer_cooccur_mat_.setdiag(0)
spacer_cooccur_mat_.eliminate_zeros()

import igraph
from scipy.sparse import triu

# Build an undirected weighted graph from the upper triangle of the
# filtered co-occurrence matrix.
upper = triu(spacer_cooccur_mat_)
row, col = upper.nonzero()
weight = upper.data
g = igraph.Graph(list(zip(row.tolist(), col.tolist())),
                 vertex_attrs={'name': spacers},
                 edge_attrs={'weight': weight})
assortativity = g.assortativity_degree(directed=False)

for cluster_id, cluster in enumerate(clusters):
    if len(cluster) < args.min_cluster_size:
        continue
    subg = g.subgraph(cluster)
    clustering_coefficient = subg.transitivity_undirected()
    # Weighted degree of each spacer, normalized by the cluster maximum.
    # BUGFIX: np.float was removed in NumPy 1.24; use the builtin float.
    degrees = np.array(subg.strength(loops=False, weights='weight'),
                       dtype=float)
    degrees /= np.max(degrees)
    subg_spacers = np.array(subg.vs['name'])
    # Loop variables renamed so they no longer shadow the contig map `d`.
    keyed_subg_spacers = [name + ':' + str(score)
                          for name, score in zip(subg_spacers, degrees)]
    tmp = ','.join(keyed_subg_spacers)
    print("\t".join(map(str, [cluster_id,
                              clustering_coefficient])) + '\t' + tmp)
7958b0ac5a70495dfaf05400ac4e7cbefaa0545d | 3,310 | py | Python | pypureclient/flasharray/FA_2_5/models/remote_protection_group_response.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 14 | 2018-12-07T18:30:27.000Z | 2022-02-22T09:12:33.000Z | pypureclient/flasharray/FA_2_5/models/remote_protection_group_response.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 28 | 2019-09-17T21:03:52.000Z | 2022-03-29T22:07:35.000Z | pypureclient/flasharray/FA_2_5/models/remote_protection_group_response.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 15 | 2020-06-11T15:50:08.000Z | 2022-03-21T09:27:25.000Z | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_5 import models
class RemoteProtectionGroupResponse(object):
    """
    Swagger-generated response model.

    Attributes:
      swagger_types (dict): The key is attribute name
          and the value is attribute type.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
    """

    # Python attribute name -> declared Swagger type.
    swagger_types = {
        'items': 'list[RemoteProtectionGroup]'
    }

    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'items': 'items'
    }

    required_args = {
    }

    def __init__(
        self,
        items=None,  # type: List[models.RemoteProtectionGroup]
    ):
        """
        Keyword args:
            items (list[RemoteProtectionGroup]): Returns a list of all items after filtering. The values are displayed for each name where meaningful.
        """
        if items is not None:
            self.items = items

    def __setattr__(self, key, value):
        # Reject attributes that are not part of the Swagger model.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `RemoteProtectionGroupResponse`".format(key))
        self.__dict__[key] = value

    def __getattribute__(self, item):
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            # Unset lazy Property placeholders behave like missing
            # attributes so hasattr() works as expected.
            raise AttributeError
        else:
            return value

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(RemoteProtectionGroupResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, RemoteProtectionGroupResponse):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 29.553571 | 150 | 0.563444 |
7958b20f29426ebecba4ff5e2c94c08fa7752c41 | 6,046 | py | Python | research/object_detection/utils/variables_helper.py | jdavidagudelo/tensorflow-models | 6f019beec73b01861363bf717706e27f4210b979 | [
"Apache-2.0"
] | 1 | 2021-05-17T01:42:29.000Z | 2021-05-17T01:42:29.000Z | research/object_detection/utils/variables_helper.py | jdavidagudelo/tensorflow-models | 6f019beec73b01861363bf717706e27f4210b979 | [
"Apache-2.0"
] | null | null | null | research/object_detection/utils/variables_helper.py | jdavidagudelo/tensorflow-models | 6f019beec73b01861363bf717706e27f4210b979 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for manipulating collections of variables during training.
"""
import logging
import re
import tensorflow as tf
slim = tf.contrib.slim
# TODO(derekjchow): Consider replacing with tf.contrib.filter_variables in
# tensorflow/contrib/framework/python/ops/variables.py
def filter_variables(variables, filter_regex_list, invert=False):
  """Filters out the variables matching the filter_regex.

  Filter out the variables whose name matches any of the regular
  expressions in filter_regex_list and return the remaining variables.
  Optionally, if invert=True, the complement set is returned.

  Args:
    variables: a list of tensorflow variables.
    filter_regex_list: a list of string regular expressions.
    invert: (boolean). If True, returns the complement of the filter set; that
      is, all variables matching filter_regex are kept and all others discarded.

  Returns:
    a list of filtered variables.
  """
  # Empty/None patterns are ignored.
  patterns = [regex for regex in filter_regex_list if regex]
  kept_vars = []
  for var in variables:
    matched = any(re.match(pattern, var.op.name) for pattern in patterns)
    # Keep non-matching variables by default; matching ones when inverted.
    if (not matched) != invert:
      kept_vars.append(var)
  return kept_vars
def multiply_gradients_matching_regex(grads_and_vars, regex_list, multiplier):
  """Multiply gradients whose variable names match a regular expression.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).
    regex_list: A list of string regular expressions.
    multiplier: A (float) multiplier to apply to each gradient matching the
      regular expression.

  Returns:
    grads_and_vars: A list of gradient to variable pairs (tuples).
  """
  all_vars = [var for _, var in grads_and_vars]
  # invert=True keeps exactly the variables that match the regexes.
  selected_vars = filter_variables(all_vars, regex_list, invert=True)
  grad_multipliers = {}
  for var in selected_vars:
    logging.info('Applying multiplier %f to variable [%s]',
                 multiplier, var.op.name)
    grad_multipliers[var] = float(multiplier)
  return slim.learning.multiply_gradients(grads_and_vars,
                                          grad_multipliers)
def freeze_gradients_matching_regex(grads_and_vars, regex_list):
  """Freeze gradients whose variable names match a regular expression.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).
    regex_list: A list of string regular expressions.

  Returns:
    grads_and_vars: A list of gradient to variable pairs (tuples) that do not
      contain the variables and gradients matching the regex.
  """
  # invert=True keeps exactly the variables that match the regexes --
  # these are the ones to freeze (drop from the gradient list).
  frozen_vars = filter_variables([var for _, var in grads_and_vars],
                                 regex_list, invert=True)
  for var in frozen_vars:
    logging.info('Freezing variable [%s]', var.op.name)
  # Membership test deliberately uses the list (identity/==) rather than a
  # set, since TF variables are compared here as returned by the filter.
  return [pair for pair in grads_and_vars if pair[1] not in frozen_vars]
def get_variables_available_in_checkpoint(variables,
                                          checkpoint_path,
                                          include_global_step=True):
  """Returns the subset of variables available in the checkpoint.

  Inspects given checkpoint and returns the subset of variables that are
  available in it.

  TODO(rathodv): force input and output to be a dictionary.

  Args:
    variables: a list or dictionary of variables to find in checkpoint.
    checkpoint_path: path to the checkpoint to restore variables from.
    include_global_step: whether to include `global_step` variable, if it
      exists. Default True.

  Returns:
    A list or dictionary of variables.
  Raises:
    ValueError: if `variables` is not a list or dict.
  """
  # Normalize input to a name -> variable mapping.
  if isinstance(variables, list):
    variable_names_map = {variable.op.name: variable for variable in variables}
  elif isinstance(variables, dict):
    variable_names_map = variables
  else:
    raise ValueError('`variables` is expected to be a list or dict.')
  ckpt_reader = tf.train.NewCheckpointReader(checkpoint_path)
  ckpt_vars_to_shape_map = ckpt_reader.get_variable_to_shape_map()
  if not include_global_step:
    ckpt_vars_to_shape_map.pop(tf.GraphKeys.GLOBAL_STEP, None)
  vars_in_ckpt = {}
  # Sorted iteration gives deterministic warning order.
  for variable_name, variable in sorted(variable_names_map.items()):
    if variable_name in ckpt_vars_to_shape_map:
      # Only keep variables whose checkpoint shape matches the model's.
      if ckpt_vars_to_shape_map[variable_name] == variable.shape.as_list():
        vars_in_ckpt[variable_name] = variable
      else:
        logging.warning('Variable [%s] is available in checkpoint, but has an '
                        'incompatible shape with model variable. Checkpoint '
                        'shape: [%s], model variable shape: [%s]. This '
                        'variable will not be initialized from the checkpoint.',
                        variable_name, ckpt_vars_to_shape_map[variable_name],
                        variable.shape.as_list())
    else:
      logging.warning('Variable [%s] is not available in checkpoint',
                      variable_name)
  # Mirror the input container type in the output.
  if isinstance(variables, list):
    return vars_in_ckpt.values()
  return vars_in_ckpt
| 40.851351 | 88 | 0.680284 |
7958b2e1c498af4b3081d0f0b9df8088c1f03ba1 | 4,827 | py | Python | talent.py | Schinkenwurst/zbd_got_talent | f7f3de15f8a90f9cb1023c2ce868733d20febdd8 | [
"MIT"
] | null | null | null | talent.py | Schinkenwurst/zbd_got_talent | f7f3de15f8a90f9cb1023c2ce868733d20febdd8 | [
"MIT"
] | null | null | null | talent.py | Schinkenwurst/zbd_got_talent | f7f3de15f8a90f9cb1023c2ce868733d20febdd8 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
from tkinter import *
import pika
from threading import Thread
import simpleaudio as sa
#######################################################################
# Consumer for RabbitMQ
def amqp_consume():
    """Blocking RabbitMQ consumer loop for the 'buzzer' queue.

    Dispatches jury alert / reset messages to the GUI helpers. Runs until
    interrupted with Ctrl-C.

    NOTE(review): broker credentials are hard-coded here; the
    basic_consume(callback, queue=...) signature and no_ack keyword match
    pika < 1.0 -- confirm the installed pika version before upgrading.
    """
    # Callback on message
    def onmessage(ch, method, properties, body):
        # Message bodies are raw bytes; the payload names the action.
        body = body.decode('utf8')
        if body == "jury0":
            print("[INFO] Alert jury0")
            set_alert("jury0")
        elif body == "jury1":
            print("[INFO] Alert jury1")
            set_alert("jury1")
        elif body == "jury2":
            print("[INFO] Alert jury2")
            set_alert("jury2")
        elif body == "reset":
            print("[INFO] Reset")
            reset_alert()
        else:
            print("[INFO] UNKNOWN")
        # Newline for nicer looking
        print()

    # Main of amqp_consume
    amqp_credentials = pika.PlainCredentials('zbdgt', 'schinken')
    amqp_parameter = pika.ConnectionParameters(
        '192.168.0.1', 5672, '/', amqp_credentials)
    amqp_connection = pika.BlockingConnection(amqp_parameter)
    amqp_channel = amqp_connection.channel()
    amqp_channel.queue_declare(queue='buzzer', auto_delete=True)
    amqp_channel.basic_consume(onmessage, queue='buzzer', no_ack=True)
    try:
        amqp_channel.start_consuming()
    except KeyboardInterrupt:
        amqp_channel.stop_consuming()
        amqp_connection.close()
#######################################################################
# Play buzzer-sound
def play_alarm():
    """Play the buzzer sample once, blocking until playback finishes."""
    print("[INFO] Playing sound")
    playback = sa.WaveObject.from_wave_file("sounds/buzzer1.wav").play()
    playback.wait_done()
#######################################################################
# Set alerts and call buzzer-sound routine
def set_alert(action):
    """Show the alert image for the given jury member and sound the buzzer.

    *action* is one of "jury0", "jury1", "jury2"; anything else is
    silently ignored, matching the previous behavior.
    """
    panels = {
        "jury0": (pic_alert_j0, 0),
        "jury1": (pic_alert_j1, 1),
        "jury2": (pic_alert_j2, 2),
    }
    if action not in panels:
        return
    image, column = panels[action]
    label = Label(root, bg="black", image=image)
    label.grid(row=0, column=column, padx='0', pady='0')
    play_alarm()  # call buzzer-sound routine
#######################################################################
# Reset alerts
def reset_alert():
    """Restore all three jury panels to their blank images."""
    blank_images = (pic_blank_j0, pic_blank_j1, pic_blank_j2)
    # Create all labels first, then grid them, preserving the original
    # widget-creation order.
    labels = [Label(root, bg="black", image=image) for image in blank_images]
    for column, label in enumerate(labels):
        label.grid(row=0, column=column, padx='5', pady='5')
#######################################################################
# Button actions
def btn_set_alert_j0():
    """Simulate jury 0's buzzer (stand-alone test button)."""
    set_alert("jury0")


def btn_set_alert_j1():
    """Simulate jury 1's buzzer (stand-alone test button)."""
    set_alert("jury1")


def btn_set_alert_j2():
    """Simulate jury 2's buzzer (stand-alone test button)."""
    set_alert("jury2")


def btn_reset_alert():
    """Clear all alerts (stand-alone test button)."""
    reset_alert()
#######################################################################
# Main
#######################################################################
###################################
# Configuration parameter
# When "false", consume alerts from RabbitMQ; any other value shows
# local simulation buttons instead (note: compared as a string, not bool).
stand_alone = "false"

###################################
# Create main GUI object
root = Tk()
root.configure(background='black')
root.title("ZBD got Talent")

# Configure Layout
root.columnconfigure(0, weight=5)
root.columnconfigure(1, weight=0)
root.columnconfigure(2, weight=5)

###################################
# Load pictures (must outlive the labels, so keep module-level refs)
pic_alert_j0 = PhotoImage(file="images/alert_bruce.png")
pic_alert_j1 = PhotoImage(file="images/alert_nazan.png")
pic_alert_j2 = PhotoImage(file="images/alert_dieter.png")
pic_blank_j0 = PhotoImage(file="images/blank_bruce.png")
pic_blank_j1 = PhotoImage(file="images/blank_nazan.png")
pic_blank_j2 = PhotoImage(file="images/blank_dieter.png")

###################################
# Init: start with all panels blank
reset_alert()

###################################
# Start RabbitMQ-Consumer or use buttons for simulation
if stand_alone == "false":
    # Consumer runs in a background thread so the Tk mainloop stays free.
    t1 = Thread(target=amqp_consume)
    # t1.saemon = True
    t1.start()
else:
    b_j0_set = Button(root, text="Set Alert", command=btn_set_alert_j0)
    b_j1_set = Button(root, text="Set Alert", command=btn_set_alert_j1)
    b_j2_set = Button(root, text="Set Alert", command=btn_set_alert_j2)
    b_j0_set.grid(row=1, column=0)
    b_j1_set.grid(row=1, column=1)
    b_j2_set.grid(row=1, column=2)
    # hide alert button for testing
    b_hide = Button(root, text="Clear Alert", command=btn_reset_alert)
    b_hide.grid(row=2, column=1)

# Start main GUI
try:
    root.mainloop()
except KeyboardInterrupt:
    # NOTE(review): bare `exit` only references the builtin and does not
    # call it -- presumably `exit()` was intended; confirm before changing.
    exit
7958b4350ce847609614ff05d065a1d98b75f119 | 952 | py | Python | isi_sdk_8_2_1/test/test_statistics_history_stat.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_8_2_1/test/test_statistics_history_stat.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_8_2_1/test/test_statistics_history_stat.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 8
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_2_1
from isi_sdk_8_2_1.models.statistics_history_stat import StatisticsHistoryStat # noqa: E501
from isi_sdk_8_2_1.rest import ApiException
class TestStatisticsHistoryStat(unittest.TestCase):
    """Unit-test stubs for the StatisticsHistoryStat model."""

    def setUp(self):
        # No fixtures are needed for these stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testStatisticsHistoryStat(self):
        """Test StatisticsHistoryStat"""
        # FIXME: construct object with mandatory attributes with example values
        # model = isi_sdk_8_2_1.models.statistics_history_stat.StatisticsHistoryStat()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| 23.219512 | 100 | 0.720588 |
7958b49c69a794167eccaa70e2fcc90de2aebb0e | 26,125 | py | Python | tests/test_sanity.py | mehdisadeghi/emscripten | a3e96c3bd4751632de6e9d1d57172ac9429fcb2d | [
"MIT"
] | null | null | null | tests/test_sanity.py | mehdisadeghi/emscripten | a3e96c3bd4751632de6e9d1d57172ac9429fcb2d | [
"MIT"
] | null | null | null | tests/test_sanity.py | mehdisadeghi/emscripten | a3e96c3bd4751632de6e9d1d57172ac9429fcb2d | [
"MIT"
] | null | null | null | # Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import os
import platform
import shutil
import time
import re
import tempfile
import zipfile
from subprocess import PIPE, STDOUT
from runner import RunnerCore, path_from_root, env_modify
from runner import create_test_file, ensure_dir, make_executable
from tools.config import config_file, EM_CONFIG
from tools.shared import PYTHON, EMCC
from tools.shared import CANONICAL_TEMP_DIR
from tools.shared import try_delete, config
from tools.shared import EXPECTED_LLVM_VERSION, Cache
from tools import shared, system_libs, utils
# Stamp file recording a successful sanity check; deleting it forces re-checking.
SANITY_FILE = shared.Cache.get_path('sanity.txt')
# Commands the sanity tests exercise: bare emcc, and a bogus runner.py invocation.
commands = [[EMCC], [PYTHON, path_from_root('tests', 'runner.py'), 'blahblah']]
def restore():
  """Overwrite the active config file with the backup taken in setUpClass."""
  backup = config_file + '_backup'
  shutil.copyfile(backup, config_file)
# restore the config file and set it up for our uses
def restore_and_set_up():
  """Reset the config from its backup, then append test-specific settings."""
  restore()
  extra_settings = [
    # make LLVM_ROOT sensitive to the LLVM env var, as we test that
    'LLVM_ROOT = "%s"\n' % config.LLVM_ROOT,
    # unfreeze the cache, so we can test that
    'FROZEN_CACHE = False\n',
  ]
  with open(config_file, 'a') as f:
    f.writelines(extra_settings)
# wipe the config and sanity files, creating a blank slate
def wipe():
  """Delete both the config file and the sanity stamp file."""
  for stale_file in (config_file, SANITY_FILE):
    try_delete(stale_file)
def add_to_config(content):
  """Append *content* to the config file, surrounded by newlines."""
  with open(config_file, 'a') as f:
    f.write('\n%s\n' % content)
def get_basic_config():
  """Return minimal config-file contents pointing at the active toolchain."""
  settings = [
    'LLVM_ROOT = "%s"' % config.LLVM_ROOT,
    'BINARYEN_ROOT = "%s"' % config.BINARYEN_ROOT,
    'NODE_JS = %s' % config.NODE_JS,
  ]
  return '\n'.join(settings) + '\n'
def make_fake_tool(filename, version, report_name=None):
  """Write an executable shell stub at *filename* that reports *version*.

  *report_name* defaults to the basename of *filename*.
  """
  if not report_name:
    report_name = os.path.basename(filename)
  print('make_fake_tool: %s' % filename)
  ensure_dir(os.path.dirname(filename))
  script = ('#!/bin/sh\n'
            'echo "%s version %s"\n'
            'echo "..."\n'
            'exit 0\n') % (report_name, version)
  with open(filename, 'w') as f:
    f.write(script)
  make_executable(filename)
def make_fake_clang(filename, version):
  """Create fake clang and clang++ stubs that only handle --version.

  --version writes to stdout (unlike -v which writes to stderr)
  """
  for tool in (filename, filename + '++'):
    make_fake_tool(tool, version)
def make_fake_llc(filename, targets):
  """Create a fake llc that only handles --version and writes target
  list to stdout.
  """
  print('make_fake_llc: %s' % filename)
  ensure_dir(os.path.dirname(filename))
  script = '#!/bin/sh\necho "llc fake output\nRegistered Targets:\n%s"' % targets
  with open(filename, 'w') as f:
    f.write(script)
  make_executable(filename)
# Message emcc prints when it actually performs the sanity checks.
SANITY_MESSAGE = 'Emscripten: Running sanity checks'
# Entry point of the embuilder tool, used by the embuilder tests below.
EMBUILDER = path_from_root('embuilder.py')
# arguments to build a minimal hello world program, without even libc
# (-O1 avoids -O0's default assertions which bring in checking code;
# FILESYSTEM=0 avoids bringing libc for that)
# (ERROR_ON_UNDEFINED_SYMBOLS=0 is needed because __errno_location is
# not included on the native side but needed by a lot of JS libraries.)
MINIMAL_HELLO_WORLD = [path_from_root('tests', 'hello_world_em_asm.c'), '-O1', '-s', 'FILESYSTEM=0', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0']
class sanity(RunnerCore):
  """End-to-end sanity-check tests for the emcc toolchain configuration.

  These tests deliberately mutate the active EM_CONFIG file (a backup is
  taken in setUpClass and restored in tearDownClass) and must not run with
  EMCC_DEBUG set or with the sanity checks disabled.
  """
  @classmethod
  def setUpClass(cls):
    super(sanity, cls).setUpClass()
    # Unlike the other test suites we explicitly don't want to be skipping
    # the sanity checks here
    del os.environ['EMCC_SKIP_SANITY_CHECK']
    assert os.path.exists(config_file), 'To run these tests, we need a (working!) %s file to already exist' % EM_CONFIG
    shutil.copyfile(config_file, config_file + '_backup')
    print()
    print('Running sanity checks.')
    print('WARNING: This will modify %s, and in theory can break it although it should be restored properly. A backup will be saved in %s_backup' % (config_file, config_file))
    print()
    print('>>> the original settings file is:')
    print(open(config_file).read().strip())
    print('<<<')
    print()
    assert 'EMCC_DEBUG' not in os.environ, 'do not run sanity checks in debug mode!'
  @classmethod
  def tearDownClass(cls):
    super(sanity, cls).tearDownClass()
    restore()
  def setUp(self):
    super(sanity, self).setUp()
    wipe()
    self.start_time = time.time()
  def tearDown(self):
    super(sanity, self).tearDown()
    print('time:', time.time() - self.start_time)
  def do(self, command, env=None):
    # Run `command`, capturing interleaved stdout+stderr; the exit status is
    # NOT checked (callers inspect the returned output instead).
    print('Running: ' + ' '.join(command))
    if type(command) is not list:
      command = [command]
    return self.run_process(command, stdout=PIPE, stderr=STDOUT, check=False, env=env).stdout
  def check_working(self, command, expected=None):
    # Run `command` and assert that `expected` appears in its output. When
    # `expected` is None a default is derived: emcc complains about missing
    # input files, runner.py about the bogus test name in `commands`.
    if type(command) is not list:
      command = [command]
    if expected is None:
      if command[0] == EMCC or (len(command) >= 2 and command[1] == EMCC):
        expected = 'no input files'
      else:
        expected = "could not find the following tests: blahblah"
    output = self.do(command)
    self.assertContained(expected, output)
    return output
  # this should be the very first thing that runs. if this fails, everything else is irrelevant!
  def test_aaa_normal(self):
    for command in commands:
      # Your existing EM_CONFIG should work!
      restore_and_set_up()
      self.check_working(command)
  def test_firstrun(self):
    for command in commands:
      wipe()
      # NOTE: `temp_bin` is captured by closure; it is assigned below before
      # make_new_executable is first called.
      def make_new_executable(name):
        open(os.path.join(temp_bin, name), 'w').close()
        make_executable(os.path.join(temp_bin, name))
      env = os.environ.copy()
      if 'EM_CONFIG' in env:
        del env['EM_CONFIG']
      try:
        temp_bin = tempfile.mkdtemp()
        make_new_executable('llvm-dis')
        make_new_executable('node')
        env['PATH'] = temp_bin + os.pathsep + os.environ['PATH']
        output = self.do(command, env=env)
      finally:
        shutil.rmtree(temp_bin)
      default_config = config.embedded_config
      self.assertContained('Welcome to Emscripten!', output)
      self.assertContained('This is the first time any of the Emscripten tools has been run.', output)
      self.assertContained('A settings file has been copied to %s, at absolute path: %s' % (default_config, default_config), output)
      self.assertContained('It contains our best guesses for the important paths, which are:', output)
      self.assertContained('LLVM_ROOT', output)
      self.assertContained('NODE_JS', output)
      if platform.system() != 'Windows':
        # os.chmod can't make files executable on Windows
        self.assertIdentical(temp_bin, re.search("^ *LLVM_ROOT *= (.*)$", output, re.M).group(1))
        possible_nodes = [os.path.join(temp_bin, 'node')]
        if os.path.exists('/usr/bin/nodejs'):
          possible_nodes.append('/usr/bin/nodejs')
        self.assertIdentical(possible_nodes, re.search("^ *NODE_JS *= (.*)$", output, re.M).group(1))
      self.assertContained('Please edit the file if any of those are incorrect', output)
      self.assertContained('This command will now exit. When you are done editing those paths, re-run it.', output)
      assert output.split()[-1].endswith('===='), 'We should have stopped: ' + output
      config_file = open(default_config).read()
      template_file = open(path_from_root('tools', 'settings_template.py')).read()
      self.assertNotContained('{{{', config_file)
      self.assertNotContained('}}}', config_file)
      self.assertContained('{{{', template_file)
      self.assertContained('}}}', template_file)
      for content in ['EMSCRIPTEN_ROOT', 'LLVM_ROOT', 'NODE_JS', 'JS_ENGINES']:
        self.assertContained(content, config_file)
      # The guessed config should be ok
      # XXX This depends on your local system! it is possible `which` guesses wrong
      # try_delete('a.out.js')
      # output = self.run_process([EMCC, path_from_root('tests', 'hello_world.c')], stdout=PIPE, stderr=PIPE).output
      # self.assertContained('hello, world!', self.run_js('a.out.js'), output)
      # Second run, with bad EM_CONFIG
      for settings in ['blah', 'LLVM_ROOT="blarg"; JS_ENGINES=[]; NODE_JS=[]; SPIDERMONKEY_ENGINE=[]']:
        f = open(default_config, 'w')
        f.write(settings)
        f.close()
        output = self.do(command, env=env)
        if 'LLVM_ROOT' not in settings:
          self.assertContained('Error in evaluating %s' % default_config, output)
        elif 'runner.py' not in ' '.join(command):
          self.assertContained('error:', output) # sanity check should fail
      try_delete(default_config)
  def test_llvm(self):
    LLVM_WARNING = 'LLVM version appears incorrect'
    restore_and_set_up()
    # Clang should report the version number we expect, and emcc should not warn
    assert shared.check_llvm_version()
    output = self.check_working(EMCC)
    self.assertNotContained(LLVM_WARNING, output)
    # Fake a different llvm version
    restore_and_set_up()
    with open(config_file, 'a') as f:
      f.write('LLVM_ROOT = "' + self.in_dir('fake') + '"')
    real_version_x, real_version_y = (int(x) for x in EXPECTED_LLVM_VERSION.split('.'))
    make_fake_llc(self.in_dir('fake', 'llc'), 'wasm32 - WebAssembly 32-bit')
    make_fake_tool(self.in_dir('fake', 'wasm-ld'), EXPECTED_LLVM_VERSION)
    for inc_x in range(-2, 3):
      for inc_y in range(-2, 3):
        try_delete(SANITY_FILE)
        expected_x = real_version_x + inc_x
        expected_y = real_version_y + inc_y
        if expected_x < 0 or expected_y < 0:
          continue # must be a valid llvm version
        print("mod LLVM version: %d %d -> %d %d" % (real_version_x, real_version_y, expected_x, expected_y))
        make_fake_clang(self.in_dir('fake', 'clang'), '%s.%s' % (expected_x, expected_y))
        make_fake_tool(self.in_dir('fake', 'llvm-ar'), '%s.%s' % (expected_x, expected_y))
        make_fake_tool(self.in_dir('fake', 'llvm-nm'), '%s.%s' % (expected_x, expected_y))
        did_modify = inc_x != 0 or inc_y != 0
        if did_modify:
          output = self.check_working(EMCC, LLVM_WARNING)
        else:
          output = self.check_working(EMCC)
          self.assertNotContained(LLVM_WARNING, output)
  def test_emscripten_root(self):
    # The correct path
    restore_and_set_up()
    add_to_config("EMSCRIPTEN_ROOT = '%s'" % path_from_root())
    self.check_working(EMCC)
    # The correct path with extra stuff
    restore_and_set_up()
    add_to_config("EMSCRIPTEN_ROOT = '%s'" % (path_from_root() + os.path.sep))
    self.check_working(EMCC)
  def test_node(self):
    NODE_WARNING = 'node version appears too old'
    NODE_WARNING_2 = 'cannot check node version'
    restore_and_set_up()
    # Clang should report the version number we expect, and emcc should not warn
    assert shared.check_node_version()
    output = self.check_working(EMCC)
    self.assertNotContained(NODE_WARNING, output)
    # Fake a different node version
    restore_and_set_up()
    with open(config_file, 'a') as f:
      f.write('NODE_JS = "' + self.in_dir('fake', 'nodejs') + '"')
    ensure_dir('fake')
    for version, succeed in [('v0.8.0', False),
                             ('v4.1.0', False),
                             ('v4.1.1', True),
                             ('v4.2.3-pre', True),
                             ('cheez', False)]:
      print(version, succeed)
      try_delete(SANITY_FILE)
      f = open(self.in_dir('fake', 'nodejs'), 'w')
      f.write('#!/bin/sh\n')
      f.write('''if [ $1 = "--version" ]; then
  echo "%s"
else
  %s $@
fi
''' % (version, ' '.join(config.NODE_JS)))
      f.close()
      make_executable(self.in_dir('fake', 'nodejs'))
      if not succeed:
        if version[0] == 'v':
          self.check_working(EMCC, NODE_WARNING)
        else:
          self.check_working(EMCC, NODE_WARNING_2)
      else:
        output = self.check_working(EMCC)
        self.assertNotContained(NODE_WARNING, output)
  def test_emcc(self):
    SANITY_FAIL_MESSAGE = 'sanity check failed to run'
    # emcc should check sanity if no ${EM_CONFIG}_sanity
    restore_and_set_up()
    time.sleep(1)
    assert not os.path.exists(SANITY_FILE) # restore is just the settings, not the sanity
    output = self.check_working(EMCC)
    self.assertContained(SANITY_MESSAGE, output)
    # EMCC should have checked sanity successfully
    old_sanity = open(SANITY_FILE).read()
    self.assertNotContained(SANITY_FAIL_MESSAGE, output)
    # emcc run again should not sanity check, because the sanity file is newer
    output = self.check_working(EMCC)
    self.assertNotContained(SANITY_MESSAGE, output)
    self.assertNotContained(SANITY_FAIL_MESSAGE, output)
    # incorrect sanity contents mean we *must* check
    open(SANITY_FILE, 'w').write('wakawaka')
    output = self.check_working(EMCC)
    self.assertContained(SANITY_MESSAGE, output)
    # correct sanity contents mean we need not check
    open(SANITY_FILE, 'w').write(old_sanity)
    output = self.check_working(EMCC)
    self.assertNotContained(SANITY_MESSAGE, output)
    # but with EMCC_DEBUG=1 we should check
    with env_modify({'EMCC_DEBUG': '1'}):
      output = self.check_working(EMCC)
      try_delete(CANONICAL_TEMP_DIR)
    self.assertContained(SANITY_MESSAGE, output)
    output = self.check_working(EMCC)
    self.assertNotContained(SANITY_MESSAGE, output)
    # Make sure the test runner didn't do anything to the setup
    output = self.check_working(EMCC)
    self.assertNotContained(SANITY_MESSAGE, output)
    self.assertNotContained(SANITY_FAIL_MESSAGE, output)
    # emcc should also check sanity if the file is outdated
    open(config_file, 'a').write('# extra stuff\n')
    output = self.check_working(EMCC)
    self.assertContained(SANITY_MESSAGE, output)
    self.assertNotContained(SANITY_FAIL_MESSAGE, output)
  def test_em_config_env_var(self):
    # emcc should be configurable directly from EM_CONFIG without any config file
    restore_and_set_up()
    create_test_file('main.cpp', '''
      #include <stdio.h>
      int main() {
        printf("hello from emcc with no config file\\n");
        return 0;
      }
    ''')
    wipe()
    with env_modify({'EM_CONFIG': get_basic_config()}):
      self.run_process([EMCC, 'main.cpp', '-Wno-deprecated', '-o', 'a.out.js'])
    self.assertContained('hello from emcc with no config file', self.run_js('a.out.js'))
  def erase_cache(self):
    Cache.erase()
    self.assertCacheEmpty()
  def assertCacheEmpty(self):
    if os.path.exists(Cache.dirname):
      # The cache is considered empty if it contains no files at all or just the cache.lock
      self.assertIn(os.listdir(Cache.dirname), ([], ['cache.lock']))
  def ensure_cache(self):
    self.do([EMCC, '-O2', path_from_root('tests', 'hello_world.c')])
  def test_emcc_caching(self):
    BUILDING_MESSAGE = 'generating system library: %s'
    restore_and_set_up()
    self.erase_cache()
    # Building a file that *does* need something *should* trigger cache
    # generation, but only the first time
    libname = Cache.get_lib_name('libc++.a')
    for i in range(3):
      print(i)
      self.clear()
      output = self.do([EMCC, '-O' + str(i), path_from_root('tests', 'hello_libcxx.cpp'), '-s', 'DISABLE_EXCEPTION_CATCHING=0'])
      print('\n\n\n', output)
      self.assertContainedIf(BUILDING_MESSAGE % libname, output, i == 0)
      self.assertContained('hello, world!', self.run_js('a.out.js'))
    self.assertExists(Cache.dirname)
    self.assertExists(os.path.join(Cache.dirname, libname))
  def test_cache_clearing_manual(self):
    # Manual cache clearing
    restore_and_set_up()
    self.ensure_cache()
    self.assertTrue(os.path.exists(Cache.dirname))
    output = self.do([EMCC, '--clear-cache'])
    self.assertIn('clearing cache', output)
    self.assertIn(SANITY_MESSAGE, output)
    self.assertCacheEmpty()
  def test_cache_clearing_auto(self):
    # Changing LLVM_ROOT, even without altering .emscripten, clears the cache
    restore_and_set_up()
    self.ensure_cache()
    make_fake_clang(self.in_dir('fake', 'bin', 'clang'), EXPECTED_LLVM_VERSION)
    make_fake_llc(self.in_dir('fake', 'bin', 'llc'), 'got wasm32 backend! WebAssembly 32-bit')
    with env_modify({'EM_LLVM_ROOT': self.in_dir('fake', 'bin')}):
      self.assertTrue(os.path.exists(Cache.dirname))
      output = self.do([EMCC])
      self.assertIn('clearing cache', output)
      self.assertCacheEmpty()
  # FROZEN_CACHE prevents cache clears, and prevents building
  def test_FROZEN_CACHE(self):
    restore_and_set_up()
    self.erase_cache()
    self.ensure_cache()
    self.assertTrue(os.path.exists(Cache.dirname))
    # changing config file should not clear cache
    add_to_config('FROZEN_CACHE = True')
    self.do([EMCC])
    self.assertTrue(os.path.exists(Cache.dirname))
    # building libraries is disallowed
    output = self.do([EMBUILDER, 'build', 'libemmalloc'])
    self.assertIn('FROZEN_CACHE disallows building system libs', output)
  # Test that if multiple processes attempt to access or build stuff to the
  # cache on demand, that exactly one of the processes will, and the other
  # processes will block to wait until that process finishes.
  def test_emcc_multiprocess_cache_access(self):
    restore_and_set_up()
    create_test_file('test.c', r'''
      #include <stdio.h>
      int main() {
        printf("hello, world!\n");
        return 0;
      }
      ''')
    cache_dir_name = self.in_dir('test_cache')
    libname = Cache.get_lib_name('libc.a')
    with env_modify({'EM_CACHE': cache_dir_name}):
      tasks = []
      num_times_libc_was_built = 0
      for i in range(3):
        p = self.run_process([EMCC, 'test.c', '-o', '%d.js' % i], stderr=STDOUT, stdout=PIPE)
        tasks += [p]
      for p in tasks:
        print('stdout:\n', p.stdout)
        if 'generating system library: ' + libname in p.stdout:
          num_times_libc_was_built += 1
    # The cache directory must exist after the build
    self.assertTrue(os.path.exists(cache_dir_name))
    # The cache directory must contain a built libc
    self.assertTrue(os.path.exists(os.path.join(cache_dir_name, libname)))
    # Exactly one child process should have triggered libc build!
    self.assertEqual(num_times_libc_was_built, 1)
  def test_emconfig(self):
    restore_and_set_up()
    fd, custom_config_filename = tempfile.mkstemp(prefix='.emscripten_config_')
    orig_config = open(config_file, 'r').read()
    # Move the ~/.emscripten to a custom location.
    with os.fdopen(fd, "w") as f:
      f.write(get_basic_config())
    # Make a syntax error in the original config file so that attempting to access it would fail.
    open(config_file, 'w').write('asdfasdfasdfasdf\n\'\'\'' + orig_config)
    temp_dir = tempfile.mkdtemp(prefix='emscripten_temp_')
    with utils.chdir(temp_dir):
      self.run_process([EMCC, '--em-config', custom_config_filename] + MINIMAL_HELLO_WORLD + ['-O2'])
      result = self.run_js('a.out.js')
    self.assertContained('hello, world!', result)
    # Clean up created temp files.
    os.remove(custom_config_filename)
    shutil.rmtree(temp_dir)
  def test_emcc_ports(self):
    restore_and_set_up()
    # listing ports
    out = self.do([EMCC, '--show-ports'])
    self.assertContained('Available ports:', out)
    self.assertContained('SDL2', out)
    self.assertContained('SDL2_image', out)
    self.assertContained('SDL2_net', out)
    # using ports
    RETRIEVING_MESSAGE = 'retrieving port'
    BUILDING_MESSAGE = 'generating port'
    PORTS_DIR = system_libs.Ports.get_dir()
    for i in [0, 1]:
      self.do([EMCC, '--clear-cache'])
      print(i)
      if i == 0:
        try_delete(PORTS_DIR)
      else:
        self.do([EMCC, '--clear-ports'])
      self.assertNotExists(PORTS_DIR)
      # Building a file that doesn't need ports should not trigger anything
      output = self.do([EMCC, path_from_root('tests', 'hello_world_sdl.cpp')])
      assert RETRIEVING_MESSAGE not in output, output
      assert BUILDING_MESSAGE not in output
      print('no', output)
      self.assertNotExists(PORTS_DIR)
      def first_use():
        output = self.do([EMCC, path_from_root('tests', 'hello_world_sdl.cpp'), '-s', 'USE_SDL=2'])
        self.assertContained(RETRIEVING_MESSAGE, output)
        self.assertContained(BUILDING_MESSAGE, output)
        self.assertExists(PORTS_DIR)
        print('yes', output)
      def second_use():
        # Using it again avoids retrieve and build
        output = self.do([EMCC, path_from_root('tests', 'hello_world_sdl.cpp'), '-s', 'USE_SDL=2'])
        self.assertNotContained(RETRIEVING_MESSAGE, output)
        self.assertNotContained(BUILDING_MESSAGE, output)
      # Building a file that need a port does trigger stuff
      first_use()
      second_use()
      # if the tag doesn't match, we retrieve and rebuild
      subdir = os.listdir(os.path.join(PORTS_DIR, 'sdl2'))[0]
      os.rename(os.path.join(PORTS_DIR, 'sdl2', subdir), os.path.join(PORTS_DIR, 'sdl2', 'old-subdir'))
      ensure_dir('old-sub')
      open(os.path.join('old-sub', 'a.txt'), 'w').write('waka')
      open(os.path.join('old-sub', 'b.txt'), 'w').write('waka')
      with zipfile.ZipFile(os.path.join(PORTS_DIR, 'sdl2.zip'), 'w') as z:
        z.write(os.path.join('old-sub', 'a.txt'))
        z.write(os.path.join('old-sub', 'b.txt'))
      first_use()
      second_use()
  def test_js_engine_path(self):
    # Test that running JS commands works for node, d8, and jsc and is not path dependent
    restore_and_set_up()
    sample_script = path_from_root('tests', 'print_args.js')
    # Fake some JS engines
    # Note that the path contains 'd8'.
    test_path = self.in_dir('fake', 'abcd8765')
    ensure_dir(test_path)
    jsengines = [('d8', config.V8_ENGINE),
                 ('d8_g', config.V8_ENGINE),
                 ('js', config.SPIDERMONKEY_ENGINE),
                 ('node', config.NODE_JS),
                 ('nodejs', config.NODE_JS)]
    for filename, engine in jsengines:
      try_delete(SANITY_FILE)
      if type(engine) is list:
        engine = engine[0]
      if not engine:
        print('WARNING: Not testing engine %s, not configured.' % (filename))
        continue
      print(filename, engine)
      test_engine_path = os.path.join(test_path, filename)
      with open(test_engine_path, 'w') as f:
        f.write('#!/bin/sh\n')
        f.write('exec %s $@\n' % (engine))
      make_executable(test_engine_path)
      out = self.run_js(sample_script, engine=test_engine_path, args=['--foo'])
      self.assertEqual('0: --foo', out.strip())
  def test_wacky_env(self):
    restore_and_set_up()
    def build():
      return self.check_working([EMCC] + MINIMAL_HELLO_WORLD, '')
    def test():
      self.assertContained('hello, world!', self.run_js('a.out.js'))
    print('normal build')
    with env_modify({'EMCC_FORCE_STDLIBS': None}):
      Cache.erase()
      build()
      test()
    print('wacky env vars, these should not mess our bootstrapping')
    with env_modify({'EMCC_FORCE_STDLIBS': '1'}):
      Cache.erase()
      build()
      test()
  def test_vanilla(self):
    restore_and_set_up()
    Cache.erase()
    def make_fake(report):
      with open(config_file, 'a') as f:
        f.write('LLVM_ROOT = "' + self.in_dir('fake', 'bin') + '"\n')
        # BINARYEN_ROOT needs to exist in the config, even though this test
        # doesn't actually use it.
        f.write('BINARYEN_ROOT= "%s"\n' % self.in_dir('fake', 'bin'))
      make_fake_clang(self.in_dir('fake', 'bin', 'clang'), EXPECTED_LLVM_VERSION)
      make_fake_llc(self.in_dir('fake', 'bin', 'llc'), report)
      make_fake_tool(self.in_dir('fake', 'bin', 'wasm-ld'), EXPECTED_LLVM_VERSION)
    # fake llc output
    def test_with_fake(report, expected):
      make_fake(report)
      with env_modify({'EMCC_DEBUG': '1'}):
        self.check_working([EMCC] + MINIMAL_HELLO_WORLD + ['-c'], expected)
    test_with_fake('got js backend! JavaScript (asm.js, emscripten) backend', 'LLVM has not been built with the WebAssembly backend')
    try_delete(CANONICAL_TEMP_DIR)
  def test_required_config_settings(self):
    # with no binaryen root, an error is shown
    restore_and_set_up()
    open(config_file, 'a').write('\nBINARYEN_ROOT = ""\n')
    self.check_working([EMCC, path_from_root('tests', 'hello_world.c')], 'BINARYEN_ROOT is set to empty value in %s' % config_file)
    open(config_file, 'a').write('\ndel BINARYEN_ROOT\n')
    self.check_working([EMCC, path_from_root('tests', 'hello_world.c')], 'BINARYEN_ROOT is not defined in %s' % config_file)
  def test_embuilder_force(self):
    restore_and_set_up()
    self.do([EMBUILDER, 'build', 'libemmalloc'])
    # Second time it should not generate anything
    self.assertNotContained('generating system library', self.do([EMBUILDER, 'build', 'libemmalloc']))
    # Unless --force is specified
    self.assertContained('generating system library', self.do([EMBUILDER, 'build', 'libemmalloc', '--force']))
  def test_embuilder_wasm_backend(self):
    restore_and_set_up()
    # the --lto flag makes us build wasm-bc
    self.do([EMCC, '--clear-cache'])
    self.run_process([EMBUILDER, 'build', 'libemmalloc'])
    self.assertExists(os.path.join(config.CACHE, 'sysroot', 'lib', 'wasm32-emscripten'))
    self.do([EMCC, '--clear-cache'])
    self.run_process([EMBUILDER, 'build', 'libemmalloc', '--lto'])
    self.assertExists(os.path.join(config.CACHE, 'sysroot', 'lib', 'wasm32-emscripten', 'lto'))
  def test_binaryen_version(self):
    restore_and_set_up()
    with open(config_file, 'a') as f:
      f.write('\nBINARYEN_ROOT = "' + self.in_dir('fake') + '"')
    make_fake_tool(self.in_dir('fake', 'bin', 'wasm-opt'), 'foo')
    self.check_working([EMCC, path_from_root('tests', 'hello_world.c')], 'error parsing binaryen version (wasm-opt version foo). Please check your binaryen installation')
    make_fake_tool(self.in_dir('fake', 'bin', 'wasm-opt'), '70')
    self.check_working([EMCC, path_from_root('tests', 'hello_world.c')], 'unexpected binaryen version: 70 (expected ')
| 37.321429 | 175 | 0.670392 |
7958b4fd66eddc0589bd0cab9ffca0082b9c62bc | 467 | py | Python | backend/core/utils.py | jerkeeler/teeny-weeny-analytics | 677155c31c411d7d818b8c39f906c25e6fd9b23c | [
"MIT"
] | null | null | null | backend/core/utils.py | jerkeeler/teeny-weeny-analytics | 677155c31c411d7d818b8c39f906c25e6fd9b23c | [
"MIT"
] | 2 | 2020-02-11T23:24:10.000Z | 2020-06-05T19:27:32.000Z | backend/core/utils.py | jerkeeler/teeny-weeny-analytics | 677155c31c411d7d818b8c39f906c25e6fd9b23c | [
"MIT"
] | null | null | null | import random
from django.utils.text import slugify
from core.consts import SLUG_TOKEN_LENGTH, TOKEN_CHARS, TOKEN_LENGTH
def gen_token(token_length: int = TOKEN_LENGTH) -> str:
    """Return a random token of *token_length* characters drawn from TOKEN_CHARS."""
    return ''.join(random.choice(TOKEN_CHARS) for _ in range(token_length))
def gen_slug(attr: str, max_length: int = 32) -> str:
    """Slugify *attr* and append a random token, keeping the total length
    within *max_length*.

    Bug fix: the original truncated the slug to ``max_length - SLUG_TOKEN_LENGTH``
    and then appended ``'-' + token``, so the result could exceed *max_length*
    by one character (the separator was not accounted for).
    """
    token = gen_token(token_length=SLUG_TOKEN_LENGTH)
    # Reserve room for the token plus the '-' separator.
    slug = slugify(attr)[:max_length - SLUG_TOKEN_LENGTH - 1]
    slug += f'-{token}'
    return slug
| 27.470588 | 77 | 0.734475 |
7958b53037bd6d5217c616d0e373ef655fca9074 | 900 | py | Python | spike/DataTools/UploadFolderToGD.py | jian-frank-cao/spike | a02f6d26f8705c79104110b07263f69199325093 | [
"MIT"
] | null | null | null | spike/DataTools/UploadFolderToGD.py | jian-frank-cao/spike | a02f6d26f8705c79104110b07263f69199325093 | [
"MIT"
] | null | null | null | spike/DataTools/UploadFolderToGD.py | jian-frank-cao/spike | a02f6d26f8705c79104110b07263f69199325093 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 24 09:38:58 2021
@author: Jian Cao
Move files from local folder to Google Drive
"""
from spike.DataTools import ConnectGoogleDrive
def UploadFolderToGD(token_path, source_path, gd_folder):
    """Move files from local folder to Google Drive

    Parameters:
        token_path (str): path to the GD token.
        source_path (str): path to the source folder.
        gd_folder (str): name of the GD folder.

    Returns:
        None
    """
    # Bug fix: 'spike' was referenced below but never imported at module level
    # (only ConnectGoogleDrive was), which raised NameError at runtime.
    import spike

    google_drive = ConnectGoogleDrive(token_path)
    file_cmd = spike.FileCMD()
    file_list = file_cmd.ListFiles(source_path)
    print('\nUpload List:')
    print('\n'.join(file_list))
    print('')
    # NOTE(review): assumes ListItems returns at least one match for gd_folder;
    # an empty result would raise IndexError — confirm upstream guarantees this.
    item_list = google_drive.ListItems(gd_folder, None)
    folder_id = item_list[0]['id']
    for file_name in file_list:
        google_drive.UploadFile(source_path, folder_id, file_name)
7958b88cbe8a1c6679297c33e29d31c62941132c | 440 | py | Python | backend/app/controllers/errorhandlers.py | DankanTsar/memesmerkatuan | 4654f1164930d2ee0241a3beeae5a1d28daa2e1e | [
"BSD-3-Clause"
] | null | null | null | backend/app/controllers/errorhandlers.py | DankanTsar/memesmerkatuan | 4654f1164930d2ee0241a3beeae5a1d28daa2e1e | [
"BSD-3-Clause"
] | null | null | null | backend/app/controllers/errorhandlers.py | DankanTsar/memesmerkatuan | 4654f1164930d2ee0241a3beeae5a1d28daa2e1e | [
"BSD-3-Clause"
] | null | null | null | from flask import render_template
from .. import app
from ..misc.cur_user import cur_user
@app.errorhandler(403)
def forbidden(e):
    """Render the custom 403 (forbidden) page."""
    current = cur_user()
    return render_template('errors/403.html', user=current), 403
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 (not found) page."""
    current = cur_user()
    return render_template('errors/404.html', user=current), 404
@app.errorhandler(500)
def internal_server_error(e):
    """Render the custom 500 (internal server error) page."""
    current = cur_user()
    return render_template('errors/500.html', user=current), 500
| 23.157895 | 67 | 0.75 |
7958b9df5a4cfc39e803ad5ee63151ab2074dcb8 | 7,534 | py | Python | vcs/testbench/remove_x.py | RISCVERS/pyverilog-helper | e4f0f365e7fc9be5fbdbad3cf962dc4581cae291 | [
"MulanPSL-1.0"
] | 2 | 2021-06-25T04:11:24.000Z | 2021-06-25T04:47:24.000Z | vcs/testbench/remove_x.py | RISCVERS/env-scripts | 84570153957797cdf585c0395ab45e5c8742907e | [
"MulanPSL-1.0"
] | null | null | null | vcs/testbench/remove_x.py | RISCVERS/env-scripts | 84570153957797cdf585c0395ab45e5c8742907e | [
"MulanPSL-1.0"
] | 1 | 2021-06-25T04:47:29.000Z | 2021-06-25T04:47:29.000Z | import sys
# Registers that must be forced to a random value once at time 0 (width in bits).
need_initial = [
    # not initialized registers (from rocket-chip and chisel lib)
    # (1) RRArbiter.lastGrant; (2) PLRU replacement init state
    ("`CORE.memBlock.dcache.missReqArb.lastGrant", 2),
    ("`CORE.memBlock.dcache.missQueue.pipe_req_arb.lastGrant", 4),
    ("`CORE.memBlock.dcache.storeReplayUnit.pipe_req_arb.lastGrant", 4),
    ("`CORE.memBlock.dcache.storeReplayUnit.resp_arb.lastGrant", 4),
    ("`CORE.memBlock.dcache.probeQueue.pipe_req_arb.lastGrant", 4),
    ("`CORE.memBlock.dcache.mainPipeReqArb.lastGrant", 2),
]
# Per-index replacement-state registers, one entry per set/way.
need_initial.extend((f"`CORE.l1pluscache.pipe.REG_1_{i}", 7) for i in range(256))
need_initial.extend((f"`CORE.frontend.ifu.icache.REG_1_{i}", 3) for i in range(64))
need_initial.extend((f"`CORE.memBlock.dcache.mainPipe.REG_4_{i}", 7) for i in range(64))
for i in range(64):
    # PTW keeps two register banks per index; keep the original interleaved order.
    need_initial.append((f"`CORE.ptw.REG_19_{i}", 7))
    need_initial.append((f"`CORE.ptw.REG_38_{i}", 15))
# Signals forced to a random value on every clock edge whenever they read X
# (see rtl_generate); commented-out entries are kept as history of signals
# that were previously de-X-ized and may need re-enabling.
need_force = [
    # unknown reason (fetch x?)
    ("`CORE.frontend.instrUncache.io_resp_bits_data", 256),
    ("`CORE.frontend.instrUncache.entries_0.io_resp_bits_data", 256),
    ("`CORE.frontend.ifu.io_redirect_bits_cfiUpdate_pc", 39),#X cause LOOP to be X
    ("`CORE.frontend.ifu.io_redirect_bits_cfiUpdate_target", 39),#X cause LOOP to be X
    # dual-port SRAMs read and write the same index at the same clock cycle
    # ("`CORE.frontend.ifu.bpu.bim.bim.array.array_2_ext.R0_data", 32),
    # ( "`CORE.frontend.ifu.bpu.preds_3.tables_5.lo_us.array.array_3_ext.R0_data", 16),
    # ("`CORE.frontend.ifu.bpu.preds_3.tables_5.hi_us.array.array_3_ext.R0_data", 16),
    # ("`CORE.frontend.ifu.bpu.preds_3.tables_5.table_.array.array_7_ext.R0_data", 208),
    # ("`CORE.frontend.ifu.bpu.preds_3.tables_4.lo_us.array.array_3_ext.R0_data", 16),
    # ("`CORE.frontend.ifu.bpu.preds_3.tables_4.hi_us.array.array_3_ext.R0_data", 16),
    # ("`CORE.frontend.ifu.bpu.preds_3.tables_4.table_.array.array_7_ext.R0_data", 208),
    # ("`CORE.frontend.ifu.bpu.preds_3.tables_3.lo_us.array.array_5_ext.R0_data", 16),
    # ("`CORE.frontend.ifu.bpu.preds_3.tables_3.hi_us.array.array_5_ext.R0_data", 16),
    # ("`CORE.frontend.ifu.bpu.preds_3.tables_3.table_.array.array_6_ext.R0_data", 192),
    # ("`CORE.frontend.ifu.bpu.preds_3.tables_2.hi_us.array.array_5_ext.R0_data", 16),
    # ("`CORE.frontend.ifu.bpu.preds_3.tables_2.lo_us.array.array_5_ext.R0_data", 16),
    # ("`CORE.frontend.ifu.bpu.preds_3.tables_2.table_.array.array_6_ext.R0_data", 192),
    # ("`CORE.frontend.ifu.bpu.preds_3.tables_1.hi_us.array.array_3_ext.R0_data", 16),
    # ("`CORE.frontend.ifu.bpu.preds_3.tables_1.lo_us.array.array_3_ext.R0_data", 16),
    # ("`CORE.frontend.ifu.bpu.preds_3.tables_1.table_.array.array_4_ext.R0_data", 176),
    # ("`CORE.frontend.ifu.bpu.preds_3.tables_0.lo_us.array.array_3_ext.R0_data", 16),
    # ("`CORE.frontend.ifu.bpu.preds_3.tables_0.hi_us.array.array_3_ext.R0_data", 16),
    # ("`CORE.frontend.ifu.bpu.preds_3.tables_0.table_.array.array_4_ext.R0_data", 176),
    # ("`CORE.frontend.ifu.bpu.preds_3.scTables_5.table_.array.array_8_ext.R0_data", 192),
    # ("`CORE.frontend.ifu.bpu.preds_3.scTables_4.table_.array.array_8_ext.R0_data", 192),
    # ("`CORE.frontend.ifu.bpu.preds_3.scTables_3.table_.array.array_8_ext.R0_data", 192),
    # ("`CORE.frontend.ifu.bpu.preds_3.scTables_2.table_.array.array_8_ext.R0_data", 192),
    # ("`CORE.frontend.ifu.bpu.preds_3.scTables_1.table_.array.array_8_ext.R0_data", 192),
    # ("`CORE.frontend.ifu.bpu.preds_3.scTables_0.table_.array.array_8_ext.R0_data", 192),
    # ("`CORE.ctrlBlock.ftq.ftq_2r_sram.SRAMTemplate_1.array.array_19_ext.R0_data", 275),
    # ("`CORE.ctrlBlock.ftq.ftq_2r_sram.SRAMTemplate.array.array_19_ext.R0_data", 275),
    # ("`CORE.ctrlBlock.ftq.pred_target_sram.SRAMTemplate.array.array_20_ext.R0_data", 39),
    #("`CORE.ctrlBlock.ftq.ftq_1r_sram.SRAMTemplate.array.array_21_ext.R0_data", 944),
    ("`CORE.ctrlBlock.rename.FreeList_1.io_req_canAlloc", 1),
    ("tb_top.sim.CPU.axi4deint.REG_1", 5),
]
# Additional forced signals; in the netlist flow these get per-bit suffixed
# names (handled separately in netlist_generate).
need_force_1 = [
    ("`CORE.ctrlBlock.ftq.ftq_1r_sram.SRAMTemplate.array.array_21_ext.R0_data", 944),
    ("`CORE.frontend.ifu.icache.icacheMissQueue.io_resp_bits_data", 512),
]
# QN
# Triples of (hierarchy prefix, netlist file path, register-name prefix) whose
# flip-flop QN pins are collected by find_qn() for netlist-level de-X-izing.
# NOTE(review): the netlist paths are hard-coded to a specific local checkout —
# presumably this script is only run on that machine; verify before reuse.
all_modules = [
    ("`CORE.l1pluscache.pipe.", "/home/xyn/debug/gate/vcs_newgate/20210530-gate/XSCore/L1plusCachePipe.v", "REG_1_"),
    ("`CORE.frontend.ifu.icache.", "/home/xyn/debug/gate/vcs_newgate/20210530-gate/XSCore/ICache.v", "REG_1_"),
    ("`CORE.memBlock.dcache.mainPipe.", "/home/xyn/debug/gate/vcs_newgate/20210530-gate/XSCore/DCache_MainPipe_0.v", "REG_4_"),
    ("`CORE.ptw.", "/home/xyn/debug/gate/vcs_newgate/20210530-gate/XSCore/PTW.v", "REG_19_"),
    ("`CORE.ptw.", "/home/xyn/debug/gate/vcs_newgate/20210530-gate/XSCore/PTW.v", "REG_38_"),
]
def find_qn(level, filename, prefix):
    """Collect hierarchical paths of cells whose ``.QN(`` pin is connected.

    Scans a gate-level Verilog netlist line by line. The instance name is
    assumed to be the second whitespace token of the line *preceding* the
    ``.QN(...)`` port connection — TODO confirm against the netlist format.

    :param level: hierarchy prefix prepended to each matching instance name
    :param filename: path of the netlist file to scan
    :param prefix: keep only instances whose name starts with this string
    :returns: list of ``level + instance_name`` strings, in file order
    """
    matches = []
    previous = ""
    with open(filename) as netlist:
        for current in netlist:
            if ".QN(" not in current:
                # Remember the most recent non-QN line; it carries the
                # instance declaration for a following .QN( connection.
                previous = current
                continue
            instance = previous.split()[1]
            if instance.startswith(prefix):
                matches.append(level + instance)
    return matches
def rtl_generate():
    """Print RTL-level Verilog snippets that randomize X-valued signals.

    Emits one ``initial`` block per ``need_initial`` entry (force a random
    value, release after ``#10``), then a single ``always @(clock)`` block
    that forces any bit of ``need_force``/``need_force_1`` evaluating to
    ``1'bx`` and releases it otherwise.
    """
    for signal, width in need_initial:
        # Presumably guards against widths $random() cannot cover -- confirm.
        assert width < 64
        print("initial begin")
        print(f"  force {signal} = $random();")
        print(f"  #10 release {signal};")
        print("end")
    print("always @(clock) begin")
    for signal, width in need_force + need_force_1:
        for bit in range(width):
            # Multi-bit signals are forced per bit-select; scalars as-is.
            target = f"{signal}[{bit}]" if width > 1 else signal
            print(f"if ({target} === 1'bx) begin")
            print(f"  force {target} = $random();")
            print("end")
            print(f"else begin release {target}; end")
    print("end")
def netlist_generate():
    """Print the netlist-level ``always @(clock)`` X-randomization block.

    Same idea as ``rtl_generate``, but signal names follow the synthesized
    netlist naming scheme: ``_<bit>_`` suffixes for ``need_force_1``,
    ``_reg_<bit>_.Q`` flop pins for ``need_initial``, ``.QN`` pins found by
    ``find_qn`` over ``all_modules``, and plain bit-selects for ``need_force``.
    """
    def emit(target):
        # Shared 4-line force/release template for one scalar net.
        print(f"if ({target} === 1'bx) begin")
        print(f"  force {target} = $random();")
        print("end")
        print(f"else begin release {target}; end")
    print("always @(clock) begin")
    for signal, width in need_force_1:
        for bit in range(width):
            name = f"{signal}_{bit}_"
            if "io_resp_bits_data" in signal and bit in [328, 135, 79]:
                # Presumably only the inverted (_BAR) net exists for these
                # bits in the netlist -- confirm against the gate netlist.
                name += "_BAR"
            emit(name)
    for signal, width in need_initial:
        for bit in range(width):
            emit(f"{signal}_reg_{bit}_.Q")
    need_qn = []
    for level, module, prefix in all_modules:
        need_qn += find_qn(level, module, prefix)
    for signal in need_qn:
        emit(f"{signal}.QN")
    for signal, width in need_force:
        for bit in range(width):
            emit(f"{signal}[{bit}]" if width > 1 else signal)
    print("end")
if __name__ == "__main__":
    # Dispatch table: the first CLI argument selects which generator runs.
    func_map = {
        "rtl": rtl_generate,
        "netlist": netlist_generate
    }
    # Fail with a usage message instead of an IndexError/KeyError traceback
    # when the mode argument is missing or unknown.
    if len(sys.argv) < 2 or sys.argv[1] not in func_map:
        sys.exit(f"usage: {sys.argv[0]} [{'|'.join(func_map)}]")
    func_map[sys.argv[1]]()
| 44.05848 | 125 | 0.675869 |
7958bace07c87257b2e8f00caa9c1995adf5fadb | 83 | py | Python | test.py | dbarroso1/Morti-OS-RTC | d4c0169d7f1e517caf6b1417fad70dea3debe784 | [
"MIT"
] | null | null | null | test.py | dbarroso1/Morti-OS-RTC | d4c0169d7f1e517caf6b1417fad70dea3debe784 | [
"MIT"
] | null | null | null | test.py | dbarroso1/Morti-OS-RTC | d4c0169d7f1e517caf6b1417fad70dea3debe784 | [
"MIT"
] | null | null | null | my_list = [1,2,3,4,5]
# Membership-check demo: print the whole list when it contains 10,
# otherwise print a fallback marker.
if 10 in my_list:
    print(my_list)
else:
    print('nope') | 13.833333 | 21 | 0.60241 |
7958becfef6d033be691cb049099f49b7058cfb6 | 359 | py | Python | tests/test_tes_tss.py | jergosh/pyranges | a401fd5bf1f6aa1a2546354a22d81e8c59a82114 | [
"MIT"
] | null | null | null | tests/test_tes_tss.py | jergosh/pyranges | a401fd5bf1f6aa1a2546354a22d81e8c59a82114 | [
"MIT"
] | null | null | null | tests/test_tes_tss.py | jergosh/pyranges | a401fd5bf1f6aa1a2546354a22d81e8c59a82114 | [
"MIT"
] | null | null | null | def test_five_end(f1):
    # five_end(): presumably the 5' side of each interval widened by `slack`
    # -- confirm against the pyranges API docs.
    result = f1.five_end(slack=5)
    print(f1)
    print(result)
    # Expected coordinates for the three intervals in the `f1` fixture.
    assert list(result.Start) == [0, 3, 2]
    assert list(result.End) == [9, 14, 13]
def test_three_end(f2):
    """three_end(slack=500) on the `f2` fixture yields the expected intervals."""
    print(f2)
    three_prime = f2.three_end(slack=500)
    print(three_prime)
    assert list(three_prime.Start) == [0, 0]
    assert list(three_prime.End) == [503, 508]
assert list(result.End) == [503, 508]
| 16.318182 | 42 | 0.601671 |
7958bfd68b95549d4f012791688141df82febd50 | 1,642 | py | Python | fhirclient/models/medicinalproductundesirableeffect.py | zzhou41/client-py | cbfa8d7c7f1bad233b237b7c5582fc0577b21f70 | [
"Apache-2.0"
] | null | null | null | fhirclient/models/medicinalproductundesirableeffect.py | zzhou41/client-py | cbfa8d7c7f1bad233b237b7c5582fc0577b21f70 | [
"Apache-2.0"
] | null | null | null | fhirclient/models/medicinalproductundesirableeffect.py | zzhou41/client-py | cbfa8d7c7f1bad233b237b7c5582fc0577b21f70 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/MedicinalProductUndesirableEffect) on 2019-07-29.
# 2019, SMART Health IT.
import sys
from dataclasses import dataclass
from typing import ClassVar, Optional, List
from .fhirabstractbase import empty_list
from .codeableconcept import CodeableConcept
from .domainresource import DomainResource
from .fhirreference import FHIRReference
from .population import Population
@dataclass
class MedicinalProductUndesirableEffect(DomainResource):
    """ MedicinalProductUndesirableEffect.
    Describe the undesirable effects of the medicinal product.
    """
    # NOTE: this module is auto-generated from the FHIR R4 StructureDefinition
    # (see file header); manual edits may be overwritten on regeneration.
    resource_type: ClassVar[str] = "MedicinalProductUndesirableEffect"
    # FHIR element "subject": list of FHIRReference (list-valued, see table below).
    subject: Optional[List[FHIRReference]] = empty_list()
    # FHIR element "symptomConditionEffect": single CodeableConcept.
    symptomConditionEffect: Optional[CodeableConcept] = None
    # FHIR element "classification": single CodeableConcept.
    classification: Optional[CodeableConcept] = None
    # FHIR element "frequencyOfOccurrence": single CodeableConcept.
    frequencyOfOccurrence: Optional[CodeableConcept] = None
    # FHIR element "population": list of Population (list-valued).
    population: Optional[List[Population]] = empty_list()
    def elementProperties(self):
        """Extend DomainResource's element table with this resource's fields.
        Tuples appear to be (attr_name, json_name, type, is_list, of_many,
        not_optional) -- exact flag semantics are defined by fhirabstractbase;
        confirm there.
        """
        js = super(MedicinalProductUndesirableEffect, self).elementProperties()
        js.extend([
            ("subject", "subject", FHIRReference, True, None, False),
            ("symptomConditionEffect", "symptomConditionEffect", CodeableConcept, False, None, False),
            ("classification", "classification", CodeableConcept, False, None, False),
            ("frequencyOfOccurrence", "frequencyOfOccurrence", CodeableConcept, False, None, False),
            ("population", "population", Population, True, None, False),
        ])
        return js | 42.102564 | 130 | 0.737515 |
7958c17bf5445a014e8e24cbce72ac8b488b284c | 27,185 | py | Python | readthedocs/rtd_tests/tests/test_celery.py | mehrdad-khojastefar/readthedocs.org | b958bb8d04c454324d612345890b13af54a19eb6 | [
"MIT"
] | 2,092 | 2019-06-29T07:47:30.000Z | 2022-03-31T14:54:59.000Z | readthedocs/rtd_tests/tests/test_celery.py | mehrdad-khojastefar/readthedocs.org | b958bb8d04c454324d612345890b13af54a19eb6 | [
"MIT"
] | 2,389 | 2019-06-29T04:22:55.000Z | 2022-03-31T22:57:49.000Z | readthedocs/rtd_tests/tests/test_celery.py | mehrdad-khojastefar/readthedocs.org | b958bb8d04c454324d612345890b13af54a19eb6 | [
"MIT"
] | 1,185 | 2019-06-29T21:49:31.000Z | 2022-03-30T09:57:15.000Z | import os
import shutil
from os.path import exists
from tempfile import mkdtemp
from unittest import mock
from unittest.mock import MagicMock, patch
from allauth.socialaccount.models import SocialAccount
from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase
from django_dynamic_fixture import get
from messages_extends.models import Message
from readthedocs.builds import tasks as build_tasks
from readthedocs.builds.constants import (
BUILD_STATE_TRIGGERED,
BUILD_STATUS_SUCCESS,
EXTERNAL,
LATEST,
)
from readthedocs.builds.models import Build, Version
from readthedocs.config.config import BuildConfigV2
from readthedocs.doc_builder.environments import (
BuildEnvironment,
LocalBuildEnvironment,
)
from readthedocs.doc_builder.exceptions import VersionLockedError
from readthedocs.oauth.models import RemoteRepository, RemoteRepositoryRelation
from readthedocs.projects import tasks
from readthedocs.projects.exceptions import RepositoryError
from readthedocs.projects.models import Project
from readthedocs.rtd_tests.mocks.mock_api import mock_api
from readthedocs.rtd_tests.utils import (
create_git_branch,
create_git_tag,
delete_git_branch,
make_test_git,
)
class TestCeleryBuilding(TestCase):
    """
    These tests run the build functions directly.
    They don't use celery
    """
    def setUp(self):
        """Create an on-disk git repo plus a user and a Project pointing at it."""
        repo = make_test_git()
        self.repo = repo
        super().setUp()
        self.eric = User(username='eric')
        self.eric.set_password('test')
        self.eric.save()
        self.project = Project.objects.create(
            name='Test Project',
            repo_type='git',
            # Our top-level checkout
            repo=repo,
        )
        self.project.users.add(self.eric)
    def get_update_docs_task(self, version):
        """Return an UpdateDocsTaskStep bound to a non-recording local build env."""
        build_env = LocalBuildEnvironment(
            version.project, version, record=False,
        )
        update_docs = tasks.UpdateDocsTaskStep(
            build_env=build_env,
            project=version.project,
            version=version,
            build={
                'id': 99,
                'state': BUILD_STATE_TRIGGERED,
            },
        )
        return update_docs
    def tearDown(self):
        """Delete the on-disk git repository created in setUp()."""
        shutil.rmtree(self.repo)
        super().tearDown()
    def test_remove_dirs(self):
        """remove_dirs() deletes the given directories from disk."""
        directory = mkdtemp()
        self.assertTrue(exists(directory))
        result = tasks.remove_dirs.delay((directory,))
        self.assertTrue(result.successful())
        self.assertFalse(exists(directory))
    @patch('readthedocs.projects.tasks.UpdateDocsTaskStep.setup_python_environment', new=MagicMock)
    @patch('readthedocs.projects.tasks.UpdateDocsTaskStep.build_docs', new=MagicMock)
    @patch('readthedocs.projects.tasks.UpdateDocsTaskStep.setup_vcs', new=MagicMock)
    def test_update_docs(self):
        """update_docs_task succeeds with setup/build steps mocked out."""
        version = self.project.versions.first()
        build = get(
            Build, project=self.project,
            version=version,
        )
        with mock_api(self.repo) as mapi:
            result = tasks.update_docs_task.delay(
                version.pk,
                build_pk=build.pk,
                record=False,
                intersphinx=False,
            )
        self.assertTrue(result.successful())
    @patch('readthedocs.projects.tasks.UpdateDocsTaskStep.setup_python_environment', new=MagicMock)
    @patch('readthedocs.projects.tasks.UpdateDocsTaskStep.build_docs', new=MagicMock)
    @patch('readthedocs.doc_builder.environments.BuildEnvironment.update_build', new=MagicMock)
    @patch('readthedocs.projects.tasks.UpdateDocsTaskStep.setup_vcs')
    def test_update_docs_unexpected_setup_exception(self, mock_setup_vcs):
        """An unexpected exception in setup_vcs still yields a successful task result."""
        exc = Exception()
        mock_setup_vcs.side_effect = exc
        version = self.project.versions.first()
        build = get(
            Build, project=self.project,
            version=version,
        )
        with mock_api(self.repo) as mapi:
            result = tasks.update_docs_task.delay(
                version.pk,
                build_pk=build.pk,
                record=False,
                intersphinx=False,
            )
        self.assertTrue(result.successful())
    @patch('readthedocs.projects.tasks.UpdateDocsTaskStep.setup_python_environment', new=MagicMock)
    @patch('readthedocs.projects.tasks.UpdateDocsTaskStep.setup_vcs', new=MagicMock)
    @patch('readthedocs.doc_builder.environments.BuildEnvironment.update_build', new=MagicMock)
    @patch('readthedocs.projects.tasks.UpdateDocsTaskStep.build_docs')
    def test_update_docs_unexpected_build_exception(self, mock_build_docs):
        """An unexpected exception in build_docs still yields a successful task result."""
        exc = Exception()
        mock_build_docs.side_effect = exc
        version = self.project.versions.first()
        build = get(
            Build, project=self.project,
            version=version,
        )
        with mock_api(self.repo) as mapi:
            result = tasks.update_docs_task.delay(
                version.pk,
                build_pk=build.pk,
                record=False,
                intersphinx=False,
            )
        self.assertTrue(result.successful())
    @patch('readthedocs.projects.tasks.UpdateDocsTaskStep.setup_python_environment', new=MagicMock)
    @patch('readthedocs.projects.tasks.UpdateDocsTaskStep.build_docs', new=MagicMock)
    @patch('readthedocs.projects.tasks.UpdateDocsTaskStep.send_notifications')
    @patch('readthedocs.projects.tasks.UpdateDocsTaskStep.setup_vcs')
    def test_no_notification_on_version_locked_error(self, mock_setup_vcs, mock_send_notifications):
        """VersionLockedError during setup must not trigger user notifications."""
        mock_setup_vcs.side_effect = VersionLockedError()
        version = self.project.versions.first()
        build = get(
            Build, project=self.project,
            version=version,
        )
        with mock_api(self.repo):
            result = tasks.update_docs_task.delay(
                version.pk,
                build_pk=build.pk,
                record=False,
                intersphinx=False,
            )
        mock_send_notifications.assert_not_called()
        self.assertTrue(result.successful())
    @patch('readthedocs.projects.tasks.UpdateDocsTaskStep.setup_python_environment', new=MagicMock)
    @patch('readthedocs.projects.tasks.UpdateDocsTaskStep.setup_vcs', new=MagicMock)
    @patch('readthedocs.doc_builder.environments.BuildEnvironment.update_build', new=MagicMock)
    @patch('readthedocs.projects.tasks.clean_build')
    @patch('readthedocs.projects.tasks.UpdateDocsTaskStep.build_docs')
    def test_clean_build_after_update_docs(self, build_docs, clean_build):
        """clean_build is called with the version pk after a normal build."""
        version = self.project.versions.first()
        build = get(
            Build, project=self.project,
            version=version,
        )
        with mock_api(self.repo) as mapi:
            result = tasks.update_docs_task.delay(
                version.pk,
                build_pk=build.pk,
                record=False,
                intersphinx=False,
            )
        self.assertTrue(result.successful())
        clean_build.assert_called_with(version.pk)
    @patch('readthedocs.projects.tasks.clean_build')
    @patch('readthedocs.projects.tasks.UpdateDocsTaskStep.run_setup')
    def test_clean_build_after_failure_in_update_docs(self, run_setup, clean_build):
        """clean_build is still called when run_setup raises."""
        run_setup.side_effect = Exception()
        version = self.project.versions.first()
        build = get(
            Build, project=self.project,
            version=version,
        )
        with mock_api(self.repo):
            result = tasks.update_docs_task.delay(
                version.pk,
                build_pk=build.pk,
                record=False,
                intersphinx=False,
            )
        clean_build.assert_called_with(version.pk)
    @patch('readthedocs.projects.tasks.api_v2')
    @patch('readthedocs.projects.tasks.SyncRepositoryMixin.get_version')
    @patch('readthedocs.projects.models.Project.checkout_path')
    def test_sync_repository(self, checkout_path, get_version, api_v2):
        """sync_repository_task returns truthy for LATEST of the fixture repo."""
        # Create dir where to clone the repo
        local_repo = os.path.join(mkdtemp(), 'local')
        os.mkdir(local_repo)
        checkout_path.return_value = local_repo
        version = self.project.versions.get(slug=LATEST)
        get_version.return_value = version
        result = tasks.sync_repository_task(version.pk)
        self.assertTrue(result)
    @patch('readthedocs.projects.tasks.clean_build')
    def test_clean_build_after_sync_repository(self, clean_build):
        """clean_build runs after a successful repository sync."""
        version = self.project.versions.get(slug=LATEST)
        with mock_api(self.repo):
            result = tasks.sync_repository_task.delay(version.pk)
        self.assertTrue(result.successful())
        clean_build.assert_called_with(version.pk)
    @patch('readthedocs.projects.tasks.SyncRepositoryTaskStep.run')
    @patch('readthedocs.projects.tasks.clean_build')
    def test_clean_build_after_failure_in_sync_repository(self, clean_build, run_syn_repository):
        """clean_build still runs when the sync step raises."""
        run_syn_repository.side_effect = Exception()
        version = self.project.versions.get(slug=LATEST)
        with mock_api(self.repo):
            result = tasks.sync_repository_task.delay(version.pk)
        clean_build.assert_called_with(version.pk)
    @patch('readthedocs.projects.models.Project.checkout_path')
    def test_check_duplicate_reserved_version_latest(self, checkout_path):
        """Branch + tag both named 'latest' raise DUPLICATED_RESERVED_VERSIONS; deleting the branch resolves it."""
        create_git_branch(self.repo, 'latest')
        create_git_tag(self.repo, 'latest')
        # Create dir where to clone the repo
        local_repo = os.path.join(mkdtemp(), 'local')
        os.mkdir(local_repo)
        checkout_path.return_value = local_repo
        version = self.project.versions.get(slug=LATEST)
        sync_repository = self.get_update_docs_task(version)
        with self.assertRaises(RepositoryError) as e:
            sync_repository.sync_repo(sync_repository.build_env)
        self.assertEqual(
            str(e.exception),
            RepositoryError.DUPLICATED_RESERVED_VERSIONS,
        )
        delete_git_branch(self.repo, 'latest')
        sync_repository.sync_repo(sync_repository.build_env)
        self.assertTrue(self.project.versions.filter(slug=LATEST).exists())
    @patch('readthedocs.projects.tasks.api_v2')
    @patch('readthedocs.projects.models.Project.checkout_path')
    def test_check_duplicate_reserved_version_stable(self, checkout_path, api_v2):
        """Branch + tag both named 'stable' raise DUPLICATED_RESERVED_VERSIONS."""
        create_git_branch(self.repo, 'stable')
        create_git_tag(self.repo, 'stable')
        # Create dir where to clone the repo
        local_repo = os.path.join(mkdtemp(), 'local')
        os.mkdir(local_repo)
        checkout_path.return_value = local_repo
        version = self.project.versions.get(slug=LATEST)
        sync_repository = self.get_update_docs_task(version)
        with self.assertRaises(RepositoryError) as e:
            sync_repository.sync_repo(sync_repository.build_env)
        self.assertEqual(
            str(e.exception),
            RepositoryError.DUPLICATED_RESERVED_VERSIONS,
        )
        # TODO: Check that we can build properly after
        # deleting the tag.
    def test_check_duplicate_no_reserved_version(self):
        """Non-reserved branch/tag names sync cleanly and create both versions."""
        create_git_branch(self.repo, 'no-reserved')
        create_git_tag(self.repo, 'no-reserved')
        version = self.project.versions.get(slug=LATEST)
        sync_repository = self.get_update_docs_task(version)
        self.assertEqual(self.project.versions.filter(slug__startswith='no-reserved').count(), 0)
        sync_repository.sync_repo(sync_repository.build_env)
        self.assertEqual(self.project.versions.filter(slug__startswith='no-reserved').count(), 2)
    def test_public_task_exception(self):
        """
        Test when a PublicTask rises an Exception.
        The exception should be caught and added to the ``info`` attribute of
        the result. Besides, the task should be SUCCESS.
        """
        from readthedocs.core.utils.tasks import PublicTask
        from readthedocs.worker import app
        @app.task(name='public_task_exception', base=PublicTask)
        def public_task_exception():
            raise Exception('Something bad happened')
        result = public_task_exception.delay()
        # although the task risen an exception, it's success since we add the
        # exception into the ``info`` attributes
        self.assertEqual(result.status, 'SUCCESS')
        self.assertEqual(
            result.info, {
                'task_name': 'public_task_exception',
                'context': {},
                'public_data': {},
                'error': 'Something bad happened',
            },
        )
    @patch('readthedocs.builds.managers.log')
    def test_fileify_logging_when_wrong_version_pk(self, mock_logger):
        """fileify logs a warning when the version pk does not exist."""
        self.assertFalse(Version.objects.filter(pk=345343).exists())
        tasks.fileify(
            version_pk=345343,
            commit=None,
            build=1,
            search_ranking={},
            search_ignore=[],
        )
        mock_logger.warning.assert_called_with(
            'Version not found for given kwargs.',
            kwargs={'pk': 345343},
        )
    @patch('readthedocs.oauth.services.github.GitHubService.send_build_status')
    def test_send_build_status_with_remote_repo_github(self, send_build_status):
        """With a linked RemoteRepository, the GitHub status call includes keyword args."""
        self.project.repo = 'https://github.com/test/test/'
        self.project.save()
        # NOTE(review): provider='gitlab' inside the GitHub test -- verify
        # this is intentional and not a copy/paste slip.
        social_account = get(SocialAccount, user=self.eric, provider='gitlab')
        remote_repo = get(RemoteRepository)
        remote_repo.projects.add(self.project)
        get(
            RemoteRepositoryRelation,
            remote_repository=remote_repo,
            user=self.eric,
            account=social_account
        )
        external_version = get(Version, project=self.project, type=EXTERNAL)
        external_build = get(
            Build, project=self.project, version=external_version
        )
        build_tasks.send_build_status(
            external_build.id, external_build.commit, BUILD_STATUS_SUCCESS
        )
        send_build_status.assert_called_once_with(
            build=external_build,
            commit=external_build.commit,
            state=BUILD_STATUS_SUCCESS,
            link_to_build=False,
        )
        self.assertEqual(Message.objects.filter(user=self.eric).count(), 0)
    @patch('readthedocs.oauth.services.github.GitHubService.send_build_status')
    def test_send_build_status_with_social_account_github(self, send_build_status):
        """With only a GitHub SocialAccount, the status call uses positional args."""
        social_account = get(SocialAccount, user=self.eric, provider='github')
        self.project.repo = 'https://github.com/test/test/'
        self.project.save()
        external_version = get(Version, project=self.project, type=EXTERNAL)
        external_build = get(
            Build, project=self.project, version=external_version
        )
        build_tasks.send_build_status(
            external_build.id, external_build.commit, BUILD_STATUS_SUCCESS
        )
        send_build_status.assert_called_once_with(
            external_build, external_build.commit, BUILD_STATUS_SUCCESS
        )
        self.assertEqual(Message.objects.filter(user=self.eric).count(), 0)
    @patch('readthedocs.oauth.services.github.GitHubService.send_build_status')
    def test_send_build_status_no_remote_repo_or_social_account_github(self, send_build_status):
        """Without any account link, no status is sent and one user message is created."""
        self.project.repo = 'https://github.com/test/test/'
        self.project.save()
        external_version = get(Version, project=self.project, type=EXTERNAL)
        external_build = get(
            Build, project=self.project, version=external_version
        )
        build_tasks.send_build_status(
            external_build.id, external_build.commit, BUILD_STATUS_SUCCESS
        )
        send_build_status.assert_not_called()
        self.assertEqual(Message.objects.filter(user=self.eric).count(), 1)
    @patch('readthedocs.oauth.services.gitlab.GitLabService.send_build_status')
    def test_send_build_status_with_remote_repo_gitlab(self, send_build_status):
        """With a linked RemoteRepository, the GitLab status call includes keyword args."""
        self.project.repo = 'https://gitlab.com/test/test/'
        self.project.save()
        social_account = get(SocialAccount, user=self.eric, provider='gitlab')
        remote_repo = get(RemoteRepository)
        remote_repo.projects.add(self.project)
        get(
            RemoteRepositoryRelation,
            remote_repository=remote_repo,
            user=self.eric,
            account=social_account
        )
        external_version = get(Version, project=self.project, type=EXTERNAL)
        external_build = get(
            Build, project=self.project, version=external_version
        )
        build_tasks.send_build_status(
            external_build.id, external_build.commit, BUILD_STATUS_SUCCESS
        )
        send_build_status.assert_called_once_with(
            build=external_build,
            commit=external_build.commit,
            state=BUILD_STATUS_SUCCESS,
            link_to_build=False,
        )
        self.assertEqual(Message.objects.filter(user=self.eric).count(), 0)
    @patch('readthedocs.oauth.services.gitlab.GitLabService.send_build_status')
    def test_send_build_status_with_social_account_gitlab(self, send_build_status):
        """With only a GitLab SocialAccount, the status call uses positional args."""
        social_account = get(SocialAccount, user=self.eric, provider='gitlab')
        self.project.repo = 'https://gitlab.com/test/test/'
        self.project.save()
        external_version = get(Version, project=self.project, type=EXTERNAL)
        external_build = get(
            Build, project=self.project, version=external_version
        )
        build_tasks.send_build_status(
            external_build.id, external_build.commit, BUILD_STATUS_SUCCESS
        )
        send_build_status.assert_called_once_with(
            external_build, external_build.commit, BUILD_STATUS_SUCCESS
        )
        self.assertEqual(Message.objects.filter(user=self.eric).count(), 0)
    @patch('readthedocs.oauth.services.gitlab.GitLabService.send_build_status')
    def test_send_build_status_no_remote_repo_or_social_account_gitlab(self, send_build_status):
        """Without any account link, no status is sent and one user message is created."""
        self.project.repo = 'https://gitlab.com/test/test/'
        self.project.save()
        external_version = get(Version, project=self.project, type=EXTERNAL)
        external_build = get(
            Build, project=self.project, version=external_version
        )
        build_tasks.send_build_status(
            external_build.id, external_build.commit, BUILD_STATUS_SUCCESS
        )
        send_build_status.assert_not_called()
        self.assertEqual(Message.objects.filter(user=self.eric).count(), 1)
    @patch('readthedocs.projects.tasks.UpdateDocsTaskStep.setup_python_environment', new=MagicMock)
    @patch('readthedocs.projects.tasks.UpdateDocsTaskStep.build_docs', new=MagicMock)
    @patch('readthedocs.projects.tasks.UpdateDocsTaskStep.setup_vcs', new=MagicMock)
    @patch.object(BuildEnvironment, 'run')
    @patch('readthedocs.doc_builder.config.load_config')
    def test_install_apt_packages(self, load_config, run):
        """build.apt_packages in a v2 config produces apt-get update + install as root."""
        config = BuildConfigV2(
            {},
            {
                'version': 2,
                'build': {
                    'apt_packages': [
                        'clangd',
                        'cmatrix',
                    ],
                },
            },
            source_file='readthedocs.yml',
        )
        config.validate()
        load_config.return_value = config
        version = self.project.versions.first()
        build = get(
            Build,
            project=self.project,
            version=version,
        )
        with mock_api(self.repo):
            result = tasks.update_docs_task.delay(
                version.pk,
                build_pk=build.pk,
                record=False,
                intersphinx=False,
            )
        self.assertTrue(result.successful())
        self.assertEqual(run.call_count, 2)
        apt_update = run.call_args_list[0]
        apt_install = run.call_args_list[1]
        self.assertEqual(
            apt_update,
            mock.call(
                'apt-get',
                'update',
                '--assume-yes',
                '--quiet',
                user='root:root',
            )
        )
        self.assertEqual(
            apt_install,
            mock.call(
                'apt-get',
                'install',
                '--assume-yes',
                '--quiet',
                '--',
                'clangd',
                'cmatrix',
                user='root:root',
            )
        )
    @patch('readthedocs.projects.tasks.UpdateDocsTaskStep.build_docs', new=MagicMock)
    @patch('readthedocs.projects.tasks.UpdateDocsTaskStep.setup_vcs', new=MagicMock)
    @patch.object(BuildEnvironment, 'run')
    @patch('readthedocs.doc_builder.config.load_config')
    def test_build_tools(self, load_config, build_run):
        """build.tools entries are installed and activated via asdf (uncached path)."""
        config = BuildConfigV2(
            {},
            {
                'version': 2,
                'build': {
                    'os': 'ubuntu-20.04',
                    'tools': {
                        'python': '3.10',
                        'nodejs': '16',
                        'rust': '1.55',
                        'golang': '1.17',
                    },
                },
            },
            source_file='readthedocs.yml',
        )
        config.validate()
        load_config.return_value = config
        version = self.project.versions.first()
        build = get(
            Build,
            project=self.project,
            version=version,
        )
        with mock_api(self.repo):
            result = tasks.update_docs_task.delay(
                version.pk,
                build_pk=build.pk,
                record=False,
                intersphinx=False,
            )
        self.assertTrue(result.successful())
        self.assertEqual(build_run.call_count, 14)
        python_version = settings.RTD_DOCKER_BUILD_SETTINGS['tools']['python']['3.10']
        nodejs_version = settings.RTD_DOCKER_BUILD_SETTINGS['tools']['nodejs']['16']
        rust_version = settings.RTD_DOCKER_BUILD_SETTINGS['tools']['rust']['1.55']
        golang_version = settings.RTD_DOCKER_BUILD_SETTINGS['tools']['golang']['1.17']
        self.assertEqual(
            build_run.call_args_list,
            [
                mock.call('asdf', 'install', 'python', python_version),
                mock.call('asdf', 'global', 'python', python_version),
                mock.call('asdf', 'reshim', 'python', record=False),
                mock.call('python', '-mpip', 'install', '-U', 'virtualenv', 'setuptools<58.3.0'),
                mock.call('asdf', 'install', 'nodejs', nodejs_version),
                mock.call('asdf', 'global', 'nodejs', nodejs_version),
                mock.call('asdf', 'reshim', 'nodejs', record=False),
                mock.call('asdf', 'install', 'rust', rust_version),
                mock.call('asdf', 'global', 'rust', rust_version),
                mock.call('asdf', 'reshim', 'rust', record=False),
                mock.call('asdf', 'install', 'golang', golang_version),
                mock.call('asdf', 'global', 'golang', golang_version),
                mock.call('asdf', 'reshim', 'golang', record=False),
                mock.ANY,
            ],
        )
    @patch('readthedocs.projects.tasks.UpdateDocsTaskStep.build_docs', new=MagicMock)
    @patch('readthedocs.projects.tasks.UpdateDocsTaskStep.setup_vcs', new=MagicMock)
    @patch('readthedocs.doc_builder.python_environments.tarfile')
    @patch('readthedocs.doc_builder.python_environments.build_tools_storage')
    @patch.object(BuildEnvironment, 'run')
    @patch('readthedocs.doc_builder.config.load_config')
    def test_build_tools_cached(self, load_config, build_run, build_tools_storage, tarfile):
        """Cached build tools are moved into place (mv) instead of asdf-installed."""
        config = BuildConfigV2(
            {},
            {
                'version': 2,
                'build': {
                    'os': 'ubuntu-20.04',
                    'tools': {
                        'python': '3.10',
                        'nodejs': '16',
                        'rust': '1.55',
                        'golang': '1.17',
                    },
                },
            },
            source_file='readthedocs.yml',
        )
        config.validate()
        load_config.return_value = config
        build_tools_storage.open.return_value = b''
        build_tools_storage.exists.return_value = True
        tarfile.open.return_value.__enter__.return_value.extract_all.return_value = None
        version = self.project.versions.first()
        build = get(
            Build,
            project=self.project,
            version=version,
        )
        with mock_api(self.repo):
            result = tasks.update_docs_task.delay(
                version.pk,
                build_pk=build.pk,
                record=False,
                intersphinx=False,
            )
        self.assertTrue(result.successful())
        self.assertEqual(build_run.call_count, 13)
        python_version = settings.RTD_DOCKER_BUILD_SETTINGS['tools']['python']['3.10']
        nodejs_version = settings.RTD_DOCKER_BUILD_SETTINGS['tools']['nodejs']['16']
        rust_version = settings.RTD_DOCKER_BUILD_SETTINGS['tools']['rust']['1.55']
        golang_version = settings.RTD_DOCKER_BUILD_SETTINGS['tools']['golang']['1.17']
        self.assertEqual(
            # NOTE: casting the first argument as `list()` shows a better diff
            # explaining where the problem is
            list(build_run.call_args_list),
            [
                mock.call(
                    'mv',
                    # Use mock.ANY here because path differs when ran locally
                    # and on CircleCI
                    mock.ANY,
                    f'/home/docs/.asdf/installs/python/{python_version}',
                    record=False,
                ),
                mock.call('asdf', 'global', 'python', python_version),
                mock.call('asdf', 'reshim', 'python', record=False),
                mock.call(
                    'mv',
                    mock.ANY,
                    f'/home/docs/.asdf/installs/nodejs/{nodejs_version}',
                    record=False,
                ),
                mock.call('asdf', 'global', 'nodejs', nodejs_version),
                mock.call('asdf', 'reshim', 'nodejs', record=False),
                mock.call(
                    'mv',
                    mock.ANY,
                    f'/home/docs/.asdf/installs/rust/{rust_version}',
                    record=False,
                ),
                mock.call('asdf', 'global', 'rust', rust_version),
                mock.call('asdf', 'reshim', 'rust', record=False),
                mock.call(
                    'mv',
                    mock.ANY,
                    f'/home/docs/.asdf/installs/golang/{golang_version}',
                    record=False,
                ),
                mock.call('asdf', 'global', 'golang', golang_version),
                mock.call('asdf', 'reshim', 'golang', record=False),
                mock.ANY,
            ],
        )
| 38.725071 | 100 | 0.618503 |
7958c2a137fd3682c21bde108045d7dc957a2a20 | 3,559 | py | Python | bindings/python/ensmallen/datasets/string/marinospirilluminsulare.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 5 | 2021-02-17T00:44:45.000Z | 2021-08-09T16:41:47.000Z | bindings/python/ensmallen/datasets/string/marinospirilluminsulare.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 18 | 2021-01-07T16:47:39.000Z | 2021-08-12T21:51:32.000Z | bindings/python/ensmallen/datasets/string/marinospirilluminsulare.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 3 | 2021-01-14T02:20:59.000Z | 2021-08-04T19:09:52.000Z | """
This file offers the methods to automatically retrieve the graph Marinospirillum insulare.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def MarinospirillumInsulare(
    directed: bool = False,
    preprocess: bool = True,
    load_nodes: bool = True,
    verbose: int = 2,
    cache: bool = True,
    cache_path: str = "graphs/string",
    version: str = "links.v11.5",
    **additional_graph_kwargs: Dict
) -> Graph:
    """Retrieve and return the STRING "Marinospirillum insulare" graph.

    Parameters
    ----------
    directed: bool = False
        Load the graph as directed instead of undirected.
    preprocess: bool = True
        Preprocess the graph for optimal load time and memory.
    load_nodes: bool = True
        Load the node vocabulary rather than treating nodes as a numeric range.
    verbose: int = 2
        Verbosity of the download/build progress bars.
    cache: bool = True
        Download and preprocess only once, reusing the cached result.
    cache_path: str = "graphs/string"
        Directory where the downloaded graphs are stored.
    version: str = "links.v11.5"
        Graph version to retrieve; one of "homology.v11.0", "homology.v11.5",
        "physical.links.v11.0", "physical.links.v11.5", "links.v11.0"
        and "links.v11.5".
    additional_graph_kwargs: Dict
        Extra keyword arguments forwarded to the retrieval helper.

    Returns
    -------
    Instance of the Marinospirillum insulare graph.

    References
    ----------
    Please cite, if you use the data: Szklarczyk et al., "STRING v11:
    protein-protein association networks with increased coverage, ...",
    Nucleic Acids Research 47(D1), D607-D613, 2019.
    """
    retrieval_config = dict(
        graph_name="MarinospirillumInsulare",
        repository="string",
        version=version,
        directed=directed,
        preprocess=preprocess,
        load_nodes=load_nodes,
        verbose=verbose,
        cache=cache,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs,
    )
    # Instantiate the retriever, then call it to perform the actual retrieval.
    return AutomaticallyRetrievedGraph(**retrieval_config)()
| 32.953704 | 223 | 0.678561 |
7958c3903b3d63d29e127647bd0603187f137469 | 3,916 | py | Python | base/base/settings.py | daavelino/vulnerability-catalog | 61e0db9cc4656a16847ec635a4cac3e9a6c67dd4 | [
"MIT"
] | 12 | 2018-01-09T18:03:41.000Z | 2021-02-04T08:21:43.000Z | base/base/settings.py | daavelino/vulnerability-catalog | 61e0db9cc4656a16847ec635a4cac3e9a6c67dd4 | [
"MIT"
] | 21 | 2018-01-13T21:23:22.000Z | 2021-04-08T18:28:05.000Z | base/base/settings.py | daavelino/vulnerability-catalog | 61e0db9cc4656a16847ec635a4cac3e9a6c67dd4 | [
"MIT"
] | 7 | 2017-08-29T10:27:19.000Z | 2021-11-09T00:37:03.000Z | """
Django settings for base project.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2.3/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# '__SECRET_KEY__' is a deploy-time placeholder — presumably substituted by the
# deployment tooling; TODO confirm before running in production.
SECRET_KEY = '__SECRET_KEY__'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# Only local loopback hosts are served (IPv4, IPv6 and "localhost").
ALLOWED_HOSTS = ['localhost', '127.0.0.1', '[::1]']
# NOTE(review): application version constant; consumers not visible in this file.
VERSION = 3.0
# Application definition
INSTALLED_APPS = [
    'catalog.apps.CatalogConfig',
    'tinymce',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware', # http://whitenoise.evans.io/en/stable/
]
ROOT_URLCONF = 'base.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'base.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2.3/ref/settings/#databases
# SQLite file stored next to the project; fine for development, not for
# concurrent production workloads.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2.3/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2.3/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2.3/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'catalog/static/'
##### Security
## Cookie options:
# https://docs.djangoproject.com/en/3.2.3/ref/settings/#sessions
SESSION_COOKIE_NAME = 'sessionid'
SESSION_COOKIE_AGE = 1209600 # (2 weeks, in seconds)
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
SESSION_COOKIE_DOMAIN = None
SESSION_COOKIE_HTTPONLY = True
# NOTE(review): SECURE flags below are disabled — acceptable only when the site
# is not served over HTTPS; revisit for production deployment.
SESSION_COOKIE_SECURE = False
SESSION_SAVE_EVERY_REQUEST = False
## HTTP options:
# https://docs.djangoproject.com/en/3.2.3/ref/settings/#http
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_HSTS_PRELOAD = False
SECURE_HSTS_SECONDS = 0
SECURE_SSL_REDIRECT = False
X_FRAME_OPTIONS = 'SAMEORIGIN'
| 24.475 | 91 | 0.710419 |
7958c3a1ee2104829abebc0979f00ff9a9518c2e | 103 | py | Python | clib/training/__init__.py | Swall0w/clib | 46f659783d5a0a6ec5994c3c707c1cc8a7934385 | [
"MIT"
] | 1 | 2017-08-27T00:01:27.000Z | 2017-08-27T00:01:27.000Z | clib/training/__init__.py | Swall0w/clib | 46f659783d5a0a6ec5994c3c707c1cc8a7934385 | [
"MIT"
] | 49 | 2017-08-20T02:09:26.000Z | 2017-12-31T11:58:27.000Z | clib/training/__init__.py | Swall0w/clib | 46f659783d5a0a6ec5994c3c707c1cc8a7934385 | [
"MIT"
] | 1 | 2017-12-08T08:31:38.000Z | 2017-12-08T08:31:38.000Z | from clib.training import dataset
from clib.training import iterator
from clib.training import updater
| 25.75 | 34 | 0.854369 |
7958c3e0a5e6843a712077f2682eb5a1644248d0 | 5,309 | py | Python | test/functional/rpc_preciousblock.py | valuero-org/valuero | c0a8d40d377c39792e5a79d4a67f00bc592aef87 | [
"MIT"
] | null | null | null | test/functional/rpc_preciousblock.py | valuero-org/valuero | c0a8d40d377c39792e5a79d4a67f00bc592aef87 | [
"MIT"
] | null | null | null | test/functional/rpc_preciousblock.py | valuero-org/valuero | c0a8d40d377c39792e5a79d4a67f00bc592aef87 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Copyright (c) 2017-2019 The Raven Core developers
# Copyright (c) 2018-2019 The Rito Core developers
# Copyright (c) 2019 The Valuero developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the preciousblock RPC."""
from test_framework.test_framework import ValueroTestFramework
from test_framework.util import (
assert_equal,
connect_nodes_bi,
sync_chain,
sync_blocks,
)
def unidirectional_node_sync_via_rpc(node_src, node_dest):
    """Copy every block that ``node_dest`` is missing from ``node_src``.

    Walks backwards from ``node_src``'s best block until a block already
    known to ``node_dest`` is found, then submits the missing blocks to
    ``node_dest`` in ascending height order via ``submitblock``.
    """
    blocks_to_copy = []
    blockhash = node_src.getbestblockhash()
    while True:
        try:
            assert len(node_dest.getblock(blockhash, False)) > 0
            break
        except Exception:
            # node_dest does not know this block yet (RPC error or empty
            # result): record it and step back to the parent block.
            blocks_to_copy.append(blockhash)
            blockhash = node_src.getblockheader(blockhash, True)['previousblockhash']
    blocks_to_copy.reverse()
    for blockhash in blocks_to_copy:
        blockdata = node_src.getblock(blockhash, False)
        # submitblock returns None on success or 'inconclusive' if the block
        # is valid but not made the new tip.
        assert node_dest.submitblock(blockdata) in (None, 'inconclusive')
def node_sync_via_rpc(nodes):
    """Pairwise-sync all nodes by pushing blocks over RPC in both directions."""
    for source in nodes:
        for destination in nodes:
            if source is not destination:
                unidirectional_node_sync_via_rpc(source, destination)
class PreciousTest(ValueroTestFramework):
    """Functional test for the ``preciousblock`` RPC.

    Builds competing chains on three nodes and checks that ``preciousblock``
    can switch the preferred tip between equal-work chains, and that it
    cannot override a chain with strictly more work.
    """
    def set_test_params(self):
        # Three independent nodes with a fresh chain; large -maxreorg so
        # deliberate reorgs in this test are never rejected.
        self.setup_clean_chain = True
        self.num_nodes = 3
        self.extra_args = [["-maxreorg=10000"], ["-maxreorg=10000"], ["-maxreorg=10000"]]
    def setup_network(self):
        # Start the nodes without connecting them; connections are made
        # explicitly during the test to control when syncing happens.
        self.setup_nodes()
    def run_test(self):
        self.log.info("Ensure submitblock can in principle reorg to a competing chain")
        self.nodes[0].generate(1)
        assert_equal(self.nodes[0].getblockcount(), 1)
        hashZ = self.nodes[1].generate(2)[-1]
        assert_equal(self.nodes[1].getblockcount(), 2)
        node_sync_via_rpc(self.nodes[0:3])
        # All nodes should now agree on node1's longer (2-block) chain.
        assert_equal(self.nodes[0].getbestblockhash(), hashZ)
        self.log.info("Mine blocks A-B-C on Node 0")
        hashC = self.nodes[0].generate(3)[-1]
        assert_equal(self.nodes[0].getblockcount(), 5)
        self.log.info("Mine competing blocks E-F-G on Node 1")
        hashG = self.nodes[1].generate(3)[-1]
        assert_equal(self.nodes[1].getblockcount(), 5)
        assert(hashC != hashG)
        self.log.info("Connect nodes and check no reorg occurs")
        # Submit competing blocks via RPC so any reorg should occur before we proceed (no way to wait on inaction for p2p sync)
        node_sync_via_rpc(self.nodes[0:2])
        connect_nodes_bi(self.nodes,0,1)
        # Equal-work chains: each node sticks with the tip it saw first.
        assert_equal(self.nodes[0].getbestblockhash(), hashC)
        assert_equal(self.nodes[1].getbestblockhash(), hashG)
        self.log.info("Make Node0 prefer block G")
        self.nodes[0].preciousblock(hashG)
        assert_equal(self.nodes[0].getbestblockhash(), hashG)
        self.log.info("Make Node0 prefer block C again")
        self.nodes[0].preciousblock(hashC)
        assert_equal(self.nodes[0].getbestblockhash(), hashC)
        self.log.info("Make Node1 prefer block C")
        self.nodes[1].preciousblock(hashC)
        sync_chain(self.nodes[0:2]) # wait because node 1 may not have downloaded hashC
        assert_equal(self.nodes[1].getbestblockhash(), hashC)
        self.log.info("Make Node1 prefer block G again")
        self.nodes[1].preciousblock(hashG)
        assert_equal(self.nodes[1].getbestblockhash(), hashG)
        self.log.info("Make Node0 prefer block G again")
        self.nodes[0].preciousblock(hashG)
        assert_equal(self.nodes[0].getbestblockhash(), hashG)
        self.log.info("Make Node1 prefer block C again")
        self.nodes[1].preciousblock(hashC)
        assert_equal(self.nodes[1].getbestblockhash(), hashC)
        self.log.info("Mine another block (E-F-G-)H on Node 0 and reorg Node 1")
        self.nodes[0].generate(1)
        assert_equal(self.nodes[0].getblockcount(), 6)
        sync_blocks(self.nodes[0:2])
        hashH = self.nodes[0].getbestblockhash()
        assert_equal(self.nodes[1].getbestblockhash(), hashH)
        self.log.info("Node1 should not be able to prefer block C anymore")
        # preciousblock only arbitrates between equal-work tips; H has more work.
        self.nodes[1].preciousblock(hashC)
        assert_equal(self.nodes[1].getbestblockhash(), hashH)
        self.log.info("Mine competing blocks I-J-K-L on Node 2")
        self.nodes[2].generate(4)
        assert_equal(self.nodes[2].getblockcount(), 6)
        hashL = self.nodes[2].getbestblockhash()
        self.log.info("Connect nodes and check no reorg occurs")
        node_sync_via_rpc(self.nodes[1:3])
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        assert_equal(self.nodes[0].getbestblockhash(), hashH)
        assert_equal(self.nodes[1].getbestblockhash(), hashH)
        assert_equal(self.nodes[2].getbestblockhash(), hashL)
        self.log.info("Make Node1 prefer block L")
        self.nodes[1].preciousblock(hashL)
        assert_equal(self.nodes[1].getbestblockhash(), hashL)
        self.log.info("Make Node2 prefer block H")
        self.nodes[2].preciousblock(hashH)
        assert_equal(self.nodes[2].getbestblockhash(), hashH)
PreciousTest().main()
| 44.613445 | 127 | 0.670371 |
7958c49f40d607ac32aab3794bd041cbfe57b6c6 | 1,166 | py | Python | youtube_sync/formatter.py | tommygod3/youtube-sync | 613bed22df2befb208463f9e654f43f42ef649f3 | [
"MIT"
] | null | null | null | youtube_sync/formatter.py | tommygod3/youtube-sync | 613bed22df2befb208463f9e654f43f42ef649f3 | [
"MIT"
] | null | null | null | youtube_sync/formatter.py | tommygod3/youtube-sync | 613bed22df2befb208463f9e654f43f42ef649f3 | [
"MIT"
] | null | null | null | import os, re
from mutagen.mp3 import EasyMP3
# Two historical naming schemes produced by the downloader:
#   old: "<title> - <artist>-<11-char video id>.mp3"
#   new: "<artist> - <title>-<11-char video id>.mp3"
regex_old = re.compile(r"^(?P<title>.*) - (?P<name>.*)-(?P<mess>.{11}).mp3$")
regex_new = re.compile(r"^(?P<name>.*) - (?P<title>.*)-(?P<mess>.{11}).mp3$")

for filename in os.listdir("."):
    # Match only actual .mp3 files (the old substring test also matched
    # names like "x.mp3.bak").
    if filename.endswith('.mp3'):
        # Fixed broken f-strings: the originals had no placeholder and
        # printed the literal text instead of the file name.
        print(f"Old name: {filename}")
        match_old = regex_old.match(filename)
        match_new = regex_new.match(filename)
        if filename.startswith("H3") and match_old:
            # "H3 ..." files use the old ordering: swap into "title - name".
            print(f"Mess: {match_old.group('mess')}")
            new_name = f"{match_old.group('title')} - {match_old.group('name')}.mp3"
            os.rename(filename, new_name)
            filename = new_name
            print(f"New name: {new_name}")
        elif match_new:
            print(f"Mess: {match_new.group('mess')}")
            new_name = f"{match_new.group('title')} - {match_new.group('name')}.mp3"
            os.rename(filename, new_name)
            filename = new_name
            print(f"New name: {new_name}")
        else:
            print(f"No change: {filename}")
        # Tag the (possibly renamed) file with fixed artist/album metadata.
        audio = EasyMP3(filename)
        audio["artist"] = "H3"
        audio["album"] = "H3 Podcast"
        audio.save()
7958c5530eb8267b59051001c433338607f3f09f | 23,985 | py | Python | hottbox/algorithms/decomposition/cpd.py | adamurban98/hottbox | 26580018ec6d38a1b08266c04ce4408c9e276130 | [
"Apache-2.0"
] | 167 | 2018-05-07T10:31:00.000Z | 2022-02-24T19:20:31.000Z | hottbox/algorithms/decomposition/cpd.py | adamurban98/hottbox | 26580018ec6d38a1b08266c04ce4408c9e276130 | [
"Apache-2.0"
] | 19 | 2018-05-10T13:26:39.000Z | 2020-01-31T12:49:27.000Z | hottbox/algorithms/decomposition/cpd.py | adamurban98/hottbox | 26580018ec6d38a1b08266c04ce4408c9e276130 | [
"Apache-2.0"
] | 24 | 2018-04-02T17:16:50.000Z | 2021-12-07T06:21:40.000Z | import functools
import warnings
import numpy as np
from hottbox.utils.generation.basic import residual_tensor
from hottbox.core.structures import Tensor, TensorCPD
from hottbox.core.operations import khatri_rao, hadamard, sampled_khatri_rao
from .base import Decomposition, svd
# TODO: Need to add option of sorting vectors in the factor matrices and making them sign invariant
class BaseCPD(Decomposition):
    """ Base class for CP-type decompositions computed via ALS.

    Parameters
    ----------
    init : str
        Type of factor matrix initialisation. Available options are `svd` and `random`
    max_iter : int
        Maximum number of iterations
    epsilon : float
        Threshold for the relative error of approximation.
    tol : float
        Threshold for convergence of factor matrices
    random_state : int
    verbose : bool
        If True, enable verbose output
    """
    def __init__(self, init, max_iter, epsilon, tol, random_state, verbose):
        super(BaseCPD, self).__init__()
        self.init = init
        self.max_iter = max_iter
        self.epsilon = epsilon
        self.tol = tol
        self.random_state = random_state
        self.verbose = verbose

    def copy(self):
        """ Copy of the Decomposition as a new object """
        new_object = super(BaseCPD, self).copy()
        return new_object

    @property
    def name(self):
        """ Name of the decomposition

        Returns
        -------
        decomposition_name : str
        """
        decomposition_name = super(BaseCPD, self).name
        return decomposition_name

    def decompose(self, tensor, rank, keep_meta):
        raise NotImplementedError('Not implemented in base (BaseCPD) class')

    @property
    def converged(self):
        """ Checks convergence

        Returns
        -------
        is_converged : bool
        """
        try:  # This insures that the cost has been computed at least twice without checking number of iterations
            is_converged = abs(self.cost[-2] - self.cost[-1]) <= self.tol
        except IndexError:
            is_converged = False
        return is_converged

    def _init_fmat(self, tensor, rank):
        """ Initialisation of factor matrices

        Parameters
        ----------
        tensor : Tensor
            Multidimensional data to be decomposed
        rank : tuple
            Should be of shape (R,1), where R is the desired tensor rank. It should be passed as tuple for consistency.

        Returns
        -------
        fmat : list[np.ndarray]
            List of factor matrices
        """
        t_rank = rank[0]
        fmat = [np.array([])] * tensor.order
        # Check if all dimensions are greater then kryskal rank
        dim_check = (np.array(tensor.shape) >= t_rank).sum() == tensor.order
        if dim_check:
            if self.init == 'svd':
                for mode in range(tensor.order):
                    # TODO: don't really like this implementation
                    k = tensor.unfold(mode, inplace=False).data
                    fmat[mode], _, _ = svd(k, t_rank)
            elif self.init == 'random':
                fmat = [np.random.randn(mode_size, t_rank) for mode_size in tensor.shape]
            else:
                raise NotImplementedError('The given initialization is not available')
        else:
            # Requested rank exceeds at least one mode size: fall back to random init.
            fmat = [np.random.randn(mode_size, t_rank) for mode_size in tensor.shape]
            # BUGFIX: previously this warning was emitted whenever `verbose`
            # was set and init != 'random', even when the requested
            # initialisation had succeeded. Warn only on the actual fallback.
            if self.verbose and self.init != 'random':
                warnings.warn(
                    "Specified rank value is greater then one of the dimensions of a tensor ({} > {}).\n"
                    "Factor matrices have been initialized randomly.".format(t_rank, tensor.shape), RuntimeWarning
                )
        return fmat

    def plot(self):
        raise NotImplementedError('Not implemented in base (BaseCPD) class')
class CPD(BaseCPD):
    """ Canonical Polyadic Decomposition.
    Computed via alternating least squares (ALS)
    Parameters
    ----------
    init : str
        Type of factor matrix initialisation. Available options are `svd` and `random`
    max_iter : int
        Maximum number of iteration
    epsilon : float
        Threshold for the relative error of approximation.
    tol : float
        Threshold for convergence of factor matrices
    random_state : int
    verbose : bool
        If True, enable verbose output
    Attributes
    ----------
    cost : list
        A list of relative approximation errors at each iteration of the algorithm.
    """
    def __init__(self, init='svd', max_iter=50, epsilon=10e-3, tol=10e-5,
                 random_state=None, verbose=False) -> None:
        super(CPD, self).__init__(init=init,
                                  max_iter=max_iter,
                                  epsilon=epsilon,
                                  tol=tol,
                                  random_state=random_state,
                                  verbose=verbose)
        self.cost = []
    def copy(self):
        """ Copy of the CPD algorithm as a new object """
        new_object = super(CPD, self).copy()
        # Cost history belongs to a particular run, so the copy starts fresh.
        new_object.cost = []
        return new_object
    @property
    def name(self):
        """ Name of the decomposition
        Returns
        -------
        decomposition_name : str
        """
        decomposition_name = super(CPD, self).name
        return decomposition_name
    def decompose(self, tensor, rank, keep_meta=0, kr_reverse=False, factor_mat=None):
        """ Performs CPD-ALS on the ``tensor`` with respect to the specified ``rank``
        Parameters
        ----------
        tensor : Tensor
            Multi-dimensional data to be decomposed
        rank : tuple
            Desired Kruskal rank for the given ``tensor``. Should contain only one value.
            If it is greater then any of dimensions then random initialisation is used
        keep_meta : int
            Keep meta information about modes of the given ``tensor``.
            0 - the output will have default values for the meta data
            1 - keep only mode names
            2 - keep mode names and indices
        kr_reverse : bool
        factor_mat : list(np.ndarray)
            Initial list of factor matrices.
            Specifying this option will ignore ``init``.
        Returns
        -------
        tensor_cpd : TensorCPD
            CP representation of the ``tensor``
        Notes
        -----
        khatri-rao product should be of matrices in reversed order. But this will duplicate original data (e.g. images)
        Probably this has something to do with data ordering in Python and how it relates to kr product
        """
        if not isinstance(tensor, Tensor):
            raise TypeError("Parameter `tensor` should be an object of `Tensor` class!")
        if not isinstance(rank, tuple):
            raise TypeError("Parameter `rank` should be passed as a tuple!")
        if len(rank) != 1:
            raise ValueError("Parameter `rank` should be tuple with only one value!")
        if factor_mat is None:
            fmat = self._init_fmat(tensor, rank)
        else:
            if not isinstance(factor_mat, list):
                raise TypeError("Parameter `factor_mat` should be a list object")
            if not all(isinstance(m, np.ndarray) for m in factor_mat):
                raise TypeError("Parameter `factor_mat` should be a list object of np.ndarray objects")
            # Dimensionality checks
            if len(factor_mat) != tensor.order:
                raise ValueError("Parameter `factor_mat` should be of the same length as the tensor order")
            if not all(m.shape == (mode, rank[0]) for m, mode in zip(factor_mat, tensor.shape)):
                raise ValueError("Parameter `factor_mat` should have the shape [mode_n x r]. Incorrect shapes!")
            fmat = factor_mat.copy()
        self.cost = []  # Reset cost every time when method decompose is called
        tensor_cpd = None
        # Unit weights on the superdiagonal: all scaling lives in the factors.
        core_values = np.repeat(np.array([1]), rank)
        norm = tensor.frob_norm
        for n_iter in range(self.max_iter):
            # Update factor matrices
            for mode in range(tensor.order):
                kr_result = khatri_rao(fmat, skip_matrix=mode, reverse=kr_reverse)
                hadamard_result = hadamard([np.dot(mat.T, mat) for i, mat in enumerate(fmat) if i != mode])
                # Do consecutive multiplication of np.ndarray
                # Classic ALS normal-equation update: X_(mode) * KR * pinv(Hadamard).
                update = functools.reduce(np.dot, [tensor.unfold(mode, inplace=False).data,
                                                   kr_result,
                                                   np.linalg.pinv(hadamard_result)])
                fmat[mode] = update
            # Update cost
            tensor_cpd = TensorCPD(fmat=fmat, core_values=core_values)
            residual = residual_tensor(tensor, tensor_cpd)
            self.cost.append(abs(residual.frob_norm / norm))
            if self.verbose:
                print('Iter {}: relative error of approximation = {}'.format(n_iter, self.cost[-1]))
            # Check termination conditions
            if self.cost[-1] <= self.epsilon:
                if self.verbose:
                    print('Relative error of approximation has reached the acceptable level: {}'.format(self.cost[-1]))
                break
            if self.converged:
                if self.verbose:
                    print('Converged in {} iteration(s)'.format(len(self.cost)))
                break
        if self.verbose and not self.converged and self.cost[-1] > self.epsilon:
            print('Maximum number of iterations ({}) has been reached. '
                  'Variation = {}'.format(self.max_iter, abs(self.cost[-2] - self.cost[-1])))
        if keep_meta == 1:
            mode_names = {i: mode.name for i, mode in enumerate(tensor.modes)}
            tensor_cpd.set_mode_names(mode_names=mode_names)
        elif keep_meta == 2:
            tensor_cpd.copy_modes(tensor)
        else:
            pass
        return tensor_cpd
    @property
    def converged(self):
        """ Checks convergence of the CPD-ALS algorithm.
        Returns
        -------
        bool
        """
        is_converged = super(CPD, self).converged
        return is_converged
    def _init_fmat(self, tensor, rank):
        fmat = super(CPD, self)._init_fmat(tensor=tensor,
                                           rank=rank)
        return fmat
    def plot(self):
        print('At the moment, `plot()` is not implemented for the {}'.format(self.name))
# TODO: Fix efficiency issues with this
class RandomisedCPD(BaseCPD):
    """ Randomised Canonical Polyadic Decomposition.
    Computed via sampled alternating least squares (ALS)
    Parameters
    ----------
    init : str
        Type of factor matrix initialisation. Available options are `svd` and `random`
    max_iter : int
        Maximum number of iteration
    epsilon : float
        Threshold for the relative error of approximation.
    tol : float
        Threshold for convergence of factor matrices
    random_state : int
    verbose : bool
        If True, enable verbose output
    Attributes
    ----------
    cost : list
        A list of relative approximation errors at each iteration of the algorithm.
    References
    ----------
    -   Battaglino, C., Ballard, G., & Kolda, T. G. (2018). A Practical Randomized CP Tensor
        Decomposition. SIAM Journal on Matrix Analysis and Applications, 39(2), 876–901.
        http://doi.org/10.1137/17m1112303
    """
    def __init__(self, init='svd', sample_size=None, max_iter=50, epsilon=10e-3, tol=10e-5,
                 random_state=None, verbose=False) -> None:
        super(RandomisedCPD, self).__init__(init=init,
                                            max_iter=max_iter,
                                            epsilon=epsilon,
                                            tol=tol,
                                            random_state=random_state,
                                            verbose=verbose)
        self.cost = []
        # Number of rows sampled per least-squares solve; None delegates the
        # choice to `sampled_khatri_rao`.
        self.sample_size = sample_size
    def copy(self):
        """ Copy of the CPD algorithm as a new object """
        new_object = super(RandomisedCPD, self).copy()
        new_object.cost = []
        return new_object
    @property
    def name(self):
        """ Name of the decomposition
        Returns
        -------
        decomposition_name : str
        """
        decomposition_name = super(RandomisedCPD, self).name
        return decomposition_name
    def decompose(self, tensor, rank, keep_meta=0, kr_reverse=False):
        """ Performs CPD-ALS on the ``tensor`` with respect to the specified ``rank``
        Parameters
        ----------
        tensor : Tensor
            Multi-dimensional data to be decomposed
        rank : tuple
            Desired Kruskal rank for the given ``tensor``. Should contain only one value.
            If it is greater then any of dimensions then random initialisation is used
        keep_meta : int
            Keep meta information about modes of the given ``tensor``.
            0 - the output will have default values for the meta data
            1 - keep only mode names
            2 - keep mode names and indices
        kr_reverse : bool
        Returns
        -------
        tensor_cpd : TensorCPD
            CP representation of the ``tensor``
        Notes
        -----
        khatri-rao product should be of matrices in reversed order. But this will duplicate original data (e.g. images)
        Probably this has something to do with data ordering in Python and how it relates to kr product
        """
        if not isinstance(tensor, Tensor):
            raise TypeError("Parameter `tensor` should be an object of `Tensor` class!")
        if not isinstance(rank, tuple):
            raise TypeError("Parameter `rank` should be passed as a tuple!")
        if len(rank) != 1:
            raise ValueError("Parameter `rank` should be tuple with only one value!")
        self.cost = []  # Reset cost every time when method decompose is called
        tensor_cpd = None
        fmat = self._init_fmat(tensor, rank)
        core_values = np.repeat(np.array([1]), rank)
        norm = tensor.frob_norm
        lm = np.arange(tensor.order).tolist()
        for n_iter in range(self.max_iter):
            # Update factor matrices
            for mode in lm:
                # Sample rows of the Khatri-Rao product instead of forming it
                # fully; `idxlist` holds the sampled multi-indices per mode.
                kr_result, idxlist = sampled_khatri_rao(fmat, sample_size=self.sample_size, skip_matrix=mode)
                lmodes = lm[:mode] + lm[mode+1:]
                xs = np.array([tensor.access(m, lmodes) for m in np.array(idxlist).T.tolist()])
                # Solve kr_result^-1 * xs
                pos_def = np.dot(kr_result.T, kr_result)
                corr_term = np.dot(kr_result.T, xs)
                min_result = np.linalg.solve(pos_def, corr_term)
                fmat[mode] = min_result.T
            # Update cost
            tensor_cpd = TensorCPD(fmat=fmat, core_values=core_values)
            residual = residual_tensor(tensor, tensor_cpd)
            self.cost.append(abs(residual.frob_norm / norm))
            if self.verbose:
                print('Iter {}: relative error of approximation = {}'.format(n_iter, self.cost[-1]))
            # Check termination conditions
            if self.cost[-1] <= self.epsilon:
                if self.verbose:
                    print('Relative error of approximation has reached the acceptable level: {}'.format(self.cost[-1]))
                break
            if self.converged:
                if self.verbose:
                    print('Converged in {} iteration(s)'.format(len(self.cost)))
                break
        if self.verbose and not self.converged and self.cost[-1] > self.epsilon:
            print('Maximum number of iterations ({}) has been reached. '
                  'Variation = {}'.format(self.max_iter, abs(self.cost[-2] - self.cost[-1])))
        if keep_meta == 1:
            mode_names = {i: mode.name for i, mode in enumerate(tensor.modes)}
            tensor_cpd.set_mode_names(mode_names=mode_names)
        elif keep_meta == 2:
            tensor_cpd.copy_modes(tensor)
        else:
            pass
        return tensor_cpd
    @property
    def converged(self):
        """ Checks convergence of the Randomised CPD-ALS algorithm.
        Returns
        -------
        bool
        """
        is_converged = super(RandomisedCPD, self).converged
        return is_converged
    def _init_fmat(self, tensor, rank):
        fmat = super(RandomisedCPD, self)._init_fmat(tensor=tensor,
                                                     rank=rank)
        return fmat
    def plot(self):
        print('At the moment, `plot()` is not implemented for the {}'.format(self.name))
class Parafac2(BaseCPD):
    """ Computes PARAFAC2 for ``tensors`` of order three with respect to a specified ``rank``.
    Computed via alternating least squares (ALS)
    Parameters
    ----------
    max_iter : int
        Maximum number of iteration
    epsilon : float
        Threshold for the relative error of approximation.
    tol : float
        Threshold for convergence of factor matrices
    random_state : int
    verbose : bool
        If True, enable verbose output
    Attributes
    ----------
    cost : list
        A list of relative approximation errors at each iteration of the algorithm.
    References
    ----------
    -   Kiers, H., ten Berge, J. and Bro, R. (1999). PARAFAC2 - Part I.
        A direct fitting algorithm for the PARAFAC2 model. Journal of Chemometrics,
        13(3-4), pp.275-294.
    """
    # TODO: change init use requiring a change in TensorCPD
    def __init__(self, max_iter=50, epsilon=10e-3, tol=10e-5,
                 random_state=None, verbose=False) -> None:
        super(Parafac2, self).__init__(init='random',
                                       max_iter=max_iter,
                                       epsilon=epsilon,
                                       tol=tol,
                                       random_state=random_state,
                                       verbose=verbose)
        self.cost = []
    def copy(self):
        """ Copy of the CPD algorithm as a new object """
        new_object = super(Parafac2, self).copy()
        new_object.cost = []
        return new_object
    @property
    def name(self):
        """ Name of the decomposition
        Returns
        -------
        decomposition_name : str
        """
        decomposition_name = super(Parafac2, self).name
        return decomposition_name
    # TODO: Parameters differ to base class decomposed - fix
    def decompose(self, tenl, rank):
        """ Performs Direct fitting using ALS on a list of tensors of order 2 with respect to the specified ``rank``.
        Parameters
        ----------
        tenl : List(np.ndarray)
            List of np.ndarray of dimension 2 to be decomposed
        rank : tuple
            Desired Kruskal rank for the given ``tensor``. Should contain only one value.
            If it is greater then any of dimensions then random initialisation is used
        Returns
        -------
        fmat_u, fmat_s, fmat_v, reconstructed : Tuple(np.ndarray)
            fmat_u,fmat_s,fmat_v are PARAFAC2 representation of list of tensors
            reconstructed is the reconstruction of the original tensor directly using fmat_u, fmat_s, fmat_v
        Notes
        -----
        khatri-rao product should be of matrices in reversed order. But this will duplicate original data (e.g. images)
        Probably this has something to do with data ordering in Python and how it relates to kr product
        """
        if not isinstance(tenl, list):
            raise TypeError("Parameter `tenl` should be a list of np.ndarray objects!")
        if not all(isinstance(m, np.ndarray) for m in tenl):
            raise TypeError("Parameter `tenl` should be a list of np.ndarray objects!")
        if not isinstance(rank, tuple):
            raise TypeError("Parameter `rank` should be passed as a tuple!")
        if len(rank) != 1:
            raise ValueError("Parameter `rank` should be tuple with only one value!")
        self.cost = []  # Reset cost every time when method decompose is called
        sz = np.array([t.shape for t in tenl])
        _m = list(sz[:, 1])
        # PARAFAC2 requires all slices to share the second dimension J.
        if _m[1:] != _m[:-1]:
            raise ValueError("Tensors must be of shape I[k] x J")
        num_t = len(sz)
        mode_b = _m[0]
        # Initialisations
        # Inner CPD with a single iteration performs one ALS sweep per outer loop.
        cpd = CPD(max_iter=1)
        fmat_h, fmat_v, fmat_s, fmat_u = self._init_fmat(rank, sz)
        cpd_fmat = None
        for n_iter in range(self.max_iter):
            for k in range(num_t):
                # Orthogonal Procrustes step: best orthonormal U[k] for slice k.
                p, _, q = svd(fmat_h.dot(fmat_s[:, :, k]).dot(fmat_v.T).dot(tenl[k].T), rank=rank[0])
                fmat_u[k] = q.T.dot(p.T)
            y = np.zeros((rank[0], mode_b, num_t))
            for k in range(num_t):
                y[:, :, k] = fmat_u[k].T.dot(tenl[k])
            fmat = [fmat_h, fmat_v, cpd_fmat]
            if n_iter == 0:
                fmat = None
            decomposed_cpd = cpd.decompose(Tensor(y), rank, factor_mat=fmat)
            fmat_h, fmat_v, cpd_fmat = decomposed_cpd.fmat
            cpd_fmat = cpd_fmat.dot(np.diag(decomposed_cpd._core_values))
            for k in range(num_t):
                fmat_s[:, :, k] = np.diag(cpd_fmat[k, :])
            reconstructed = [(fmat_u[k].dot(fmat_h).dot(fmat_s[:, :, k])).dot(fmat_v.T) for k in range(num_t)]
            # Absolute (not relative) squared reconstruction error over all slices.
            err = np.sum([np.sum((tenl[k] - reconstructed[k]) ** 2)
                          for k in range(num_t)])
            self.cost.append(err)
            if self.verbose:
                print('Iter {}: relative error of approximation = {}'.format(n_iter, self.cost[-1]))
            # Check termination conditions
            if self.cost[-1] <= self.epsilon:
                if self.verbose:
                    print('Relative error of approximation has reached the acceptable level: {}'.format(self.cost[-1]))
                break
            if self.converged:
                if self.verbose:
                    print('Converged in {} iteration(s)'.format(len(self.cost)))
                break
        if self.verbose and not self.converged and self.cost[-1] > self.epsilon:
            print('Maximum number of iterations ({}) has been reached. '
                  'Variation = {}'.format(self.max_iter, abs(self.cost[-2] - self.cost[-1])))
        # TODO: possibly make another structure
        return fmat_u, fmat_s, fmat_v, reconstructed
    @property
    def converged(self):
        """ Checks convergence of the CPD-ALS algorithm.
        Returns
        -------
        bool
        """
        try:  # This insures that the cost has been computed at least twice without checking number of iterations
            # Relative tolerance here, unlike the absolute test in BaseCPD.
            is_converged = abs(self.cost[-2] - self.cost[-1]) <= self.tol*self.cost[-2]
        except IndexError:
            is_converged = False
        return is_converged
    def _init_fmat(self, rank, modes):
        """ Initialisation of matrices used in Parafac2
        Parameters
        ----------
        rank : tuple
            Should be of shape (R,1), where R is the desired tensor rank. It should be passed as tuple for consistency.
        modes : tuple
            np.ndarray of the shapes of matrices
        Returns
        -------
        (fmat_h,fmat_v,fmat_s,fmat_u) : Tuple[np.ndarray]
            Factor matrices used in Parafac2
        """
        mode_sz = len(modes)
        s_mode = modes[0, 1]
        modes = modes[:, 0]
        fmat_h = np.identity(rank[0])
        fmat_v = np.random.randn(s_mode, rank[0])
        fmat_s = np.random.randn(rank[0], rank[0], mode_sz)
        for k in range(mode_sz):
            fmat_s[:, :, k] = np.identity(rank[0])
        fmat_u = np.array([np.random.randn(modes[i], rank[0]) for i in range(mode_sz)])
        if (np.array(modes) < rank[0]).sum() != 0:
            warnings.warn(
                "Specified rank value is greater then one of the dimensions of a tensor ({} > {}).\n"
                "Factor matrices have been initialized randomly.".format(rank, modes), RuntimeWarning
            )
        return fmat_h, fmat_v, fmat_s, fmat_u
    def plot(self):
        print('At the moment, `plot()` is not implemented for the {}'.format(self.name))
| 39 | 119 | 0.574651 |
7958c6972800396d14317e36be3e2f5dfec5c63d | 1,037 | py | Python | src/vm/stack.py | mingz2013/lang-py | 1788bae92cbc8b5f3f99d9ae1c45ea116d870d91 | [
"Apache-2.0"
] | null | null | null | src/vm/stack.py | mingz2013/lang-py | 1788bae92cbc8b5f3f99d9ae1c45ea116d870d91 | [
"Apache-2.0"
] | null | null | null | src/vm/stack.py | mingz2013/lang-py | 1788bae92cbc8b5f3f99d9ae1c45ea116d870d91 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@FileName: stack
@Time: 2020/2/4 14:18
@Author: zhaojm
Module Description
"""
class StackNode(object):
    """A single element of a singly linked stack.

    Carries a payload in ``data`` and a reference to the node below it
    in ``prev``.
    """

    def __get_d(self):
        # Return a *copy* of the instance dict augmented with the class name.
        # BUGFIX: the original mutated self.__dict__ directly, so every
        # str()/repr() call permanently injected a fake '__class_name__'
        # attribute into the node.
        d = dict(self.__dict__)
        d['__class_name__'] = self.__class__.__name__
        return d

    def __str__(self):
        return str(self.__get_d())

    def __repr__(self):
        return repr(self.__get_d())

    def __init__(self):
        self.prev = None  # node below this one on the stack (None at bottom)
        self.data = None  # payload carried by this node
class Stack(object):
    """Minimal LIFO stack backed by a singly linked chain of StackNode."""

    def __get_d(self):
        # Return a *copy* of the instance dict augmented with the class name.
        # BUGFIX: the original mutated self.__dict__, so printing the stack
        # injected a fake '__class_name__' attribute into it.
        d = dict(self.__dict__)
        d['__class_name__'] = self.__class__.__name__
        return d

    def __str__(self):
        return str(self.__get_d())

    def __repr__(self):
        return repr(self.__get_d())

    def __init__(self):
        self.top_node = None   # most recently pushed node (None when empty)
        # NOTE(review): back_node is never updated anywhere in this class;
        # kept only for interface compatibility with existing callers.
        self.back_node = None

    def push(self, node):
        """Push ``node`` (a StackNode) onto the top of the stack."""
        assert isinstance(node, StackNode)
        node.prev = self.top_node
        self.top_node = node

    def pop(self):
        """Remove and return the top node.

        Raises
        ------
        IndexError
            If the stack is empty (previously this failed with an opaque
            AttributeError on ``None.prev``).
        """
        node = self.top_node
        if node is None:
            raise IndexError('pop from empty stack')
        self.top_node = node.prev
        return node
| 17.87931 | 53 | 0.583414 |
7958c70aded789ff0f34dab3625c58587e551999 | 2,802 | py | Python | rampwf/utils/pretty_print.py | mehdidc/ramp-workflow | 68146005369b31c1c855c2372172d355440994a1 | [
"BSD-3-Clause"
] | null | null | null | rampwf/utils/pretty_print.py | mehdidc/ramp-workflow | 68146005369b31c1c855c2372172d355440994a1 | [
"BSD-3-Clause"
] | null | null | null | rampwf/utils/pretty_print.py | mehdidc/ramp-workflow | 68146005369b31c1c855c2372172d355440994a1 | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
"""
Utility methods to print the results in a terminal using term colors
"""
from __future__ import print_function
import numpy as np
from pandas import option_context
from colored import stylize, fg, attr
# Dictionary of term colors used for printing to terminal
fg_colors = {
'official_train': 'light_green',
'official_valid': 'light_blue',
'official_test': 'red',
'train': 'dark_sea_green_3b',
'valid': 'light_slate_blue',
'test': 'pink_1',
'title': 'gold_3b',
'warning': 'grey_46',
}
def print_title(str):
print(stylize(str, fg(fg_colors['title']) + attr('bold')))
def print_warning(str):
    """Print *str* to the terminal in the muted warning color."""
    styled = stylize(str, fg(fg_colors['warning']))
    print(styled)
def print_df_scores(df_scores, score_types, indent=''):
    """Pretty print the scores dataframe.

    Parameters
    ----------
    df_scores : pd.DataFrame
        the score dataframe
    score_types : list of score types
        a list of score types to use
    indent : str, default=''
        indentation if needed
    """
    try:
        # try to re-order columns/rows in the printed array
        # we may not have all train, valid, test, so need to select
        index_order = np.array(['train', 'valid', 'test'])
        ordered_index = index_order[np.isin(index_order, df_scores.index)]
        df_scores = df_scores.loc[
            ordered_index, [score_type.name for score_type in score_types]]
    except Exception:
        print_warning("Couldn't re-order the score matrix..")
    with option_context("display.width", 160):
        df_repr = repr(df_scores)
        df_repr_out = []
        # Pair each printed line with its row label; the two leading None
        # entries cover the repr's header lines, which carry no row label.
        for line, color_key in zip(df_repr.splitlines(),
                                   [None, None] +
                                   list(df_scores.index.values)):
            if line.strip() == 'step':
                # drop the index-name line pandas emits for 'step'
                continue
            if color_key is None:
                # table header
                line = stylize(line, fg(fg_colors['title']) + attr('bold'))
            if color_key is not None:
                tokens = line.split()
                tokens_bak = tokens[:]
                if 'official_' + color_key in fg_colors:
                    # line label and official score bold & bright
                    label_color = fg(fg_colors['official_' + color_key])
                    tokens[0] = stylize(tokens[0], label_color + attr('bold'))
                    tokens[1] = stylize(tokens[1], label_color + attr('bold'))
                if color_key in fg_colors:
                    # other scores pale
                    tokens[2:] = [stylize(token, fg(fg_colors[color_key]))
                                  for token in tokens[2:]]
                # Splice the colorized tokens back into the original line so
                # the column alignment of the plain repr is preserved.
                for token_from, token_to in zip(tokens_bak, tokens):
                    line = line.replace(token_from, token_to)
            line = indent + line
            df_repr_out.append(line)
        print('\n'.join(df_repr_out))
7958c74b2b84f29f1c972c3369debd283abe709c | 3,164 | py | Python | app.py | aewens/babili-bot | a28241c7180e2dc17b9fee71357bbb96d625532a | [
"BSD-3-Clause"
] | null | null | null | app.py | aewens/babili-bot | a28241c7180e2dc17b9fee71357bbb96d625532a | [
"BSD-3-Clause"
] | null | null | null | app.py | aewens/babili-bot | a28241c7180e2dc17b9fee71357bbb96d625532a | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
from argparse import ArgumentParser
from os.path import dirname, realpath
from bot import Bot, Tasks, Responses
from actions import actions
from coroutines import coroutines
# Command-line interface: the only option is the path of the JSON
# settings file the bot loads at startup (defaults to settings.json).
parser = ArgumentParser(description="A meta bot for ~team")
parser.add_argument(
    "-c",
    "--config",
    dest="config",
    default="settings.json",
    help="Load config file"
)
arguments = parser.parse_args()
# Core bot objects: the IRC client itself (pointed at a local server),
# its response matcher, and its periodic task runner.
bot = Bot("127.0.0.1", 6667)
responses = Responses(bot)
tasks = Tasks(bot)

# for coro in coroutines:
#     worker = coro["worker"]
#     interval = coro["interval"]
#     state = coro.get("state", None)
#     coro_state = state if state is not None else (bot,)
#     tasks.add_coroutine(worker, interval, coro_state)
# The coroutine list is handed to the task runner wholesale instead of
# being registered one by one (see the commented-out loop above).
tasks.coroutines = coroutines
# Wire up every fully-specified action as a response trigger.
for action in actions:
    if all(key in action for key in ("type", "pattern", "callback")):
        responses.add_trigger(
            action["type"],
            action["pattern"],
            action["callback"]
        )
def try_to_king_me(channel):
    """Ask ChanServ to register *channel* and install the bot as its
    successor with the bot's author as founder."""
    requests = (
        ("REGISTER {}", (channel,)),
        ("SET Successor {} {}", (channel, bot.botnick)),
        ("SET Founder {} {}", (channel, bot.author)),
    )
    for template, args in requests:
        bot.send_message("ChanServ", template, *args)
def handle_pm(name, response):
    """Log an incoming private message to stdout."""
    message = "PM: {} - {}".format(name, response)
    print(message)
def handle_mode(channel, mode):
    """React to a channel mode change: when the channel loses its
    registered flag (-r), try to claim it."""
    if mode != "-r":
        return
    try_to_king_me(channel)
def handle_invite(channel, name):
    """Handle a channel invite from *name*: claim channels on the
    king-me list, then record the invite against the inviting user,
    persisting memories only when something actually changed."""
    kingme_channels = bot.settings.get("extras", dict()).get("kingme", [])
    if channel in kingme_channels:
        try_to_king_me(channel)

    users = bot.memories["users"]
    dirty = False
    if name not in users:
        users[name] = dict()
        dirty = True
    record = users[name]
    if "invites" not in record:
        record["invites"] = list()
        dirty = True
    if channel not in record["invites"]:
        record["invites"].append(channel)
        dirty = True
    if dirty:
        bot.thread(bot.save_memories)
def handle_kick(name):
    """Mark *name* as a kicker in the bot's memories and persist."""
    users = bot.memories["users"]
    record = users.setdefault(name, dict())
    record["kicker"] = True
    bot.thread(bot.save_memories)
def handle_message(name, source, response):
    """Feed every channel message through the response matcher; the
    special "!debug" message additionally dumps the bot's memories."""
    responses.parse(name, source, response)
    if response != "!debug":
        return
    bot.logger.debug(":: {}".format(bot.memories))
def handle_crashed():
    """Recover from a bot crash: stop the running task scheduler, build
    a fresh one, and restart the client with the same event handlers.

    NOTE(review): bot.start() is re-entered from inside the crash
    handler -- presumably it blocks until the connection dies; confirm
    this cannot recurse unboundedly on repeated crashes.
    """
    bot.logger.debug("Rebooting")
    bot.crashed = True
    bot.tasks.stop()
    # Note: this rebinds a local 'tasks'; the new runner reaches the bot
    # only through the bot.tasks assignment below.
    tasks = Tasks(bot)
    tasks.coroutines = coroutines
    bot.tasks = tasks
    bot.start(arguments.config, dirname(realpath(__file__)), {
        "pm": handle_pm,
        "mode": handle_mode,
        "invite": handle_invite,
        "kick": handle_kick,
        "crashed": handle_crashed,
        "message": handle_message
    })
if __name__ == "__main__":
    # Attach the task runner and hand control to the bot's main loop,
    # wiring each IRC event name to its handler defined above.
    bot.tasks = tasks
    bot.start(arguments.config, dirname(realpath(__file__)), {
        "pm": handle_pm,
        "mode": handle_mode,
        "invite": handle_invite,
        "kick": handle_kick,
        "crashed": handle_crashed,
        "message": handle_message
    })
| 27.275862 | 77 | 0.631479 |
7958c7ca93bde48377c396c4f52f80ee133e0a08 | 8,522 | py | Python | backend/crypto_bot/management/commands/feed_exchange_history.py | landdafku11/cryptocurrencybot | d91976d4fb42c75632e92bf9dd2b183d584b0f89 | [
"MIT"
] | 1 | 2021-09-30T13:41:12.000Z | 2021-09-30T13:41:12.000Z | backend/crypto_bot/management/commands/feed_exchange_history.py | KangDo96/CryptoCurrencyBot | 3c5d347495b27c752016302fec761efc46daaae5 | [
"MIT"
] | null | null | null | backend/crypto_bot/management/commands/feed_exchange_history.py | KangDo96/CryptoCurrencyBot | 3c5d347495b27c752016302fec761efc46daaae5 | [
"MIT"
] | null | null | null | import os ,sys
import pprint
from django.conf import settings
import django
from django.core.management import BaseCommand, CommandError
main_folder = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(main_folder)
import ccxt
from services.ccxt_api import CCXTApiHandler as handler
from services.coingecko import CoinGeckoHandler as gecko_handler
print(main_folder)
from models import *
class Command(BaseCommand):
    """Management command that inspects crypto exchanges through ccxt.

    Supports listing the known exchanges, listing the trade pairs of one
    exchange, and fetching OHLCV candles for a trade pair -- either
    printed to the terminal (--cache) or stored in the DB (--commit).
    """

    # Exchanges this command is willing to talk to.
    exchanges = [
        "Binance",
        "Kraken",
        "Bisq",
        "coinbase",
        'bitfinex',
        'kucoin',
        'ftx',
        'liquid',
        'bithumb',
        'poloniex',
    ]
    obj_handler = handler()

    def add_arguments(self, parser):
        """Register the CLI options for this command."""
        parser.add_argument(
            "--list-exchanges",
            help="List exchanges available."
        )
        parser.add_argument(
            "--list-trade-pairs",
            help="List trade pairs for a given exchange."
        )
        # Bug fix: the original registered --exchange twice, which makes
        # argparse raise a conflicting-option error before the command
        # can even run. Register it once; it serves both sub-commands.
        parser.add_argument(
            "--exchange",
            type=str,
            help="Exchange ID (run command with --list-exchanges or "
                 "--list_exchange_trade_pair_ohlcv option)."
        )
        parser.add_argument(
            "--list_exchange_trade_pair_ohlcv",
            help="List the ohlcv values for the given trade and exchange."
        )
        # Bug fix: the flag was misspelled --traid-pair, so argparse
        # stored the value under 'traid_pair' while handle() reads
        # 'trade_pair' -- the OHLCV sub-command could never trigger.
        # The old misspelling is kept as an alias for backward
        # compatibility.
        parser.add_argument(
            "--trade-pair",
            "--traid-pair",
            dest="trade_pair",
            type=str,
            help="Trade pair identifier of the exchange (run command with --list_exchange_trade_pair_ohlcv option)."
        )
        parser.add_argument(
            "--since",
            type=int,
            help="timestamp to start looking from (run command with --list_exchange_trade_pair_ohlcv option)."
        )
        parser.add_argument(
            "--limit",
            type=int,
            help="timestamp to stop looking to (run command with --list_exchange_trade_pair_ohlcv option)."
        )
        parser.add_argument(
            "--granularity",
            type=str,
            help="specification between records (run command with --list_exchange_trade_pair_ohlcv option)."
        )
        # NOTE(review): type=bool is an argparse pitfall -- any non-empty
        # string (including "False") parses as True. Kept as-is to
        # preserve the CLI contract; consider action="store_true".
        parser.add_argument(
            "--commit",
            type=bool,
            help="save the data in the db (run command with --list_exchange_trade_pair_ohlcv option)."
        )
        parser.add_argument(
            "--cache",
            type=bool,
            help="print the data in the terminal (run command with --list_exchange_trade_pair_ohlcv option)."
        )

    def handle(self, **options):
        """Dispatch to the sub-command selected by the parsed options."""
        if options.get("list_exchanges", None) is not None:
            self.list_exchanges()
            exit(0)
        if options.get("list_trade_pairs", None) is not None and options.get("exchange", None) is not None:
            exchange = options.get("exchange")
            self.list_trade_pairs_by_exchange(exchange)
            exit(0)
        # The OHLCV sub-command needs the exchange, pair, window and
        # granularity, plus at least one output mode (--commit/--cache).
        if options.get('list_exchange_trade_pair_ohlcv', None) and (
                options.get("exchange", None)
                and options.get("trade_pair", None)
                and options.get("since", None)
                and options.get("limit", None)
                and options.get("granularity", None)
                and (options.get("commit", None) or options.get("cache", None))):
            params = {key: value for key, value in options.items()
                      if key in ["exchange", "trade_pair", "since", "limit",
                                 "granularity", 'cache', 'commit']}
            self.list_exchange_trade_pair_ohlcv(params=params)
            exit(0)

    def list_exchanges(self):
        """Print the names of the supported exchanges."""
        self.stdout.write(self.style.SUCCESS("Listing available exchanges"))
        self.stdout.write(self.style.WARNING("---------------------------"))
        for exchange in self.exchanges:
            self.stdout.write(exchange)

    def list_trade_pairs_by_exchange(self, exchange):
        """Print every market symbol traded on *exchange*.

        Returns True on success, False when the exchange id is unknown
        to ccxt or loading its markets fails.
        """
        try:
            exchange = getattr(ccxt, exchange.lower())()
            markets = exchange.load_markets().keys()
        except Exception as error:
            self.stdout.write(self.style.ERROR(str(error)))
            return False
        for market in markets:
            self.stdout.write(self.style.WARNING(market))
        return True

    # this one is set up to work with coingecko
    def list_general_trade_pair_ohlcv(self, coin_id, currency, from_timestamp, to_timestamp):
        """Placeholder: exchange-independent OHLCV via CoinGecko.

        Design notes carried over from the original author:
        Use pycoingecko.CoinGeckoAPI().get_coin_market_chart_range_by_id(
        id='bitcoin', vs_currency='eur', from_timestamp=..., to_timestamp=...)
        to retrieve price (set ohlc=price, i.e. 4 redundant fields with the
        same value), volume and market_cap (the latter omitted for now --
        neither printed nor stored). Each property has a separate timestamp,
        so values must be resampled onto the union of timestamps and
        interpolated using nearest. Granularity does not apply here: the
        only accepted value is 'auto' (or the flag omitted); anything like
        1m/1h/1d must trigger an error. get_coin_ohlc_by_id is worse -- it
        provides sparse timestamps and does not support ranges. The command
        should provide general, exchange-independent information on a trade
        pair; CoinGecko is made responsible for unifying data across
        exchanges.
        """

    def list_exchange_trade_pair_ohlcv(self, params):
        """Fetch OHLCV candles for one trade pair on one exchange.

        params keys: exchange, trade_pair, since, limit, granularity,
        plus at least one of cache (print) / commit (persist).
        Returns True on success, False on bad parameters or fetch error.
        """
        required = ("exchange", "trade_pair", "since", "limit", "granularity")
        # Bug fix: the original required *all* of commit and cache to be
        # truthy (handle() only guarantees one of them), so every valid
        # invocation was rejected here. Require the core parameters plus
        # at least one output mode.
        if not all(params.get(key) for key in required) or not (
                params.get("commit") or params.get("cache")):
            self.stderr.write(self.style.ERROR("there's an issue with the parameters provided, try again."))
            return False
        try:
            exchange = handler.load_exchange_manager(exchange=params['exchange'])
            # Bug fix: the symbol was read from the misspelled key
            # 'trade_paid', which always raised KeyError.
            data = handler.list_ohlcvs(exchange_obj=exchange,
                                       symbol=params['trade_pair'],
                                       since=params['since'],
                                       limit=params['limit'],
                                       timeframe=params['granularity'])
            # Resolve the Exchange DB record lazily and only once instead
            # of re-querying it for every candle.
            exchange_record = None
            for obj_ohlcv in data:
                if params['cache']:
                    self.stdout.write(self.style.WARNING(obj_ohlcv))
                elif params['commit']:
                    if exchange_record is None:
                        exchange_record = (
                            Exchange.get_obj(search_key=params['exchange'])
                            if Exchange.exists(params['exchange'])
                            else Exchange.objects.create(
                                dump_data=handler.get_exchange_fields(exchange)))
                    exchange_record.ohlcv_set.create(
                        timestamp=obj_ohlcv.timestamp,
                        open_price=obj_ohlcv.o,
                        highest_price=obj_ohlcv.h,
                        lowest_price=obj_ohlcv.l,
                        closing_price=obj_ohlcv.c,
                        volume=obj_ohlcv.v,
                    )
            return True
        except Exception as X:
            self.stderr.write(self.style.ERROR(f"there's an issue with the request {X}, try again."))
            return False
| 43.258883 | 168 | 0.581906 |
7958c89e8f71092e3a6db5620b292ae3c0c4c6a4 | 33,336 | py | Python | tests/core/full_node/test_full_node_store.py | morrillup/chaingreen-blockchain | 0b2d008dd10228670decf360d21448a65fce48a4 | [
"Apache-2.0"
] | 1 | 2021-11-12T20:30:23.000Z | 2021-11-12T20:30:23.000Z | tests/core/full_node/test_full_node_store.py | morrillup/chaingreen-blockchain | 0b2d008dd10228670decf360d21448a65fce48a4 | [
"Apache-2.0"
] | null | null | null | tests/core/full_node/test_full_node_store.py | morrillup/chaingreen-blockchain | 0b2d008dd10228670decf360d21448a65fce48a4 | [
"Apache-2.0"
] | null | null | null | # flake8: noqa: F811, F401
import asyncio
import atexit
import logging
from secrets import token_bytes
from typing import List, Optional
import pytest
from chaingreen.consensus.blockchain import ReceiveBlockResult
from chaingreen.consensus.multiprocess_validation import PreValidationResult
from chaingreen.consensus.pot_iterations import is_overflow_block
from chaingreen.full_node.full_node_store import FullNodeStore
from chaingreen.full_node.signage_point import SignagePoint
from chaingreen.protocols import timelord_protocol
from chaingreen.protocols.timelord_protocol import NewInfusionPointVDF
from chaingreen.types.blockchain_format.sized_bytes import bytes32
from chaingreen.types.unfinished_block import UnfinishedBlock
from chaingreen.util.block_cache import BlockCache
from tests.block_tools import get_signage_point, create_block_tools
from chaingreen.util.hash import std_hash
from chaingreen.util.ints import uint8, uint32, uint64, uint128
from tests.setup_nodes import test_constants as test_constants_original
from tests.util.blockchain import create_blockchain
from tests.util.keyring import TempKeyring
def cleanup_keyring(keyring: TempKeyring) -> None:
    """Dispose of the temporary keyring's state (registered as an atexit hook)."""
    keyring.cleanup()
# Module-level test fixtures: a temporary keyring (cleaned up at process
# exit), constants shrunk so proofs/slots are cheap to compute in tests,
# and the shared block-tools instance used to build test chains.
temp_keyring = TempKeyring()
keychain = temp_keyring.get_keychain()
atexit.register(cleanup_keyring, temp_keyring)  # Attempt to cleanup the temp keychain
test_constants = test_constants_original.replace(**{"DISCRIMINANT_SIZE_BITS": 32, "SUB_SLOT_ITERS_STARTING": 2 ** 12})
bt = create_block_tools(constants=test_constants, keychain=keychain)
@pytest.fixture(scope="session")
def event_loop():
    # Session-scoped loop so every async test in this module shares a
    # single asyncio event loop.
    loop = asyncio.get_event_loop()
    yield loop
# Module-level logger named after this test module.
log = logging.getLogger(__name__)
@pytest.fixture(scope="function")
async def empty_blockchain():
    """Yield a fresh, empty blockchain built with the reduced test
    constants; closes the DB connection, shuts the chain down, and
    removes the DB file afterwards."""
    bc1, connection, db_path = await create_blockchain(test_constants)
    yield bc1
    await connection.close()
    bc1.shut_down()
    db_path.unlink()
@pytest.fixture(scope="function")
async def empty_blockchain_original():
    """Same as empty_blockchain, but built with the unmodified
    (original) consensus constants."""
    bc1, connection, db_path = await create_blockchain(test_constants_original)
    yield bc1
    await connection.close()
    bc1.shut_down()
    db_path.unlink()
class TestFullNodeStore:
@pytest.mark.asyncio
async def test_basic_store(self, empty_blockchain, normalized_to_identity: bool = False):
blockchain = empty_blockchain
blocks = bt.get_consecutive_blocks(
10,
seed=b"1234",
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
store = FullNodeStore(test_constants)
unfinished_blocks = []
for block in blocks:
unfinished_blocks.append(
UnfinishedBlock(
block.finished_sub_slots,
block.reward_chain_block.get_unfinished(),
block.challenge_chain_sp_proof,
block.reward_chain_sp_proof,
block.foliage,
block.foliage_transaction_block,
block.transactions_info,
block.transactions_generator,
[],
)
)
# Add/get candidate block
assert store.get_candidate_block(unfinished_blocks[0].get_hash()) is None
for height, unf_block in enumerate(unfinished_blocks):
store.add_candidate_block(unf_block.get_hash(), uint32(height), unf_block)
candidate = store.get_candidate_block(unfinished_blocks[4].get_hash())
assert candidate is not None
assert candidate[1] == unfinished_blocks[4]
store.clear_candidate_blocks_below(uint32(8))
assert store.get_candidate_block(unfinished_blocks[5].get_hash()) is None
assert store.get_candidate_block(unfinished_blocks[8].get_hash()) is not None
# Test seen unfinished blocks
h_hash_1 = bytes32(token_bytes(32))
assert not store.seen_unfinished_block(h_hash_1)
assert store.seen_unfinished_block(h_hash_1)
store.clear_seen_unfinished_blocks()
assert not store.seen_unfinished_block(h_hash_1)
# Add/get unfinished block
for height, unf_block in enumerate(unfinished_blocks):
assert store.get_unfinished_block(unf_block.partial_hash) is None
store.add_unfinished_block(uint32(height), unf_block, PreValidationResult(None, uint64(123532), None))
assert store.get_unfinished_block(unf_block.partial_hash) == unf_block
store.remove_unfinished_block(unf_block.partial_hash)
assert store.get_unfinished_block(unf_block.partial_hash) is None
blocks = bt.get_consecutive_blocks(
1,
skip_slots=5,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
)
sub_slots = blocks[0].finished_sub_slots
assert len(sub_slots) == 5
assert (
store.get_finished_sub_slots(
BlockCache({}),
None,
sub_slots[0].challenge_chain.challenge_chain_end_of_slot_vdf.challenge,
)
== []
)
# Test adding non-connecting sub-slots genesis
assert store.get_sub_slot(test_constants.GENESIS_CHALLENGE) is None
assert store.get_sub_slot(sub_slots[0].challenge_chain.get_hash()) is None
assert store.get_sub_slot(sub_slots[1].challenge_chain.get_hash()) is None
assert store.new_finished_sub_slot(sub_slots[1], blockchain, None, None) is None
assert store.new_finished_sub_slot(sub_slots[2], blockchain, None, None) is None
# Test adding sub-slots after genesis
assert store.new_finished_sub_slot(sub_slots[0], blockchain, None, None) is not None
sub_slot = store.get_sub_slot(sub_slots[0].challenge_chain.get_hash())
assert sub_slot is not None
assert sub_slot[0] == sub_slots[0]
assert store.get_sub_slot(sub_slots[1].challenge_chain.get_hash()) is None
assert store.new_finished_sub_slot(sub_slots[1], blockchain, None, None) is not None
for i in range(len(sub_slots)):
assert store.new_finished_sub_slot(sub_slots[i], blockchain, None, None) is not None
slot_i = store.get_sub_slot(sub_slots[i].challenge_chain.get_hash())
assert slot_i is not None
assert slot_i[0] == sub_slots[i]
assert store.get_finished_sub_slots(BlockCache({}), None, sub_slots[-1].challenge_chain.get_hash()) == sub_slots
assert store.get_finished_sub_slots(BlockCache({}), None, std_hash(b"not a valid hash")) is None
assert (
store.get_finished_sub_slots(BlockCache({}), None, sub_slots[-2].challenge_chain.get_hash())
== sub_slots[:-1]
)
# Test adding genesis peak
await blockchain.receive_block(blocks[0])
peak = blockchain.get_peak()
peak_full_block = await blockchain.get_full_peak()
if peak.overflow:
store.new_peak(peak, peak_full_block, sub_slots[-2], sub_slots[-1], None, blockchain)
else:
store.new_peak(peak, peak_full_block, None, sub_slots[-1], None, blockchain)
assert store.get_sub_slot(sub_slots[0].challenge_chain.get_hash()) is None
assert store.get_sub_slot(sub_slots[1].challenge_chain.get_hash()) is None
assert store.get_sub_slot(sub_slots[2].challenge_chain.get_hash()) is None
if peak.overflow:
slot_3 = store.get_sub_slot(sub_slots[3].challenge_chain.get_hash())
assert slot_3 is not None
assert slot_3[0] == sub_slots[3]
else:
assert store.get_sub_slot(sub_slots[3].challenge_chain.get_hash()) is None
slot_4 = store.get_sub_slot(sub_slots[4].challenge_chain.get_hash())
assert slot_4 is not None
assert slot_4[0] == sub_slots[4]
assert (
store.get_finished_sub_slots(
blockchain,
peak,
sub_slots[-1].challenge_chain.get_hash(),
)
== []
)
# Test adding non genesis peak directly
blocks = bt.get_consecutive_blocks(
2,
skip_slots=2,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
blocks = bt.get_consecutive_blocks(
3,
block_list_input=blocks,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
for block in blocks:
await blockchain.receive_block(block)
sb = blockchain.block_record(block.header_hash)
sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots(block.header_hash)
res = store.new_peak(sb, block, sp_sub_slot, ip_sub_slot, None, blockchain)
assert res.added_eos is None
# Add reorg blocks
blocks_reorg = bt.get_consecutive_blocks(
20,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
for block in blocks_reorg:
res, _, fork_height, _ = await blockchain.receive_block(block)
if res == ReceiveBlockResult.NEW_PEAK:
if fork_height is not None and fork_height != block.height - 1:
fork_block = blockchain.block_record(blockchain.height_to_hash(fork_height))
else:
fork_block = None
sb = blockchain.block_record(block.header_hash)
sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots(block.header_hash)
res = store.new_peak(sb, block, sp_sub_slot, ip_sub_slot, fork_block, blockchain)
assert res.added_eos is None
# Add slots to the end
blocks_2 = bt.get_consecutive_blocks(
1,
block_list_input=blocks_reorg,
skip_slots=2,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
for slot in blocks_2[-1].finished_sub_slots:
store.new_finished_sub_slot(slot, blockchain, blockchain.get_peak(), await blockchain.get_full_peak())
assert store.get_sub_slot(sub_slots[3].challenge_chain.get_hash()) is None
assert store.get_sub_slot(sub_slots[4].challenge_chain.get_hash()) is None
# Test adding signage point
peak = blockchain.get_peak()
ss_start_iters = peak.ip_sub_slot_total_iters(test_constants)
for i in range(1, test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA):
sp = get_signage_point(
test_constants,
blockchain,
peak,
ss_start_iters,
uint8(i),
[],
peak.sub_slot_iters,
)
assert store.new_signage_point(uint8(i), blockchain, peak, peak.sub_slot_iters, sp)
blocks = blocks_reorg
while True:
blocks = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
res, _, fork_height, _ = await blockchain.receive_block(blocks[-1])
if res == ReceiveBlockResult.NEW_PEAK:
if fork_height is not None and fork_height != blocks[-1].height - 1:
fork_block = blockchain.block_record(blockchain.height_to_hash(fork_height))
else:
fork_block = None
sb = blockchain.block_record(blocks[-1].header_hash)
sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots(blocks[-1].header_hash)
res = store.new_peak(sb, blocks[-1], sp_sub_slot, ip_sub_slot, fork_block, blockchain)
assert res.added_eos is None
if sb.overflow and sp_sub_slot is not None:
assert sp_sub_slot != ip_sub_slot
break
peak = blockchain.get_peak()
assert peak.overflow
# Overflow peak should result in 2 finished sub slots
assert len(store.finished_sub_slots) == 2
# Add slots to the end, except for the last one, which we will use to test invalid SP
blocks_2 = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
skip_slots=3,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
for slot in blocks_2[-1].finished_sub_slots[:-1]:
store.new_finished_sub_slot(slot, blockchain, blockchain.get_peak(), await blockchain.get_full_peak())
finished_sub_slots = blocks_2[-1].finished_sub_slots
assert len(store.finished_sub_slots) == 4
# Test adding signage points for overflow blocks (sp_sub_slot)
ss_start_iters = peak.sp_sub_slot_total_iters(test_constants)
# for i in range(peak.signage_point_index, test_constants.NUM_SPS_SUB_SLOT):
# if i < peak.signage_point_index:
# continue
# latest = peak
# while latest.total_iters > peak.sp_total_iters(test_constants):
# latest = blockchain.blocks[latest.prev_hash]
# sp = get_signage_point(
# test_constants,
# blockchain.blocks,
# latest,
# ss_start_iters,
# uint8(i),
# [],
# peak.sub_slot_iters,
# )
# assert store.new_signage_point(i, blockchain.blocks, peak, peak.sub_slot_iters, sp)
# Test adding signage points for overflow blocks (ip_sub_slot)
for i in range(1, test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA):
sp = get_signage_point(
test_constants,
blockchain,
peak,
peak.ip_sub_slot_total_iters(test_constants),
uint8(i),
[],
peak.sub_slot_iters,
)
assert store.new_signage_point(uint8(i), blockchain, peak, peak.sub_slot_iters, sp)
# Test adding future signage point, a few slots forward (good)
saved_sp_hash = None
for slot_offset in range(1, len(finished_sub_slots)):
for i in range(
1,
test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA,
):
sp = get_signage_point(
test_constants,
blockchain,
peak,
peak.ip_sub_slot_total_iters(test_constants) + slot_offset * peak.sub_slot_iters,
uint8(i),
finished_sub_slots[:slot_offset],
peak.sub_slot_iters,
)
assert sp.cc_vdf is not None
saved_sp_hash = sp.cc_vdf.output.get_hash()
assert store.new_signage_point(uint8(i), blockchain, peak, peak.sub_slot_iters, sp)
# Test adding future signage point (bad)
for i in range(1, test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA):
sp = get_signage_point(
test_constants,
blockchain,
peak,
peak.ip_sub_slot_total_iters(test_constants) + len(finished_sub_slots) * peak.sub_slot_iters,
uint8(i),
finished_sub_slots[: len(finished_sub_slots)],
peak.sub_slot_iters,
)
assert not store.new_signage_point(uint8(i), blockchain, peak, peak.sub_slot_iters, sp)
# Test adding past signage point
sp = SignagePoint(
blocks[1].reward_chain_block.challenge_chain_sp_vdf,
blocks[1].challenge_chain_sp_proof,
blocks[1].reward_chain_block.reward_chain_sp_vdf,
blocks[1].reward_chain_sp_proof,
)
assert not store.new_signage_point(
blocks[1].reward_chain_block.signage_point_index,
blockchain,
peak,
blockchain.block_record(blocks[1].header_hash).sp_sub_slot_total_iters(test_constants),
sp,
)
# Get signage point by index
assert (
store.get_signage_point_by_index(
finished_sub_slots[0].challenge_chain.get_hash(),
uint8(4),
finished_sub_slots[0].reward_chain.get_hash(),
)
is not None
)
assert (
store.get_signage_point_by_index(finished_sub_slots[0].challenge_chain.get_hash(), uint8(4), std_hash(b"1"))
is None
)
# Get signage point by hash
assert store.get_signage_point(saved_sp_hash) is not None
assert store.get_signage_point(std_hash(b"2")) is None
# Test adding signage points before genesis
store.initialize_genesis_sub_slot()
assert len(store.finished_sub_slots) == 1
for i in range(1, test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA):
sp = get_signage_point(
test_constants,
BlockCache({}, {}),
None,
uint128(0),
uint8(i),
[],
peak.sub_slot_iters,
)
assert store.new_signage_point(uint8(i), blockchain, None, peak.sub_slot_iters, sp)
blocks_3 = bt.get_consecutive_blocks(
1,
skip_slots=2,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
for slot in blocks_3[-1].finished_sub_slots:
store.new_finished_sub_slot(slot, blockchain, None, None)
assert len(store.finished_sub_slots) == 3
finished_sub_slots = blocks_3[-1].finished_sub_slots
for slot_offset in range(1, len(finished_sub_slots) + 1):
for i in range(
1,
test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA,
):
sp = get_signage_point(
test_constants,
BlockCache({}, {}),
None,
slot_offset * peak.sub_slot_iters,
uint8(i),
finished_sub_slots[:slot_offset],
peak.sub_slot_iters,
)
assert store.new_signage_point(uint8(i), blockchain, None, peak.sub_slot_iters, sp)
# Test adding signage points after genesis
blocks_4 = bt.get_consecutive_blocks(
1,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
blocks_5 = bt.get_consecutive_blocks(
1,
block_list_input=blocks_4,
skip_slots=1,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
# If this is not the case, fix test to find a block that is
assert (
blocks_4[-1].reward_chain_block.signage_point_index
< test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA
)
await blockchain.receive_block(blocks_4[-1])
sb = blockchain.block_record(blocks_4[-1].header_hash)
store.new_peak(sb, blocks_4[-1], None, None, None, blockchain)
for i in range(
sb.signage_point_index + test_constants.NUM_SP_INTERVALS_EXTRA,
test_constants.NUM_SPS_SUB_SLOT,
):
if is_overflow_block(test_constants, uint8(i)):
finished_sub_slots = blocks_5[-1].finished_sub_slots
else:
finished_sub_slots = []
sp = get_signage_point(
test_constants,
blockchain,
sb,
uint128(0),
uint8(i),
finished_sub_slots,
peak.sub_slot_iters,
)
assert store.new_signage_point(uint8(i), empty_blockchain, sb, peak.sub_slot_iters, sp)
# Test future EOS cache
store.initialize_genesis_sub_slot()
blocks = bt.get_consecutive_blocks(
1,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
await blockchain.receive_block(blocks[-1])
while True:
blocks = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
await blockchain.receive_block(blocks[-1])
sb = blockchain.block_record(blocks[-1].header_hash)
if sb.first_in_sub_slot:
break
assert len(blocks) >= 2
dependant_sub_slots = blocks[-1].finished_sub_slots
peak = blockchain.get_peak()
peak_full_block = await blockchain.get_full_peak()
for block in blocks[:-2]:
sb = blockchain.block_record(block.header_hash)
sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots(block.header_hash)
peak = sb
peak_full_block = block
res = store.new_peak(sb, block, sp_sub_slot, ip_sub_slot, None, blockchain)
assert res.added_eos is None
assert store.new_finished_sub_slot(dependant_sub_slots[0], blockchain, peak, peak_full_block) is None
block = blocks[-2]
sb = blockchain.block_record(block.header_hash)
sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots(block.header_hash)
res = store.new_peak(sb, block, sp_sub_slot, ip_sub_slot, None, blockchain)
assert res.added_eos == dependant_sub_slots[0]
assert res.new_signage_points == res.new_infusion_points == []
# Test future IP cache
store.initialize_genesis_sub_slot()
blocks = bt.get_consecutive_blocks(
60,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
)
for block in blocks[:5]:
await blockchain.receive_block(block)
sb = blockchain.block_record(block.header_hash)
sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots(block.header_hash)
res = store.new_peak(sb, block, sp_sub_slot, ip_sub_slot, None, blockchain)
assert res.added_eos is None
case_0, case_1 = False, False
for i in range(5, len(blocks) - 1):
prev_block = blocks[i]
block = blocks[i + 1]
new_ip = NewInfusionPointVDF(
block.reward_chain_block.get_unfinished().get_hash(),
block.reward_chain_block.challenge_chain_ip_vdf,
block.challenge_chain_ip_proof,
block.reward_chain_block.reward_chain_ip_vdf,
block.reward_chain_ip_proof,
block.reward_chain_block.infused_challenge_chain_ip_vdf,
block.infused_challenge_chain_ip_proof,
)
store.add_to_future_ip(new_ip)
await blockchain.receive_block(prev_block)
sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots(prev_block.header_hash)
sb = blockchain.block_record(prev_block.header_hash)
res = store.new_peak(sb, prev_block, sp_sub_slot, ip_sub_slot, None, blockchain)
if len(block.finished_sub_slots) == 0:
case_0 = True
assert res.new_infusion_points == [new_ip]
else:
case_1 = True
assert res.new_infusion_points == []
found_ips: List[timelord_protocol.NewInfusionPointVDF] = []
for ss in block.finished_sub_slots:
ipvdf = store.new_finished_sub_slot(ss, blockchain, sb, prev_block)
assert ipvdf is not None
found_ips += ipvdf
assert found_ips == [new_ip]
# If flaky, increase the number of blocks created
assert case_0 and case_1
# Try to get two blocks in the same slot, such that we have
# SP, B2 SP .... SP B1
# i2 ......... i1
# Then do a reorg up to B2, removing all signage points after B2, but not before
log.warning(f"Adding blocks up to {blocks[-1]}")
for block in blocks:
await blockchain.receive_block(block)
log.warning(f"Starting loop")
while True:
log.warning("Looping")
blocks = bt.get_consecutive_blocks(1, block_list_input=blocks, skip_slots=1)
assert (await blockchain.receive_block(blocks[-1]))[0] == ReceiveBlockResult.NEW_PEAK
peak = blockchain.get_peak()
sub_slots = await blockchain.get_sp_and_ip_sub_slots(peak.header_hash)
store.new_peak(peak, blocks[-1], sub_slots[0], sub_slots[1], None, blockchain)
blocks = bt.get_consecutive_blocks(2, block_list_input=blocks, guarantee_transaction_block=True)
i3 = blocks[-3].reward_chain_block.signage_point_index
i2 = blocks[-2].reward_chain_block.signage_point_index
i1 = blocks[-1].reward_chain_block.signage_point_index
if (
len(blocks[-2].finished_sub_slots) == len(blocks[-1].finished_sub_slots) == 0
and not is_overflow_block(test_constants, signage_point_index=i2)
and not is_overflow_block(test_constants, signage_point_index=i1)
and i2 > i3 + 3
and i1 > (i2 + 3)
):
# We hit all the conditions that we want
all_sps: List[Optional[SignagePoint]] = [None] * test_constants.NUM_SPS_SUB_SLOT
def assert_sp_none(sp_index: int, is_none: bool):
sp_to_check: Optional[SignagePoint] = all_sps[sp_index]
assert sp_to_check is not None
assert sp_to_check.cc_vdf is not None
fetched = store.get_signage_point(sp_to_check.cc_vdf.output.get_hash())
assert (fetched is None) == is_none
if fetched is not None:
assert fetched == sp_to_check
for i in range(i3 + 1, test_constants.NUM_SPS_SUB_SLOT - 3):
finished_sub_slots = []
sp = get_signage_point(
test_constants,
blockchain,
peak,
uint128(peak.ip_sub_slot_total_iters(bt.constants)),
uint8(i),
finished_sub_slots,
peak.sub_slot_iters,
)
all_sps[i] = sp
assert store.new_signage_point(uint8(i), blockchain, peak, peak.sub_slot_iters, sp)
# Adding a new peak clears all SPs after that peak
assert (await blockchain.receive_block(blocks[-2]))[0] == ReceiveBlockResult.NEW_PEAK
peak = blockchain.get_peak()
sub_slots = await blockchain.get_sp_and_ip_sub_slots(peak.header_hash)
store.new_peak(peak, blocks[-2], sub_slots[0], sub_slots[1], None, blockchain)
assert_sp_none(i2, False)
assert_sp_none(i2 + 1, False)
assert_sp_none(i1, True)
assert_sp_none(i1 + 1, True)
assert_sp_none(i1 + 4, True)
for i in range(i2, test_constants.NUM_SPS_SUB_SLOT):
if is_overflow_block(test_constants, uint8(i)):
blocks_alt = bt.get_consecutive_blocks(1, block_list_input=blocks[:-1], skip_slots=1)
finished_sub_slots = blocks_alt[-1].finished_sub_slots
else:
finished_sub_slots = []
sp = get_signage_point(
test_constants,
blockchain,
peak,
uint128(peak.ip_sub_slot_total_iters(bt.constants)),
uint8(i),
finished_sub_slots,
peak.sub_slot_iters,
)
all_sps[i] = sp
assert store.new_signage_point(uint8(i), blockchain, peak, peak.sub_slot_iters, sp)
assert_sp_none(i2, False)
assert_sp_none(i2 + 1, False)
assert_sp_none(i1, False)
assert_sp_none(i1 + 1, False)
assert_sp_none(i1 + 4, False)
assert (await blockchain.receive_block(blocks[-1]))[0] == ReceiveBlockResult.NEW_PEAK
peak = blockchain.get_peak()
sub_slots = await blockchain.get_sp_and_ip_sub_slots(peak.header_hash)
# Do a reorg, which should remove everything after B2
store.new_peak(
peak,
blocks[-1],
sub_slots[0],
sub_slots[1],
(await blockchain.get_block_records_at([blocks[-2].height]))[0],
blockchain,
)
assert_sp_none(i2, False)
assert_sp_none(i2 + 1, False)
assert_sp_none(i1, True)
assert_sp_none(i1 + 1, True)
assert_sp_none(i1 + 4, True)
break
else:
for block in blocks[-2:]:
assert (await blockchain.receive_block(block))[0] == ReceiveBlockResult.NEW_PEAK
@pytest.mark.asyncio
async def test_basic_store_compact_blockchain(self, empty_blockchain):
    # Delegates to test_basic_store with its second positional flag set
    # to True -- presumably the compact/normalized-proof variant, per
    # this test's name.  TODO(review): confirm the flag's meaning
    # against test_basic_store's signature (not visible here).
    await self.test_basic_store(empty_blockchain, True)
@pytest.mark.asyncio
async def test_long_chain_slots(self, empty_blockchain_original, default_1000_blocks):
    # Feed a long pre-generated chain through a fresh FullNodeStore:
    # for every block, register each of its finished sub slots with the
    # store, add the block to the blockchain (must become the new peak),
    # then advance the store's peak to the new tip.
    blockchain = empty_blockchain_original
    store = FullNodeStore(test_constants_original)
    blocks = default_1000_blocks
    # No peak exists yet; both are passed as None for the first block.
    peak = None
    peak_full_block = None
    for block in blocks:
        for sub_slot in block.finished_sub_slots:
            # Every finished sub slot must be accepted by the store.
            assert store.new_finished_sub_slot(sub_slot, blockchain, peak, peak_full_block) is not None
        res, err, _, _ = await blockchain.receive_block(block)
        assert res == ReceiveBlockResult.NEW_PEAK
        peak = blockchain.get_peak()
        peak_full_block = await blockchain.get_full_peak()
        sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots(peak.header_hash)
        # No reorg here (fork point None): the chain only extends.
        store.new_peak(peak, peak_full_block, sp_sub_slot, ip_sub_slot, None, blockchain)
| 44.746309 | 120 | 0.62773 |
7958c9e64466c108aab2695afeb3052514f6c64c | 7,472 | py | Python | cvxpy/constraints/exponential.py | ebezzam/cvxpy | 38f76c938bf19eed7fb0a39e7fd2395c8fc42489 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cvxpy/constraints/exponential.py | ebezzam/cvxpy | 38f76c938bf19eed7fb0a39e7fd2395c8fc42489 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cvxpy/constraints/exponential.py | ebezzam/cvxpy | 38f76c938bf19eed7fb0a39e7fd2395c8fc42489 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | """
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cvxpy.settings as s
from cvxpy.error import SolverError
import cvxpy.lin_ops.lin_utils as lu
from cvxpy.lin_ops.lin_op import VARIABLE
import cvxpy.utilities.performance_utils as pu
from cvxpy.constraints.nonlinear import NonlinearConstraint
from cvxpy.constraints.utilities import format_elemwise
import numpy as np
import math
class ExpCone(NonlinearConstraint):
    """A reformulated exponential cone constraint.

    Operates elementwise on :math:`x, y, z`.

    Original cone:

    .. math::

        K = \\{(x,y,z) \\mid y > 0, ye^{x/y} <= z\\}
            \\cup \\{(x,y,z) \\mid x \\leq 0, y = 0, z \\geq 0\\}

    Reformulated cone:

    .. math::

        K = \\{(x,y,z) \\mid y, z > 0, y\\log(y) + x \\leq y\\log(z)\\}
            \\cup \\{(x,y,z) \\mid x \\leq 0, y = 0, z \\geq 0\\}

    Parameters
    ----------
    x : Variable
        x in the exponential cone.
    y : Variable
        y in the exponential cone.
    z : Variable
        z in the exponential cone.
    """

    def __init__(self, x, y, z, constr_id=None):
        self.x = x
        self.y = y
        self.z = z
        # The nonlinear base class receives the CVXOPT solver hook and
        # the constrained expressions.
        super(ExpCone, self).__init__(self._solver_hook,
                                      [self.x, self.y, self.z],
                                      constr_id)

    def __str__(self):
        return "ExpCone(%s, %s, %s)" % (self.x, self.y, self.z)

    def __repr__(self):
        return "ExpCone(%s, %s, %s)" % (self.x, self.y, self.z)

    @property
    def residual(self):
        """Distance from (x, y, z) to the cone, via a projection problem.

        Returns None when any of x, y, z has no value yet.
        """
        # TODO(akshayka): The projection should be implemented directly.
        from cvxpy import Problem, Minimize, Variable, norm2, hstack
        if self.x.value is None or self.y.value is None or self.z.value is None:
            return None
        x = Variable(self.x.shape)
        y = Variable(self.y.shape)
        z = Variable(self.z.shape)
        constr = [ExpCone(x, y, z)]
        obj = Minimize(norm2(hstack([x, y, z]) -
                             hstack([self.x.value, self.y.value, self.z.value])))
        problem = Problem(obj, constr)
        return problem.solve()

    def format(self, eq_constr, leq_constr, dims, solver):
        """Formats EXP constraints for the solver.

        Parameters
        ----------
        eq_constr : list
            A list of the equality constraints in the canonical problem.
        leq_constr : list
            A list of the inequality constraints in the canonical problem.
        dims : dict
            A dict with the dimensions of the conic constraints.
        solver : str
            The solver being called.

        Raises
        ------
        SolverError
            If the solver does not support the exponential cone.
        """
        if solver.name() == s.CVXOPT:
            eq_constr += self.__CVXOPT_format[0]
        elif solver.name() in [s.SCS, s.JULIA_OPT]:
            leq_constr += self.__SCS_format[1]
        elif solver.name() == s.ECOS:
            leq_constr += self.__ECOS_format[1]
        else:
            raise SolverError("Solver does not support exponential cone.")
        # Update dims.
        dims[s.EXP_DIM] += self.num_cones()

    @property
    def size(self):
        """The number of entries in the combined cones.
        """
        # TODO use size of dual variable(s) instead.
        return sum(self.cone_sizes())

    def num_cones(self):
        """The number of elementwise cones.
        """
        return np.prod(self.args[0].shape, dtype=int)

    def cone_sizes(self):
        """The dimensions of the exponential cones.

        Returns
        -------
        list
            A list of the sizes of the elementwise cones.
        """
        return [3]*self.num_cones()

    def is_dcp(self):
        """An exponential constraint is DCP if each argument is affine.
        """
        return all(arg.is_affine() for arg in self.args)

    def is_dgp(self):
        return False

    def canonicalize(self):
        """Canonicalizes by converting expressions to LinOps.
        """
        arg_objs = []
        arg_constr = []
        for arg in self.args:
            arg_objs.append(arg.canonical_form[0])
            # Bug fix: the original wrote ``arg_constr +
            # arg.canonical_form[1]`` and discarded the result, so the
            # arguments' canonical constraints were silently dropped.
            arg_constr += arg.canonical_form[1]
        return 0, [ExpCone(*arg_objs)] + arg_constr

    @pu.lazyprop
    def __ECOS_format(self):
        # ECOS expects the cone variables in (x, z, y) order.
        return ([], format_elemwise([self.x, self.z, self.y]))

    @pu.lazyprop
    def __SCS_format(self):
        return ([], format_elemwise([self.x, self.y, self.z]))

    @pu.lazyprop
    def __CVXOPT_format(self):
        # CVXOPT's nonlinear interface needs each argument to be a lone
        # variable; introduce equality constraints for anything else.
        constraints = []
        for i, var in enumerate(self.vars_):
            if var.type is not VARIABLE:
                lone_var = lu.create_var(var.shape)
                constraints.append(lu.create_eq(lone_var, var))
                self.vars_[i] = lone_var
        return (constraints, [])

    def _solver_hook(self, vars_=None, scaling=None):
        """A function used by CVXOPT's nonlinear solver.

        Based on f(x,y,z) = y * log(y) + x - y * log(z).

        Parameters
        ----------
        vars_: A cvxopt dense matrix with values for (x,y,z).
        scaling: A scaling for the Hessian.

        Returns
        -------
        _solver_hook() returns the constraint shape and a feasible point.
        _solver_hook(x) returns the function value and gradient at x.
        _solver_hook(x, z) returns the function value, gradient,
        and (z scaled) Hessian at x.
        """
        import cvxopt  # Not necessary unless using cvxopt solver.
        entries = int(np.prod(self.shape))
        if vars_ is None:
            # Feasible starting point: x=0, y=0.5, z=1.
            x_init = entries*[0.0]
            y_init = entries*[0.5]
            z_init = entries*[1.0]
            return entries, cvxopt.matrix(x_init + y_init + z_init)
        # Unpack vars_
        x = vars_[0:entries]
        y = vars_[entries:2*entries]
        z = vars_[2*entries:]
        # Out of domain.
        # TODO what if y == 0.0?
        if min(y) <= 0.0 or min(z) <= 0.0:
            return None
        # Evaluate the function.
        f = cvxopt.matrix(0., (entries, 1))
        for i in range(entries):
            f[i] = x[i] - y[i]*math.log(z[i]) + y[i]*math.log(y[i])
        # Compute the gradient.
        Df = cvxopt.matrix(0., (entries, 3*entries))
        for i in range(entries):
            Df[i, i] = 1.0
            Df[i, entries+i] = math.log(y[i]) - math.log(z[i]) + 1.0
            Df[i, 2*entries+i] = -y[i]/z[i]
        if scaling is None:
            return f, Df
        # Compute the Hessian, scaled per-cone by ``scaling``.
        big_H = cvxopt.spmatrix(0, [], [], size=(3*entries, 3*entries))
        for i in range(entries):
            H = cvxopt.matrix([
                [0.0, 0.0, 0.0],
                [0.0, 1.0/y[i], -1.0/z[i]],
                [0.0, -1.0/z[i], y[i]/(z[i]**2)],
            ])
            big_H[i:3*entries:entries, i:3*entries:entries] = scaling[i]*H
        return f, Df, big_H
| 32.34632 | 81 | 0.560225 |
7958cbafa0726a863a840b7dbb240d776ec36e20 | 2,873 | py | Python | alghoritms.py | juliaaz/ADS_lab1 | 0029a1b9b8c9c5877c6bd9cb2642aca157ad6c17 | [
"MIT"
] | null | null | null | alghoritms.py | juliaaz/ADS_lab1 | 0029a1b9b8c9c5877c6bd9cb2642aca157ad6c17 | [
"MIT"
] | null | null | null | alghoritms.py | juliaaz/ADS_lab1 | 0029a1b9b8c9c5877c6bd9cb2642aca157ad6c17 | [
"MIT"
] | null | null | null | """
Module which implements different sorts of an array of n elements.
"""
import time
def selection_sort(lst):
    """Sort ``lst`` in place with selection sort.

    :param lst: list to be sorted.
    :return: tuple of the sorted list and the number of element
        comparisons performed.
    """
    comparisons = 0
    size = len(lst)
    for fill in range(size):
        # Find the smallest element in the unsorted tail lst[fill:].
        smallest = fill
        for probe in range(fill + 1, size):
            comparisons += 1
            if lst[probe] < lst[smallest]:
                smallest = probe
        # Swap it to the front of the unsorted region.
        lst[fill], lst[smallest] = lst[smallest], lst[fill]
    return lst, comparisons
def insertion_sort(lst):
    """Sort ``lst`` in place with insertion sort.

    :param lst: list to be sorted.
    :return: tuple of the sorted list and the comparison counter
        (one count per inserted element plus one per shift).
    """
    comparisons = 0
    for right in range(1, len(lst)):
        pivot = lst[right]
        pos = right - 1
        comparisons += 1
        # Shift larger elements of the sorted prefix one slot rightwards.
        while pos >= 0 and lst[pos] > pivot:
            comparisons += 1
            lst[pos + 1] = lst[pos]
            pos -= 1
        lst[pos + 1] = pivot
    return lst, comparisons
def shell_sort(lst):
'''
lst -> (lst, int)
Performs sorting by shell method.
Returns sorted array and number of comparisons made.
:param lst: list to be sorted.
:return: sorted lst and number of comparisons.
'''
she_counter = 0
length = len(lst)
gap = length // 2
while gap:
for i in range(gap, length):
element = lst[i]
flag = i
she_counter += 1
while flag > gap-1 and lst[flag - gap] > element:
she_counter += 1
lst[flag] = lst[flag - gap]
flag -= gap
lst[flag] = element
gap //= 2
return lst, she_counter
def merge_sort(lst):
    """Sort ``lst`` in place with top-down merge sort.

    :param lst: list to be sorted.
    :return: tuple of the sorted list and the comparison counter
        (one count per element written during each merge).
    """
    comparisons = 0
    size = len(lst)
    if size <= 1:
        return lst, comparisons

    half = size // 2
    left, right = lst[:half], lst[half:]
    left, left_count = merge_sort(left)
    right, right_count = merge_sort(right)
    comparisons += left_count + right_count

    # Sentinels guarantee neither side runs dry during the merge.
    left.append(float('inf'))
    right.append(float('inf'))
    li = ri = 0
    for out in range(size):
        if right[ri] < left[li]:
            lst[out] = right[ri]
            ri += 1
        else:
            lst[out] = left[li]
            li += 1
        comparisons += 1
    return lst, comparisons
| 26.601852 | 66 | 0.571876 |
7958cbe36129753e4b96eaa6f9a7c106b0dd909f | 71 | py | Python | Exercicios Python/ex047.py | ClaudioSiqueira/Exercicios-Python | 128387769b34b7d42aee5c1effda16de21216e10 | [
"MIT"
] | null | null | null | Exercicios Python/ex047.py | ClaudioSiqueira/Exercicios-Python | 128387769b34b7d42aee5c1effda16de21216e10 | [
"MIT"
] | null | null | null | Exercicios Python/ex047.py | ClaudioSiqueira/Exercicios-Python | 128387769b34b7d42aee5c1effda16de21216e10 | [
"MIT"
] | null | null | null | from time import sleep
for c in range(0, 51, 2):
print(c, end=' ')
| 17.75 | 25 | 0.605634 |
7958cbf1817bf7ac193f0098a883dc351e3eea6e | 17,715 | py | Python | a_star_n_puzzle.py | eugenechantk/cs152-harnessing-ai-algorithm | 5b8ff485c49e6c91699b2d72c2d6645e7aa6367c | [
"MIT"
] | null | null | null | a_star_n_puzzle.py | eugenechantk/cs152-harnessing-ai-algorithm | 5b8ff485c49e6c91699b2d72c2d6645e7aa6367c | [
"MIT"
] | null | null | null | a_star_n_puzzle.py | eugenechantk/cs152-harnessing-ai-algorithm | 5b8ff485c49e6c91699b2d72c2d6645e7aa6367c | [
"MIT"
] | 1 | 2018-10-25T15:53:43.000Z | 2018-10-25T15:53:43.000Z | import heapq
import numpy as np
import time
def flatten(board):
    """Return ``board`` as a flat list of tile values.

    Accepts either a list of row sub-lists (flattened into one list) or
    an already-flat list of ints/floats (returned as-is, same object).

    Bug fix: the original probed ``board[1]`` with ``type(...) ==``,
    which raised IndexError for boards shorter than two entries and
    judged the whole board by one arbitrary element.

    :param board: nested or flat board representation.
    :return: flat list of tiles.
    :raises ValueError: if the elements are neither sub-lists nor numbers.
    """
    if not board:
        # Nothing to flatten.
        return board
    if isinstance(board[0], list):
        # Nested rows: concatenate them in order.
        return [tile for row in board for tile in row]
    if all(isinstance(tile, (int, float)) for tile in board):
        return board
    raise ValueError("Class 'PuzzleNode' got values that are not a sublist of ints nor a flat list of ints.")
"""
Class: PuzzleNode
Purpose: Object for each puzzle board created during search
Arg: None
Class Functions
__hash__(): return a hash value for the puzzle board to id the puzzle
__str__(): return a matrix representation of the puzzle board in string format
__eq__(others): return True if another PuzzleNode object is identical
get_moves(): return PuzzleNodes object that are the possible moves
of a puzzle
list_of_list(): transform the 1d array representation of the puzzle
into a multi-d array representation
verify_input: verify whether the puzzle is n*n, and has all the numbers
in the board
"""
class PuzzleNode():
    """One board configuration (a node) in the A* search tree.

    The board is stored flat in ``self.tiles`` (length n*n; value 0 is
    the blank tile).  ``puzzleid`` is a hash of the tile layout and is
    used to identify boards cheaply.

    NOTE(review): the ``cost`` and ``heuristic`` constructor arguments
    are accepted for call compatibility but never stored; path costs
    and heuristic values are tracked by the caller (solvePuzzle).
    """
    def __init__(self, n, values, cost, parent, heuristic):
        # puzzleid of the parent board (or None for the root)
        self.parent = parent
        # dimension of the puzzle
        self.n = n
        # Board stored as a flat list.  Bug fix: the original assigned
        # ``self.tiles = flatten(values)`` and then immediately
        # overwrote it with the raw ``values``, defeating flatten() and
        # breaking nested-list input (hash(tuple(...)) below would fail
        # on a list of lists).
        self.tiles = flatten(values)
        # Hash of the tile layout; identifies this board during search.
        self.puzzleid = hash(tuple(self.tiles))

    def __hash__(self):
        # Return the cached puzzleid, recomputing it if it was cleared.
        if self.puzzleid is None:
            self.puzzleid = hash(tuple(self.tiles))
        return self.puzzleid

    def __str__(self):
        # Render the board as an n x n grid of space-separated values.
        strings_list = [str(x) for x in self.tiles]
        # range (not the Python 2 xrange) keeps this Python 3 compatible.
        rows = [" ".join(strings_list[i:i + self.n]) for i in range(0, self.n**2, self.n)]
        return "\n".join(rows)

    def __eq__(self, other):
        # Two nodes are equal when their tile layouts match.
        return self.tiles == other.tiles

    def get_moves(self):
        """Yield a PuzzleNode for every legal single move of the blank.

        Candidates come in the fixed order right, left, down, up
        (offsets +1, -1, +n, -n); moves that would cross a board edge
        are skipped.
        """
        zeroPos = self.tiles.index(0)
        n = self.n

        def swap(zeroPos, move, n):
            temp = list(self.tiles)
            swapPos = zeroPos + move
            # Edge cases.  Floor division (//) is required: under
            # Python 3 the original ``/`` produced floats and the row
            # tests no longer matched integer row indices.
            if zeroPos % n == 0 and move == -1:
                return
            elif zeroPos % n == n - 1 and move == 1:
                return
            elif zeroPos // n == 0 and move == -n:
                return
            elif zeroPos // n == n - 1 and move == n:
                return
            else:
                # Swap tiles and wrap the new board in a PuzzleNode.
                temp[zeroPos], temp[swapPos] = temp[swapPos], temp[zeroPos]
                return PuzzleNode(self.n, temp, 0, self.puzzleid, None)

        # Build each candidate once (the original called swap() twice
        # per direction, constructing every node twice).
        for move in (1, -1, n, -n):
            candidate = swap(zeroPos, move, n)
            if candidate is not None:
                yield candidate

    def list_of_list(self):
        """Return the board as n row-lists (the inverse of flatten)."""
        return [self.tiles[i:i + self.n] for i in range(0, self.n**2, self.n)]

    def verify_input(self):
        """Validate the board; raise ValueError when it is malformed.

        Checks that 2 <= n < 128, that the board has n*n entries, and
        that it is a permutation of 0 .. n*n-1.

        :return: (err, reason, tiles) with err == 0 on success.
        :raises ValueError: with the failure reason when err == -1.
        """
        err = 0
        reason = "Input was valid"
        initial_state = self.tiles
        n = self.n
        # Check the dimension of the puzzle.
        if n < 2 or n >= 128:
            err = -1
            reason = "Puzzle size not valid"
        # Check that the puzzle has exactly n^2 tiles.
        if len(initial_state) != n * n:
            err = -1
            reason = "Puzzle size is not n^2"
        sorted_list = sorted(initial_state)
        # list(...) is required: under Python 3 a bare ``range`` object
        # never compares equal to a list, so every board was rejected.
        verified_list = list(range(n**2))
        # Compare the puzzle contents with 0 .. n^2-1.
        if sorted_list != verified_list:
            err = -1
            reason = "Puzzle does not contain all numbers from 0 to n^2-1"
        # Abort when the board is invalid.
        if err == -1:
            raise ValueError(reason)
        return err, reason, initial_state
"""
Function isSolvable
Purpose: Determine whether a given board is solvable based on
inversion rule
Arg:
board: (list) a list_of_list representation of the board configuration
Return:
err: (int) -2 if the board is unsolvable; 0 if the board is solvable
reason: (str) the reason for the error code
"""
def isSolvable(board):
    """Decide solvability of a flat board via the inversion-count rule.

    :param board: flat list of tiles (0 = blank) for an n x n puzzle.
    :return: (0, reason) when the puzzle is solvable.
    :raises ValueError: (err code -2) when it is unsolvable.

    NOTE(review): the comments speak of rows "counting from bottom",
    but ``zeroPos // n`` counts rows from the top; the parity branches
    below encode whatever convention the original author tested.
    """
    inversions = 0
    n = int(len(board)**0.5)
    zeroPos = board.index(0)
    # Count pairs that appear out of order, ignoring the blank tile.
    for i in range(len(board)):
        for j in range(i + 1, len(board)):
            if board[i] > board[j] and board[j] != 0:
                inversions += 1
    if n % 2 == 0:  # grid width is even
        # Floor division (//) is required: under Python 3 the original
        # ``/`` yielded a float row index and the parity test misfired.
        if (zeroPos // n) % 2 == 0:  # 0 tile on even row counting from bottom
            if inversions % 2 == 1:  # odd inversion count is solvable
                err = 0
                reason = "The puzzle is solvable"
            else:
                err = -2
                reason = "The puzzle's width is even, 0 tile on even row counting from bottom, inversions is even. Puzzle unsolvable"
        else:  # 0 tile on odd row counting from bottom
            if inversions % 2 == 0:  # even inversion count is solvable
                err = 0
                reason = "The puzzle is solvable"
            else:
                err = -2
                reason = "The puzzle's width is even, 0 tile on odd row counting from bottom, inversions is odd. Puzzle unsolvable"
    else:  # grid width is odd
        if inversions % 2 == 0:
            err = 0
            reason = "The puzzle is solvable"
        else:
            err = -2
            reason = "The puzzle's width is odd, and the inversions is odd. Puzzle unsolvable"
    if err == -2:
        raise ValueError(reason)
    return err, reason
"""
Function: solvePuzzle
Purpose: Using A* search with heuristic to solve a n^n puzzle
Arg:
n: (int) dimension of the puzzle
state: (list) the initial puzzle board
heuristic: (function) the heuristic function used in the A* search
prnt: (boolean) whether or not to print the full solution
Return:
steps: (int) number of search steps before solving the puzzle
frontierSize: (int) largest frontier size during search
err: (int) 0 means no error; invalid or unsolvable puzzles raise
         ValueError (error codes -1 and -2) before the search runs
run_time: (time) the time needed to solve the puzzle
"""
def solvePuzzle (n, state, heuristic, prnt=False):
    """Run A* from ``state`` to the solved board (0 .. n*n-1 in order).

    n: board dimension; state: initial board (flat or list-of-lists);
    heuristic: callable taking a list-of-lists board and returning an
    estimated cost-to-go; prnt: when True, print progress.

    Returns (steps, frontierSize, err, run_time).  Invalid or
    unsolvable boards raise ValueError via verify_input / isSolvable.

    NOTE(review): this module uses Python 2 ``print`` statements.
    NOTE(review): ``heuristic_cost`` is never written to, so the
    "cache" below never hits and the heuristic is recomputed on every
    relaxation.  NOTE(review): the heap holds (cost, PuzzleNode)
    tuples and PuzzleNode defines no __lt__, so equal-priority pushes
    would raise TypeError under Python 3 -- confirm intended Python 2.
    """
    start_time = time.time()
    run_time = 0.0
    queue = [] #priority queue to determine the least costly node to search
    total_cost = {} #total cost of the shortest path to each board
    heuristic_cost = {} #cache of previous heuristic cost of boards (never populated)
    visited = {} #the puzzle boards expanded and searched
    steps_to_sol = [] #detailed steps towards solution (unused)
    frontierSize = 0 #largest frontier size of the search tree
    steps = -1 #number of search steps taken
    tiles = flatten(state) #1d representation of the puzzle
    #Defining current state and goal state
    start = PuzzleNode(n, tiles, 0, None, heuristic)
    goal = PuzzleNode(n, range(n**2),100,None,heuristic)
    #verify whether the intial puzzle board is valid (raises on error)
    err, reason, initial_state = start.verify_input()
    #using isSolvable() to check whether the initial puzzle is solvable
    if err == 0:
        err, reason = isSolvable(start.tiles)
    unsolved = True
    #Initializing heap and total cost
    heapq.heappush(queue,(0,start))
    total_cost[start] = 0
    if prnt:
        if heuristic == manhattanDistance:
            print "Solving using Manhattan Distance...\n"
        elif heuristic == misplacedTiles:
            print "Solving using Misplaced Tiles...\n"
        print "Start solving puzzle from:\n"
        print "{}\n".format(start.__str__())
    #traverse through all the candidates until there is none
    while unsolved:
        steps += 1
        #Select the least costly node to expand using priority queue
        cost, current = heapq.heappop(queue)
        current_cost = total_cost[current]
        #Put the searched puzzle board into the visited store
        visited[current] = current
        #When the current board matches with the goal board
        if current.tiles == goal.tiles:
            unsolved = False
            if prnt:
                print "Puzzle Solved!\n"
                print "Initial Puzzle Board:\n"
                print "{}\n".format(start.__str__())
                print "Final Puzzle Board:\n"
                print "{}\n".format(current.__str__())
            run_time = time.time()-start_time
            break
        if prnt:
            print "Currently inspecting...\n"
            print "{}\n".format(current.__str__())
        #Evaluate every candidate move of the blank tile
        candidates = current.get_moves()
        for move in candidates:
            """For debugging
            print "Inspecting Candidates...\n"
            print "Evaluating this candidate:\n{}".format(move)
            print "Steps now: {}\n".format(steps)
            """
            #Relax: only (re)queue a board when this path improves it
            if move not in total_cost or total_cost[move] > current_cost + 1:
                total_cost[move] = current_cost + 1
                #Add the unaccounted heuristic cost into the cache
                if move not in heuristic_cost:
                    #update the total cost of the move (g + h)
                    total_cost[move] += heuristic(move.list_of_list())
                #Push the path back to the priority queue
                heapq.heappush(queue,(total_cost[move],move))
                #Update the largest frontier size seen so far
                frontierSize = max(frontierSize,len(queue))
    if prnt:
        #printing the number of steps and frontier size of the solution
        print "Number of steps:", steps
        print "Frontier size:", frontierSize
        print "Error: {} ({})".format(err,reason)
        print "Runtime: {}\n".format(run_time)
        #printing all the boards expanded during the search
        print "Steps to solve the puzzle (in reverse order)..."
        for searched_steps in visited:
            print "{};".format(visited[searched_steps].list_of_list())
    return steps, frontierSize, err, run_time
"""
Function: manhattanDistance
Purpose: One of the heuristics to solve the N-puzzle problem.
Calculate the manhattan distance of any given board
(the number of moves needed to transform any given board
to a complete board)
Arg:
board: (list) a list-of-lists representation of the puzzle board
n: (int) the dimension of the board
Return:
manDis: (int) the total manhattan distance of a given puzzle board
**Auxiliary function: manhattanDistance_per_tile(tiles,i,n)
Purpose: calculate the manhattan distance of a given tile in the board
Arg:
tiles: (int) the numeric value of the tile
i: (int) the position of the tile (array index of the board array)
n: (int) dimension of the given board
Return:
steps: (int) manhattan distance of the given tile in the given puzzle
"""
def manhattanDistance(board):
    """Total Manhattan distance of ``board`` from the solved layout.

    :param board: list-of-lists representation of the puzzle.
    :return: sum of per-tile Manhattan distances.

    Bug fix: the original looped ``for i in tiles`` -- iterating tile
    *values* and using them as indices -- which summed to the correct
    total only because a valid board is a permutation of 0..n^2-1.
    Iterating positions makes the intent explicit and is robust to
    malformed boards.

    NOTE(review): the blank (0) tile's distance is included in the sum,
    which makes the heuristic slightly inadmissible for A*.
    """
    n = len(board[0])
    tiles = flatten(board)
    manDis = 0
    # Sum the distance of the tile found at every position.
    for pos in range(len(tiles)):
        manDis += manhattanDistance_per_tile(tiles[pos], pos, n)
    return manDis
def manhattanDistance_per_tile(tiles, i, n):
    """Manhattan distance for one tile.

    :param tiles: the tile's value, i.e. its goal index.
    :param i: the tile's current index in the flat board.
    :param n: dimension of the board.
    :return: number of row/column steps from index ``i`` to the goal.
    """
    goalPos = tiles
    currentPos = i
    steps = 0
    # Walk the tile one row/column at a time, counting each move.
    while currentPos != goalPos:
        # Floor division (//) is essential: under Python 3 the original
        # ``/`` compared float quotients, so two positions on the same
        # row looked like different rows and the walk went astray.
        if currentPos // n != goalPos // n:
            # Wrong row: step one row towards the goal.
            if currentPos > goalPos:
                currentPos -= n
            else:
                currentPos += n
            steps += 1
        else:
            # Same row: step one column towards the goal.
            if currentPos > goalPos:
                currentPos -= 1
            else:
                currentPos += 1
            steps += 1
    return steps
"""
Function: misplaceTiles
Purpose: One of the heuristics for the N-puzzle problem.
Calculate the number of misplaced tiles in a given puzzle board
Arg:
board: (list) a list-of-lists representation of a given puzzle board
Return:
misplace: (int) number of misplaced tiles in the given puzzle board
"""
def misplacedTiles(board):
    """Number of tiles that are not in their goal position.

    :param board: list-of-lists representation of the puzzle.
    :return: count of positions where tile value != position index.

    Bug fix: the original iterated tile *values* as indices
    (``for i in tiles: if tiles[i] != i``), which gave the right count
    only because a valid board is a permutation.  Enumerating positions
    states the intent directly.

    NOTE(review): the blank (0) tile is counted when out of place,
    which makes the heuristic slightly inadmissible for A*.
    """
    tiles = flatten(board)
    misplace = 0
    for pos, tile in enumerate(tiles):
        if tile != pos:
            misplace += 1
    return misplace
"""
Function: linearConflict
Purpose: Calculate the Manhattan Distance the board, accounting for steps
needed to bypass linear conflicts
Arg:
board: (list) the board given to calculate Manhattan Distance
Return:
total_moves: (int) The Manhattan Distance, accounting for steps
needed to bypass linear conflicts
Auxiliary Function: linearConflict_per_tile(tiles,i,n)
Arg: tiles - the puzzle board; i: the position of the tile currently
examining; n: dimension of the board
Return: Manhattan Distance of the individual tile (accounting for
linear conflict)
"""
def linearConflict(board):
    """Manhattan distance of ``board`` plus linear-conflict penalties.

    :param board: list-of-lists representation of the puzzle.
    :return: summed per-tile moves including conflict penalties.

    Python 3 fix: ``xrange`` replaced by ``range`` (identical
    iteration behaviour; ``xrange`` does not exist in Python 3).
    """
    n = len(board[0])
    tiles = flatten(board)
    total_moves = 0
    # Sum the per-tile distance (plus any conflict penalty) over positions.
    for i in range(len(tiles)):
        total_moves += linearConflict_per_tile(tiles, i, n)
    return total_moves
def linearConflict_per_tile(tiles, i, n):
    """Manhattan distance of the tile at index ``i`` with a 2-move
    penalty added whenever it is in linear conflict with its right-hand
    neighbour (both belong on the same goal row but are swapped).

    :param tiles: the flat board (not mutated).
    :param i: index of the tile to evaluate.
    :param n: board dimension.
    :return: moves for this tile, including conflict penalties.

    Bug fix: the loop guard originally read ``tiles != 0`` -- comparing
    the whole *list* to 0, which is always true -- so the blank tile
    was never skipped as intended.  It now tests the tile's value.
    Floor division (//) replaces ``/`` throughout for Python 3.
    """
    goalPos = tiles[i]
    currentPos = i
    steps = 0
    # Skip the blank (value 0); walk any other misplaced tile home.
    while currentPos != goalPos and goalPos != 0:
        if currentPos // n != goalPos // n:
            # Wrong row: step one row towards the goal.
            if currentPos > goalPos:
                currentPos -= n
            else:
                currentPos += n
            steps += 1
        else:
            # Same row: step one column towards the goal.
            if currentPos > goalPos:
                currentPos -= 1
            else:
                currentPos += 1
            steps += 1
        # Inspect conflict with the right-hand neighbour: add 2 when
        # both tiles share a goal row but stand in each other's way.
        currentRow = currentPos // n
        nextRow = (currentPos + 1) // n
        if currentRow == nextRow and i % n != n - 1:
            currentGoalRow = goalPos // n
            nextGoalRow = tiles[i + 1] // n
            if currentGoalRow == nextGoalRow and tiles[currentPos] > tiles[currentPos + 1]:
                steps += 2
    return steps
"""
Function: nMaxSwap
Purpose: Calculate the number of direct swaps with 0 tile to solve
the n-puzzle
Arg:
board: (list) a list-of-list representation of the board
Return:
swaps: (int) number of direct swaps needed to solve the given puzzle
"""
def nMaxSwap(board):
    """Number of direct swaps with the blank needed to solve ``board``.

    Repeatedly swaps the blank with the tile that belongs at the
    blank's current position; when the blank is already home, it is
    swapped with the first still-unsolved tile.

    :param board: list-of-lists (or flat) puzzle representation.
    :return: total number of swaps performed.

    Bug fixes: (1) the loop compared ``tiles != range(n**2)`` -- under
    Python 3 a list never equals a range object, so the loop never
    terminated; the goal is now materialised as a list once.  (2)
    flatten() returns the *same* list object for flat input, so the
    original mutated the caller's board; we now work on a copy.
    """
    n = len(board[0])
    tiles = list(flatten(board))
    swaps = 0
    solved = [0] * len(tiles)
    goal = list(range(n**2))
    while tiles != goal:
        zeroPos = tiles.index(0)
        if zeroPos != 0:
            # Swap the blank with the tile whose home is the blank's slot.
            swapPos = tiles.index(zeroPos)
            tiles[zeroPos], tiles[swapPos] = tiles[swapPos], tiles[zeroPos]
            solved[zeroPos] = 1
            swaps += 1
        else:
            # Blank is home: swap it with the first unsolved tile.
            count = 1
            while solved[count] == 1 or tiles[count] == count:
                count += 1
            swapPos = count
            tiles[zeroPos], tiles[swapPos] = tiles[swapPos], tiles[zeroPos]
            swaps += 1
    return swaps
#heuristics as a list of functions
heuristics = [manhattanDistance,misplacedTiles,nMaxSwap,linearConflict]
#Used to compare different heuristics
def test_heuristic(pset):
    """Solve every board in ``pset`` with both heuristics and print a
    side-by-side comparison of steps, frontier size and runtime.

    heuristics[0] is manhattanDistance and heuristics[1] is
    misplacedTiles (see the ``heuristics`` list defined above).
    NOTE(review): Python 2 ``print`` statements throughout.
    """
    for boards in pset:
        print "Solving this board: {}\n".format(boards)
        #len(boards[0]) is the board dimension n (boards are row-lists)
        stepsMan, frontierSizeMan, errMan, runtimeMan = solvePuzzle(len(boards[0]),boards,heuristics[0])
        stepsMis, frontierSizeMis, errMis, runtimeMis = solvePuzzle(len(boards[0]),boards,heuristics[1])
        print "\t Manhattan Distance vs Misplaced Tiles "
        print "Steps: \t {} \t {}".format(stepsMan,stepsMis)
        print "Frontier size: \t {} \t {}".format(frontierSizeMan,frontierSizeMis)
        print "Runtime (sec): \t {0:.3f} \t {1:.3f}".format(runtimeMan,runtimeMis)
        print "\n=======================================================\n"
test1 = [[5,7,6],[2,4,3],[8,1,0]]
test2 = [[7,0,8],[4,6,1],[5,3,2]]
test3 = [[2,3,7],[1,8,0],[6,5,4]]
unsolvable_test1 = [[2,1,0],[3,4,5],[6,7,8]]
pset = [test1,unsolvable_test1]
test_heuristic(pset) | 35.43 | 135 | 0.606266 |
7958cc9cda9df94ef07f1e1d1ba36279762d1f67 | 12,387 | py | Python | eureka/S5_lightcurve_fitting/plots_s5.py | astrojake/Eureka | 57cb0f1a576dfbbef9ba616b7a13a28c42efb876 | [
"MIT"
] | null | null | null | eureka/S5_lightcurve_fitting/plots_s5.py | astrojake/Eureka | 57cb0f1a576dfbbef9ba616b7a13a28c42efb876 | [
"MIT"
] | null | null | null | eureka/S5_lightcurve_fitting/plots_s5.py | astrojake/Eureka | 57cb0f1a576dfbbef9ba616b7a13a28c42efb876 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import corner
from scipy import stats
from copy import deepcopy
from .likelihood import computeRMS
from .utils import COLORS
def plot_fit(lc, model, meta, fitter, isTitle=True):
    """Plot the fitted model over the data. (Fig 5100)

    Draws, for each fitted channel, a three-panel figure: raw flux with
    the full model, systematics-corrected flux with the physical model,
    and residuals in ppm.  Each figure is saved under
    ``meta.outputdir + 'figs/'``.

    Parameters
    ----------
    lc: eureka.S5_lightcurve_fitting.lightcurve.LightCurve
        The lightcurve data object
    model: eureka.S5_lightcurve_fitting.models.CompositeModel
        The fitted composite model
    meta: MetaClass
        The metadata object
    fitter: str
        The name of the fitter (for plot filename)
    isTitle: bool
        Whether to put a title on the top panel (default True)

    Returns
    -------
    None

    Notes
    -----
    History:

    - December 29, 2021 Taylor Bell
        Moved plotting code to a separate function.
    - January 7-22, 2022 Megan Mansfield
        Adding ability to do a single shared fit across all channels
    - February 28-March 1, 2022 Caroline Piaulet
        Adding scatter_ppm parameter
    """
    if type(fitter)!=str:
        raise ValueError('Expected type str for fitter, instead received a {}'.format(type(fitter)))

    # Evaluate the composite model once; per-channel slices are taken below.
    model_sys_full = model.syseval()
    model_phys_full, new_time = model.physeval(interp=meta.interp)
    model_lc = model.eval()

    for i, channel in enumerate(lc.fitted_channels):
        flux = np.ma.MaskedArray.copy(lc.flux)
        # Prefer the fitted (possibly rescaled) uncertainties when present.
        if "unc_fit" in lc.__dict__.keys():
            unc = deepcopy(np.array(lc.unc_fit))
        else:
            unc = np.ma.MaskedArray.copy(lc.unc)
        # NOTE(review): this rebinds the name ``model`` (the function
        # argument) to the evaluated light-curve array inside the loop;
        # the composite model object is unreachable afterwards.
        model = np.copy(model_lc)
        model_sys = model_sys_full
        model_phys = model_phys_full
        color = lc.colors[i]

        if lc.share:
            # Shared fits concatenate all channels; slice out this one.
            flux = flux[channel*len(lc.time):(channel+1)*len(lc.time)]
            unc = unc[channel*len(lc.time):(channel+1)*len(lc.time)]
            model = model[channel*len(lc.time):(channel+1)*len(lc.time)]
            model_sys = model_sys[channel*len(lc.time):(channel+1)*len(lc.time)]
            model_phys = model_phys[channel*len(new_time):(channel+1)*len(new_time)]

        residuals = flux - model
        # NOTE(review): the figure number is built from str(0), not the
        # channel, so every channel reuses one figure (cleared below);
        # only the saved filename is per-channel.  Confirm intentional.
        fig = plt.figure(int('51{}'.format(str(0).zfill(len(str(lc.nchannel))))), figsize=(8, 6))
        plt.clf()
        ax = fig.subplots(3,1)
        # Panel 1: raw flux with the full (systematics x physical) model.
        ax[0].errorbar(lc.time, flux, yerr=unc, fmt='.', color='w', ecolor=color, mec=color)
        ax[0].plot(lc.time, model, '.', ls='', ms=2, color='0.3', zorder = 10)
        if isTitle:
            ax[0].set_title(f'{meta.eventlabel} - Channel {channel} - {fitter}')
        ax[0].set_ylabel('Normalized Flux', size=14)
        # Panel 2: systematics-removed flux with the physical model.
        ax[1].errorbar(lc.time, flux/model_sys, yerr=unc, fmt='.', color='w', ecolor=color, mec=color)
        ax[1].plot(new_time, model_phys, color='0.3', zorder = 10)
        ax[1].set_ylabel('Calibrated Flux', size=14)
        # Panel 3: residuals in parts per million.
        ax[2].errorbar(lc.time, residuals*1e6, yerr=unc*1e6, fmt='.', color='w', ecolor=color, mec=color)
        ax[2].plot(lc.time, np.zeros_like(lc.time), color='0.3', zorder=10)
        ax[2].set_ylabel('Residuals (ppm)', size=14)
        ax[2].set_xlabel(str(lc.time_units), size=14)

        fname = 'figs/fig51{}_lc_{}.png'.format(str(channel).zfill(len(str(lc.nchannel))), fitter)
        fig.savefig(meta.outputdir+fname, bbox_inches='tight', dpi=300)
        if meta.hide_plots:
            plt.close()
        else:
            plt.pause(0.2)

    return
def plot_rms(lc, model, meta, fitter):
    """Plot an Allan plot to look for red noise. (Fig 5200)
    Parameters
    ----------
    lc: eureka.S5_lightcurve_fitting.lightcurve.LightCurve
        The lightcurve data object
    model: eureka.S5_lightcurve_fitting.models.CompositeModel
        The fitted composite model
    meta: MetaClass
        The metadata object
    fitter: str
        The name of the fitter (for plot filename)
    Returns
    -------
    None
    Notes
    -----
    History:
    - December 29, 2021 Taylor Bell
        Moved plotting code to a separate function.
    - January 7-22, 2022 Megan Mansfield
        Adding ability to do a single shared fit across all channels
    """
    if type(fitter)!=str:
        raise ValueError('Expected type str for fitter, instead received a {}'.format(type(fitter)))
    time = lc.time
    model_lc = model.eval()
    for channel in lc.fitted_channels:
        flux = np.copy(lc.flux)
        model = np.copy(model_lc)
        if lc.share:
            # Shared fits concatenate all channels into one array; slice out
            # this channel's contiguous segment.
            flux = flux[channel*len(lc.time):(channel+1)*len(lc.time)]
            model = model[channel*len(lc.time):(channel+1)*len(lc.time)]
        residuals = flux - model
        # Time-order the residuals before binning them for the Allan plot.
        residuals = residuals[np.argsort(time)]
        rms, stderr, binsz = computeRMS(residuals, binstep=1)
        normfactor = 1e-6  # plot RMS in ppm
        plt.rcParams.update({'legend.fontsize': 11}) # FINDME: this should not be done here but where the rcparams are defined for Eureka
        plt.figure(int('52{}'.format(str(0).zfill(len(str(lc.nchannel))))), figsize=(8, 6))
        plt.clf()
        plt.suptitle(' Correlated Noise', size=16)
        plt.loglog(binsz, rms / normfactor, color='black', lw=1.5, label='Fit RMS', zorder=3) # our noise
        plt.loglog(binsz, stderr / normfactor, color='red', ls='-', lw=2, label='Std. Err.', zorder=1) # expected noise
        plt.xlim(0.95, binsz[-1] * 2)
        plt.ylim(stderr[-1] / normfactor / 2., stderr[0] / normfactor * 2.)
        plt.xlabel("Bin Size", fontsize=14)
        plt.ylabel("RMS (ppm)", fontsize=14)
        plt.xticks(size=12)
        plt.yticks(size=12)
        plt.legend()
        fname = 'figs/fig52{}_'.format(str(channel).zfill(len(str(lc.nchannel))))+'allanplot_'+fitter+'.png'
        plt.savefig(meta.outputdir+fname, bbox_inches='tight', dpi=300)
        if meta.hide_plots:
            plt.close()
        else:
            plt.pause(0.2)
    return
def plot_corner(samples, lc, meta, freenames, fitter):
    """Make a corner plot of the posterior samples. (Fig 5300)

    Parameters
    ----------
    samples: ndarray
        The samples produced by the sampling algorithm
    lc: eureka.S5_lightcurve_fitting.lightcurve.LightCurve
        The lightcurve data object
    meta: MetaClass
        The metadata object
    freenames: iterable
        The names of the fitted parameters
    fitter: str
        The name of the fitter (for plot filename)

    Returns
    -------
    None
    """
    # Zero-pad channel numbers so filenames sort correctly for many channels.
    ndigits = len(str(lc.nchannel))
    fig = plt.figure(int('53{}'.format(str(0).zfill(ndigits))), figsize=(8, 6))
    fig = corner.corner(
        samples,
        fig=fig,
        show_titles=True,
        quantiles=[0.16, 0.5, 0.84],
        title_fmt='.4',
        labels=freenames,
    )
    fname = 'figs/fig53{}_corner_{}.png'.format(str(lc.channel).zfill(ndigits), fitter)
    fig.savefig(meta.outputdir+fname, bbox_inches='tight', pad_inches=0.05, dpi=250)
    if meta.hide_plots:
        plt.close()
    else:
        plt.pause(0.2)
    return
def plot_chain(samples, lc, meta, freenames, fitter='emcee', burnin=False, nburn=0, nrows=3, ncols=4, nthin=1):
    """Plot the evolution of the chain to look for temporal trends (Fig 5400)
    Parameters
    ----------
    samples: ndarray
        The samples produced by the sampling algorithm
    lc: eureka.S5_lightcurve_fitting.lightcurve.LightCurve
        The lightcurve data object
    freenames: iterable
        The names of the fitted parameters
    meta: MetaClass
        The metadata object
    fitter: str
        The name of the fitter (for plot filename)
    burnin: bool
        Whether or not the samples include the burnin phase
    nburn: int
        The number of burn-in steps that are discarded later
    nrows: int
        The number of rows to make per figure
    ncols: int
        The number of columns to make per figure
    nthin: int
        If >1, the plot will use every nthin point to help speed up computation and reduce clutter on the plot.
    Returns
    -------
    None
    Notes
    -----
    History:
    - December 29, 2021 Taylor Bell
        Moved plotting code to a separate function.
    """
    # One subplot per free parameter; spill over onto extra figures if needed.
    nsubplots = nrows*ncols
    nplots = int(np.ceil(len(freenames)/nsubplots))
    k = 0  # index of the next free parameter to plot
    for plot_number in range(nplots):
        fig, axes = plt.subplots(nrows, ncols, num=int('54{}'.format(str(0).zfill(len(str(lc.nchannel))))), sharex=True, figsize=(6*ncols, 4*nrows))
        for j in range(ncols):
            for i in range(nrows):
                if k >= samples.shape[2]:
                    # More panels than parameters; blank the leftover axes.
                    axes[i][j].set_axis_off()
                    continue
                # samples is (step, walker, parameter); thin along the step axis.
                vals = samples[::nthin, :, k]
                xvals = np.arange(samples.shape[0])[::nthin]
                # 1/2/3-sigma percentile bands across walkers at each step.
                n3sig, n2sig, n1sig, med, p1sig, p2sig, p3sig = np.percentile(vals, [0.15,2.5,16,50,84,97.5,99.85], axis=1)
                axes[i][j].fill_between(xvals, n3sig, p3sig, alpha=0.2, label=r'3$\sigma$')
                axes[i][j].fill_between(xvals, n2sig, p2sig, alpha=0.2, label=r'2$\sigma$')
                axes[i][j].fill_between(xvals, n1sig, p1sig, alpha=0.2, label=r'1$\sigma$')
                axes[i][j].plot(xvals, med, label='Median')
                axes[i][j].set_ylabel(freenames[k])
                axes[i][j].set_xlim(0, samples.shape[0]-1)
                for arr in [n3sig, n2sig, n1sig, med, p1sig, p2sig, p3sig]:
                    # Add some horizontal lines to make movement in walker population more obvious
                    axes[i][j].axhline(arr[0], ls='dotted', c='k', lw=1)
                if burnin and nburn>0:
                    axes[i][j].axvline(nburn, ls='--', c='k', label='End of Burn-In')
                # Show one legend per figure: on the right-middle panel, or on
                # the very last parameter's panel.
                if (j==(ncols-1) and i==(nrows//2)) or (k == samples.shape[2]-1):
                    axes[i][j].legend(loc=6, bbox_to_anchor=(1.01,0.5))
                k += 1
        fig.tight_layout(h_pad=0.0)
        fname = 'figs/fig54{}'.format(str(lc.channel).zfill(len(str(lc.nchannel))))
        if burnin:
            fname += '_burninchain'
        else:
            fname += '_chain'
        fname += '_{}'.format(fitter)
        if nplots>1:
            fname += '_plot{}of{}'.format(plot_number+1,nplots)
        fname += '.png'
        fig.savefig(meta.outputdir+fname, bbox_inches='tight', pad_inches=0.05, dpi=250)
        if meta.hide_plots:
            plt.close()
        else:
            plt.pause(0.2)
    return
def plot_res_distr(lc, model, meta, fitter):
    """Plot the normalized distribution of residuals + a Gaussian. (Fig 5500)
    Parameters
    ----------
    lc: eureka.S5_lightcurve_fitting.lightcurve.LightCurve
        The lightcurve data object
    model: eureka.S5_lightcurve_fitting.models.CompositeModel
        The fitted composite model
    meta: MetaClass
        The metadata object
    fitter: str
        The name of the fitter (for plot filename)
    Returns
    -------
    None
    Notes
    -----
    History:
    - February 18, 2022 Caroline Piaulet
        Created function
    """
    if type(fitter)!=str:
        raise ValueError('Expected type str for fitter, instead received a {}'.format(type(fitter)))
    time = lc.time
    model_lc = model.eval()
    # NOTE(review): a single figure is created here and reused for every channel
    # without plt.clf(); multi-channel fits overplot each other — confirm intended.
    plt.figure(int('55{}'.format(str(0).zfill(len(str(lc.nchannel))))), figsize=(8, 6))
    for channel in lc.fitted_channels:
        flux = np.ma.MaskedArray.copy(lc.flux)
        if "unc_fit" in lc.__dict__.keys():
            # Prefer the fitted (possibly inflated) uncertainties when available.
            unc = np.copy(np.array(lc.unc_fit))
        else:
            unc = np.ma.MaskedArray.copy(lc.unc)
        model = np.copy(model_lc)
        if lc.share:
            # Shared fits concatenate all channels; slice out this channel's segment.
            flux = flux[channel*len(lc.time):(channel+1)*len(lc.time)]
            unc = unc[channel*len(lc.time):(channel+1)*len(lc.time)]
            model = model[channel*len(lc.time):(channel+1)*len(lc.time)]
        residuals = flux - model
        hist_vals = residuals/unc
        hist_vals[~np.isfinite(hist_vals)] = np.nan # Mask out any infinities
        n, bins, patches = plt.hist(hist_vals,alpha=0.5,color='b',edgecolor='b',lw=1)
        x=np.linspace(-4.,4.,200)
        px=stats.norm.pdf(x,loc=0,scale=1)
        # Overlay a standard normal scaled by bin width and sample count so it
        # is comparable to the (unnormalized) histogram counts.
        plt.plot(x,px*(bins[1]-bins[0])*len(residuals),'k-',lw=2)
        plt.xlabel("Residuals/scatter", fontsize=14)
        fname = 'figs/fig55{}_'.format(str(channel).zfill(len(str(lc.nchannel))))+'res_distri_'+fitter+'.png'
        plt.savefig(meta.outputdir+fname, bbox_inches='tight', dpi=300)
        if meta.hide_plots:
            plt.close()
        else:
            plt.pause(0.2)
    return
| 35.800578 | 148 | 0.602325 |
7958ccefc2462f91a6d124ffb52d09065c97b19e | 6,342 | py | Python | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/advpls_5dde8265ff1ecdeb96de20b298f80538.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/advpls_5dde8265ff1ecdeb96de20b298f80538.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/advpls_5dde8265ff1ecdeb96de20b298f80538.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class AdVpls(Base):
    """Helps to configure the attributes for the AD VPLS ranges.
    The AdVpls class encapsulates a list of adVpls resources that are managed by the system.
    A list of resources can be retrieved from the server using the AdVpls.find() method.
    """
    # Generated REST-binding class: all state lives in Base, so no instance dict.
    __slots__ = ()
    # Server-side resource name used when building hrefs for this node.
    _SDM_NAME = 'adVpls'
    # Maps the Python property names below to the server-side attribute names.
    _SDM_ATT_MAP = {
        'NeighborAddress': 'neighborAddress',
        'NextHopAddress': 'nextHopAddress',
        'RemotePeAddress': 'remotePeAddress',
        'RemoteVplsId': 'remoteVplsId',
        'RemoteVsiId': 'remoteVsiId',
        'RouteDistinguisher': 'routeDistinguisher',
        'RouteTarget': 'routeTarget',
        'SupportedLocally': 'supportedLocally',
    }
    def __init__(self, parent):
        super(AdVpls, self).__init__(parent)
    @property
    def NeighborAddress(self):
        """
        Returns
        -------
        - str: (Read Only) The descriptive identifier for the BGP neighbor.
        """
        return self._get_attribute(self._SDM_ATT_MAP['NeighborAddress'])
    @property
    def NextHopAddress(self):
        """
        Returns
        -------
        - str: (Read Only) A 4-octet IP address which indicates the next hop.
        """
        return self._get_attribute(self._SDM_ATT_MAP['NextHopAddress'])
    @property
    def RemotePeAddress(self):
        """
        Returns
        -------
        - str: (Read Only) The descriptive identifier for the remote PE.
        """
        return self._get_attribute(self._SDM_ATT_MAP['RemotePeAddress'])
    @property
    def RemoteVplsId(self):
        """
        Returns
        -------
        - str: (Read Only) The remote VPLS ID indicated by an IP or AS.
        """
        return self._get_attribute(self._SDM_ATT_MAP['RemoteVplsId'])
    @property
    def RemoteVsiId(self):
        """
        Returns
        -------
        - number: (Read Only) The remote VSI Id indicated by 4 bytes unsigned number.
        """
        return self._get_attribute(self._SDM_ATT_MAP['RemoteVsiId'])
    @property
    def RouteDistinguisher(self):
        """
        Returns
        -------
        - str: (Read Only) The route distinguisher indicated by the IP or AS number.
        """
        return self._get_attribute(self._SDM_ATT_MAP['RouteDistinguisher'])
    @property
    def RouteTarget(self):
        """
        Returns
        -------
        - str: (Read Only) The route target indicated by the IP or AS number.
        """
        return self._get_attribute(self._SDM_ATT_MAP['RouteTarget'])
    @property
    def SupportedLocally(self):
        """
        Returns
        -------
        - bool: (Read Only) The boolean value indicating whether it is supported locally.
        """
        return self._get_attribute(self._SDM_ATT_MAP['SupportedLocally'])
    def find(self, NeighborAddress=None, NextHopAddress=None, RemotePeAddress=None, RemoteVplsId=None, RemoteVsiId=None, RouteDistinguisher=None, RouteTarget=None, SupportedLocally=None):
        """Finds and retrieves adVpls resources from the server.
        All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve adVpls resources from the server.
        To retrieve an exact match ensure the parameter value starts with ^ and ends with $
        By default the find method takes no parameters and will retrieve all adVpls resources from the server.
        Args
        ----
        - NeighborAddress (str): (Read Only) The descriptive identifier for the BGP neighbor.
        - NextHopAddress (str): (Read Only) A 4-octet IP address which indicates the next hop.
        - RemotePeAddress (str): (Read Only) The descriptive identifier for the remote PE.
        - RemoteVplsId (str): (Read Only) The remote VPLS ID indicated by an IP or AS.
        - RemoteVsiId (number): (Read Only) The remote VSI Id indicated by 4 bytes unsigned number.
        - RouteDistinguisher (str): (Read Only) The route distinguisher indicated by the IP or AS number.
        - RouteTarget (str): (Read Only) The route target indicated by the IP or AS number.
        - SupportedLocally (bool): (Read Only) The boolean value indicating whether it is supported locally.
        Returns
        -------
        - self: This instance with matching adVpls resources retrieved from the server available through an iterator or index
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # _map_locals translates the keyword arguments to server attribute names.
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
    def read(self, href):
        """Retrieves a single instance of adVpls data from the server.
        Args
        ----
        - href (str): An href to the instance to be retrieved
        Returns
        -------
        - self: This instance with the adVpls resources from the server available through an iterator or index
        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
| 38.436364 | 187 | 0.660833 |
7958cdbbdf26a7d16d56bba8accd4a23cd3e4a74 | 19,177 | py | Python | integration_tests/test_suites/celery-k8s-integration-test-suite/test_integration.py | dbatten5/dagster | d76e50295054ffe5a72f9b292ef57febae499528 | [
"Apache-2.0"
] | 4,606 | 2018-06-21T17:45:20.000Z | 2022-03-31T23:39:42.000Z | integration_tests/test_suites/celery-k8s-integration-test-suite/test_integration.py | dbatten5/dagster | d76e50295054ffe5a72f9b292ef57febae499528 | [
"Apache-2.0"
] | 6,221 | 2018-06-12T04:36:01.000Z | 2022-03-31T21:43:05.000Z | integration_tests/test_suites/celery-k8s-integration-test-suite/test_integration.py | dbatten5/dagster | d76e50295054ffe5a72f9b292ef57febae499528 | [
"Apache-2.0"
] | 619 | 2018-08-22T22:43:09.000Z | 2022-03-31T22:48:06.000Z | # pylint doesn't know about pytest fixtures
# pylint: disable=unused-argument
import datetime
import os
import time
import boto3
import pytest
from dagster import DagsterEventType
from dagster.core.storage.pipeline_run import PipelineRunStatus
from dagster.core.storage.tags import DOCKER_IMAGE_TAG
from dagster.core.test_utils import create_run_for_test
from dagster.utils import merge_dicts
from dagster.utils.yaml_utils import merge_yamls
from dagster_celery_k8s.launcher import CeleryK8sRunLauncher
from dagster_k8s.test import wait_for_job_and_get_raw_logs
from dagster_k8s_test_infra.helm import TEST_AWS_CONFIGMAP_NAME
from dagster_k8s_test_infra.integration_utils import image_pull_policy
from dagster_test.test_project import (
ReOriginatedExternalPipelineForTest,
get_test_project_environments_path,
get_test_project_workspace_and_external_pipeline,
)
# True when running under Buildkite CI; controls whether the AWS test configmap
# is attached to launched jobs below.
IS_BUILDKITE = os.getenv("BUILDKITE") is not None
def get_celery_engine_config(dagster_docker_image, job_namespace):
    """Build the run-config fragment selecting the celery-k8s executor.

    Args:
        dagster_docker_image: image for launched jobs, or a falsy value to omit
            ``job_image`` (the image then comes from the pipeline origin).
        job_namespace: namespace for launched jobs — either a string or an
            ``{"env": ...}`` mapping resolved at launch time.
    """
    env_config_maps = ["dagster-pipeline-env"]
    if not IS_BUILDKITE:
        env_config_maps.append(TEST_AWS_CONFIGMAP_NAME)
    executor_config = {
        "job_namespace": job_namespace,
        "image_pull_policy": image_pull_policy(),
        "env_config_maps": env_config_maps,
    }
    if dagster_docker_image:
        executor_config["job_image"] = dagster_docker_image
    return {"execution": {"celery-k8s": {"config": executor_config}}}
def test_execute_on_celery_k8s_default(  # pylint: disable=redefined-outer-name
    dagster_docker_image, dagster_instance, helm_namespace
):
    """Launch demo_pipeline_celery with the job image set in the executor
    config, and assert the k8s run job's logs report pipeline success."""
    run_config = merge_dicts(
        merge_yamls(
            [
                os.path.join(get_test_project_environments_path(), "env.yaml"),
                os.path.join(get_test_project_environments_path(), "env_s3.yaml"),
            ]
        ),
        get_celery_engine_config(
            dagster_docker_image=dagster_docker_image, job_namespace=helm_namespace
        ),
    )
    pipeline_name = "demo_pipeline_celery"
    with get_test_project_workspace_and_external_pipeline(dagster_instance, pipeline_name) as (
        workspace,
        external_pipeline,
    ):
        reoriginated_pipeline = ReOriginatedExternalPipelineForTest(external_pipeline)
        run = create_run_for_test(
            dagster_instance,
            pipeline_name=pipeline_name,
            run_config=run_config,
            mode="default",
            external_pipeline_origin=reoriginated_pipeline.get_external_origin(),
            pipeline_code_origin=reoriginated_pipeline.get_python_origin(),
        )
        dagster_instance.launch_run(run.run_id, workspace)
        result = wait_for_job_and_get_raw_logs(
            job_name="dagster-run-%s" % run.run_id, namespace=helm_namespace
        )
        assert "PIPELINE_SUCCESS" in result, "no match, result: {}".format(result)
        # The launcher should tag the run with the docker image it actually used.
        updated_run = dagster_instance.get_run_by_id(run.run_id)
        assert updated_run.tags[DOCKER_IMAGE_TAG] == dagster_docker_image
def test_execute_on_celery_k8s_image_from_origin(  # pylint: disable=redefined-outer-name
    dagster_docker_image, dagster_instance, helm_namespace
):
    """Launch demo_pipeline_celery with the job image carried on the pipeline
    origin instead of the executor config, and assert success."""
    # Like the previous test, but the image is included in the pipeline origin
    # rather than in the executor config
    run_config = merge_dicts(
        merge_yamls(
            [
                os.path.join(get_test_project_environments_path(), "env.yaml"),
                os.path.join(get_test_project_environments_path(), "env_s3.yaml"),
            ]
        ),
        get_celery_engine_config(dagster_docker_image=None, job_namespace=helm_namespace),
    )
    pipeline_name = "demo_pipeline_celery"
    with get_test_project_workspace_and_external_pipeline(
        dagster_instance, pipeline_name, container_image=dagster_docker_image
    ) as (workspace, external_pipeline):
        reoriginated_pipeline = ReOriginatedExternalPipelineForTest(
            external_pipeline, container_image=dagster_docker_image
        )
        run = create_run_for_test(
            dagster_instance,
            pipeline_name=pipeline_name,
            run_config=run_config,
            mode="default",
            external_pipeline_origin=reoriginated_pipeline.get_external_origin(),
            pipeline_code_origin=reoriginated_pipeline.get_python_origin(),
        )
        dagster_instance.launch_run(run.run_id, workspace)
        result = wait_for_job_and_get_raw_logs(
            job_name="dagster-run-%s" % run.run_id, namespace=helm_namespace
        )
        assert "PIPELINE_SUCCESS" in result, "no match, result: {}".format(result)
        # Even when sourced from the origin, the image should be tagged on the run.
        updated_run = dagster_instance.get_run_by_id(run.run_id)
        assert updated_run.tags[DOCKER_IMAGE_TAG] == dagster_docker_image
def test_execute_subset_on_celery_k8s(  # pylint: disable=redefined-outer-name
    dagster_docker_image, dagster_instance, helm_namespace
):
    """Launch a solid-subset run (only count_letters) of demo_pipeline_celery
    and assert the k8s run job's logs report pipeline success."""
    run_config = merge_dicts(
        merge_yamls(
            [
                os.path.join(get_test_project_environments_path(), "env_subset.yaml"),
                os.path.join(get_test_project_environments_path(), "env_s3.yaml"),
            ]
        ),
        get_celery_engine_config(
            dagster_docker_image=dagster_docker_image, job_namespace=helm_namespace
        ),
    )
    pipeline_name = "demo_pipeline_celery"
    with get_test_project_workspace_and_external_pipeline(dagster_instance, pipeline_name) as (
        workspace,
        external_pipeline,
    ):
        reoriginated_pipeline = ReOriginatedExternalPipelineForTest(external_pipeline)
        run = create_run_for_test(
            dagster_instance,
            pipeline_name=pipeline_name,
            run_config=run_config,
            mode="default",
            # Execute only this solid; env_subset.yaml supplies its inputs.
            solids_to_execute={"count_letters"},
            external_pipeline_origin=reoriginated_pipeline.get_external_origin(),
            pipeline_code_origin=reoriginated_pipeline.get_python_origin(),
        )
        dagster_instance.launch_run(run.run_id, workspace)
        result = wait_for_job_and_get_raw_logs(
            job_name="dagster-run-%s" % run.run_id, namespace=helm_namespace
        )
        assert "PIPELINE_SUCCESS" in result, "no match, result: {}".format(result)
def test_execute_on_celery_k8s_retry_pipeline(  # pylint: disable=redefined-outer-name
    dagster_docker_image, dagster_instance, helm_namespace
):
    """Launch retry_pipeline and assert the run succeeds after a retry,
    emitting the full start -> up-for-retry -> restarted -> success sequence."""
    run_config = merge_dicts(
        merge_yamls([os.path.join(get_test_project_environments_path(), "env_s3.yaml")]),
        get_celery_engine_config(
            dagster_docker_image=dagster_docker_image, job_namespace=helm_namespace
        ),
    )
    pipeline_name = "retry_pipeline"
    with get_test_project_workspace_and_external_pipeline(dagster_instance, pipeline_name) as (
        workspace,
        external_pipeline,
    ):
        reoriginated_pipeline = ReOriginatedExternalPipelineForTest(external_pipeline)
        run = create_run_for_test(
            dagster_instance,
            pipeline_name=pipeline_name,
            run_config=run_config,
            mode="default",
            external_pipeline_origin=reoriginated_pipeline.get_external_origin(),
            pipeline_code_origin=reoriginated_pipeline.get_python_origin(),
        )
        dagster_instance.launch_run(run.run_id, workspace)
        result = wait_for_job_and_get_raw_logs(
            job_name="dagster-run-%s" % run.run_id, namespace=helm_namespace
        )
        assert "PIPELINE_SUCCESS" in result, "no match, result: {}".format(result)
        stats = dagster_instance.get_run_stats(run.run_id)
        # Exactly one step succeeds (after its retry); the retry itself shows
        # up in the event log, checked below.
        assert stats.steps_succeeded == 1
        assert DagsterEventType.STEP_START in [
            event.dagster_event.event_type
            for event in dagster_instance.all_logs(run.run_id)
            if event.is_dagster_event
        ]
        assert DagsterEventType.STEP_UP_FOR_RETRY in [
            event.dagster_event.event_type
            for event in dagster_instance.all_logs(run.run_id)
            if event.is_dagster_event
        ]
        assert DagsterEventType.STEP_RESTARTED in [
            event.dagster_event.event_type
            for event in dagster_instance.all_logs(run.run_id)
            if event.is_dagster_event
        ]
        assert DagsterEventType.STEP_SUCCESS in [
            event.dagster_event.event_type
            for event in dagster_instance.all_logs(run.run_id)
            if event.is_dagster_event
        ]
def test_execute_on_celery_k8s_with_resource_requirements(  # pylint: disable=redefined-outer-name
    dagster_docker_image, dagster_instance, helm_namespace
):
    """Launch resources_limit_pipeline (which declares k8s resource limits)
    and assert the run completes successfully."""
    run_config = merge_dicts(
        merge_yamls(
            [
                os.path.join(get_test_project_environments_path(), "env_s3.yaml"),
            ]
        ),
        get_celery_engine_config(
            dagster_docker_image=dagster_docker_image, job_namespace=helm_namespace
        ),
    )
    pipeline_name = "resources_limit_pipeline"
    with get_test_project_workspace_and_external_pipeline(dagster_instance, pipeline_name) as (
        workspace,
        external_pipeline,
    ):
        reoriginated_pipeline = ReOriginatedExternalPipelineForTest(external_pipeline)
        run = create_run_for_test(
            dagster_instance,
            pipeline_name=pipeline_name,
            run_config=run_config,
            mode="default",
            external_pipeline_origin=reoriginated_pipeline.get_external_origin(),
            pipeline_code_origin=reoriginated_pipeline.get_python_origin(),
        )
        dagster_instance.launch_run(run.run_id, workspace)
        result = wait_for_job_and_get_raw_logs(
            job_name="dagster-run-%s" % run.run_id, namespace=helm_namespace
        )
        assert "PIPELINE_SUCCESS" in result, "no match, result: {}".format(result)
def _test_termination(dagster_instance, run_config):
    """Shared termination scenario: launch resource_pipeline, wait for a step
    to start, terminate the run, then assert the run is CANCELED, the resource
    was initialized and torn down exactly once, and the resource left its
    marker object in S3. Each wait polls every 5s up to a 120s timeout."""
    pipeline_name = "resource_pipeline"
    with get_test_project_workspace_and_external_pipeline(dagster_instance, pipeline_name) as (
        workspace,
        external_pipeline,
    ):
        reoriginated_pipeline = ReOriginatedExternalPipelineForTest(external_pipeline)
        run = create_run_for_test(
            dagster_instance,
            pipeline_name=pipeline_name,
            run_config=run_config,
            mode="default",
            external_pipeline_origin=reoriginated_pipeline.get_external_origin(),
            pipeline_code_origin=reoriginated_pipeline.get_python_origin(),
        )
        dagster_instance.launch_run(run.run_id, workspace)
        assert isinstance(dagster_instance.run_launcher, CeleryK8sRunLauncher)
        # Wait for pipeline run to start
        timeout = datetime.timedelta(0, 120)
        start_time = datetime.datetime.now()
        can_terminate = False
        while datetime.datetime.now() < start_time + timeout:
            if dagster_instance.run_launcher.can_terminate(run_id=run.run_id):
                can_terminate = True
                break
            time.sleep(5)
        assert can_terminate
        # Wait for step to start
        step_start_found = False
        start_time = datetime.datetime.now()
        while datetime.datetime.now() < start_time + timeout:
            event_records = dagster_instance.all_logs(run.run_id)
            for event_record in event_records:
                if (
                    event_record.dagster_event
                    and event_record.dagster_event.event_type == DagsterEventType.STEP_START
                ):
                    step_start_found = True
                    break
            if step_start_found:
                break
            time.sleep(5)
        assert step_start_found
        # Terminate run
        assert dagster_instance.run_launcher.can_terminate(run_id=run.run_id)
        assert dagster_instance.run_launcher.terminate(run_id=run.run_id)
        # Check that pipeline run is marked as canceled
        pipeline_run_status_canceled = False
        start_time = datetime.datetime.now()
        while datetime.datetime.now() < start_time + timeout:
            pipeline_run = dagster_instance.get_run_by_id(run.run_id)
            if pipeline_run.status == PipelineRunStatus.CANCELED:
                pipeline_run_status_canceled = True
                break
            time.sleep(5)
        assert pipeline_run_status_canceled
        # Check that terminate cannot be called again
        assert not dagster_instance.run_launcher.can_terminate(run_id=run.run_id)
        assert not dagster_instance.run_launcher.terminate(run_id=run.run_id)
        # Check for step failure and resource tear down
        expected_events_found = False
        start_time = datetime.datetime.now()
        while datetime.datetime.now() < start_time + timeout:
            # Recount from scratch each poll; the event log grows over time.
            step_failures_count = 0
            resource_tear_down_count = 0
            resource_init_count = 0
            termination_request_count = 0
            termination_success_count = 0
            event_records = dagster_instance.all_logs(run.run_id)
            for event_record in event_records:
                if event_record.dagster_event:
                    if event_record.dagster_event.event_type == DagsterEventType.STEP_FAILURE:
                        step_failures_count += 1
                    elif (
                        event_record.dagster_event.event_type == DagsterEventType.PIPELINE_CANCELING
                    ):
                        termination_request_count += 1
                    elif (
                        event_record.dagster_event.event_type == DagsterEventType.PIPELINE_CANCELED
                    ):
                        termination_success_count += 1
                elif event_record.message:
                    # Resource lifecycle is only visible via log messages.
                    if "initializing s3_resource_with_context_manager" in event_record.message:
                        resource_init_count += 1
                    if "tearing down s3_resource_with_context_manager" in event_record.message:
                        resource_tear_down_count += 1
            if (
                step_failures_count == 1
                and resource_init_count == 1
                and resource_tear_down_count == 1
                and termination_request_count == 1
                and termination_success_count == 1
            ):
                expected_events_found = True
                break
            time.sleep(5)
        assert expected_events_found
        # The resource writes a marker object on teardown; confirm it exists.
        s3 = boto3.resource(
            "s3", region_name="us-west-1", use_ssl=True, endpoint_url=None
        ).meta.client
        bucket = "dagster-scratch-80542c2"
        key = "resource_termination_test/{}".format(run.run_id)
        assert s3.get_object(Bucket=bucket, Key=key)
def test_execute_on_celery_k8s_with_termination(  # pylint: disable=redefined-outer-name
    dagster_docker_image, dagster_instance, helm_namespace
):
    """Run the shared termination scenario with the namespace passed directly."""
    env_s3 = os.path.join(get_test_project_environments_path(), "env_s3.yaml")
    engine_config = get_celery_engine_config(
        dagster_docker_image=dagster_docker_image, job_namespace=helm_namespace
    )
    run_config = merge_dicts(merge_yamls([env_s3]), engine_config)
    _test_termination(dagster_instance, run_config)
@pytest.fixture(scope="function")
def set_dagster_k8s_pipeline_run_namespace_env(helm_namespace):
    """Point DAGSTER_K8S_PIPELINE_RUN_NAMESPACE at the Helm test namespace for
    the duration of a test.

    On teardown, restores the previous value — or removes the variable entirely
    if it was not set beforehand. (The previous implementation only restored a
    non-None value, leaking the variable into subsequent tests when it had been
    unset originally.)
    """
    old_value = os.getenv("DAGSTER_K8S_PIPELINE_RUN_NAMESPACE")
    try:
        os.environ["DAGSTER_K8S_PIPELINE_RUN_NAMESPACE"] = helm_namespace
        yield
    finally:
        if old_value is not None:
            os.environ["DAGSTER_K8S_PIPELINE_RUN_NAMESPACE"] = old_value
        else:
            # Variable was unset before the test; remove it again.
            os.environ.pop("DAGSTER_K8S_PIPELINE_RUN_NAMESPACE", None)
def test_execute_on_celery_k8s_with_env_var_and_termination(  # pylint: disable=redefined-outer-name
    dagster_docker_image, dagster_instance, set_dagster_k8s_pipeline_run_namespace_env
):
    """Run the shared termination scenario with the job namespace resolved from
    the DAGSTER_K8S_PIPELINE_RUN_NAMESPACE env var (set by the fixture)."""
    env_s3 = os.path.join(get_test_project_environments_path(), "env_s3.yaml")
    engine_config = get_celery_engine_config(
        dagster_docker_image=dagster_docker_image,
        job_namespace={"env": "DAGSTER_K8S_PIPELINE_RUN_NAMESPACE"},
    )
    run_config = merge_dicts(merge_yamls([env_s3]), engine_config)
    _test_termination(dagster_instance, run_config)
def test_execute_on_celery_k8s_with_hard_failure(  # pylint: disable=redefined-outer-name
    dagster_docker_image, dagster_instance, set_dagster_k8s_pipeline_run_namespace_env
):
    """Launch hard_failer configured to hard-fail, then assert the run is marked
    FAILURE and a STEP_FAILURE event is recorded for hard_fail_or_0.

    Both waits poll every 5s up to a 120s timeout. Fix vs. the original: the
    step-failure polling loop now breaks as soon as the event is found instead
    of always running out the full timeout (matching _test_termination).
    """
    run_config = merge_dicts(
        merge_dicts(
            merge_yamls(
                [
                    os.path.join(get_test_project_environments_path(), "env_s3.yaml"),
                ]
            ),
            get_celery_engine_config(
                dagster_docker_image=dagster_docker_image,
                job_namespace={"env": "DAGSTER_K8S_PIPELINE_RUN_NAMESPACE"},
            ),
        ),
        # Force the solid down its hard-failure branch.
        {"solids": {"hard_fail_or_0": {"config": {"fail": True}}}},
    )
    pipeline_name = "hard_failer"
    with get_test_project_workspace_and_external_pipeline(dagster_instance, pipeline_name) as (
        workspace,
        external_pipeline,
    ):
        reoriginated_pipeline = ReOriginatedExternalPipelineForTest(external_pipeline)
        run = create_run_for_test(
            dagster_instance,
            pipeline_name=pipeline_name,
            run_config=run_config,
            mode="default",
            external_pipeline_origin=reoriginated_pipeline.get_external_origin(),
            pipeline_code_origin=reoriginated_pipeline.get_python_origin(),
        )
        dagster_instance.launch_run(run.run_id, workspace)
        assert isinstance(dagster_instance.run_launcher, CeleryK8sRunLauncher)
        # Check that pipeline run is marked as failed
        pipeline_run_status_failure = False
        start_time = datetime.datetime.now()
        timeout = datetime.timedelta(0, 120)
        while datetime.datetime.now() < start_time + timeout:
            pipeline_run = dagster_instance.get_run_by_id(run.run_id)
            if pipeline_run.status == PipelineRunStatus.FAILURE:
                pipeline_run_status_failure = True
                break
            time.sleep(5)
        assert pipeline_run_status_failure
        # Check for step failure for hard_fail_or_0.compute
        start_time = datetime.datetime.now()
        step_failure_found = False
        while datetime.datetime.now() < start_time + timeout:
            event_records = dagster_instance.all_logs(run.run_id)
            for event_record in event_records:
                if event_record.dagster_event:
                    if (
                        event_record.dagster_event.event_type == DagsterEventType.STEP_FAILURE
                        and event_record.dagster_event.step_key == "hard_fail_or_0"
                    ):
                        step_failure_found = True
                        break
            if step_failure_found:
                # Stop polling as soon as the event is observed.
                break
            time.sleep(5)
        assert step_failure_found
| 37.974257 | 100 | 0.655316 |
7958d02a6b13ff5846dece7f43f5a1a4e7749b3b | 10,010 | py | Python | wagtail/contrib/sitemaps/tests.py | Nawarrr/wagtail | 4db71de5a2af19086026605be8fcb92c4be623aa | [
"BSD-3-Clause"
] | null | null | null | wagtail/contrib/sitemaps/tests.py | Nawarrr/wagtail | 4db71de5a2af19086026605be8fcb92c4be623aa | [
"BSD-3-Clause"
] | null | null | null | wagtail/contrib/sitemaps/tests.py | Nawarrr/wagtail | 4db71de5a2af19086026605be8fcb92c4be623aa | [
"BSD-3-Clause"
] | null | null | null | import datetime
import pytz
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.shortcuts import get_current_site
from django.test import RequestFactory, TestCase, override_settings
from django.utils import timezone
from wagtail.core.models import Page, PageViewRestriction, Site
from wagtail.tests.testapp.models import EventIndex, SimplePage
from .sitemap_generator import Sitemap
class TestSitemapGenerator(TestCase):
    def setUp(self):
        """Create a tree of pages that exercises every sitemap inclusion rule."""
        # Default site homepage (pk=2 in Wagtail's initial data).
        self.home_page = Page.objects.get(id=2)
        # Live page with explicit publish/revision timestamps (for lastmod checks).
        self.child_page = self.home_page.add_child(
            instance=SimplePage(
                title="Hello world!",
                slug="hello-world",
                content="hello",
                live=True,
                last_published_at=datetime.datetime(
                    2017, 1, 1, 12, 0, 0, tzinfo=pytz.utc
                ),
                latest_revision_created_at=datetime.datetime(
                    2017, 2, 1, 12, 0, 0, tzinfo=pytz.utc
                ),
            )
        )
        # Draft page: must be excluded from the sitemap.
        self.unpublished_child_page = self.home_page.add_child(
            instance=SimplePage(
                title="Unpublished",
                slug="unpublished",
                content="hello",
                live=False,
            )
        )
        # Live but password-protected page: must also be excluded.
        self.protected_child_page = self.home_page.add_child(
            instance=SimplePage(
                title="Protected",
                slug="protected",
                content="hello",
                live=True,
            )
        )
        PageViewRestriction.objects.create(
            page=self.protected_child_page, password="hello"
        )
        # Live page with no last_published_at, to exercise lastmod fallbacks.
        self.page_with_no_last_publish_date = self.home_page.add_child(
            instance=SimplePage(
                title="I have no last publish date :-(",
                slug="no-last-publish-date",
                content="hello",
                live=True,
                latest_revision_created_at=datetime.datetime(
                    2017, 2, 1, 12, 0, 0, tzinfo=pytz.utc
                ),
            )
        )
        self.site = Site.objects.get(is_default_site=True)
        root_page = Page.objects.get(depth=1)
        # A second Wagtail site, so URL resolution is tested in a multi-site setup.
        self.other_site_homepage = root_page.add_child(
            instance=SimplePage(
                title="Another site", slug="another-site", content="bonjour", live=True
            )
        )
        Site.objects.create(
            hostname="other.example.com", port=80, root_page=self.other_site_homepage
        )
        # Clear the cache to that runs are deterministic regarding the sql count
        ContentType.objects.clear_cache()
def get_request_and_django_site(self, url):
request = RequestFactory().get(url)
request.META["HTTP_HOST"] = self.site.hostname
request.META["SERVER_PORT"] = self.site.port
return request, get_current_site(request)
def assertDatesEqual(self, actual, expected):
# Compare dates as naive or timezone-aware according to USE_TZ
if not settings.USE_TZ:
expected = timezone.make_naive(expected)
return self.assertEqual(actual, expected)
def test_items(self):
request, django_site = self.get_request_and_django_site("/sitemap.xml")
sitemap = Sitemap(request)
pages = sitemap.items()
self.assertIn(self.child_page.page_ptr.specific, pages)
self.assertNotIn(self.unpublished_child_page.page_ptr.specific, pages)
self.assertNotIn(self.protected_child_page.page_ptr.specific, pages)
def test_get_urls_without_request(self):
request, django_site = self.get_request_and_django_site("/sitemap.xml")
req_protocol = request.scheme
sitemap = Sitemap()
with self.assertNumQueries(17):
urls = [
url["location"]
for url in sitemap.get_urls(1, django_site, req_protocol)
]
self.assertIn("http://localhost/", urls) # Homepage
self.assertIn("http://localhost/hello-world/", urls) # Child page
def test_get_urls_with_request_site_cache(self):
request, django_site = self.get_request_and_django_site("/sitemap.xml")
req_protocol = request.scheme
sitemap = Sitemap(request)
# pre-seed find_for_request cache, so that it's not counted towards the query count
Site.find_for_request(request)
with self.assertNumQueries(14):
urls = [
url["location"]
for url in sitemap.get_urls(1, django_site, req_protocol)
]
self.assertIn("http://localhost/", urls) # Homepage
self.assertIn("http://localhost/hello-world/", urls) # Child page
@override_settings(WAGTAIL_I18N_ENABLED=True)
def test_get_urls_without_request_with_i18n(self):
request, django_site = self.get_request_and_django_site("/sitemap.xml")
req_protocol = request.scheme
sitemap = Sitemap()
with self.assertNumQueries(19):
urls = [
url["location"]
for url in sitemap.get_urls(1, django_site, req_protocol)
]
self.assertIn("http://localhost/", urls) # Homepage
self.assertIn("http://localhost/hello-world/", urls) # Child page
@override_settings(WAGTAIL_I18N_ENABLED=True)
def test_get_urls_with_request_site_cache_with_i18n(self):
request, django_site = self.get_request_and_django_site("/sitemap.xml")
req_protocol = request.scheme
sitemap = Sitemap(request)
# pre-seed find_for_request cache, so that it's not counted towards the query count
Site.find_for_request(request)
with self.assertNumQueries(16):
urls = [
url["location"]
for url in sitemap.get_urls(1, django_site, req_protocol)
]
self.assertIn("http://localhost/", urls) # Homepage
self.assertIn("http://localhost/hello-world/", urls) # Child page
def test_get_urls_uses_specific(self):
request, django_site = self.get_request_and_django_site("/sitemap.xml")
req_protocol = request.scheme
# Add an event page which has an extra url in the sitemap
self.home_page.add_child(
instance=EventIndex(
title="Events",
slug="events",
live=True,
)
)
sitemap = Sitemap(request)
urls = [
url["location"] for url in sitemap.get_urls(1, django_site, req_protocol)
]
self.assertIn("http://localhost/events/", urls) # Main view
self.assertIn("http://localhost/events/past/", urls) # Sub view
def test_lastmod_uses_last_published_date(self):
request, django_site = self.get_request_and_django_site("/sitemap.xml")
req_protocol = request.scheme
sitemap = Sitemap(request)
urls = sitemap.get_urls(1, django_site, req_protocol)
child_page_lastmod = [
url["lastmod"]
for url in urls
if url["location"] == "http://localhost/hello-world/"
][0]
self.assertDatesEqual(
child_page_lastmod, datetime.datetime(2017, 1, 1, 12, 0, 0, tzinfo=pytz.utc)
)
# if no last_publish_date is defined, use latest revision date
child_page_lastmod = [
url["lastmod"]
for url in urls
if url["location"] == "http://localhost/no-last-publish-date/"
][0]
self.assertDatesEqual(
child_page_lastmod, datetime.datetime(2017, 2, 1, 12, 0, 0, tzinfo=pytz.utc)
)
def test_latest_lastmod(self):
# give the homepage a lastmod
self.home_page.last_published_at = datetime.datetime(
2017, 3, 1, 12, 0, 0, tzinfo=pytz.utc
)
self.home_page.save()
request, django_site = self.get_request_and_django_site("/sitemap.xml")
req_protocol = request.scheme
sitemap = Sitemap(request)
sitemap.get_urls(1, django_site, req_protocol)
self.assertDatesEqual(
sitemap.latest_lastmod,
datetime.datetime(2017, 3, 1, 12, 0, 0, tzinfo=pytz.utc),
)
def test_latest_lastmod_missing(self):
# ensure homepage does not have lastmod
self.home_page.last_published_at = None
self.home_page.save()
request, django_site = self.get_request_and_django_site("/sitemap.xml")
req_protocol = request.scheme
sitemap = Sitemap(request)
sitemap.get_urls(1, django_site, req_protocol)
self.assertFalse(hasattr(sitemap, "latest_lastmod"))
def test_non_default_site(self):
request = RequestFactory().get("/sitemap.xml")
request.META["HTTP_HOST"] = "other.example.com"
request.META["SERVER_PORT"] = 80
sitemap = Sitemap(request)
pages = sitemap.items()
self.assertIn(self.other_site_homepage.page_ptr.specific, pages)
self.assertNotIn(self.child_page.page_ptr.specific, pages)
class TestIndexView(TestCase):
    """Smoke test for the sitemap index endpoint."""
    def test_index_view(self):
        """The sitemap index is served with a 200 status and an XML content type."""
        response = self.client.get("/sitemap-index.xml")
        self.assertEqual(
            (response.status_code, response["Content-Type"]),
            (200, "application/xml"),
        )
class TestSitemapView(TestCase):
    """Smoke tests for the sitemap rendering view."""
    def _assert_xml_ok(self, response):
        # Shared check: the view answered 200 with an XML payload.
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response["Content-Type"], "application/xml")
    def test_sitemap_view(self):
        """The sitemap renders successfully as XML."""
        self._assert_xml_ok(self.client.get("/sitemap.xml"))
    def test_sitemap_view_with_current_site_middleware(self):
        """The sitemap still renders when CurrentSiteMiddleware is appended."""
        middleware = {
            "append": "django.contrib.sites.middleware.CurrentSiteMiddleware",
        }
        with self.modify_settings(MIDDLEWARE=middleware):
            response = self.client.get("/sitemap.xml")
        self._assert_xml_ok(response)
| 35 | 91 | 0.61978 |
7958d26558926feff1845e9c57a0580d3c94fca6 | 1,913 | py | Python | python/GafferDispatchUITest/__init__.py | sebaDesmet/gaffer | 47b2d093c40452bd77947e3b5bd0722a366c8d59 | [
"BSD-3-Clause"
] | 1 | 2016-07-31T09:55:09.000Z | 2016-07-31T09:55:09.000Z | python/GafferDispatchUITest/__init__.py | rkoschmitzky/gaffer | ec6262ae1292767bdeb9520d1447d65a4a511884 | [
"BSD-3-Clause"
] | 2 | 2017-08-23T21:35:45.000Z | 2018-01-29T08:59:33.000Z | python/GafferDispatchUITest/__init__.py | rkoschmitzky/gaffer | ec6262ae1292767bdeb9520d1447d65a4a511884 | [
"BSD-3-Clause"
] | 1 | 2020-12-21T12:33:49.000Z | 2020-12-21T12:33:49.000Z | ##########################################################################
#
# Copyright (c) 2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
from DocumentationTest import DocumentationTest
# Allow running this test module directly; unittest discovers the imported
# DocumentationTest case.
if __name__ == "__main__":
    unittest.main()
| 44.488372 | 77 | 0.687925 |
7958d3abcba282265d44d1f4bc2a059eae35b2f2 | 297 | py | Python | jobsapp/migrations/0015_delete_applicant1.py | Deepanjalli/job_portal6 | 2869de5dca16a88f840ce0e4a26fe2edba3e9cae | [
"MIT"
] | null | null | null | jobsapp/migrations/0015_delete_applicant1.py | Deepanjalli/job_portal6 | 2869de5dca16a88f840ce0e4a26fe2edba3e9cae | [
"MIT"
] | 4 | 2020-06-06T01:42:22.000Z | 2021-09-08T01:50:57.000Z | jobsapp/migrations/0015_delete_applicant1.py | Deepanjalli/job_portal6 | 2869de5dca16a88f840ce0e4a26fe2edba3e9cae | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2020-02-21 07:10
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('jobsapp', '0014_auto_20200221_0651'),
]
operations = [
migrations.DeleteModel(
name='Applicant1',
),
]
| 17.470588 | 47 | 0.609428 |
7958d44d161db8cbcc45bb2365bbc211564f09a6 | 5,659 | py | Python | SOFTWARE/Python/pyA20/pyA20/utilities/color.py | GitHubRepoDescription/OLINUXINO-forked | 7b7587c3b1b96af5c2eccef671449a4a4f6b993b | [
"Unlicense"
] | 880 | 2015-01-05T08:44:23.000Z | 2022-03-25T09:09:19.000Z | SOFTWARE/Python/pyA20/pyA20/utilities/color.py | tinkertux/OLINUXINO | b4bd32fef9218f9210aedf321286f8c50ca52259 | [
"Unlicense"
] | 79 | 2017-09-28T02:34:36.000Z | 2021-12-06T10:03:52.000Z | SOFTWARE/Python/pyA20/pyA20/utilities/color.py | tinkertux/OLINUXINO | b4bd32fef9218f9210aedf321286f8c50ca52259 | [
"Unlicense"
] | 672 | 2015-01-04T06:30:01.000Z | 2022-03-29T10:47:59.000Z | __author__ = 'stefan'
class Colors:
def __init__(self):
self = self
def test(self):
print "Regular"
print self.Black + "Black" + self.Color_Off
print self.Red + "Red" + self.Color_Off
print self.Green + "Green" + self.Color_Off
print self.Yellow + "Yellow" + self.Color_Off
print self.Blue + "Blue" + self.Color_Off
print self.Purple + "Purple" + self.Color_Off
print self.Cyan + "Cyan" + self.Color_Off
print self.White + "White" + self.Color_Off
print ""
print "Bold"
print self.BBlack + "BBlack" + self.Color_Off
print self.BRed + "BRed" + self.Color_Off
print self.BGreen + "BGreen" + self.Color_Off
print self.BYellow + "BYellow" + self.Color_Off
print self.BBlue + "BBlue" + self.Color_Off
print self.BPurple + "BPurple" + self.Color_Off
print self.BCyan + "BCyan" + self.Color_Off
print self.BWhite + "BWhite" + self.Color_Off
print ""
print "Underline"
print self.UBlack + "UBlack" + self.Color_Off
print self.URed + "URed" + self.Color_Off
print self.UGreen + "UGreen" + self.Color_Off
print self.UYellow + "UYellow" + self.Color_Off
print self.UBlue + "UBlue" + self.Color_Off
print self.UPurple + "UPurple" + self.Color_Off
print self.UCyan + "UCyan" + self.Color_Off
print self.UWhite + "UWhite" + self.Color_Off
print ""
print "Background"
print self.On_Black + "On_Black" + self.Color_Off
print self.On_Red + "On_Red" + self.Color_Off
print self.On_Green + "On_Green" + self.Color_Off
print self.On_Yellow + "On_Yellow" + self.Color_Off
print self.On_Blue + "On_Blue" + self.Color_Off
print self.On_Purple + "On_Purple" + self.Color_Off
print self.On_Cyan + "On_Cyan" + self.Color_Off
print self.On_White + "On_White" + self.Color_Off
print ""
print "High Intensity"
print self.IBlack + "IBlack" + self.Color_Off
print self.IRed + "IRed" + self.Color_Off
print self.IGreen + "IGreen" + self.Color_Off
print self.IYellow + "IYellow" + self.Color_Off
print self.IBlue + "IBlue" + self.Color_Off
print self.IPurple + "IPurple" + self.Color_Off
print self.ICyan + "ICyan" + self.Color_Off
print self.IWhite + "IWhite" + self.Color_Off
print ""
print "Bold High Intensity"
print self.BIBlack + "BIBlack" + self.Color_Off
print self.BIRed + "BIRed" + self.Color_Off
print self.BIGreen + "BIGreen" + self.Color_Off
print self.BIYellow + "BIYellow" + self.Color_Off
print self.BIBlue + "BIBlue" + self.Color_Off
print self.BIPurple + "BIPurple" + self.Color_Off
print self.BICyan + "BICyan" + self.Color_Off
print self.BIWhite + "BIWhite" + self.Color_Off
print ""
print "High Intensity backgrounds"
print self.On_IBlack + "On_IBlack" + self.Color_Off
print self.On_IRed + "On_IRed" + self.Color_Off
print self.On_IGreen + "On_IGreen" + self.Color_Off
print self.On_IYellow + "On_IYellow" + self.Color_Off
print self.On_IBlue + "On_IBlue" + self.Color_Off
print self.On_IPurple + "On_IPurple" + self.Color_Off
print self.On_ICyan + "On_ICyan" + self.Color_Off
print self.On_IWhite + "On_IWhite" + self.Color_Off
print ""
# Reset
Color_Off = '\033[0m' # Text Reset
Black = '\033[0;30m' # Black
Red = '\033[0;31m' # Red
Green = '\033[0;32m' # Green
Yellow = '\033[0;33m' # Yellow
Blue = '\033[0;34m' # Blue
Purple = '\033[0;35m' # Purple
Cyan = '\033[0;36m' # Cyan
White = '\033[0;37m' # White
# Bold
BBlack = '\033[1;30m' # Black
BRed = '\033[1;31m' # Red
BGreen = '\033[1;32m' # Green
BYellow = '\033[1;33m' # Yellow
BBlue = '\033[1;34m' # Blue
BPurple = '\033[1;35m' # Purple
BCyan = '\033[1;36m' # Cyan
BWhite = '\033[1;37m' # White
# Underline
UBlack = '\033[4;30m' # Black
URed = '\033[4;31m' # Red
UGreen = '\033[4;32m' # Green
UYellow = '\033[4;33m' # Yellow
UBlue = '\033[4;34m' # Blue
UPurple = '\033[4;35m' # Purple
UCyan = '\033[4;36m' # Cyan
UWhite = '\033[4;37m' # White
# Background
On_Black = '\033[40m' # Black
On_Red = '\033[41m' # Red
On_Green = '\033[42m' # Green
On_Yellow = '\033[43m' # Yellow
On_Blue = '\033[44m' # Blue
On_Purple = '\033[45m' # Purple
On_Cyan = '\033[46m' # Cyan
On_White = '\033[47m' # White
# High Intensity
IBlack = '\033[0;90m' # Black
IRed = '\033[0;91m' # Red
IGreen = '\033[0;92m' # Green
IYellow = '\033[0;93m' # Yellow
IBlue = '\033[0;94m' # Blue
IPurple = '\033[0;95m' # Purple
ICyan = '\033[0;96m' # Cyan
IWhite = '\033[0;97m' # White
# Bold High Intensity
BIBlack = '\033[1;90m' # Black
BIRed = '\033[1;91m' # Red
BIGreen = '\033[1;92m' # Green
BIYellow = '\033[1;93m' # Yellow
BIBlue = '\033[1;94m' # Blue
BIPurple = '\033[1;95m' # Purple
BICyan = '\033[1;96m' # Cyan
BIWhite = '\033[1;97m' # White
# High Intensity backgrounds
On_IBlack = '\033[0;100m' # Black
On_IRed = '\033[0;101m' # Red
On_IGreen = '\033[0;102m' # Green
On_IYellow = '\033[0;103m' # Yellow
On_IBlue = '\033[0;104m' # Blue
On_IPurple = '\033[0;105m' # Purple
On_ICyan = '\033[0;106m' # Cyan
On_IWhite = '\033[0;107m' # White | 36.275641 | 61 | 0.582435 |
7958d6e9a3dcff58ac4799e8c64ccd26e3fe19e5 | 2,382 | py | Python | scripts/qt1/pyqt_sw08_LCD_Slider_Sender.py | ProfJust/Ruhr-TurtleBot-Competition-RTC- | 5c2425bee331b4d5033757a9425676932d111775 | [
"Unlicense",
"MIT"
] | null | null | null | scripts/qt1/pyqt_sw08_LCD_Slider_Sender.py | ProfJust/Ruhr-TurtleBot-Competition-RTC- | 5c2425bee331b4d5033757a9425676932d111775 | [
"Unlicense",
"MIT"
] | null | null | null | scripts/qt1/pyqt_sw08_LCD_Slider_Sender.py | ProfJust/Ruhr-TurtleBot-Competition-RTC- | 5c2425bee331b4d5033757a9425676932d111775 | [
"Unlicense",
"MIT"
] | null | null | null | #!/usr/bin/python3
import sys
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (QWidget, QLCDNumber, QSlider,
QPushButton, QVBoxLayout,
QHBoxLayout, QApplication, QLabel)
class Example(QWidget):
    """Demo window wiring a horizontal slider to an LCD display.

    Two push buttons ('<' and '>') nudge the slider value down/up and report
    which button was pressed in a status label.
    """

    def __init__(self):
        # Call the parent-class constructor (old-style super() call kept for
        # Python 2 compatibility, matching the rest of the file).
        super(Example, self).__init__()
        self.initUI()

    def initUI(self):
        """Build the widget tree, lay it out and connect signals to slots."""
        # Instantiate the widgets.
        lcd = QLCDNumber(self)
        self.sld = QSlider(Qt.Horizontal, self)
        pbLess = QPushButton('<')
        pbMore = QPushButton('>')
        self.lblStatus = QLabel('Statuszeile')

        # Arrange the widgets in a vertical box:
        vbox = QVBoxLayout()
        vbox.addWidget(lcd)              # row 1: LCD display
        vbox.addWidget(self.sld)         # row 2: slider
        hbox = QHBoxLayout()             # row 3: the two nudge buttons
        hbox.addWidget(pbLess)
        hbox.addWidget(pbMore)
        vbox.addLayout(hbox)
        vbox.addWidget(self.lblStatus)   # row 4: status line
        self.setLayout(vbox)

        # Connect signals to slots. Fix: the original connected lcd.display
        # twice, which made Qt invoke the slot twice per value change --
        # connect it exactly once.
        self.sld.valueChanged.connect(lcd.display)
        pbLess.clicked.connect(self.SlotKlick)
        pbMore.clicked.connect(self.SlotKlick)

        # Configure and show the window.
        self.setGeometry(300, 300, 250, 150)
        self.setWindowTitle('Signal and slot')
        self.show()

    def SlotKlick(self):
        """Handle a click on either button: report it and nudge the slider."""
        sender = self.sender()
        self.lblStatus.setText(sender.text() + ' was pressed')
        if sender.text() == '<':
            wert = self.sld.value()
            wert = wert - 1
            self.sld.setValue(wert)
        else:
            wert = self.sld.value()
            wert = wert + 1
            self.sld.setValue(wert)
# Script entry point: create the Qt application and show the demo window
# before handing control to the Qt event loop (started on the next line).
if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = Example()
sys.exit(app.exec_()) | 29.407407 | 63 | 0.555835 |
7958d6ee4dff75ffea481b6e6822470ff72b43c2 | 7,430 | py | Python | tests/e2e/logging/test_openshift-logging.py | b-ranto/ocs-ci | 75cc7c1b58f0d2d9c428bdfb3e9a204639c592c1 | [
"MIT"
] | null | null | null | tests/e2e/logging/test_openshift-logging.py | b-ranto/ocs-ci | 75cc7c1b58f0d2d9c428bdfb3e9a204639c592c1 | [
"MIT"
] | null | null | null | tests/e2e/logging/test_openshift-logging.py | b-ranto/ocs-ci | 75cc7c1b58f0d2d9c428bdfb3e9a204639c592c1 | [
"MIT"
] | null | null | null | """
This file contains the testcases for openshift-logging
"""
import logging
import pytest
import random
from tests import helpers, disruption_helpers
from ocs_ci.ocs import constants
from ocs_ci.ocs.resources.pod import get_all_pods, get_pod_obj
from ocs_ci.utility.retry import retry
from ocs_ci.framework.testlib import E2ETest, workloads, tier1, ignore_leftovers
from ocs_ci.utility import deployment_openshift_logging as ocp_logging_obj
logger = logging.getLogger(__name__)
@pytest.fixture()
def setup_fixture(install_logging):
    """
    Installs openshift-logging

    The heavy lifting is done by the ``install_logging`` dependency fixture;
    this wrapper only logs that the stack is deployed and ready for tests.
    """
    logger.info("Testcases execution post deployment of openshift-logging")
@pytest.mark.usefixtures(
    setup_fixture.__name__
)
@ignore_leftovers
class Testopenshiftloggingonocs(E2ETest):
    """
    The class contains tests to verify openshift-logging backed by OCS.
    """
    @pytest.fixture()
    def create_pvc_and_deploymentconfig_pod(self, request, pvc_factory):
        """
        Creates a PVC, a privileged service account and a running
        DeploymentConfig pod bound to that PVC; tears the pod down on
        fixture finalization. Returns ``(pod_obj, pvc_obj)``.
        """
        def finalizer():
            helpers.delete_deploymentconfig_pods(pod_obj)
        request.addfinalizer(finalizer)
        # Create pvc
        pvc_obj = pvc_factory()
        # Create service_account to get privilege for deployment pods
        sa_name = helpers.create_serviceaccount(pvc_obj.project.namespace)
        helpers.add_scc_policy(sa_name=sa_name.name, namespace=pvc_obj.project.namespace)
        pod_obj = helpers.create_pod(
            interface_type=constants.CEPHBLOCKPOOL,
            pvc_name=pvc_obj.name,
            namespace=pvc_obj.project.namespace,
            sa_name=sa_name.name,
            dc_deployment=True
        )
        helpers.wait_for_resource_state(resource=pod_obj, state=constants.STATUS_RUNNING)
        return pod_obj, pvc_obj
    # ModuleNotFoundError is (ab)used here purely as the retry trigger: the
    # project index may take a while to appear in Elasticsearch.
    @retry(ModuleNotFoundError, tries=10, delay=200, backoff=3)
    def validate_project_exists(self, pvc_obj):
        """
        This function checks whether the new project exists in the
        EFK stack
        """
        # NOTE(review): uses the literal 'openshift-logging' namespace while
        # get_elasticsearch_pod_obj uses constants.OPENSHIFT_LOGGING_NAMESPACE
        # -- consider unifying.
        pod_list = get_all_pods(namespace='openshift-logging')
        elasticsearch_pod = [
            pod.name for pod in pod_list if pod.name.startswith('elasticsearch')
        ]
        # Queries the second ES pod in the list for the index catalogue.
        elasticsearch_pod_obj = get_pod_obj(
            name=elasticsearch_pod[1], namespace='openshift-logging'
        )
        project_index = elasticsearch_pod_obj.exec_cmd_on_pod(
            command='indices', out_yaml_format=False
        )
        project = pvc_obj.project.namespace
        if project in project_index:
            logger.info(f'The project {project} exists in the EFK stack')
            for item in project_index.split("\n"):
                if project in item:
                    logger.info(item.strip())
                    # 'green' is the Elasticsearch per-index health marker.
                    assert 'green' in item.strip(), f"Project {project} is Unhealthy"
        else:
            raise ModuleNotFoundError
    def get_elasticsearch_pod_obj(self):
        """
        This function returns the Elasticsearch pod obj

        A random pod is picked from all elasticsearch-* pods in the
        openshift-logging namespace.
        """
        pod_list = get_all_pods(namespace=constants.OPENSHIFT_LOGGING_NAMESPACE)
        elasticsearch_pod = [
            pod for pod in pod_list if pod.name.startswith('elasticsearch')
        ]
        elasticsearch_pod_obj = random.choice(elasticsearch_pod)
        return elasticsearch_pod_obj
    @pytest.mark.polarion_id("OCS-657")
    @tier1
    def test_create_new_project_to_verify_logging(self, create_pvc_and_deploymentconfig_pod):
        """
        This function creates new project to verify logging in EFK stack
        1. Creates new project
        2. Creates PVC
        3. Creates Deployment pod in the new_project and run-io on the app pod
        4. Logs into the EFK stack and check for new_project
        5. And checks for the file_count in the new_project in EFK stack
        """
        pod_obj, pvc_obj = create_pvc_and_deploymentconfig_pod
        # Running IO on the app_pod
        pod_obj.run_io(storage_type='fs', size=6000)
        self.validate_project_exists(pvc_obj)
    @pytest.mark.polarion_id("OCS-650")
    @workloads
    def test_respin_osd_pods_to_verify_logging(self, create_pvc_and_deploymentconfig_pod):
        """
        This function creates projects before and after respin of osd
        and verify project existence in EFK stack.
        1. Creates new project with PVC and app-pods
        2. Respins osd
        3. Logs into the EFK stack and checks for the health of cluster-logging
        4. Logs into the EFK stack and checks project existence
        5. Checks for the shards of the project in the EFK stack
        6. Creates new project and checks the existence again
        """
        # Create 1st project and app_pod
        dc_pod_obj, dc_pvc_obj = create_pvc_and_deploymentconfig_pod
        project1 = dc_pvc_obj.project.namespace
        # Delete the OSD pod
        disruption = disruption_helpers.Disruptions()
        disruption.set_resource(resource='osd')
        disruption.delete_resource()
        # Check the health of the cluster-logging
        assert ocp_logging_obj.check_health_of_clusterlogging()
        # Check for the 1st project created in EFK stack before the respin
        self.validate_project_exists(dc_pvc_obj)
        # Check the files in the project
        elasticsearch_pod_obj = self.get_elasticsearch_pod_obj()
        project1_filecount = elasticsearch_pod_obj.exec_cmd_on_pod(
            command=f'es_util --query=project.{project1}.*/_count'
        )
        assert project1_filecount['_shards']['successful'] != 0, (
            f"No files found in project {project1}"
        )
        logger.info(f'Total number of files in project 1 {project1_filecount}')
        # Create another app_pod in new project
        # NOTE(review): the fixture value is reused here, so this unpacks the
        # SAME pod/pvc created above rather than a second project -- confirm
        # whether a fresh fixture invocation was intended.
        pod_obj, pvc_obj = create_pvc_and_deploymentconfig_pod
        project2 = pvc_obj.project.namespace
        # Check the 2nd project exists in the EFK stack
        self.validate_project_exists(pvc_obj)
        project2_filecount = elasticsearch_pod_obj.exec_cmd_on_pod(
            command=f'es_util --query=project.{project2}.*/_count', out_yaml_format=True
        )
        assert project2_filecount['_shards']['successful'] != 0, (
            f"No files found in project {project2}"
        )
        logger.info(f'Total number of files in the project 2 {project2_filecount}')
    @pytest.mark.polarion_id("OCS-651")
    @workloads
    def test_respin_elasticsearch_pod(self, create_pvc_and_deploymentconfig_pod):
        """
        Test to verify respin of elasticsearch pod has no functional impact
        on logging backed by OCS.
        """
        elasticsearch_pod_obj = self.get_elasticsearch_pod_obj()
        # Respin the elastic-search pod
        elasticsearch_pod_obj.delete(force=True)
        # Checks the health of logging cluster after a respin
        assert ocp_logging_obj.check_health_of_clusterlogging()
        # Checks .operations index
        es_pod_obj = self.get_elasticsearch_pod_obj()
        operations_index = es_pod_obj.exec_cmd_on_pod(
            command='es_util --query=.operations.*/_search?pretty', out_yaml_format=True
        )
        assert operations_index['_shards']['failed'] == 0, (
            "Unable to access the logs of .operations from ES pods"
        )
        # Creates new-project and app-pod and checks the logs are retained
        pod_obj, pvc_obj = create_pvc_and_deploymentconfig_pod
        self.validate_project_exists(pvc_obj)
| 35.21327 | 93 | 0.679677 |
7958d717c5e32215c7397d3fabafb1f9b71cf8e0 | 344 | py | Python | Codewars/Test/Array_leaders_array_series_3_test.py | maxcohen31/A-bored-math-student | 007beb4dabf7b4406f48e9a3a967c29d032eab89 | [
"MIT"
] | null | null | null | Codewars/Test/Array_leaders_array_series_3_test.py | maxcohen31/A-bored-math-student | 007beb4dabf7b4406f48e9a3a967c29d032eab89 | [
"MIT"
] | null | null | null | Codewars/Test/Array_leaders_array_series_3_test.py | maxcohen31/A-bored-math-student | 007beb4dabf7b4406f48e9a3a967c29d032eab89 | [
"MIT"
] | null | null | null | import unittest
from Array_leaders_array_series_3_ import array_leaders2
class Array_Leader(unittest.TestCase):
def test_1(self):
self.assertEqual(array_leaders2([0, -29, 3]), [0, 3])
def test_2(self):
self.assertEqual(array_leaders2([0,1, 2, 3, 4]), [4])
if __name__ == '__main__':
unittest.main() | 28.666667 | 65 | 0.656977 |
7958d8aa71573dfc5469fc07e9691ec050d207e2 | 7,084 | py | Python | discord/models/emoji.py | sakurazaki/discord-continued | 7210363f3a531879fbff8be35b346a272f1845c9 | [
"MIT"
] | null | null | null | discord/models/emoji.py | sakurazaki/discord-continued | 7210363f3a531879fbff8be35b346a272f1845c9 | [
"MIT"
] | null | null | null | discord/models/emoji.py | sakurazaki/discord-continued | 7210363f3a531879fbff8be35b346a272f1845c9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from .asset import Asset
from . import utils
from .partial_emoji import _EmojiTag
from .user import User
class Emoji(_EmojiTag):
    """Represents a custom emoji.
    Depending on the way this object was created, some of the attributes can
    have a value of ``None``.
    .. container:: operations
        .. describe:: x == y
            Checks if two emoji are the same.
        .. describe:: x != y
            Checks if two emoji are not the same.
        .. describe:: hash(x)
            Return the emoji's hash.
        .. describe:: iter(x)
            Returns an iterator of ``(field, value)`` pairs. This allows this class
            to be used as an iterable in list/dict/etc constructions.
        .. describe:: str(x)
            Returns the emoji rendered for discord.
    Attributes
    -----------
    name: :class:`str`
        The name of the emoji.
    id: :class:`int`
        The emoji's ID.
    require_colons: :class:`bool`
        If colons are required to use this emoji in the client (:PJSalt: vs PJSalt).
    animated: :class:`bool`
        Whether an emoji is animated or not.
    managed: :class:`bool`
        If this emoji is managed by a Twitch integration.
    guild_id: :class:`int`
        The guild ID the emoji belongs to.
    available: :class:`bool`
        Whether the emoji is available for use.
    user: Optional[:class:`User`]
        The user that created the emoji. This can only be retrieved using :meth:`Guild.fetch_emoji` and
        having the :attr:`~Permissions.manage_emojis` permission.
    """
    __slots__ = ('require_colons', 'animated', 'managed', 'id', 'name', '_roles', 'guild_id',
                 '_client', 'user', 'available')
    def __init__(self, *, guild, client, data):
        # Store the owning guild by id only; the Guild object itself is
        # resolved lazily through the client (see the `guild` property).
        self.guild_id = guild.id
        self._client = client
        self._from_data(data)
    def _from_data(self, emoji):
        # Populate attributes from the raw API payload; optional fields fall
        # back to sensible defaults.
        self.require_colons = emoji['require_colons']
        self.managed = emoji['managed']
        self.id = int(emoji['id'])
        self.name = emoji['name']
        self.animated = emoji.get('animated', False)
        self.available = emoji.get('available', True)
        self._roles = utils.SnowflakeList(map(int, emoji.get('roles', [])))
        user = emoji.get('user')
        # `user` is only present in certain fetches (see class docstring).
        self.user = User(client=self._client, data=user) if user else None
    def _iterator(self):
        # Yield (field, value) pairs for public, non-None slots only.
        for attr in self.__slots__:
            if attr[0] != '_':
                value = getattr(self, attr, None)
                if value is not None:
                    yield (attr, value)
    def __iter__(self):
        return self._iterator()
    def __str__(self):
        # Discord's inline rendering syntax; animated emoji use the `a:` prefix.
        if self.animated:
            return '<a:{0.name}:{0.id}>'.format(self)
        return "<:{0.name}:{0.id}>".format(self)
    def __repr__(self):
        return '<Emoji id={0.id} name={0.name!r} animated={0.animated} managed={0.managed}>'.format(self)
    def __eq__(self, other):
        # Any _EmojiTag (custom or partial emoji) with the same id is equal.
        return isinstance(other, _EmojiTag) and self.id == other.id
    def __ne__(self, other):
        return not self.__eq__(other)
    def __hash__(self):
        # Shifting off the low 22 bits of the snowflake leaves the timestamp part.
        return self.id >> 22
    @property
    def created_at(self):
        """:class:`datetime.datetime`: Returns the emoji's creation time in UTC."""
        return utils.snowflake_time(self.id)
    @property
    def url(self):
        """:class:`Asset`: Returns the asset of the emoji.
        This is equivalent to calling :meth:`url_as` with
        the default parameters (i.e. png/gif detection).
        """
        return self.url_as(format=None)
    @property
    def roles(self):
        """List[:class:`Role`]: A :class:`list` of roles that is allowed to use this emoji.
        If roles is empty, the emoji is unrestricted.
        """
        guild = self.guild
        if guild is None:
            return []
        return [role for role in guild.roles if self._roles.has(role.id)]
    @property
    def guild(self):
        """:class:`Guild`: The guild this emoji belongs to."""
        return self._client._get_guild(self.guild_id)
    def url_as(self, *, format=None, static_format="png"):
        """Returns an :class:`Asset` for the emoji's url.
        The format must be one of 'webp', 'jpeg', 'jpg', 'png' or 'gif'.
        'gif' is only valid for animated emojis.
        Parameters
        -----------
        format: Optional[:class:`str`]
            The format to attempt to convert the emojis to.
            If the format is ``None``, then it is automatically
            detected as either 'gif' or static_format, depending on whether the
            emoji is animated or not.
        static_format: Optional[:class:`str`]
            Format to attempt to convert only non-animated emoji's to.
            Defaults to 'png'
        Raises
        -------
        InvalidArgument
            Bad image format passed to ``format`` or ``static_format``.
        Returns
        --------
        :class:`Asset`
            The resulting CDN asset.
        """
        return Asset._from_emoji(self._client, self, format=format, static_format=static_format)
    def is_usable(self):
        """:class:`bool`: Whether the bot can use this emoji.
        .. versionadded:: 1.3
        """
        if not self.available:
            return False
        if not self._roles:
            return True
        # Usable only when the bot holds at least one of the allowed roles.
        emoji_roles, my_roles = self._roles, self.guild.me._roles
        return any(my_roles.has(role_id) for role_id in emoji_roles)
    async def delete(self, *, reason=None):
        """|coro|
        Deletes the custom emoji.
        You must have :attr:`~Permissions.manage_emojis` permission to
        do this.
        Parameters
        -----------
        reason: Optional[:class:`str`]
            The reason for deleting this emoji. Shows up on the audit log.
        Raises
        -------
        Forbidden
            You are not allowed to delete emojis.
        HTTPException
            An error occurred deleting the emoji.
        """
        await self._client.http.delete_custom_emoji(self.guild.id, self.id, reason=reason)
    async def edit(self, *, name=None, roles=None, reason=None):
        r"""|coro|
        Edits the custom emoji.
        You must have :attr:`~Permissions.manage_emojis` permission to
        do this.
        Parameters
        -----------
        name: :class:`str`
            The new emoji name.
        roles: Optional[list[:class:`Role`]]
            A :class:`list` of :class:`Role`\s that can use this emoji. Leave empty to make it available to everyone.
        reason: Optional[:class:`str`]
            The reason for editing this emoji. Shows up on the audit log.
        Raises
        -------
        Forbidden
            You are not allowed to edit emojis.
        HTTPException
            An error occurred editing the emoji.
        """
        # Keep the current name when none supplied; convert Role objects to ids.
        name = name or self.name
        if roles:
            roles = [role.id for role in roles]
        await self._client.http.edit_custom_emoji(self.guild.id, self.id, name=name, roles=roles, reason=reason)
| 30.934498 | 117 | 0.582722 |
7958d9d533bfba560da54eb8f9174606cf1da719 | 1,249 | py | Python | deepocr/utils/fonts.py | das-projects/deepOCR | ffc6db691605b7b4837da9619ab6e918fa1c18de | [
"Apache-2.0"
] | 1 | 2022-01-28T09:48:34.000Z | 2022-01-28T09:48:34.000Z | deepocr/utils/fonts.py | das-projects/deepOCR | ffc6db691605b7b4837da9619ab6e918fa1c18de | [
"Apache-2.0"
] | null | null | null | deepocr/utils/fonts.py | das-projects/deepOCR | ffc6db691605b7b4837da9619ab6e918fa1c18de | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2022, Arijit Das.
# Code adapted from doctr and huggingface
# This program is licensed under the Apache License version 2.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
import logging
import platform
from typing import Optional
from PIL import ImageFont
__all__ = ['get_font']
def get_font(font_family: Optional[str] = None, font_size: int = 13) -> ImageFont.ImageFont:
    """Resolves a compatible ImageFont for the system

    Args:
        font_family: the font family to use
        font_size: the size of the font upon rendering

    Returns:
        the Pillow font
    """
    # An explicitly requested family is honoured as-is; failures propagate.
    if font_family is not None:
        return ImageFont.truetype(font_family, font_size)
    # No family given: pick one that ships with the current OS.
    default_family = "FreeMono.ttf" if platform.system() == "Linux" else "Arial.ttf"
    try:
        return ImageFont.truetype(default_family, font_size)
    except OSError:
        # Last resort: Pillow's built-in bitmap font (fixed size).
        fallback = ImageFont.load_default()
        logging.warning("unable to load recommended font family. Loading default PIL font,"
                        "font size issues may be expected."
                        "To prevent this, it is recommended to specify the value of 'font_family'.")
        return fallback
| 32.025641 | 113 | 0.659728 |
7958da063f16f9d2135f735adeaacf714e071f8d | 6,838 | py | Python | Register.py | arshita1123/Student-Result-Management-System | 4d5f64796ed3b1060c05cb8724381cefcd126158 | [
"BSD-3-Clause"
] | null | null | null | Register.py | arshita1123/Student-Result-Management-System | 4d5f64796ed3b1060c05cb8724381cefcd126158 | [
"BSD-3-Clause"
] | null | null | null | Register.py | arshita1123/Student-Result-Management-System | 4d5f64796ed3b1060c05cb8724381cefcd126158 | [
"BSD-3-Clause"
] | null | null | null | from tkinter import *
from PIL import Image,ImageTk
from tkinter import ttk,messagebox
import sqlite3
import os
class register_ui:
    """Registration window for the Student Result Management System.

    Builds a full-screen Tk registration form, validates the entered data and
    inserts new users into the ``Register`` table of ``Database.db``.
    """

    def __init__(self, root):
        """Build all widgets on *root* (a ``Tk`` instance)."""
        self.root = root
        self.root.title("Registeration Window")
        self.root.geometry("1350x700+0+0")
        self.root.config(bg='white')
        ### Required Variables##
        # var1/var2: first/last name, var3: contact, var4: email,
        # var5/var6: security question/answer, var7/var8: password/confirm,
        # var9: terms-and-conditions checkbox (1 = agreed).
        self.var1 = StringVar()
        self.var2 = StringVar()
        self.var3 = StringVar()
        self.var4 = StringVar()
        self.var5 = StringVar()
        self.var6 = StringVar()
        self.var7 = StringVar()
        self.var8 = StringVar()
        self.var9 = IntVar()
        ### Background Image##
        # Image objects are kept on ``self`` so Tk does not garbage collect them.
        self.bg_img = ImageTk.PhotoImage(file='Images/b2.jpg')
        bg_label = Label(self.root, image=self.bg_img)
        bg_label.place(x=250, y=0, relwidth=1, relheight=1)
        ###Side Image##
        self.side_img = ImageTk.PhotoImage(file='Images/side.png')
        side_label = Label(self.root, image=self.side_img)
        side_label.place(x=80, y=100, width=400, height=500)
        ###CONTENT SECTION##
        ### Registeration Frame##
        frame1 = Frame(self.root, bg='white')
        frame1.place(x=480, y=100, width=700, height=500)
        title = Label(frame1, text='REGISTER HERE', font=('Times New Roman', 20, 'bold'), bg='white', fg='green')
        title.place(x=50, y=30)
        ### Labels##
        label1 = Label(frame1, text='First Name', font=('Times New Roman', 15, 'bold'), bg='white', fg='grey')
        label1.place(x=50, y=100)
        label2 = Label(frame1, text='Last Name', font=('Times New Roman', 15, 'bold'), bg='white', fg='grey')
        label2.place(x=370, y=100)
        label3 = Label(frame1, text='Contact No.', font=('Times New Roman', 15, 'bold'), bg='white', fg='grey')
        label3.place(x=50, y=170)
        label4 = Label(frame1, text='Email.', font=('Times New Roman', 15, 'bold'), bg='white', fg='grey')
        label4.place(x=370, y=170)
        label5 = Label(frame1, text='Security Question', font=('Times New Roman', 15, 'bold'), bg='white', fg='grey')
        label5.place(x=50, y=240)
        label6 = Label(frame1, text='Answer', font=('Times New Roman', 15, 'bold'), bg='white', fg='grey')
        label6.place(x=370, y=240)
        label7 = Label(frame1, text='Password', font=('Times New Roman', 15, 'bold'), bg='white', fg='grey')
        label7.place(x=50, y=310)
        label8 = Label(frame1, text='Confirm Password', font=('Times New Roman', 15, 'bold'), bg='white', fg='grey')
        label8.place(x=370, y=310)
        ### Entries##
        self.entry1 = Entry(frame1, textvariable=self.var1, font=("times new roman", 15), bg='lightgray')
        self.entry1.place(x=50, y=130, width=250)
        self.entry2 = Entry(frame1, textvariable=self.var2, font=("times new roman", 15), bg='lightgray')
        self.entry2.place(x=370, y=130, width=250)
        self.entry3 = Entry(frame1, textvariable=self.var3, font=("times new roman", 15), bg='lightgray')
        self.entry3.place(x=50, y=200, width=250)
        self.entry4 = Entry(frame1, textvariable=self.var4, font=("times new roman", 15), bg='lightgray')
        self.entry4.place(x=370, y=200, width=250)
        self.entry5 = ttk.Combobox(frame1, textvariable=self.var5, font=("times new roman", 13), justify=CENTER, values=("Your First Pet Name", "Your Birth Place", "Your Best Friend Name"), state='readonly')
        self.entry5.set("Select")
        self.entry5.place(x=50, y=270, width=250)
        self.entry6 = Entry(frame1, textvariable=self.var6, font=("times new roman", 15), bg='lightgray')
        self.entry6.place(x=370, y=270, width=250)
        self.entry7 = Entry(frame1, textvariable=self.var7, font=("times new roman", 15), bg='lightgray')
        self.entry7.place(x=50, y=340, width=250)
        self.entry8 = Entry(frame1, textvariable=self.var8, font=("times new roman", 15), bg='lightgray')
        self.entry8.place(x=370, y=340, width=250)
        ### Terms##
        self.chk = Checkbutton(frame1, variable=self.var9, text="I Agree the Terms and Conditions", onvalue=1, offvalue=0, bg='white', font=("times new roman", 12))
        self.chk.place(x=50, y=380)
        ### Submission#
        self.btn_image = ImageTk.PhotoImage(file='Images/register.png')
        button1 = Button(frame1, image=self.btn_image, bd=0, cursor='hand2', command=self.Register)
        button1.place(x=50, y=420)
        button2 = Button(self.root, text='Sign In', command=self.LogInWindow, font=("Times New Roman", 20), bd=0, cursor='hand2')
        button2.place(x=200, y=460, width=180)

    ### METHODS FOR FUNCTIONALITY##
    def Register(self):
        """Validate the form and insert the new user into the database."""
        if self.var1.get() == '' or self.var3.get() == '' or self.var4.get() == '' or self.var5.get() == 'Select' or self.var6.get() == '' or self.var7.get() == '' or self.var8.get() == '':
            messagebox.showerror("Error", "All fields are required", parent=self.root)
        elif self.var7.get() != self.var8.get():
            messagebox.showerror("Error", "Password and Confirm Password shoud be same", parent=self.root)
        elif self.var9.get() == 0:
            messagebox.showerror("Error", "Please Agree our terms and conditions", parent=self.root)
        else:
            try:
                obj1 = sqlite3.connect("Database.db")
                obj = obj1.cursor()
            except:
                messagebox.showerror("Error", "Error in processing", parent=self.root)
                # BUG FIX: previously execution fell through with ``obj``
                # undefined; the code below then raised a NameError that was
                # swallowed by the bare except and misreported as
                # "Unable to register".
                return
            try:
                # Reject duplicate email addresses, then duplicate contact
                # numbers, before inserting the new row.
                obj.execute("SELECT * FROM Register WHERE Email=?", (self.var4.get(),))
                data = obj.fetchone()
                if data != None:
                    messagebox.showerror("Error", "User with this Email already Exists", parent=self.root)
                else:
                    obj.execute("SELECT * FROM Register WHERE Contact=?", (self.var3.get(),))
                    data = obj.fetchone()
                    if data != None:
                        messagebox.showerror("Error", "User with this Contact No. already Exists", parent=self.root)
                    else:
                        obj.execute("INSERT INTO Register(FName,LName,Email,Contact,Ques,Ans,Pwd) values(?,?,?,?,?,?,?)", (self.var1.get(), self.var2.get(), self.var4.get(), self.var3.get(), self.var5.get(), self.var6.get(), self.var7.get()))
                        obj1.commit()
                        messagebox.showinfo("Message", "Registeration Successful")
                        self.Clear()
            except:
                messagebox.showerror("Error", "Unable to register", parent=self.root)

    def Clear(self):
        """Reset every form field to its initial state."""
        self.var1.set("")
        self.var2.set("")
        self.var3.set("")
        self.var4.set("")
        self.var5.set("Select")
        self.var6.set("")
        self.var7.set("")
        self.var8.set("")

    def LogInWindow(self):
        """Close this window and launch the login script."""
        self.root.destroy()
        os.system("python LogIn.py")
if __name__ == "__main__":
    # Only launch the registration window when run as a script,
    # not when this module is imported.
    obj1 = Tk()
    obj2 = register_ui(obj1)
    obj1.mainloop()
| 43.278481 | 235 | 0.597836 |
7958da85d573b212e0b1c0bcd8122d19e9ec3a7b | 3,670 | py | Python | src/transformers/models/blenderbot/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py | sunjiao123sun/transformers | c994eca173f0eaf818caf4cc93148a4b040a6b04 | [
"Apache-2.0"
] | 2 | 2019-04-12T12:22:38.000Z | 2019-07-15T02:49:42.000Z | src/transformers/models/blenderbot/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py | evanzd/transformers | c60e0e1ee45f4bf1017736b146c51729f120bb83 | [
"Apache-2.0"
] | 2 | 2022-01-13T04:20:10.000Z | 2022-03-12T01:04:07.000Z | src/transformers/models/blenderbot/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py | evanzd/transformers | c60e0e1ee45f4bf1017736b146c51729f120bb83 | [
"Apache-2.0"
] | 1 | 2022-02-22T08:18:08.000Z | 2022-02-22T08:18:08.000Z | # coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Blenderbot checkpoint."""
import argparse
import torch
from ...models.bart import BartConfig, BartForConditionalGeneration
from ...utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# (parlai_name, hf_name) substring replacements applied in order by
# rename_state_dict_key() to translate ParlAI parameter names into
# HuggingFace Bart names.
# NOTE(review): "attention" -> "attn" runs first, so by the time the second
# pair is reached "encoder_attention" has already become "encoder_attn" and
# that pattern can never match -- presumably harmless, but confirm.
PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    """Map a ParlAI state-dict key *k* to the equivalent HF Bart key."""
    # The shared embedding table has a dedicated name on the HF side.
    if k == "embeddings.weight":
        return "shared.weight"

    new_key = k
    for parlai_name, hf_name in PATTERNS:
        new_key = new_key.replace(parlai_name, hf_name)

    # Encoder and decoder layers use different layer-norm naming.
    if new_key.startswith("encoder"):
        extra = (
            (".attn", ".self_attn"),
            ("norm1", "self_attn_layer_norm"),
            ("norm2", "final_layer_norm"),
        )
    elif new_key.startswith("decoder"):
        extra = (
            ("norm1", "self_attn_layer_norm"),
            ("norm2", "encoder_attn_layer_norm"),
            ("norm3", "final_layer_norm"),
        )
    else:
        extra = ()
    for old, new in extra:
        new_key = new_key.replace(old, new)
    return new_key
def rename_layernorm_keys(sd):
    """Rename the four ``layernorm_embedding`` entries of *sd* (in place) to
    ``layer_norm``; used for ``normalize_before`` (Blenderbot-3B) checkpoints.
    """
    for side in ("encoder", "decoder"):
        for param in ("weight", "bias"):
            old_key = "model.{}.layernorm_embedding.{}".format(side, param)
            new_key = old_key.replace("layernorm_embedding", "layer_norm")
            value = sd.pop(old_key)
            # The target name must not already exist in the state dict.
            assert new_key not in sd
            sd[new_key] = value
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """
    Convert a ParlAI Blenderbot checkpoint into a HF Bart model and save it.
    """
    # The ParlAI checkpoint stores the actual weights under the "model" key.
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BartConfig.from_json_file(config_json_path)
    m = BartForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            # Keys with no counterpart in the HF model are collected here
            # (note: `failures` is never reported) and dropped from `mapping`.
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    # NOTE(review): this rename mutates `sd` *after* `mapping` has already been
    # built, so it cannot affect the state dict loaded below -- verify whether
    # it should run before the loop (or operate on `mapping`) for
    # normalize_before checkpoints.
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    # strict=True: raise if any model key is missing from `mapping`.
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Command-line entry point: parse paths and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 31.913043 | 111 | 0.676022 |
7958da87b25f428e3a4969ae29e9a5065e3d6493 | 951 | py | Python | yang_modules/pyangbind_demo.py | ksator/automation_and_telemetry_workshop | ecc29115f590837bc12ba51a6a98986981799048 | [
"Apache-2.0"
] | 7 | 2021-05-27T09:13:07.000Z | 2022-01-09T13:15:24.000Z | yang_modules/pyangbind_demo.py | ksator/automation_and_telemetry_workshop | ecc29115f590837bc12ba51a6a98986981799048 | [
"Apache-2.0"
] | null | null | null | yang_modules/pyangbind_demo.py | ksator/automation_and_telemetry_workshop | ecc29115f590837bc12ba51a6a98986981799048 | [
"Apache-2.0"
] | 1 | 2021-06-29T16:01:38.000Z | 2021-06-29T16:01:38.000Z | from oc_bgp import openconfig_bgp
import pyangbind.lib.pybindJSON as pybindJSON
oc=openconfig_bgp()
oc.bgp.global_.config.as_="65002"
oc.bgp.peer_groups.peer_group.add("XYZ")
oc.bgp.peer_groups.peer_group["XYZ"].config.peer_group_name="XYZ"
oc.bgp.peer_groups.peer_group["XYZ"].config.peer_as=65002
oc.bgp.neighbors.neighbor.add("10.10.10.154")
oc.bgp.neighbors.neighbor["10.10.10.154"].config.neighbor_address="10.10.10.154"
oc.bgp.neighbors.neighbor["10.10.10.154"].config.peer_group="XYZ"
oc.bgp.neighbors.neighbor["10.10.10.154"].config.enabled=True
oc.bgp.neighbors.neighbor.add("10.10.10.157")
oc.bgp.neighbors.neighbor["10.10.10.157"].config.neighbor_address="10.10.10.157"
oc.bgp.neighbors.neighbor["10.10.10.157"].config.peer_group="XYZ"
oc.bgp.neighbors.neighbor["10.10.10.157"].config.enabled=True
#print(pybindJSON.dumps(oc.bgp, mode="ietf"))
f=open("../gnmi/test.json", "w")
f.write(pybindJSON.dumps(oc.bgp, mode="ietf"))
f.close
| 33.964286 | 80 | 0.760252 |
7958da8827a3dc3d948eba6118b9337b8fbf2985 | 2,858 | py | Python | tests/components/sleepiq/test_init.py | zalke/home-assistant | a31e49c857722c0723dc5297cd83cbce0f8716f6 | [
"Apache-2.0"
] | 2 | 2019-07-28T18:56:14.000Z | 2019-07-28T18:56:17.000Z | tests/components/sleepiq/test_init.py | zalke/home-assistant | a31e49c857722c0723dc5297cd83cbce0f8716f6 | [
"Apache-2.0"
] | null | null | null | tests/components/sleepiq/test_init.py | zalke/home-assistant | a31e49c857722c0723dc5297cd83cbce0f8716f6 | [
"Apache-2.0"
] | 1 | 2018-04-29T02:14:32.000Z | 2018-04-29T02:14:32.000Z | """The tests for the SleepIQ component."""
import unittest
from unittest.mock import MagicMock, patch
import requests_mock
from homeassistant import setup
import homeassistant.components.sleepiq as sleepiq
from tests.common import load_fixture, get_test_home_assistant
def mock_responses(mock, single=False):
    """Mock responses for SleepIQ."""
    base_url = 'https://prod-api.sleepiq.sleepnumber.com/rest/'
    # The "single" flavor swaps in the single-bed fixture files.
    suffix = '-single' if single else ''
    endpoints = (
        ('put', 'login', 'sleepiq-login.json'),
        ('get', 'bed?_k=0987', 'sleepiq-bed{}.json'.format(suffix)),
        ('get', 'sleeper?_k=0987', 'sleepiq-sleeper.json'),
        ('get', 'bed/familyStatus?_k=0987',
         'sleepiq-familystatus{}.json'.format(suffix)),
    )
    for method, path, fixture in endpoints:
        register = getattr(mock, method)
        register(base_url + path, text=load_fixture(fixture))
class TestSleepIQ(unittest.TestCase):
    """Tests the SleepIQ component."""

    def setUp(self):
        """Initialize values for this test case class."""
        self.hass = get_test_home_assistant()
        self.username = 'foo'
        self.password = 'bar'
        self.config = {
            'sleepiq': {
                'username': self.username,
                'password': self.password,
            }
        }

    def tearDown(self):  # pylint: disable=invalid-name
        """Stop everything that was started."""
        self.hass.stop()

    @requests_mock.Mocker()
    def test_setup(self, mock):
        """Test the setup."""
        mock_responses(mock)

        # We're mocking the load_platform discoveries or else the platforms
        # will be setup during tear down when blocking till done, but the mocks
        # are no longer active.
        with patch(
                'homeassistant.helpers.discovery.load_platform', MagicMock()):
            assert sleepiq.setup(self.hass, self.config)

    @requests_mock.Mocker()
    def test_setup_login_failed(self, mock):
        """Test the setup if a bad username or password is given."""
        # A 401 from the login endpoint should make setup fail gracefully.
        mock.put('https://prod-api.sleepiq.sleepnumber.com/rest/login',
                 status_code=401,
                 json=load_fixture('sleepiq-login-failed.json'))

        response = sleepiq.setup(self.hass, self.config)
        assert not response

    def test_setup_component_no_login(self):
        """Test the setup when no login is configured."""
        conf = self.config.copy()
        del conf['sleepiq']['username']
        assert not setup.setup_component(self.hass, sleepiq.DOMAIN, conf)

    def test_setup_component_no_password(self):
        """Test the setup when no password is configured."""
        conf = self.config.copy()
        del conf['sleepiq']['password']
        assert not setup.setup_component(self.hass, sleepiq.DOMAIN, conf)
7958da97cbe31bc01b58ce683cb003a54bd1dedd | 2,094 | py | Python | plugins/cortex_v2/komand_cortex_v2/connection/schema.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/cortex_v2/komand_cortex_v2/connection/schema.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/cortex_v2/komand_cortex_v2/connection/schema.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Input:
    """Symbolic names of the connection's input parameters (generated)."""
    API_KEY = "api_key"
    HOST = "host"
    PORT = "port"
    PROTOCOL = "protocol"
    PROXY = "proxy"
    VERIFY = "verify"
class ConnectionSchema(komand.Input):
    """Generated connection-input schema (host, port, protocol, proxy,
    SSL verification and API key)."""
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "api_key": {
      "$ref": "#/definitions/credential_secret_key",
      "title": "API Key",
      "description": "API key assigned to the user",
      "order": 6
    },
    "host": {
      "type": "string",
      "title": "Host",
      "description": "Cortex host e.g. cortex.company.com or 10.3.4.50",
      "order": 1
    },
    "port": {
      "type": "string",
      "title": "Port",
      "description": "Cortex API port e.g. 9999",
      "default": "9999",
      "order": 2
    },
    "protocol": {
      "type": "string",
      "title": "Protocol",
      "description": "HTTP protocol",
      "enum": [
        "HTTP",
        "HTTPS"
      ],
      "order": 3
    },
    "proxy": {
      "type": "object",
      "title": "Proxy",
      "description": "An optional dictionary containing proxy data, with HTTP or HTTPS as the key, and the proxy URL as the value",
      "order": 4
    },
    "verify": {
      "type": "boolean",
      "title": "SSL Verify",
      "description": "Verify the certificate",
      "default": true,
      "order": 5
    }
  },
  "required": [
    "api_key",
    "host",
    "port",
    "protocol",
    "verify"
  ],
  "definitions": {
    "credential_secret_key": {
      "id": "credential_secret_key",
      "type": "object",
      "title": "Credential: Secret Key",
      "description": "A shared secret key",
      "properties": {
        "secretKey": {
          "type": "string",
          "title": "Secret Key",
          "displayType": "password",
          "description": "The shared secret key",
          "format": "password"
        }
      },
      "required": [
        "secretKey"
      ]
    }
  }
}
    """)

    def __init__(self):
        # NOTE(review): ``super(self.__class__, ...)`` recurses infinitely if
        # this class is ever subclassed -- generated code, so left as-is.
        super(self.__class__, self).__init__(self.schema)
| 21.8125 | 131 | 0.505731 |
7958db603c6b3fd3c19a7c75dc92c195bbc21ab8 | 524 | py | Python | invera/task/migrations/0005_auto_20210112_2300.py | LeoLeiva/todo-challenge | f6f24f53758eb4e425c91516bcab7af8cad66814 | [
"MIT"
] | null | null | null | invera/task/migrations/0005_auto_20210112_2300.py | LeoLeiva/todo-challenge | f6f24f53758eb4e425c91516bcab7af8cad66814 | [
"MIT"
] | null | null | null | invera/task/migrations/0005_auto_20210112_2300.py | LeoLeiva/todo-challenge | f6f24f53758eb4e425c91516bcab7af8cad66814 | [
"MIT"
] | 1 | 2021-01-10T20:19:42.000Z | 2021-01-10T20:19:42.000Z | # Generated by Django 2.2.17 on 2021-01-13 02:00
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated Django migration (see header comment).

    Alters ``InveraTask.created``; the default is a fixed timestamp captured
    at generation time, as Django emits for auto-generated defaults.
    """

    dependencies = [
        ('task', '0004_auto_20210112_1842'),
    ]

    operations = [
        migrations.AlterField(
            model_name='inveratask',
            name='created',
            field=models.DateTimeField(default=datetime.datetime(2021, 1, 13, 2, 0, 0, 541625, tzinfo=utc), verbose_name='Creado'),
        ),
    ]
| 24.952381 | 131 | 0.645038 |
7958dbaad853c6b30095cb293f9387bb9c73d13b | 2,294 | py | Python | main.py | johndpope/StyleSegments | f6f63df0806c0c90db31f51c64fefe299cdec36a | [
"Unlicense",
"MIT"
] | 1 | 2021-01-15T20:05:45.000Z | 2021-01-15T20:05:45.000Z | main.py | johndpope/StyleSegments | f6f63df0806c0c90db31f51c64fefe299cdec36a | [
"Unlicense",
"MIT"
] | 1 | 2021-01-20T08:45:32.000Z | 2021-01-21T11:04:16.000Z | main.py | johndpope/StyleSegments | f6f63df0806c0c90db31f51c64fefe299cdec36a | [
"Unlicense",
"MIT"
] | 2 | 2021-01-15T20:05:51.000Z | 2021-01-20T03:19:06.000Z | from config import images_paths, ims_config, combined_folder, overwrite, mask_blur_size
from segmentation.segment_image import transfer_styles
from style_transfer.style_transfer import StyleTransferModel
import cv2
import numpy as np
from pathlib import Path
if __name__ == "__main__":
    # Get all the masks
    # masks[image_path][segmentation_model] -> per-pixel class-id array
    # (assumes transfer_styles returns a 2-D label image -- TODO confirm)
    masks = {}
    for im_path in images_paths:
        masks[im_path] = {}
        for seg_model in ims_config[im_path.name]["seg_models"]:
            masks[im_path][seg_model] = transfer_styles(im_path, seg_model)

    # Get all the styles
    style_model = StyleTransferModel(images_paths, ims_config, overwrite=overwrite)
    styles = style_model.run()

    # Combine the two
    for im_path in images_paths:
        for i, seg_model in enumerate(ims_config[im_path.name]["seg_models"]):
            for style in ims_config[im_path.name]["styles"]:
                # Get the data for this image, style and model
                seg_class = ims_config[im_path.name]["class"][i]
                mask = masks[im_path][seg_model].astype("uint8")
                # styles[...] is converted RGB -> BGR for OpenCV display/IO.
                stylized = cv2.cvtColor(styles[im_path][style], cv2.COLOR_RGB2BGR)

                # Apply mask and get final image
                original = cv2.imread(im_path.as_posix())
                original = cv2.resize(original, stylized.shape[:2][::-1])
                mask = cv2.resize(mask, stylized.shape[:2][::-1])
                # Binary mask of the target class, scaled to 0..255 and box
                # blurred so the stylized region feathers into the original.
                mask = (mask == seg_class).astype("uint8")
                mask = cv2.blur(mask * 255, (mask_blur_size, mask_blur_size)) / 255
                mask = np.expand_dims(mask, 2)
                mask = np.repeat(mask, 3, axis=2)
                # Alpha-blend: original where mask==0, stylized where mask==1.
                output = (original.astype(float) * (1 - mask) + stylized.astype(float) * mask).astype("uint8")
                impath = combined_folder / (im_path.stem + "_" + seg_model + "_" + Path(style).stem + ".png")
                cv2.imwrite(impath.as_posix(), output)
                print(f"\nSaved final image to {impath}")

                # Show outputs
                cv2.imshow("Original image", original)
                cv2.imshow("Stylized image", stylized)
                cv2.imshow("Mask", (mask * 255).astype("uint8")[:, :, 0])
                cv2.imshow("Final image", output)
                cv2.waitKey()

    print("\n***** DONE *****")
| 44.115385 | 110 | 0.597646 |
7958dc544bd682861b4101d3e9a76f5a80a56fed | 4,542 | py | Python | usdmanager/constants.py | J-Mo63/usdmanager | 6ded425e8c47f5e81be5252aeba9698a64808978 | [
"Apache-2.0"
] | 215 | 2019-03-29T20:45:40.000Z | 2022-03-22T12:00:32.000Z | usdmanager/constants.py | J-Mo63/usdmanager | 6ded425e8c47f5e81be5252aeba9698a64808978 | [
"Apache-2.0"
] | 19 | 2019-03-29T21:40:00.000Z | 2022-03-23T16:35:57.000Z | usdmanager/constants.py | J-Mo63/usdmanager | 6ded425e8c47f5e81be5252aeba9698a64808978 | [
"Apache-2.0"
] | 33 | 2019-03-29T21:47:55.000Z | 2022-03-21T21:57:21.000Z | #
# Copyright 2018 DreamWorks Animation L.L.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Constant values
"""
# USD file extensions.
# Expandable with custom file formats.
# First in each tuple is preferred extension for that format (e.g. in Save dialog).
USD_AMBIGUOUS_EXTS = ("usd",)  # Can be ASCII or crate.
USD_ASCII_EXTS = ("usda",)  # Can ONLY be ASCII.
USD_CRATE_EXTS = ("usdc",)  # Can ONLY be Crate.
USD_ZIP_EXTS = ("usdz",)
USD_EXTS = USD_AMBIGUOUS_EXTS + USD_ASCII_EXTS + USD_CRATE_EXTS + USD_ZIP_EXTS

# File filters for the File > Open... and File > Save As... dialogs.
# Index order here must match the FILE_FORMAT_* constants below.
FILE_FILTER = (
    "USD Files (*.{})".format(" *.".join(USD_EXTS)),
    "USD - ASCII (*.{})".format(" *.".join(USD_AMBIGUOUS_EXTS + USD_ASCII_EXTS)),
    "USD - Crate (*.{})".format(" *.".join(USD_AMBIGUOUS_EXTS + USD_CRATE_EXTS)),
    "USD - Zip (*.{})".format(" *.".join(USD_ZIP_EXTS)),
    "All Files (*)"
)

# Format of the currently active file. Also, the index in the file filter list for that type.
# Used for things such as differentiating between file types when using the generic .usd extension.
FILE_FORMAT_USD = 0   # Generic USD file (usda or usdc)
FILE_FORMAT_USDA = 1  # ASCII USD file
FILE_FORMAT_USDC = 2  # Binary USD crate file
FILE_FORMAT_USDZ = 3  # Zip-compressed USD package
FILE_FORMAT_NONE = 4  # Generic text file

# Default template for display files with links.
# When dark theme is enabled, this is overridden in __init__.py.
# Doubled braces {{ }} are literal CSS braces; the single {} is filled with
# the document body via str.format.
HTML_BODY = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><style type="text/css">
a.mayNotExist {{color:#C90}}
a.binary {{color:#69F}}
.badLink {{color:red}}
</style></head><body style="white-space:pre">{}</body></html>"""

# Set a length limit on parsing for links and syntax highlighting on long lines. 999 chosen semi-arbitrarily to speed
# up things like crate files with really long timeSamples lines that otherwise lock up the UI.
# TODO: Potentially truncate the display of long lines, too, since it can slow down interactivity of the Qt UI. Maybe make it a [...] link to display the full line again?
LINE_CHAR_LIMIT = 999

# Truncate loading files with more lines than this.
# Display can slow down and/or become unusable with too many lines.
# This number is less important than the total number of characters and can be overridden in Preferences.
LINE_LIMIT = 50000

# Truncate loading files with more total chars than this.
# QString crashes at ~2.1 billion chars, but display slows down way before that.
CHAR_LIMIT = 100000000

# Number of recent files and tabs to remember.
RECENT_FILES = 20
RECENT_TABS = 10

# Shell character escape codes that can be converted for HTML display.
# NOTE(review): the first pattern lacks the ESC prefix ('[0m' vs '\x1b[0m') --
# presumably intentional so it also matches codes whose ESC byte was consumed
# elsewhere; confirm.
TTY2HTML = (
    ('[0m', '</span>'),
    ('\x1b[40m', '<span style="background-color:black">'),
    ('\x1b[44m', '<span style="background-color:blue">'),
    ('\x1b[46m', '<span style="background-color:cyan">'),
    ('\x1b[42m', '<span style="background-color:green">'),
    ('\x1b[45m', '<span style="background-color:magenta">'),
    ('\x1b[41m', '<span style="background-color:red">'),
    ('\x1b[47m', '<span style="background-color:white">'),
    ('\x1b[43m', '<span style="background-color:yellow">'),
    ('\x1b[30m', '<span style="font-family:monospace; color:black">'),
    ('\x1b[34m', '<span style="font-family:monospace; color:#0303ab">'),
    ('\x1b[36m', '<span style="font-family:monospace; color:cyan">'),
    ('\x1b[32m', '<span style="font-family:monospace; color:#38bc38">'),
    ('\x1b[35m', '<span style="font-family:monospace; color:magenta">'),
    ('\x1b[31m', '<span style="font-family:monospace; color:#aa0000">'),
    ('\x1b[37m', '<span style="font-family:monospace; color:gray">'),
    ('\x1b[33m', '<span style="font-family:monospace; color:#bd7d3e">'),
    ('\x1b[7m', '<span style="color:white; background-color:black">'),
    ('\x1b[0m', '<span style="color:#38bc38">'),
    ('\x1b[4m', '<span style="color:#38bc38; text-decoration:underline">'),
    ('\x1b[1m', '<span style="font-weight:bold">')
)
| 46.824742 | 170 | 0.690004 |
7958dc7614a600a58910dad733ec3b2a18e8a5e3 | 75,166 | py | Python | pyNastran/converters/nastran/gui/results_helper.py | 214929177/pyNastran | 73032d6ffd445ef085c124dde6b5e90a516a5b6a | [
"BSD-3-Clause"
] | null | null | null | pyNastran/converters/nastran/gui/results_helper.py | 214929177/pyNastran | 73032d6ffd445ef085c124dde6b5e90a516a5b6a | [
"BSD-3-Clause"
] | null | null | null | pyNastran/converters/nastran/gui/results_helper.py | 214929177/pyNastran | 73032d6ffd445ef085c124dde6b5e90a516a5b6a | [
"BSD-3-Clause"
] | 1 | 2021-10-14T03:52:44.000Z | 2021-10-14T03:52:44.000Z | """Interface for converting OP2 results to the GUI format"""
# pylint: disable=C1801, C0103
from __future__ import annotations
import os
from collections import defaultdict
from typing import Tuple, Dict, Union, Any, TYPE_CHECKING
import numpy as np
from numpy.linalg import norm # type: ignore
from pyNastran.gui.gui_objects.gui_result import GuiResult, GuiResultIDs
from pyNastran.gui.gui_objects.displacements import (
DisplacementResults, ForceTableResults) #, TransientElementResults
from pyNastran.op2.result_objects.stress_object import (
_get_nastran_header,
get_rod_stress_strain,
get_bar_stress_strain, get_bar100_stress_strain, get_beam_stress_strain,
get_plate_stress_strain, get_solid_stress_strain
)
from pyNastran.gui.gui_objects.gui_result import GridPointForceResult
from .geometry_helper import NastranGuiAttributes
from .stress import (
get_spring_stress_strains, get_rod_stress_strains,
get_bar_stress_strains, get_beam_stress_strains,
get_plate_stress_strains, get_composite_plate_stress_strains,
get_solid_stress_strains)
from .force import get_spring_force, get_bar_force, get_plate_force
if TYPE_CHECKING: # pragma: no cover
from pyNastran.op2.op2 import OP2
from pyNastran.gui.gui_objects.settings import Settings
#from pyNastran.op2.result_objects.design_response import Desvars
GuiResults = Union[GuiResult, GuiResultIDs, GridPointForceResult]
class NastranGuiResults(NastranGuiAttributes):
"""Defines OP2 specific methods NastranIO"""
def __init__(self):
super(NastranGuiResults, self).__init__()
    def _fill_grid_point_forces(self, cases, model, key, icase,
                                form_dict, header_dict, keys_map):
        """Adds a 'Grid Point Forces' GUI result for *key* if the OP2 has one.

        Only real (non-complex) results are supported; returns the (possibly
        incremented) ``icase`` counter.
        """
        if key not in model.grid_point_forces:
            return icase
        grid_point_forces = model.grid_point_forces[key]
        case = grid_point_forces
        if not case.is_real:
            # complex grid point forces are not supported
            #raise RuntimeError(grid_point_forces.is_real)
            return icase

        subcase_id = key[0]
        title = 'Grid Point Forces'
        header = 'Grid Point Forces'
        nastran_res = GridPointForceResult(subcase_id, header, title, grid_point_forces)

        # only the first time step is exposed here
        itime = 0
        cases[icase] = (nastran_res, (itime, 'Grid Point Forces'))
        formii = ('Grid Point Forces', icase, [])
        form_dict[(key, itime)].append(formii)

        # header/keys_map bookkeeping mirrors the other _fill_* methods
        dt = case._times[itime]
        header = _get_nastran_header(case, dt, itime)
        header_dict[(key, itime)] = header
        keys_map[key] = (case.subtitle, case.label,
                         case.superelement_adaptivity_index, case.pval_step)
        icase += 1
        return icase
    def _fill_op2_oug_oqg(self, cases, model: OP2, key, icase: int,
                          form_dict, header_dict, keys_map, log) -> int:
        """
        Loads nodal vector results (e.g., displacements/temperatures).

        Returns the updated ``icase`` counter.
        """
        nnodes = self.nnodes
        node_ids = self.node_ids
        # standard displacement-style tables
        icase = _fill_nastran_displacements(
            cases, model, key, icase,
            form_dict, header_dict, keys_map,
            self.xyz_cid0,
            nnodes, node_ids, log, dim_max=self.gui.settings.dim_max)

        # the same tables under the 'acoustic' results prefix
        icase = _fill_nastran_displacements(
            cases, model, key, icase,
            form_dict, header_dict, keys_map,
            self.xyz_cid0,
            nnodes, node_ids, log, dim_max=self.gui.settings.dim_max,
            prefix='acoustic',
        )

        icase = _fill_nastran_temperatures(
            cases, model, key, icase,
            form_dict, header_dict, keys_map,
            nnodes, log)
        return icase
def _fill_op2_gpstress(self, cases, model: OP2,
times, key, icase: int,
form_dict, header_dict, keys_map) -> int:
"""Creates the time accurate grid point stress objects"""
if key in model.grid_point_stress_discontinuities:
case = model.grid_point_stress_discontinuities[key]
self.log.warning('skipping grid_point_stress_discontinuities')
if key in model.grid_point_stresses_volume_principal:
case = model.grid_point_stresses_volume_principal[key]
self.log.warning('skipping grid_point_stresses_volume_principal')
icase = _fill_op2_grid_point_surface_stresses(
self.element_ids,
cases, model,
times, key, icase,
form_dict, header_dict, keys_map)
icase = _fill_op2_grid_point_stresses_volume_direct(
self.node_ids,
cases, model,
times, key, icase,
form_dict, header_dict, keys_map)
return icase
def _fill_op2_centroidal_strain_energy(self, cases: Dict[int, GuiResults], model: OP2,
times, key, icase: int,
form_dict, header_dict, keys_map) -> int:
"""Creates the time accurate strain energy objects"""
case = None
# (isubcase, analysis_code, sort_method,
# count, ogs, superelement_adaptivity_index, pval_step) = key ????
subcase_id = key[0]
strain_energy = model.op2_results.strain_energy
strain_energies = [
# results_dict, name, flag of the element being supported
(strain_energy.cquad4_strain_energy, 'CQUAD4', True),
(strain_energy.cquad8_strain_energy, 'CQUAD8', True),
(strain_energy.cquadr_strain_energy, 'CQUADR', True),
(strain_energy.cquadx_strain_energy, 'CQUADX', True),
(strain_energy.ctria3_strain_energy, 'CTRIA3', True),
(strain_energy.ctria6_strain_energy, 'CTRIA6', True),
(strain_energy.ctriar_strain_energy, 'CTRIAR', True),
(strain_energy.ctriax_strain_energy, 'CTRIAX', True),
(strain_energy.ctriax6_strain_energy, 'CTRIAX6', True),
(strain_energy.ctetra_strain_energy, 'CTETRA', True),
(strain_energy.cpenta_strain_energy, 'CPENTA', True),
(strain_energy.chexa_strain_energy, 'CHEXA', True),
(strain_energy.cpyram_strain_energy, 'CPYRAM', True),
(strain_energy.crod_strain_energy, 'CROD', True),
(strain_energy.ctube_strain_energy, 'CTUBE', True),
(strain_energy.conrod_strain_energy, 'CONROD', True),
(strain_energy.cbar_strain_energy, 'CBAR', True),
(strain_energy.cbeam_strain_energy, 'CBEAM', True),
(strain_energy.cgap_strain_energy, 'CGAP', True),
(strain_energy.celas1_strain_energy, 'CELAS1', True),
(strain_energy.celas2_strain_energy, 'CELAS2', True),
(strain_energy.celas3_strain_energy, 'CELAS3', True),
(strain_energy.celas4_strain_energy, 'CELAS4', True),
(strain_energy.cdum8_strain_energy, 'CDUM8', False),
(strain_energy.cbush_strain_energy, 'CBUSH', True),
#(strain_energy.chexa8fd_strain_energy, '', False),
(strain_energy.cbend_strain_energy, 'CBEND', False),
(strain_energy.dmig_strain_energy, 'DMIG', False),
(strain_energy.genel_strain_energy, 'GENEL', False),
(strain_energy.cshear_strain_energy, 'CSHEAR', True),
(strain_energy.conm2_strain_energy, 'CONM2', False),
]
# find the cases that have results for this key
has_strain_energy = [key in res[0] for res in strain_energies]
if not any(has_strain_energy):
return icase
itrue = has_strain_energy.index(True)
unused_ese0 = strain_energies[itrue][0]
#times = ese0._times
#fmt = '%g'
#header = ''
#form0 = ('Element Strain Energy', None, [])
#op2.strain_energy[1]
#type=StrainEnergyObject ntimes=3 nelements=16
#energy, percent, density
#modes = [1, 2, 3]
nelements = self.nelements
eids = self.element_ids
for itime, unused_dt in enumerate(times):
ese = np.full(nelements, np.nan, dtype='float32')
percent = np.full(nelements, np.nan, dtype='float32')
strain_energy_density = np.full(nelements, np.nan, dtype='float32')
for istrain_energy, is_true in enumerate(has_strain_energy):
if not is_true:
continue
resdict, name, unused_flag = strain_energies[istrain_energy]
case = resdict[key]
dt = case._times[itime]
header = _get_nastran_header(case, dt, itime)
header_dict[(key, itime)] = header
keys_map[key] = (case.subtitle, case.label,
case.superelement_adaptivity_index, case.pval_step)
if case.is_complex:
continue
data = case.data
itotals = np.where(case.element[itime, :] == 100000000)[0]
assert len(itotals) == 1, itotals
itotal = itotals[0]
eidsi2 = case.element[itime, :itotal]
# find eids2i in eids
i = np.searchsorted(eids, eidsi2)
#if 0 and name == 'CELAS1': # pragma: no cover
## check that the elements were mapped correctly
#eids_actual = self.element_ids[i]
#for eid in eids_actual:
#element = self.model.elements[eid]
#assert element.type == name, element
#assert np.all(eids_actual == eidsi2)
if len(i) != len(np.unique(i)):
msg = 'Strain Energy i%s=%s is not unique because there are missing elements' % (name, str(i))
model.log.warning(msg)
continue
# verifies the try-except is what we think it is (missing elements)
esei = data[itime, :itotal, 0]
try:
ese[i] = esei
percent[i] = data[itime, :itotal, 1]
strain_energy_density[i] = data[itime, :itotal, 2]
except IndexError:
model.log.warning('error reading Strain Energy')
continue
# helicopter.dat
#CBEAM : 10
#CQUAD4 : 11388
#CROD : 544
#CTRIA3 : 151
# nelements = 12093
if np.any(np.isfinite(ese)):
ese_res = GuiResult(subcase_id, header='Strain Energy: ' + header,
title='Strain Energy', data_format='%.3e',
location='centroid', scalar=ese)
percent_res = GuiResult(subcase_id, header='Percent of Total: '+ header,
title='Percent of Total', data_format='%.3f',
location='centroid', scalar=percent)
cases[icase] = (ese_res, (subcase_id, 'Strain Energy'))
cases[icase + 1] = (percent_res, (subcase_id, 'Percent'))
form_dict[(key, itime)].append(('Strain Energy', icase, []))
form_dict[(key, itime)].append(('Percent', icase + 1, []))
icase += 2
if np.any(np.isfinite(strain_energy_density)):
sed_res = GuiResult(subcase_id, header='Strain Energy Density: ' + header,
title='Strain Energy Density', data_format='%.3e',
location='centroid', scalar=strain_energy_density)
cases[icase] = (sed_res, (subcase_id, 'Strain Energy Density'))
form_dict[(key, itime)].append(('Strain Energy Density', icase, []))
icase += 1
return icase
def _create_op2_time_centroidal_force_arrays(self, model, nelements, key, itime,
                                             header_dict, keys_map):
    """
    creates the following force outputs:
     - fx, fy, fz, mx, my, mz
     - thermal_load

    Builds centroidal force/moment arrays for one time/mode step from the
    rod-like (CONROD/CROD/CTUBE) and CBAR (types 34/100) force tables.

    Returns
    -------
    found_force : bool
        was at least one force table found for this key?
    fx, fy, fz : (nelements,) float32 ndarray
        axial / shear-y / shear-z force; NaN where no result exists
    rx, ry, rz : (nelements,) float32 ndarray
        torque / bending-y / bending-z moment; NaN where no result exists
    is_element_on : (nelements,) float32 ndarray
        1.0 for elements that have a force result, 0.0 otherwise
    """
    element_ids = self.element_ids
    fx = np.full(nelements, np.nan, dtype='float32')  # axial
    fy = np.full(nelements, np.nan, dtype='float32')  # shear_y
    fz = np.full(nelements, np.nan, dtype='float32')  # shear_z
    rx = np.full(nelements, np.nan, dtype='float32')  # torque
    ry = np.full(nelements, np.nan, dtype='float32')  # bending_y
    rz = np.full(nelements, np.nan, dtype='float32')  # bending_z
    is_element_on = np.zeros(nelements, dtype='float32')

    unused_fmt = '%g'
    header = ''
    unused_form0 = ('Force', None, [])

    case = None
    found_force = False
    # rod-like elements only carry axial force (col 0) and torque (col 1)
    for res_type in (model.conrod_force, model.crod_force, model.ctube_force):
        if key in res_type:
            found_force = True
            case = res_type[key]
            if case.is_complex:
                continue
            keys_map[key] = (case.subtitle, case.label,
                             case.superelement_adaptivity_index, case.pval_step)
            data = case.data
            if case.nonlinear_factor is None:
                unused_ntimes = data.shape[:1]
                eids = case.element
                dt = case._times[itime]
                header = _get_nastran_header(case, dt, itime)
                header_dict[(key, itime)] = header

                # map the case's element ids into the global element array
                i = np.searchsorted(element_ids, eids)
                assert np.array_equal(element_ids[i], eids)
                fxi = data[itime, :, 0]
                rxi = data[itime, :, 1]
                if fxi.size != i.size:
                    msg = 'fx.size=%s i.size=%s fx=%s eids_to_find=%s' % (
                        fxi.size, i.size, fxi, eids)
                    raise RuntimeError(msg)
                fx[i] = fxi
                rx[i] = rxi
                is_element_on[i] = 1.
            else:
                # nonlinear results are not supported here
                continue

    if key in model.cbar_force:
        found_force = True
        case = model.cbar_force[key]  # type: np.ndarray
        if case.element_type == 34:
            ## CBAR-34
            if case.is_real:
                eids = case.element
                i = np.searchsorted(element_ids, eids)
                is_element_on[i] = 1.

                dt = case._times[itime]
                header = _get_nastran_header(case, dt, itime)
                header_dict[(key, itime)] = header
                keys_map[key] = (case.subtitle, case.label,
                                 case.superelement_adaptivity_index, case.pval_step)

                # data columns:
                # [bending_moment_a1, bending_moment_a2, bending_moment_b1, bending_moment_b2,
                #  shear1, shear2, axial, torque]
                if i.size == 1:
                    rxi = case.data[itime, :, 7].max()
                    ryi = np.vstack([case.data[itime, :, 0], case.data[itime, :, 2]]).max()
                    rzi = np.vstack([case.data[itime, :, 1], case.data[itime, :, 3]]).max()
                else:
                    # take the worst (max) of the A/B end moments per element
                    rxi = case.data[itime, :, 7]#.max(axis=0)
                    ryi = np.vstack([case.data[itime, :, 0], case.data[itime, :, 2]]).max(axis=0)
                    rzi = np.vstack([case.data[itime, :, 1], case.data[itime, :, 3]]).max(axis=0)
                assert rxi.size == i.size, 'rx.size=%s i.size=%s rx=%s' % (rxi.size, i.size, rxi)
                assert ryi.size == i.size, 'ry.size=%s i.size=%s ry=%s' % (ryi.size, i.size, ryi)
                assert rzi.size == i.size, 'rz.size=%s i.size=%s rz=%s' % (rzi.size, i.size, rzi)
                rx[i] = rxi
                ry[i] = ryi
                rz[i] = rzi
        elif case.element_type == 100:
            ## CBAR-100: multiple stations per element
            eids = case.element
            ueids = np.unique(eids)

            dt = case._times[itime]
            header = _get_nastran_header(case, dt, itime)
            header_dict[(key, itime)] = header
            keys_map[key] = (case.subtitle, case.label,
                             case.superelement_adaptivity_index, case.pval_step)

            j = np.searchsorted(self.element_ids, ueids)
            # NOTE(review): j[1:-1] - j[0:-2] looks like it was meant to be a
            # consecutive diff (j[1:] - j[:-1]); verify against the OP2 layout
            di = j[1:-1] - j[0:-2]
            if len(di) == 0:
                # pload1
                self.log_error('Error loading CBAR-100 forces; failed slicing element_ids')
            else:
                is_element_on[j] = 1.

                if di.max() != 2:
                    # irregular station counts; group per eid and take the max
                    # data columns:
                    # [station, bending_moment1, bending_moment2, shear1, shear2, axial, torque]
                    ii = 0
                    unused_eid_old = eids[0]
                    fxi = defaultdict(list)
                    fyi = defaultdict(list)
                    fzi = defaultdict(list)
                    rxi = defaultdict(list)
                    ryi = defaultdict(list)
                    rzi = defaultdict(list)
                    for ii, eid in enumerate(eids):
                        fxi[eid].append(case.data[:, ii, 5])
                        fyi[eid].append(case.data[:, ii, 3])
                        fzi[eid].append(case.data[:, ii, 4])

                        rxi[eid].append(case.data[:, ii, 6])
                        ryi[eid].append(case.data[:, ii, 1])
                        rzi[eid].append(case.data[:, ii, 2])

                    for ii, eidi in zip(j, eids[j]):
                        fx[ii] = max(fxi[eidi])
                        fy[ii] = max(fyi[eidi])
                        # bug fix: was max(fyi[eidi]) (copy/paste); shear-z must
                        # come from the z-shear dict
                        fz[ii] = max(fzi[eidi])
                        rx[ii] = max(rxi[eidi])
                        ry[ii] = max(ryi[eidi])
                        rz[ii] = max(rzi[eidi])
                else:
                    # exactly 2 stations (A/B ends) per element
                    # [station, bending_moment1, bending_moment2, shear1, shear2, axial, torque]
                    neids = len(np.unique(eids)) * 2
                    if len(eids) != len(np.unique(eids)) * 2:
                        msg = 'CBAR-100 Error: len(eids)=%s neids=%s' % (len(eids), neids)
                        raise RuntimeError(msg)
                    fx[i] = np.array(
                        [case.data[itime, ::-1, 5],
                         case.data[itime, 1::-1, 5]]).max(axis=0)
                    fy[i] = np.array(
                        [case.data[itime, ::-1, 3],
                         case.data[itime, 1::-1, 3]]).max(axis=0)
                    fz[i] = np.array(
                        [case.data[itime, ::-1, 4],
                         case.data[itime, 1::-1, 4]]).max(axis=0)
                    rx[i] = np.array(
                        [case.data[itime, ::-1, 6],
                         case.data[itime, 1::-1, 6]]).max(axis=0)
                    ry[i] = np.array(
                        [case.data[itime, ::-1, 1],
                         case.data[itime, 1::-1, 1]]).max(axis=0)
                    rz[i] = np.array(
                        [case.data[itime, ::-1, 2],
                         case.data[itime, 1::-1, 2]]).max(axis=0)
        else:
            raise NotImplementedError(case)
    return found_force, fx, fy, fz, rx, ry, rz, is_element_on
def _fill_op2_time_centroidal_force(self, cases, model: OP2,
                                    key: Tuple[Any, int], icase: int, itime: int,
                                    form_dict: Dict[Any, Any],
                                    header_dict: Dict[Any, Any],
                                    keys_map: Dict[Any, Any]) -> int:
    """
    Creates the time accurate force objects

    Builds the per-element force GuiResults (Axial/ShearY/ShearZ/
    Torsion/BendingY/BendingZ) plus the Is* on/off fringe results for one
    time/mode step and registers them in ``cases``/``form_dict``.

    Returns the next free case id.
    """
    nelements = self.nelements
    out = self._create_op2_time_centroidal_force_arrays(
        model, nelements, key, itime, header_dict, keys_map)
    found_force, fx, fy, fz, rx, ry, rz, is_element_on = out

    subcase_id = key[2]
    if found_force:
        fmt = '%.4f'
        # NOTE(review): num_off is never updated after this; the
        # "num_off == nelements" check below can only trip when nelements == 0
        num_off = 0
        if itime == 0 and is_element_on.min() == 0.0:
            icase = self.save_filtered_forces(key, itime, icase, is_element_on,
                                              subcase_id, cases, form_dict)

        # a component is "interesting" if it's finite and not constant
        is_fx = np.any(np.isfinite(fx)) and np.nanmin(fx) != np.nanmax(fx)
        is_fy = np.any(np.isfinite(fy)) and np.nanmin(fy) != np.nanmax(fy)
        is_fz = np.any(np.isfinite(fz)) and np.nanmin(fz) != np.nanmax(fz)
        is_rx = np.any(np.isfinite(rx)) and np.nanmin(rx) != np.nanmax(rx)
        #is_ry = np.any(np.isfinite(ry)) and np.nanmin(ry) != np.nanmax(ry)
        #is_rz = np.any(np.isfinite(rz)) and np.nanmin(rz) != np.nanmax(rz)

        # bug fix: parenthesized; the original "a or b and c" parsed as
        # "a or (b and c)" - the intent is to skip only when nothing is on
        if (is_fx or is_rx) and not num_off == nelements:
            header = header_dict[(key, itime)]

            if is_fx:
                fx_res = GuiResult(subcase_id, header=f'Axial: {header}', title='Axial',
                                   location='centroid', scalar=fx)
                form_dict[(key, itime)].append(('Axial', icase, []))
                cases[icase] = (fx_res, (subcase_id, 'Axial'))
                icase += 1

            if is_fy:
                fy_res = GuiResult(subcase_id, header=f'ShearY: {header}', title='ShearY',
                                   location='centroid', scalar=fy)
                form_dict[(key, itime)].append(('ShearY', icase, []))
                cases[icase] = (fy_res, (subcase_id, 'ShearY'))
                icase += 1

            if is_fz:
                fz_res = GuiResult(subcase_id, header=f'ShearZ: {header}', title='ShearZ',
                                   location='centroid', scalar=fz)
                form_dict[(key, itime)].append(('ShearZ', icase, []))
                # bug fix: was cases[icase + 2], which mismatched the
                # form_dict entry and collided with the BendingZ slot below
                cases[icase] = (fz_res, (subcase_id, 'ShearZ'))
                icase += 1

            if is_rx:
                mx_res = GuiResult(subcase_id, header=f'Torsion: {header}', title='Torsion',
                                   location='centroid', scalar=rx)
                my_res = GuiResult(subcase_id, header=f'BendingY: {header}', title='BendingY',
                                   location='centroid', scalar=ry)
                mz_res = GuiResult(subcase_id, header=f'BendingZ: {header}', title='BendingZ',
                                   location='centroid', scalar=rz)

                form_dict[(key, itime)].append(('Torsion', icase, []))
                form_dict[(key, itime)].append(('BendingY', icase + 1, []))
                form_dict[(key, itime)].append(('BendingZ', icase + 2, []))
                cases[icase] = (mx_res, (subcase_id, 'Torsion'))
                cases[icase + 1] = (my_res, (subcase_id, 'BendingY'))
                cases[icase + 2] = (mz_res, (subcase_id, 'BendingZ'))
                icase += 3

            # boolean on/off fringes; -1 is the masked "no data" value
            is_axial = np.full(nelements, -1, dtype='int8')
            is_shear_y = np.full(nelements, -1, dtype='int8')
            is_shear_z = np.full(nelements, -1, dtype='int8')
            is_torsion = np.full(nelements, -1, dtype='int8')
            is_bending_y = np.full(nelements, -1, dtype='int8')
            is_bending_z = np.full(nelements, -1, dtype='int8')
            arrays = [
                (is_axial, fx), (is_shear_y, fy), (is_shear_z, fz),
                (is_torsion, rx), (is_bending_y, ry), (is_bending_z, rz),
            ]
            for is_array, force in arrays:
                # 0 = element has a result but zero load; 1 = nonzero load
                iany = np.where(is_element_on)
                iwhere = np.where(np.abs(force) > 0.0)[0]
                is_array[iany] = 0
                is_array[iwhere] = 1

            is_fx_res = GuiResult(subcase_id, header='IsAxial', title='IsAxial',
                                  location='centroid', scalar=is_axial, data_format=fmt,
                                  mask_value=-1)
            is_fy_res = GuiResult(subcase_id, header='IsShearY', title='IsShearY',
                                  location='centroid', scalar=is_shear_y, data_format=fmt,
                                  mask_value=-1)
            is_fz_res = GuiResult(subcase_id, header='IsShearZ', title='IsShearZ',
                                  location='centroid', scalar=is_shear_z, data_format=fmt,
                                  mask_value=-1)
            is_mx_res = GuiResult(subcase_id, header='IsTorsion', title='IsTorsion',
                                  location='centroid', scalar=is_torsion, data_format=fmt,
                                  mask_value=-1)
            is_my_res = GuiResult(subcase_id, header='IsBendingY', title='IsBendingY',
                                  location='centroid', scalar=is_bending_y, data_format=fmt,
                                  mask_value=-1)
            is_mz_res = GuiResult(subcase_id, header='IsBendingZ', title='IsBendingZ',
                                  location='centroid', scalar=is_bending_z, data_format=fmt,
                                  mask_value=-1)

            cases[icase] = (is_fx_res, (subcase_id, 'IsAxial'))
            cases[icase + 1] = (is_fy_res, (subcase_id, 'IsShearY'))
            cases[icase + 2] = (is_fz_res, (subcase_id, 'IsShearZ'))
            cases[icase + 3] = (is_mx_res, (subcase_id, 'IsTorsion'))
            cases[icase + 4] = (is_my_res, (subcase_id, 'IsBendingY'))
            cases[icase + 5] = (is_mz_res, (subcase_id, 'IsBendingZ'))

            form_dict[(key, itime)].append(('IsAxial', icase, []))
            form_dict[(key, itime)].append(('IsShearY', icase + 1, []))
            form_dict[(key, itime)].append(('IsShearZ', icase + 2, []))
            form_dict[(key, itime)].append(('IsTorsion', icase + 3, []))
            form_dict[(key, itime)].append(('IsBendingY', icase + 4, []))
            form_dict[(key, itime)].append(('IsBendingZ', icase + 5, []))
            icase += 6
    return icase
def save_filtered_forces(self, key, itime, icase, is_element_on, subcase_id, cases, form_dict):
    """Reports elements with no force result and adds a Force-IsElementOn fringe."""
    # element types for which a missing force result is expected and not reported
    expected_types = {
        'CTRIA3', 'CQUAD4', 'CHEXA', 'CPENTA', 'CTETRA',
        'CELAS1', 'CELAS2', 'CELAS3', 'CELAS4', 'CSHEAR',
        'CQUADR', 'CTRIAR', 'CQUAD8', 'CTRIA6', 'CVISC',
        'CDAMP1', 'CDAMP2', 'CDAMP3', 'CDAMP4', 'CTUBE',
        'CONROD', 'CROD',
    }
    ioff = np.where(is_element_on == 0)[0]
    num_off = len(ioff)

    # elements that are off but are NOT of an expected type
    eids_off = [eid for eid in self.element_ids[ioff]
                if self.model.elements[eid].type not in expected_types]

    # echo the first few offenders for debugging
    for eid in eids_off[:20]:
        print(self.model.elements[eid].rstrip())

    if eids_off:
        msg = 'force_eids_off = %s; n=%s' % (eids_off, num_off)
        print(msg)
        self.log_error(msg)

    force_on_res = GuiResult(subcase_id, header='Force - IsElementOn',
                             title='Force\nIsElementOn',
                             location='centroid', scalar=is_element_on)
    cases[icase] = (force_on_res, (subcase_id, 'Force\nIsElementOn'))
    form_dict[(key, itime)].append(('Force - IsElementOn', icase, []))
    icase += 1
    return icase
def _fill_op2_time_centroidal_composite_stress(self, cases, model, key, icase: int, itime: int,
                                               form_dict: Dict[Any, Any],
                                               header_dict: Dict[Any, Any],
                                               keys_map: Dict[Any, Any],
                                               is_stress: int=True) -> int:
    """
    Creates the by-layer composite stress/strain results for one time step.

    Pulls layered composite data from ``self.stress[key]`` (or
    ``self.strain[key]``), and registers an ID-based XX result in
    ``cases``/``form_dict``.  Returns the next free case id; returns
    ``icase`` unchanged when there is no composite data for this key.
    """
    nelements = self.nelements
    #oxx = np.full(nelements, np.nan, dtype='float32')
    #oyy = np.full(nelements, np.nan, dtype='float32')

    #txy = np.full(nelements, np.nan, dtype='float32')
    #tyz = np.full(nelements, np.nan, dtype='float32')
    #txz = np.full(nelements, np.nan, dtype='float32')

    #max_principal = np.full(nelements, np.nan, dtype='float32')  # max
    #min_principal = np.full(nelements, np.nan, dtype='float32')  # min
    #ovm = np.full(nelements, np.nan, dtype='float32')

    # pick the stress or strain container; fmt controls the GUI number format
    if is_stress:
        stress_obj = self.stress[key]
        word = 'Stress'
        fmt = '%.3f'
    else:
        stress_obj = self.strain[key]
        word = 'Strain'
        fmt = '%.4e'

    vm_word = None
    if len(stress_obj.composite_data_dict):
        # NOTE(review): debug print left in; consider routing through the log
        print(stress_obj)

        out = stress_obj.set_composite_stress_by_layer(
            key, itime, nelements, header_dict,
        )
        vm_word, element_ids, oxx, oyy, txy, tyz, txz, max_principal, min_principal, ovm = out

    # vm_word stays None when there was no composite data -> nothing to add
    if vm_word is None:
        return icase

    subcase_id = key[2]
    if np.any(np.isfinite(oxx)):
        header = header_dict[(key, itime)]
        oxx_res = GuiResultIDs(subcase_id, header=word + f'XX: {header}', title=word + 'XX',
                               location='centroid',
                               ids=element_ids, scalar=oxx, data_format=fmt)
        cases[icase] = (oxx_res, (subcase_id, word + 'XX'))
        form_dict[(key, itime)].append((word + 'XX', icase, []))
        icase += 1
    return icase
def _fill_op2_centroidal_stress(self, cases, model, times, key, icase_old,
                                form_dict, header_dict, keys_map) -> int:
    """Creates the time accurate stress objects

    Dispatcher: fills the legacy per-time stress fringes, then the newer
    per-element-family stress objects, gated by the GUI settings flags.
    Returns the next free case id.
    """
    icase = icase_old
    settings = self.settings  # type: Settings
    if settings.nastran_stress:
        for itime, unused_dt in enumerate(times):
            # shell stress
            try:
                icase = self._fill_op2_time_centroidal_stress(
                    cases, model, key, icase_old, itime, form_dict, header_dict, keys_map,
                    is_stress=True)
            except IndexError:
                # don't let one bad time step break the whole load
                self.log.error('problem getting stress...')
                break
    # no stress results at all for this key -> skip the per-family fills
    if icase == icase_old:
        return icase

    eids = self.element_ids
    if settings.nastran_plate_stress:
        icase = get_plate_stress_strains(
            eids, cases, model, times, key, icase,
            form_dict, header_dict, keys_map, is_stress=True)
        # second pass picks up modal-contribution results
        icase = get_plate_stress_strains(
            eids, cases, model, times, key, icase,
            form_dict, header_dict, keys_map, is_stress=True,
            prefix='modal_contribution',
        )

    if settings.nastran_composite_plate_stress:
        icase = get_composite_plate_stress_strains(
            eids, cases, model, times, key, icase,
            form_dict, header_dict, keys_map,
            self.stress[key].composite_data_dict, self.log, is_stress=True)

    if settings.nastran_rod_stress:
        icase = get_rod_stress_strains(
            eids, cases, model, times, key, icase,
            form_dict, header_dict, keys_map, is_stress=True)
    if settings.nastran_bar_stress:
        icase = get_bar_stress_strains(
            eids, cases, model, times, key, icase,
            form_dict, header_dict, keys_map, is_stress=True)
    if settings.nastran_beam_stress:
        icase = get_beam_stress_strains(
            eids, cases, model, times, key, icase,
            form_dict, header_dict, keys_map, is_stress=True)

    # solids and springs are always filled (no settings gate)
    icase = get_solid_stress_strains(
        eids, cases, model, times, key, icase,
        form_dict, header_dict, keys_map, is_stress=True)
    icase = get_spring_stress_strains(
        eids, cases, model, times, key, icase,
        form_dict, header_dict, keys_map, is_stress=True)

    return icase
def _fill_op2_centroidal_force(self, cases, model, times, key, icase,
                               force_dict, header_dict, keys_map) -> int:
    """Creates the time accurate force objects

    Dispatcher: fills the legacy per-time force fringes, then the newer
    per-element-family force objects, gated by the GUI settings flags.
    Several families are currently skipped with a warning.  Returns the
    next free case id.
    """
    settings = self.settings  # type: Settings
    if settings.nastran_force:
        for itime, unused_dt in enumerate(times):
            try:
                icase = self._fill_op2_time_centroidal_force(
                    cases, model, key, icase, itime,
                    force_dict, header_dict, keys_map)
            except IndexError:
                # don't let one bad time step break the whole load
                self.log.error('problem getting force...')
                break

    eids = self.element_ids
    if settings.nastran_bar_force:
        icase = get_bar_force(
            eids, cases, model, times, key, icase,
            force_dict, header_dict, keys_map)

    if settings.nastran_beam_force:
        # not implemented yet; warn instead of silently dropping results
        #icase = get_beam_force(
            #eids, cases, model, times, key, icase,
            #force_dict, header_dict, keys_map)
        if key in model.cbeam_force:
            model.log.warning('skipping nastran beam force')

    if settings.nastran_plate_force:
        icase = get_plate_force(
            eids, cases, model, times, key, icase,
            force_dict, header_dict, keys_map)
        #if key in model.ctria3_force or key in model.cquad4_force:
            #model.log.warning('skipping nastran plate force')

    if settings.nastran_spring_force:
        icase = get_spring_force(
            eids, cases, model, times, key, icase,
            force_dict, header_dict, keys_map)
        #if any([key in force for force in
                #[model.celas1_force, model.celas2_force,
                 #model.celas3_force, model.celas4_force]]):
            #model.log.warning('skipping nastran spring force')

    if settings.nastran_cbush_force:
        # not implemented yet
        if key in model.cbush_force:
            model.log.warning('skipping nastran bush force')
        #if key in model.bush1d_force:
            #model.log.warning('skipping nastran bush1d force')

    if settings.nastran_gap_force:
        # not implemented yet
        if key in model.cgap_force:
            model.log.warning('skipping nastran gap force')
    return icase
def _fill_op2_centroidal_strain(self, cases, model, times, key, icase,
                                form_dict, header_dict, keys_map) -> int:
    """Creates the time accurate strain objects

    Dispatcher: fills the legacy per-time strain fringes, then the newer
    per-element-family strain objects, gated by the GUI settings flags.
    Mirrors ``_fill_op2_centroidal_stress`` with ``is_stress=False``.
    Returns the next free case id.
    """
    settings = self.settings  # type: Settings
    if settings.nastran_strain:
        for itime, unused_dt in enumerate(times):
            try:
                icase = self._fill_op2_time_centroidal_stress(
                    cases, model, key, icase, itime, form_dict, header_dict, keys_map,
                    is_stress=False)
            except IndexError:
                # don't let one bad time step break the whole load
                self.log.error('problem getting strain...')
                break

    eids = self.element_ids
    # bug fix: was settings.nastran_composite_plate_strain (which is checked
    # again below); the plate fill is gated by nastran_plate_strain, matching
    # nastran_plate_stress in _fill_op2_centroidal_stress
    if settings.nastran_plate_strain:
        icase = get_plate_stress_strains(
            eids, cases, model, times, key, icase,
            form_dict, header_dict, keys_map, is_stress=False)
        # second pass picks up modal-contribution results
        icase = get_plate_stress_strains(
            eids, cases, model, times, key, icase,
            form_dict, header_dict, keys_map, is_stress=False,
            prefix='modal_contribution',
        )

    if settings.nastran_composite_plate_strain:
        icase = get_composite_plate_stress_strains(
            eids, cases, model, times, key, icase,
            form_dict, header_dict, keys_map,
            self.strain[key].composite_data_dict, self.log, is_stress=False)

    if settings.nastran_rod_strain:
        icase = get_rod_stress_strains(
            eids, cases, model, times, key, icase,
            form_dict, header_dict, keys_map, is_stress=False)
    if settings.nastran_bar_strain:
        icase = get_bar_stress_strains(
            eids, cases, model, times, key, icase,
            form_dict, header_dict, keys_map, is_stress=False)
    if settings.nastran_beam_strain:
        icase = get_beam_stress_strains(
            eids, cases, model, times, key, icase,
            form_dict, header_dict, keys_map, is_stress=False)

    # solids and springs are always filled (no settings gate)
    icase = get_solid_stress_strains(
        eids, cases, model, times, key, icase,
        form_dict, header_dict, keys_map, is_stress=False)
    icase = get_spring_stress_strains(
        eids, cases, model, times, key, icase,
        form_dict, header_dict, keys_map, is_stress=False)

    return icase
def _fill_op2_time_centroidal_stress(self, cases, model: OP2,
                                     key, icase: int, itime: int,
                                     form_dict: Dict[Any, Any],
                                     header_dict: Dict[Any, Any],
                                     keys_map: Dict[Any, Any],
                                     is_stress=True) -> int:
    """Creates the time accurate stress objects

    Accumulates centroidal stress/strain components (oxx..txz, principals,
    von Mises) across all supported element families into flat arrays of
    length nelements, then registers one GuiResult per non-empty component
    under a 'Combined Stress/Strain' form entry.  Returns the next free
    case id; returns icase unchanged when no results exist for this key.
    """
    #new_cases = True
    #assert isinstance(subcase_id, int), type(subcase_id)
    assert isinstance(icase, int), icase
    #assert isinstance(itime, int), type(itime)
    assert is_stress in [True, False], is_stress
    eids = self.element_ids
    assert len(eids) > 0, eids
    nelements = self.nelements

    # accumulation arrays; NaN = no result for that element
    is_element_on = np.zeros(nelements, dtype='int8')  # is the element supported
    oxx = np.full(nelements, np.nan, dtype='float32')
    oyy = np.full(nelements, np.nan, dtype='float32')
    ozz = np.full(nelements, np.nan, dtype='float32')

    txy = np.full(nelements, np.nan, dtype='float32')
    tyz = np.full(nelements, np.nan, dtype='float32')
    txz = np.full(nelements, np.nan, dtype='float32')

    max_principal = np.full(nelements, np.nan, dtype='float32')  # max
    mid_principal = np.full(nelements, np.nan, dtype='float32')  # mid
    min_principal = np.full(nelements, np.nan, dtype='float32')  # min
    #max_shear = np.full(nelements, np.nan, dtype='float32')
    ovm = np.full(nelements, np.nan, dtype='float32')

    # vm_word stays None until some family contributes results; it also
    # carries the von-Mises/max-shear label for the final fringe
    vm_word = None
    #-------------------------------------------------------------
    #vm_word = get_spring_stress_strain(
        #model, key, is_stress, vm_word, itime,
        #oxx, txy,
        #max_principal, min_principal, ovm, is_element_on,
        #eids, header_dict, keys_map)

    #-------------------------------------------------------------
    # each helper fills its slice of the arrays in place
    vm_word = get_rod_stress_strain(
        model, key, is_stress, vm_word, itime,
        oxx, txy,
        max_principal, min_principal, ovm, is_element_on,
        eids, header_dict, keys_map)

    vm_word = get_bar_stress_strain(
        model, key, is_stress, vm_word, itime,
        oxx,
        max_principal, min_principal, ovm, is_element_on,
        eids, header_dict, keys_map)

    vm_word = get_bar100_stress_strain(
        model, key, is_stress, vm_word, itime,
        oxx,
        max_principal, min_principal, ovm, is_element_on,
        eids, header_dict, keys_map)

    vm_word = get_beam_stress_strain(
        model, key, is_stress, vm_word, itime,
        oxx,
        max_principal, min_principal, ovm, is_element_on,
        header_dict, keys_map, self.eid_map)

    #-------------------------------------------------------------
    vm_word = get_plate_stress_strain(
        model, key, is_stress, vm_word, itime,
        oxx, oyy, txy, max_principal, min_principal, ovm, is_element_on,
        eids, header_dict, keys_map)

    #vm_word = get_shear_stress_strain(
        #model, key, is_stress, vm_word, itime,
        #oxx, txy,
        #max_principal, min_principal, ovm, is_element_on,
        #eids, header_dict, keys_map)

    if is_stress:
        stress_obj = self.stress[key]
    else:
        stress_obj = self.strain[key]

    # layered composites are flattened into the same arrays (old-style)
    if len(stress_obj.composite_data_dict):
        str(stress_obj)
        vm_word = stress_obj.set_composite_stress_old(
            key, itime, oxx, oyy, txy, tyz, txz,
            max_principal, min_principal, ovm,
            is_element_on, header_dict,
        )

    vm_word = get_solid_stress_strain(
        model, key, is_stress, vm_word, itime,
        oxx, oyy, ozz, txy, tyz, txz,
        max_principal, mid_principal, min_principal, ovm, is_element_on,
        eids, header_dict, keys_map)

    if is_stress:
        word = 'Stress'
        fmt = '%.3f'
    else:
        word = 'Strain'
        fmt = '%.4e'

    # a form is the table of output...
    # Subcase 1         <--- formi  - form_isubcase
    #    Time 1
    #        Stress     <--- form0  - the root level
    #            oxx    <--- formis - form_itime_stress
    #            oyy
    #            ozz

    # nothing contributed -> no results for this key/time
    if vm_word is None:
        #print('vm_word is None')
        return icase

    form0 = (word, None, [])
    unused_formis = form0[2]
    subcase_id = key[2]
    header = header_dict[(key, itime)]
    formi = []
    form_dict[(key, itime)].append(('Combined ' + word, None, formi))

    if is_stress and itime == 0:
        if is_element_on.min() == 0:  # if all elements aren't on
            print_empty_elements(self.model, eids, is_element_on, self.log_error)

        # rebuild the on/off mask from finite oxx for the fringe result
        is_element_on = np.isfinite(oxx)
        is_element_on = is_element_on.astype('|i1')
        stress_res = GuiResult(
            subcase_id, header=f'Stress - isElementOn: {header}', title='Stress\nisElementOn',
            location='centroid', scalar=is_element_on, mask_value=0, data_format=fmt)
        cases[icase] = (stress_res, (subcase_id, 'Stress - isElementOn'))
        formi.append(('Stress - IsElementOn', icase, []))
        icase += 1

    #print('max/min', max_principal.max(), max_principal.min())
    # header = _get_nastran_header(case, dt, itime)
    # one GuiResult per component that has at least one finite value
    if np.any(np.isfinite(oxx)):
        oxx_res = GuiResult(subcase_id, header=word + f'XX: {header}', title=word + 'XX',
                            location='centroid', scalar=oxx, data_format=fmt)
        cases[icase] = (oxx_res, (subcase_id, word + 'XX'))
        formi.append((word + 'XX', icase, []))
        icase += 1
    if np.any(np.isfinite(oyy)):
        oyy_res = GuiResult(subcase_id, header=word + f'YY: {header}', title=word + 'YY',
                            location='centroid', scalar=oyy, data_format=fmt)
        cases[icase] = (oyy_res, (subcase_id, word + 'YY'))
        formi.append((word + 'YY', icase, []))
        icase += 1
    if np.any(np.isfinite(ozz)):
        ozz_res = GuiResult(subcase_id, header=word + f'ZZ: {header}', title=word + 'ZZ',
                            location='centroid', scalar=ozz, data_format=fmt)
        cases[icase] = (ozz_res, (subcase_id, word + 'ZZ'))
        formi.append((word + 'ZZ', icase, []))
        icase += 1

    if np.any(np.isfinite(txy)):
        oxy_res = GuiResult(subcase_id, header=word + f'XY: {header}', title=word + 'XY',
                            location='centroid', scalar=txy, data_format=fmt)
        cases[icase] = (oxy_res, (subcase_id, word + 'XY'))
        formi.append((word + 'XY', icase, []))
        icase += 1
    if np.any(np.isfinite(tyz)):
        oyz_res = GuiResult(subcase_id, header=word + f'YZ: {header}', title=word + 'YZ',
                            location='centroid', scalar=tyz, data_format=fmt)
        cases[icase] = (oyz_res, (subcase_id, word + 'YZ'))
        formi.append((word + 'YZ', icase, []))
        icase += 1
    if np.any(np.isfinite(txz)):
        oxz_res = GuiResult(subcase_id, header=word + f'XZ: {header}', title=word + 'XZ',
                            location='centroid', scalar=txz, data_format=fmt)
        cases[icase] = (oxz_res, (subcase_id, word + 'XZ'))
        formi.append((word + 'XZ', icase, []))
        icase += 1

    if np.any(np.isfinite(max_principal)):
        maxp_res = GuiResult(subcase_id, header=f'MaxPrincipal: {header}', title='MaxPrincipal',
                             location='centroid', scalar=max_principal, data_format=fmt)
        cases[icase] = (maxp_res, (subcase_id, 'MaxPrincipal'))
        formi.append(('Max Principal', icase, []))
        icase += 1
    if np.any(np.isfinite(mid_principal)):
        midp_res = GuiResult(subcase_id, header=f'MidPrincipal: {header}', title='MidPrincipal',
                             location='centroid', scalar=mid_principal, data_format=fmt)
        cases[icase] = (midp_res, (subcase_id, 'MidPrincipal'))
        formi.append(('Mid Principal', icase, []))
        icase += 1
    if np.any(np.isfinite(min_principal)):
        minp_res = GuiResult(subcase_id, header=f'MinPrincipal: {header}', title='MinPrincipal',
                             location='centroid', scalar=min_principal, data_format=fmt)
        cases[icase] = (minp_res, (subcase_id, 'MinPrincipal'))
        formi.append(('Min Principal', icase, []))
        icase += 1

    # NOTE(review): vm_word can't be None here (checked above), so this
    # branch always runs
    if vm_word is not None:
        ovm_res = GuiResult(subcase_id, header=f'{vm_word}: {header}', title=vm_word,
                            location='centroid', scalar=ovm, data_format=fmt)
        cases[icase] = (ovm_res, (subcase_id, vm_word))
        formi.append((vm_word, icase, []))
        icase += 1

    #, case, header, form0
    return icase
def fill_responses(cases, model: OP2, icase):
    """adds the optimization responses

    Reads the fractional-mass desvars from the model's .des file (when it
    exists) and registers a centroidal 'Fractional Mass' result.  Returns
    ``(icase, form_optimization)``.
    """
    form_optimization = []

    # no .des file -> nothing to add
    des_filename = model.des_filename
    if not os.path.exists(des_filename):
        return icase, form_optimization

    des_desvars = read_des_filename(des_filename)
    if not des_desvars:
        return icase, form_optimization

    subcase_id = 0
    fractional_mass = des_desvars['fractional_mass']
    mass_res = GuiResult(subcase_id, header='Fractional Mass', title='% Mass',
                         location='centroid', scalar=fractional_mass, )  # data_format=fmt
    cases[icase] = (mass_res, (subcase_id, 'Fractional Mass'))
    form_optimization.append(('Fractional Mass', icase, []))
    icase += 1
    return icase, form_optimization
def _fill_nastran_displacements(cases, model: OP2, key, icase: int,
                                form_dict, header_dict, keys_map,
                                xyz_cid0,
                                nnodes: int, node_ids, log, dim_max: float=1.0,
                                prefix: str='') -> int:
    """
    loads the nodal displacements/velocity/acceleration/eigenvector/spc/mpc forces

    For each nodal result table present for ``key``, registers both the
    translation (t123_offset=0) and rotation (t123_offset=3) components.
    ``prefix='acoustic'`` selects the acoustic displacement table instead
    of the standard set.  Returns the next free case id.
    """
    if prefix == 'acoustic':
        results = model.op2_results.acoustic
        displacement_like = [
            # slot, name, deflects (True -> rendered as a deformed shape)
            (results.displacements, 'Acoustic Displacement', True),
        ]
    elif prefix == '':
        displacement_like = [
            # slot, name, deflects
            # TODO: what is a velocity/acceleration?
            #       is it a fringe, displacement, force?
            (model.displacements, 'Displacement', True),
            (model.velocities, 'Velocity', False),
            (model.accelerations, 'Acceleration', False),
            (model.eigenvectors, 'Eigenvectors', True),
            (model.spc_forces, 'SPC Forces', False),
            (model.mpc_forces, 'MPC Forces', False),
            (model.contact_forces, 'Contact Forces', False),
            (model.glue_forces, 'Glue Forces', False),

            (model.load_vectors, 'LoadVectors', False),
            (model.applied_loads, 'AppliedLoads', False),
            (model.force_vectors, 'ForceVectors', False),
        ]
    else:  # pragma: no cover
        raise NotImplementedError(prefix)

    for (result, name, deflects) in displacement_like:
        if key not in result:
            continue
        # 0 -> T_XYZ (translations); 3 -> R_XYZ (rotations)
        for t123_offset in [0, 3]:
            #if t123_offset == 3:
                #continue
            try:
                icase = _fill_nastran_ith_displacement(
                    result, name, deflects, t123_offset,
                    cases, model, key, icase,
                    form_dict, header_dict, keys_map,
                    xyz_cid0,
                    nnodes, node_ids, log, dim_max=dim_max)
            except ValueError:
                # rotations may legitimately be absent; only tolerate the
                # failure for the rotation pass
                if not t123_offset == 3:
                    raise
                log.error('skipping %s result; t123_offset=%s; type=%s' % (
                    name, t123_offset, result[key].__class__.__name__))
    return icase
def _fill_nastran_ith_displacement(result, name: str, deflects: bool, t123_offset,
                                   cases, model: OP2, key, icase: int,
                                   form_dict: Dict[Tuple[Any, Any], str],
                                   header_dict: Dict[Tuple[Any, Any], str],
                                   keys_map: Dict[str, Any],
                                   xyz_cid0,
                                   nnodes: int, node_ids, log, dim_max: float=1.0) -> int:
    """helper for ``_fill_nastran_displacements`` to unindent the code a bit

    Loads one displacement-like result into the GUI case/form dictionaries.

    Parameters
    ----------
    result : dict
        maps ``key`` to an OP2 result object (e.g. RealDisplacementArray)
    name : str
        base title for the result (e.g. 'Displacement')
    deflects : bool
        True -> build a DisplacementResults (deforms the geometry);
        False -> build a ForceTableResults (fringe only)
    t123_offset : int
        0 -> translations (T_XYZ); 3 -> rotations (R_XYZ)
    dim_max : float; default=1.0
        characteristic model size used to pick a display scale

    Returns
    -------
    icase : int
        the next free case index
    """
    if t123_offset == 0:
        title1 = name + ' T_XYZ'
    else:
        assert t123_offset == 3, t123_offset
        title1 = name + ' R_XYZ'

    case = result[key]
    subcase_idi = case.isubcase
    if not hasattr(case, 'data'):
        # bugfix: this previously read ``case.__class.__name__`` which raises
        # AttributeError; ``__class__`` is the actual attribute
        print('str(%s) has no data...' % case.__class__.__name__)
        return icase

    if not case.is_sort1:
        log.warning('Skipping because SORT2\n' + str(case))
        return icase

    t123, tnorm, ntimes = _get_t123_tnorm(case, node_ids, nnodes,
                                          t123_offset=t123_offset)

    titles = []
    scales = []
    headers = []
    if deflects:
        # deflection-style result: the geometry is deformed by scale * t123
        nastran_res = DisplacementResults(subcase_idi, titles, headers,
                                          xyz_cid0, t123, tnorm,
                                          scales,
                                          uname=name)

        for itime in range(ntimes):
            dt = case._times[itime]
            tnorm_abs_max = get_tnorm_abs_max(case, t123, tnorm, itime)

            # mode = 2; freq = 75.9575 Hz
            header = _get_nastran_header(case, dt, itime)
            header_dict[(key, itime)] = header
            keys_map[key] = (case.subtitle, case.label,
                             case.superelement_adaptivity_index, case.pval_step)

            # scale so the peak deflection is ~10% of the model size
            scale = dim_max
            if tnorm_abs_max > 0.0:
                scale = dim_max / tnorm_abs_max * 0.10
            scales.append(scale)
            titles.append(title1)
            headers.append(f'{title1}: {header}')
            cases[icase] = (nastran_res, (itime, title1))  # do I keep this???
            formii = (title1, icase, [])
            form_dict[(key, itime)].append(formii)
            icase += 1
        nastran_res.save_defaults()
    else:
        # force-style result: fringe only, so the scale is a no-op 1.0
        nastran_res = ForceTableResults(subcase_idi, titles, headers,
                                        t123, tnorm,
                                        scales, #deflects=deflects,
                                        uname=name)
        for itime in range(ntimes):
            dt = case._times[itime]
            header = _get_nastran_header(case, dt, itime)
            header_dict[(key, itime)] = header
            keys_map[key] = (case.subtitle, case.label,
                             case.superelement_adaptivity_index, case.pval_step)

            scale = 1.
            scales.append(scale)
            titles.append(title1)
            headers.append(f'{title1}: {header}')
            cases[icase] = (nastran_res, (itime, title1))  # do I keep this???
            formii = (title1, icase, [])
            form_dict[(key, itime)].append(formii)
            icase += 1
        nastran_res.save_defaults()
    return icase
def _fill_nastran_temperatures(cases, model: OP2, key, icase: int,
                               form_dict, header_dict, keys_map, nnodes: int, log) -> int:
    """loads the nodal temperatures

    Builds one nodal fringe result per time step for every temperature-like
    table that contains ``key`` and returns the next free ``icase`` index.
    SORT2 tables and tables without a ``data`` attribute are skipped.
    """
    #nids = self.node_ids
    temperature_like = [
        (model.temperatures, 'Temperature'),
    ]
    for (result, name) in temperature_like:
        if key not in result:
            continue
        case = result[key]
        subcase_idi = case.isubcase
        if not hasattr(case, 'data'):
            continue

        if not case.is_sort1:
            log.warning('Skipping because SORT2\n' + str(case))
            continue
        assert case.is_sort1, case.is_sort1

        ntimes = case.ntimes
        for itime in range(ntimes):
            dt = case._times[itime]
            header = _get_nastran_header(case, dt, itime)
            header_dict[(key, itime)] = header
            keys_map[key] = (case.subtitle, case.label,
                             case.superelement_adaptivity_index, case.pval_step)

            loads = case.data[itime, :, :]
            # nxyz is only used for the size sanity check below; the fringe
            # itself is the first data column
            nxyz = norm(loads[:, :3], axis=1)
            assert len(nxyz) == nnodes, 'len(nxyz)=%s nnodes=%s' % (
                len(nxyz), nnodes)

            temp_res = GuiResult(subcase_idi, header=f'{name}: {header}', title=name,
                                 location='node', scalar=loads[:, 0])
            # NOTE(review): the case tuple uses 0 rather than itime, unlike the
            # displacement loaders above -- confirm this is intentional
            cases[icase] = (temp_res, (0, name))
            form_dict[(key, itime)].append((name, icase, []))
            icase += 1
    return icase
def print_empty_elements(model, element_ids, is_element_on, log_error):
    """prints the first 20 elements that aren't supported as part of the stress results"""
    # indices of the elements flagged "off" (no stress result)
    ioff = np.where(is_element_on == 0)[0]
    eids_off = []
    for eid in element_ids[ioff]:
        element = model.elements[eid]
        # dampers/viscous elements carry no stress, so being off is expected
        if element.type not in ['CDAMP1', 'CDAMP2', 'CDAMP3', 'CDAMP4', 'CVISC']:
            eids_off.append(eid)

    print('stress_eids_off = %s' % np.array(element_ids[ioff]))
    log_error('stress_eids_off = %s' % element_ids[ioff])

    # only dump the first 20 offending cards to keep the output readable
    for eid in eids_off[:20]:
        element = model.elements[eid]
        print(element.rstrip())
    print('-----------------------------------')
def _get_t123_tnorm(case, nids, nnodes: int, t123_offset: int=0):
    """
    helper method for _fill_op2_oug_oqg
    Parameters
    ----------
    case : DisplacementArray, ForceArray, etc.
        the OP2 result object???
    nids : (nnodes,) int ndarray
        the nodes in the model???
    nnodes : int
        the number of nodes in the model???
    t123_offset : int; default=0
        0 : translations / forces
        3 : rotations / moments
    Returns
    -------
    t123 : (ntimes, nnodes, 3) float ndarray
        the translations or rotations
    tnorm : (ntimes, 3) float ndarray
        ???
    ntimes : int
        number of times
    """
    assert case.is_sort1, case.is_sort1

    itime0 = 0
    t1 = case.data[itime0, :, 0]
    ndata = t1.shape[0]
    if nnodes != ndata:
        # the result has fewer nodes than the model; build the mapping from
        # the result's node ids into the model's (sorted) node id array
        #print('nnodes=%s ndata=%s' % (nnodes, ndata))
        nidsi = case.node_gridtype[:, 0]
        #assert len(nidsi) == nnodes, 'nidsi=%s nnodes=%s' % (nidsi, nnodes)
        j = np.searchsorted(nids, nidsi)  # searching for nidsi

        try:
            if not np.allclose(nids[j], nidsi):
                msg = 'nids[j]=%s nidsi=%s' % (nids[j], nidsi)
                raise RuntimeError(msg)
        except IndexError:
            # searchsorted returned an out-of-range index -> a result node is
            # missing from the model; report both id lists
            msg = 'node_ids = %s\n' % list(nids)
            msg += 'nidsi in disp = %s\n' % list(nidsi)
            raise IndexError(msg)

    # (itime, nnodes, xyz)
    # (901, 6673, 3)
    t123 = case.data[:, :, t123_offset:t123_offset+3]
    ntimes = case.ntimes

    if nnodes != ndata:
        # scatter the partial result into a full-sized zero array
        dtype = t123.dtype.name
        t123i = np.zeros((ntimes, nnodes, 3), dtype=dtype)
        t123i[:, j, :] = t123
        t123 = t123i

        # (itime, nnodes, xyz)
        # tnorm (901, 3)
        # NOTE(review): the two branches use different axes (2 here vs 1
        # below); the original author flagged this as suspicious -- confirm
        tnorm = norm(t123, axis=2)  # I think this is wrong...
        #print('tnorm.shape ', tnorm.shape)
        assert len(tnorm) == t123.shape[0]
    else:
        # (itime, nnodes, xyz)
        # tnorm (901, 3)

        # float32s are apparently buggy in numpy if you have small numbers
        # see models/elements/loadstep_elememnts.op2
        try:
            tnorm = norm(t123, axis=1)
        except FloatingPointError:
            # retry at higher precision when the float32 norm underflows
            dtype_map = {
                'float32': 'float64',
                'complex64': 'complex128',
            }
            dtype = dtype_map[t123.dtype.name]
            t123 = t123.astype(dtype=dtype)
            tnorm = norm(t123, axis=1)

        #print('skipping %s' % name)
        #print(t123.max(axis=1))
        #for itime, ti in enumerate(t123):
            #print('itime=%s' % itime)
            #print(ti.tolist())
    assert len(tnorm) == t123.shape[0]
    assert t123.shape[0] == ntimes, 'shape=%s expected=(%s, %s, 3)' % (t123.shape, ntimes, nnodes)
    assert t123.shape[1] == nnodes, 'shape=%s expected=(%s, %s, 3)' % (t123.shape, ntimes, nnodes)
    return t123, tnorm, ntimes
def _get_times(model, key):
"""
Get the times/frequencies/eigenvalues/loadsteps used on a given
subcase
"""
table_types = model.get_table_types()
is_real = True
is_data = False
is_static = False
times = None
for table_type in table_types:
if not model.has_result(table_type) or table_type.startswith('responses.'):
#model.log.debug('no table_type=%s' % table_type)
continue
table = model.get_result(table_type)
if len(table) == 0:
continue
#print(key, table, type(table))
if key in table:
is_data = True
case = table[key]
#print(case)
is_real = case.is_real
# you're presumably looking here because of a bug
# are you sure the keys are the right length?
#print("is_real=%r nonlinear_factor=%r _times=%s" % (
#is_real, case.nonlinear_factor, case._times))
if case.nonlinear_factor is not None:
times = case._times
is_static = False
else:
is_static = True
times = np.zeros(1, dtype='int32')
#print('times = ', times)
break
#return is_data, is_static, is_real, times
return is_data, is_static, is_real, times
def get_tnorm_abs_max(case, t123, tnorm, itime):
    """
    The normalization value is consistent for static, frequency, transient,
    and load step cases, but is independent for modal cases.
    """
    code = case.analysis_code
    if code in {1, 5, 6, 10, 11}:
        # dependent: 1-statics, 5-frequency, 6-transient,
        # 10-nonlinear statics, 11-old nonlinear statics
        # -> one normalization shared across all time steps
        return tnorm.max()
    if code in {2, 7, 8, 9}:
        # independent: 2-eigenvectors, 7-pre-buckling, 8-post-buckling,
        # 9-complex eigenvalues -> normalize each mode on its own
        return np.linalg.norm(t123[itime, :, :], axis=1).max()
    raise NotImplementedError(f'analysis_code={case.analysis_code}\ncase:\n{case}')
def read_des_filename(des_filename):
    """Parse a topology-optimization ``.des`` density file.

    Expected layout::

        DESIGN CYCLE :       30
        1
         Topology Optimization Element Density Distribution
        Total number of element    3912
              1115           0
         0.1408992E-01
              1116           0
         0.1628276E-01

    Returns
    -------
    desvars : dict
        ``{'eids': (nelements,) int32 array,
           'fractional_mass': (nelements,) float32 array}``
    """
    with open(des_filename, 'r') as des_file:
        file_lines = des_file.readlines()

    label, ncycles_str = file_lines[0].split(':')
    assert label.strip() == 'DESIGN CYCLE'
    unused_ncycles = int(ncycles_str)

    # two filler lines follow the header; line 3 gives the element count
    assert file_lines[3].startswith('Total number of element'), file_lines[3]
    nelements = int(file_lines[3].split()[-1])

    eids = []
    fractional_mass = []
    iline = 4
    for unused_ielement in range(nelements):
        # each element occupies two lines: "<eid> 0" then the density value
        eid, zero = file_lines[iline].split()
        frac = float(file_lines[iline + 1])
        assert zero == '0', file_lines[iline].strip()
        eids.append(eid)
        fractional_mass.append(frac)
        iline += 2

    return {
        'eids': np.array(eids, dtype='int32'),
        'fractional_mass': np.array(fractional_mass, dtype='float32'),
    }
def _get_stress_table_types() -> List[str]: # pragma: no cover
"""
Gets the list of Nastran stress objects that the GUI supports
"""
table_types = [
# OES - tCode=5 thermal=0 s_code=0,1 (stress/strain)
# OES - CELAS1/CELAS2/CELAS3/CELAS4 stress
'celas1_stress',
'celas2_stress',
'celas3_stress',
'celas4_stress',
# OES - CELAS1/CELAS2/CELAS3/CELAS4 strain
'celas1_strain',
'celas2_strain',
'celas3_strain',
'celas4_strain',
# OES - isotropic CROD/CONROD/CTUBE stress
'crod_stress',
'conrod_stress',
'ctube_stress',
# OES - isotropic CROD/CONROD/CTUBE strain
'crod_strain',
'conrod_strain',
'ctube_strain',
# OES - isotropic CBAR stress
'cbar_stress',
# OES - isotropic CBAR strain
'cbar_strain',
# OES - isotropic CBEAM stress
'cbeam_stress',
# OES - isotropic CBEAM strain
'cbeam_strain',
# OES - isotropic CTRIA3/CQUAD4 stress
'ctria3_stress',
'cquad4_stress',
# OES - isotropic CTRIA3/CQUAD4 strain
'ctria3_strain',
'cquad4_strain',
# OES - isotropic CTETRA/CHEXA/CPENTA stress
'ctetra_stress',
'chexa_stress',
'cpenta_stress',
# OES - isotropic CTETRA/CHEXA/CPENTA strain
'ctetra_strain',
'chexa_strain',
'cpenta_strain',
# OES - CSHEAR stress
'cshear_stress',
# OES - CSHEAR strain
'cshear_strain',
# OES - CEALS1 224, CELAS3 225
'nonlinear_spring_stress',
# OES - GAPNL 86
'nonlinear_cgap_stress',
# OES - CBUSH 226
'nolinear_cbush_stress',
]
table_types += [
# OES - CTRIAX6
'ctriax_stress',
'ctriax_strain',
'cbush_stress',
'cbush_strain',
'cbush1d_stress_strain',
# OES - nonlinear CROD/CONROD/CTUBE stress
'nonlinear_rod_stress',
'nonlinear_rod_strain',
# OESNLXR - CTRIA3/CQUAD4 stress
'nonlinear_plate_stress',
'nonlinear_plate_strain',
#'hyperelastic_plate_stress',
'hyperelastic_cquad4_strain',
# OES - composite CTRIA3/CQUAD4 stress
'cquad4_composite_stress',
'cquad8_composite_stress',
'ctria3_composite_stress',
'ctria6_composite_stress',
'cquad4_composite_strain',
'cquad8_composite_strain',
'ctria3_composite_strain',
'ctria6_composite_strain',
# OGS1 - grid point stresses
'grid_point_surface_stresses', # tCode=26
'grid_point_volume_stresses', # tCode=27
]
return table_types
def _get_stress_times(model: OP2, isubcase: int) -> Tuple[bool, bool, bool, Any]:  # pragma: no cover
    """Are there any stress/strain results?

    Scans every supported stress table for *isubcase* and returns
    ``(is_data, is_static, is_real, times)`` from the first match.
    """
    found = False
    real = True
    static = False
    time_values = None
    for table_type in _get_stress_table_types():
        if not hasattr(model, table_type):
            # print('no table_type=%s' % table_type)
            continue
        table = getattr(model, table_type)
        if isubcase not in table:
            continue

        found = True
        case = table[isubcase]
        real = case.is_real
        if case.nonlinear_factor is None:
            # static result: use a single dummy "time"
            static = True
            time_values = np.zeros(1, dtype='int32')
        else:
            time_values = case._times
        break
    return found, static, real, time_values
def _fill_op2_grid_point_surface_stresses(eids_all, cases, model: OP2,
                                          times, key, icase: int,
                                          form_dict, header_dict, keys_map) -> int:
    """Load OGS1 grid point surface stresses as centroidal GUI fringes.

    One GuiResult per component per time step is registered under the
    'Surface Stresses' form group.  Complex results are skipped.  Returns
    the next free ``icase`` index.
    """
    if key not in model.grid_point_surface_stresses:
        return icase

    #grid_point_surface_stresses[(1, 1, 1, 0, 666, '', '')]
    #  type=GridPointSurfaceStressesArray nelements=99
    #  data: [1, nelements, 8] where 8=[nx, ny, txy, angle, majorP, minorP, tmax, ovm]
    #  node_element.shape = (99, 2)
    #  location.shape = (99,)
    #  data.shape = (1, 99, 8)
    #  sort1
    #  lsdvmns = [1]
    case = model.grid_point_surface_stresses[key]

    if case.is_complex:
        return icase

    nelements = len(eids_all)
    keys_map[key] = (case.subtitle, case.label,
                     case.superelement_adaptivity_index, case.pval_step)
    subcase_id = key[0]

    # keep only the centroidal rows (node id == 0)
    eidsi = case.node_element[:, 0]
    nidsi = case.node_element[:, 1]
    icentroid = np.where(nidsi == 0)[0]
    eids_res = eidsi[icentroid]
    assert eids_res.min() > 0, eids_res
    ueids_res = np.unique(eids_res)

    # map the unique result element ids onto the full model element id array
    i = np.searchsorted(eids_all, ueids_res)
    ui = np.unique(i)
    j = np.where(i < len(ui) - 1)[0]
    i2 = i[j]

    if len(i2) != len(np.unique(i2)):
        msg = 'i_gpstress=%s is not unique\n' % str(i2)
        #print('eids = %s\n' % str(list(eids)))
        #print('eidsi = %s\n' % str(list(eidsi)))
        raise RuntimeError(msg)

    for itime, unused_dt in enumerate(times):
        dt = case._times[itime]
        header = _get_nastran_header(case, dt, itime)
        header_dict[(key, itime)] = header

        # [nx, ny, txy, angle, majorP, minorP, tmax, ovm]
        nx = np.full(nelements, np.nan, dtype='float32')
        ny = np.full(nelements, np.nan, dtype='float32')
        txy = np.full(nelements, np.nan, dtype='float32')
        angle = np.full(nelements, np.nan, dtype='float32')
        major = np.full(nelements, np.nan, dtype='float32')
        minor = np.full(nelements, np.nan, dtype='float32')
        tmax = np.full(nelements, np.nan, dtype='float32')
        ovm = np.full(nelements, np.nan, dtype='float32')
        nx[i2] = case.data[itime, i2, 0]
        ny[i2] = case.data[itime, i2, 1]
        txy[i2] = case.data[itime, i2, 2]
        angle[i2] = case.data[itime, i2, 3]
        major[i2] = case.data[itime, i2, 4]
        minor[i2] = case.data[itime, i2, 5]
        tmax[i2] = case.data[itime, i2, 6]
        ovm[i2] = case.data[itime, i2, 7]

        headers = ['nx', 'ny', 'txy', 'majorP', 'minorP', 'tmax', 'ovm']
        form = [('Surface Stresses', None, [])]
        formi = form[0][2]
        form_dict[(key, itime)] = form

        # bugfix: the arrays were previously (nx, ny, txy, angle, major,
        # minor, ovm), which mislabeled angle as 'majorP', shifted the rest,
        # and dropped tmax entirely; angle is deliberately not displayed
        for header, resi in zip(headers, (nx, ny, txy, major, minor, tmax, ovm)):
            ese_res = GuiResult(subcase_id, header=header,
                                title=header, data_format='%.3e',
                                location='centroid', scalar=resi)
            cases[icase] = (ese_res, (subcase_id, header))
            formi.append((header, icase, []))
            icase += 1
    return icase
def _fill_op2_grid_point_stresses_volume_direct(nids, cases, model: OP2,
                                                times, key, icase: int,
                                                form_dict, header_dict, keys_map) -> int:
    """Load grid-point volume-direct stresses as nodal GUI fringes.

    One GuiResult per component per time step is registered under the
    'Volume Direct' form group.  Complex results are skipped.  Returns the
    next free ``icase`` index.
    """
    if key not in model.grid_point_stresses_volume_direct:
        return icase
    case = model.grid_point_stresses_volume_direct[key]
    if case.is_complex:
        return icase
    nnodes = len(nids)

    keys_map[key] = (case.subtitle, case.label,
                     case.superelement_adaptivity_index, case.pval_step)
    subcase_id = key[0]

    # map the result's node ids onto the model's (sorted) node id array
    nids2 = case.node
    i = np.searchsorted(nids, nids2)
    if len(i) != len(np.unique(i)):
        msg = 'i_gpstress=%s is not unique\n' % str(i)
        #print('eids = %s\n' % str(list(eids)))
        #print('eidsi = %s\n' % str(list(eidsi)))
        raise RuntimeError(msg)

    for itime, unused_dt in enumerate(times):
        dt = case._times[itime]
        header = _get_nastran_header(case, dt, itime)
        header_dict[(key, itime)] = header

        # volume direct
        #['ox', 'oy', 'oz', 'txy', 'tyz', 'txz', 'pressure', 'ovm']
        ox = np.full(nnodes, np.nan, dtype='float32')
        oy = np.full(nnodes, np.nan, dtype='float32')
        oz = np.full(nnodes, np.nan, dtype='float32')
        txy = np.full(nnodes, np.nan, dtype='float32')
        tyz = np.full(nnodes, np.nan, dtype='float32')
        txz = np.full(nnodes, np.nan, dtype='float32')
        ovm = np.full(nnodes, np.nan, dtype='float32')
        ox[i] = case.data[itime, :, 0]
        oy[i] = case.data[itime, :, 1]
        oz[i] = case.data[itime, :, 2]
        txy[i] = case.data[itime, :, 3]
        tyz[i] = case.data[itime, :, 4]
        txz[i] = case.data[itime, :, 5]
        # column 6 (pressure) is intentionally skipped; ovm is column 7
        ovm[i] = case.data[itime, :, 7]

        headers = ['oxx', 'oyy', 'ozz', 'txy', 'tyz', 'txz', 'ovm']
        form = [('Volume Direct', None, [])]
        formi = form[0][2]
        form_dict[(key, itime)] = form

        for header, resi in zip(headers, (ox, oy, oz, txy, tyz, txz, ovm)):
            ese_res = GuiResult(subcase_id, header=header,
                                title=header, data_format='%.3e',
                                location='node', scalar=resi)
            cases[icase] = (ese_res, (subcase_id, header))
            formi.append((header, icase, []))
            icase += 1
    return icase
| 42.109804 | 114 | 0.53455 |
7958dd41fb2288ae2f41529307079d4faef1b564 | 4,388 | py | Python | tools/buildgen/plugins/expand_version.py | warlock135/grpc | 81e13e4fa9c0cdf7dc131ce548e1604c895b738c | [
"Apache-2.0"
] | 36,552 | 2015-02-26T17:30:13.000Z | 2022-03-31T22:41:33.000Z | tools/buildgen/plugins/expand_version.py | SanjanaSingh897/grpc | 2d858866eb95ce5de8ccc8c35189a12733d8ca79 | [
"Apache-2.0"
] | 23,536 | 2015-02-26T17:50:56.000Z | 2022-03-31T23:39:42.000Z | tools/buildgen/plugins/expand_version.py | SanjanaSingh897/grpc | 2d858866eb95ce5de8ccc8c35189a12733d8ca79 | [
"Apache-2.0"
] | 11,050 | 2015-02-26T17:22:10.000Z | 2022-03-31T10:12:35.000Z | # Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Buildgen package version plugin
This parses the list of targets from the yaml build file, and creates
a custom version string for each language's package.
"""
import re
# Languages whose package versions are expanded by mako_plugin below; each
# may carry its own '<language>_version' override in the build settings.
LANGUAGES = [
    'core',
    'cpp',
    'csharp',
    'node',
    'objc',
    'php',
    'python',
    'ruby',
]
class Version:
    """A parsed package version: ``major.minor.patch`` plus an optional tag.

    Knows how to render itself in each target language's native convention
    (PEP 440 for Python, PECL/Composer for PHP, gem style for Ruby, ...).
    """

    def __init__(self, version_str, override_major=None):
        """Parse ``X.Y.Z`` or ``X.Y.Z-tag``; optionally force the major number."""
        self.tag = None
        if '-' in version_str:
            version_str, self.tag = version_str.split('-')
        self.major, self.minor, self.patch = (int(piece)
                                              for piece in version_str.split('.'))
        # truthiness on purpose: an override of 0/None means "no override"
        if override_major:
            self.major = override_major

    def __str__(self):
        """Version string in a somewhat idiomatic style for most languages"""
        base = f'{self.major}.{self.minor}.{self.patch}'
        return f'{base}-{self.tag}' if self.tag else base

    def pep440(self):
        """Version string in Python PEP440 style"""
        base = f'{self.major}.{self.minor}.{self.patch}'
        if not self.tag:
            return base
        # we need to translate from grpc version tags to pep440 version
        # tags; this code is likely to be a little ad-hoc
        if self.tag == 'dev':
            return base + '.dev0'
        if len(self.tag) >= 3 and self.tag[0:3] == 'pre':
            return base + 'rc%d' % int(self.tag[3:])
        raise Exception(
            'Don\'t know how to translate version tag "%s" to pep440' %
            self.tag)

    def ruby(self):
        """Version string in Ruby style"""
        base = f'{self.major}.{self.minor}.{self.patch}'
        return f'{base}.{self.tag}' if self.tag else base

    def php(self):
        """Version string for PHP PECL package"""
        base = f'{self.major}.{self.minor}.{self.patch}'
        if not self.tag:
            return base
        if self.tag == 'dev':
            return base + 'dev'
        if len(self.tag) >= 3 and self.tag[0:3] == 'pre':
            return base + 'RC%d' % int(self.tag[3:])
        raise Exception(
            'Don\'t know how to translate version tag "%s" to PECL version'
            % self.tag)

    def php_stability(self):
        """stability string for PHP PECL package.xml file"""
        return 'beta' if self.tag else 'stable'

    def php_composer(self):
        """Version string for PHP Composer package"""
        return f'{self.major}.{self.minor}.{self.patch}'

    def php_current_version(self):
        return '7.2'

    def php_debian_version(self):
        return 'stretch'
def mako_plugin(dictionary):
    """Expand version numbers:

    - for each language, ensure there's a language_version tag in
      settings (defaulting to the master version tag)
    - expand version strings to major, minor, patch, and tag
    """
    settings = dictionary['settings']
    raw_version = settings['version']
    settings['version'] = Version(raw_version)
    for lang in LANGUAGES:
        key = '%s_version' % lang
        major_override = settings.get('%s_major_version' % lang, None)
        # use the language-specific version string when present, otherwise
        # fall back to the master version string
        source = settings[key] if key in settings else raw_version
        settings[key] = Version(source, override_major=major_override)
    settings['protobuf_major_minor_version'] = '.'.join(
        settings['protobuf_version'].split('.')[:2])
| 33.242424 | 83 | 0.572926 |
7958de1e7635485bb26b663f83e5e2db7bbd6d6a | 1,751 | py | Python | helpers/users_resource_helper.py | maxazure/papers | d58267d86a522316f2a32128d9f9c82feee08bcc | [
"MIT"
] | null | null | null | helpers/users_resource_helper.py | maxazure/papers | d58267d86a522316f2a32128d9f9c82feee08bcc | [
"MIT"
] | null | null | null | helpers/users_resource_helper.py | maxazure/papers | d58267d86a522316f2a32128d9f9c82feee08bcc | [
"MIT"
] | null | null | null | from flask_restful import fields, reqparse, inputs
from models.user import User
paginate_fields = {
'total': fields.Integer,
'pageSize': fields.Integer,
'current': fields.Integer
}
user_fields = {
'id': fields.Integer,
'name': fields.String,
'email': fields.String,
'intro': fields.String,
'avatar': fields.String,
'created_at': fields.String
}
user_list_fields = {
'pagination': fields.Nested(paginate_fields),
'list': fields.List(fields.Nested(user_fields))
}
sortable_fields = ['id',]
user_post_parser = reqparse.RequestParser()
user_post_parser.add_argument('name', type=str, )
user_post_parser.add_argument('email', type=str, )
user_post_parser.add_argument('intro', type=str, )
user_post_parser.add_argument('avatar', type=str, )
user_update_parser = reqparse.RequestParser()
user_update_parser.add_argument('name', type=str)
user_update_parser.add_argument('email', type=str)
user_update_parser.add_argument('intro', type=str)
user_update_parser.add_argument('avatar', type=str)
user_query_parser = reqparse.RequestParser()
user_query_parser.add_argument('email', type=str)
user_query_parser.add_argument('orderby', type=str, default='id')
user_query_parser.add_argument('desc', type=int, default=0)
user_query_parser.add_argument('page', type=int)
user_query_parser.add_argument('pagesize', type=int)
def make_conditions(conditions, args):
    """Append SQLAlchemy filter clauses derived from parsed query args.

    Mutates and returns *conditions*.
    """
    email = args['email']
    if email is not None:
        conditions.append(User.email == email)
    return conditions
def update_all_fields(args, o):
    """Copy the editable user fields from *args* onto model *o*.

    Only truthy values are applied (so None and '' leave the attribute
    untouched); *o* is mutated and returned.
    """
    for field in ('name', 'intro', 'avatar'):
        value = args[field]
        if value:
            setattr(o, field, value)
    return o
7958de4d51c3a4fc21da6eba81ad18baa57e3921 | 3,309 | py | Python | Bio/Graphics/GenomeDiagram/_CrossLink.py | lukasz-kozlowski/biopython | 6b601cf09234e1e82cfc94ad5030389036cb6343 | [
"BSD-3-Clause"
] | 2,856 | 2015-01-01T07:10:06.000Z | 2022-03-31T18:17:25.000Z | Bio/Graphics/GenomeDiagram/_CrossLink.py | lukasz-kozlowski/biopython | 6b601cf09234e1e82cfc94ad5030389036cb6343 | [
"BSD-3-Clause"
] | 3,429 | 2015-01-05T11:11:42.000Z | 2022-03-31T13:08:10.000Z | Bio/Graphics/GenomeDiagram/_CrossLink.py | lukasz-kozlowski/biopython | 6b601cf09234e1e82cfc94ad5030389036cb6343 | [
"BSD-3-Clause"
] | 1,619 | 2015-01-05T13:07:11.000Z | 2022-03-31T19:19:52.000Z | # Copyright 2011-2017 by Peter Cock. All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Genome Diagram Feature cross-link module."""
from reportlab.lib import colors
class CrossLink:
    """Hold information for drawing a cross link between features.

    ``featureA``/``featureB`` may each be either a feature object (with
    ``.start``/``.end``) or a ``(track, start, end)`` 3-tuple; the accessors
    below use EAFP fallbacks to support both forms.
    """

    def __init__(
        self, featureA, featureB, color=colors.lightgreen, border=None, flip=False
    ):
        """Create a new cross link.
        Arguments featureA and featureB should GenomeDiagram feature objects,
        or 3-tuples (track object, start, end), and currently must be on
        different tracks.
        The color and border arguments should be ReportLab colour objects, or
        for border use a boolean False for no border, otherwise it defaults to
        the same as the main colour.
        The flip argument draws an inverted cross link, useful for showing a
        mapping where one sequence has been reversed. It is conventional to
        also use a different colour (e.g. red for simple links, blue for any
        flipped links).
        """
        # Initialize attributes
        self.featureA = featureA
        self.featureB = featureB
        self.color = color  # default color to draw the feature
        self.border = border
        self.flip = flip

    @property
    def startA(self):
        """Start position of Feature A."""
        try:
            return self.featureA.start
        except AttributeError:
            # featureA is a (track, start, end) tuple
            track, start, end = self.featureA
            return start

    @property
    def endA(self):
        """End position of Feature A."""
        try:
            return self.featureA.end
        except AttributeError:
            # featureA is a (track, start, end) tuple
            track, start, end = self.featureA
            return end

    def _trackA(self, tracks):
        # Return the track containing feature A, or None if not found.
        # Tuple form carries the track directly; feature-object form is
        # located by searching every track's feature sets.
        try:
            track, start, end = self.featureA
            assert track in tracks
            return track
        except TypeError:
            # unpacking failed -> featureA is a feature object
            for track in tracks:
                for feature_set in track.get_sets():
                    if hasattr(feature_set, "features"):
                        if self.featureA in feature_set.features.values():
                            return track
            return None

    @property
    def startB(self):
        """Start position of Feature B."""
        try:
            return self.featureB.start
        except AttributeError:
            # featureB is a (track, start, end) tuple
            track, start, end = self.featureB
            return start

    @property
    def endB(self):
        """End position of Feature B."""
        try:
            return self.featureB.end
        except AttributeError:
            # featureB is a (track, start, end) tuple
            track, start, end = self.featureB
            return end

    def _trackB(self, tracks):
        # Return the track containing feature B, or None if not found.
        try:
            track, start, end = self.featureB
            assert track in tracks
            return track
        except TypeError:
            # unpacking failed -> featureB is a feature object
            for track in tracks:
                for feature_set in track.get_sets():
                    if hasattr(feature_set, "features"):
                        if self.featureB in feature_set.features.values():
                            return track
            return None
7958df5384ef5f790b3b5a58777975ae7ccebd3a | 330 | py | Python | core/forms.py | Kaue-Silva/Site_Lembretes | 006ce55468e88eb762bf303ddb4dbfb9270c5a52 | [
"MIT"
] | null | null | null | core/forms.py | Kaue-Silva/Site_Lembretes | 006ce55468e88eb762bf303ddb4dbfb9270c5a52 | [
"MIT"
] | null | null | null | core/forms.py | Kaue-Silva/Site_Lembretes | 006ce55468e88eb762bf303ddb4dbfb9270c5a52 | [
"MIT"
] | null | null | null | from django.contrib.auth import forms
from django import forms
from .models import Lembrete
class LembreteForm(forms.ModelForm):
    """ModelForm for creating/editing a Lembrete (reminder)."""

    class Meta:
        model = Lembrete
        fields = ['titulo', 'descricao', 'data_hora']
        widgets = {
            # render the timestamp with the browser-native date/time picker
            'data_hora': forms.TextInput(attrs={'type':'datetime-local'})
        }
| 25.384615 | 73 | 0.642424 |
7958e0dcd2bbb3124402c9adfba2da4339ba9b00 | 764 | py | Python | .history/List of Capstone Projects/prime_factorization_20200516165159.py | EvanthiosPapadopoulos/Python3 | ab773fd458e365c1510f98ecac65965234c881e8 | [
"MIT"
] | 1 | 2020-05-18T17:50:00.000Z | 2020-05-18T17:50:00.000Z | .history/List of Capstone Projects/prime_factorization_20200516165159.py | EvanthiosPapadopoulos/Python3 | ab773fd458e365c1510f98ecac65965234c881e8 | [
"MIT"
] | null | null | null | .history/List of Capstone Projects/prime_factorization_20200516165159.py | EvanthiosPapadopoulos/Python3 | ab773fd458e365c1510f98ecac65965234c881e8 | [
"MIT"
] | null | null | null | '''
Prime Factorization - Have the user enter a number and find all Prime Factors (if there are any) and display them.
'''
import HeaderOfFiles
def prime_factor(number):
    """Find, print, and return the prime factorization of ``number``.

    Factors are returned in ascending order with multiplicity, e.g.
    120 -> [2, 2, 2, 3, 5].  Numbers below 2 have no prime factors.

    (Rewrite: the original body referenced ``len.my_list`` which raises
    AttributeError at runtime and could never terminate correctly.)
    """
    factors = []
    divisor = 2
    # trial division: only divisors up to sqrt(remaining) need testing
    while divisor * divisor <= number:
        while number % divisor == 0:
            factors.append(divisor)
            number //= divisor
        divisor += 1
    if number > 1:
        # whatever remains after division is itself prime
        factors.append(number)
    print(factors)
    return factors
# while True:
# try:
# x = int(input("Give me a number to find all Prime Factors: "))
# break
# except ValueError:
# print("Give a number please!")
prime_factor(120) | 22.470588 | 114 | 0.527487 |
7958e16aaeb1bc6305f5a37d351aecdfd108f91f | 2,111 | py | Python | visualiser/facades/ociLoadBalancerHost.py | antoniogomezr/oci-designer-tookit | 0f8756bf778cbbc923df2020ea11cf384e855590 | [
"UPL-1.0",
"Apache-2.0"
] | null | null | null | visualiser/facades/ociLoadBalancerHost.py | antoniogomezr/oci-designer-tookit | 0f8756bf778cbbc923df2020ea11cf384e855590 | [
"UPL-1.0",
"Apache-2.0"
] | null | null | null | visualiser/facades/ociLoadBalancerHost.py | antoniogomezr/oci-designer-tookit | 0f8756bf778cbbc923df2020ea11cf384e855590 | [
"UPL-1.0",
"Apache-2.0"
] | 1 | 2020-11-18T05:50:53.000Z | 2020-11-18T05:50:53.000Z | #!/usr/bin/python
# Copyright (c) 2020, Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
"""Provide Module Description
"""
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
__author__ = ["Ulrich Dustmann (Oracle Cloud Solutions A-Team)"]
__version__ = "1.0.0"
__module__ = "ociInternetGateway"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
import oci
from common.okitLogging import getLogger
from facades.ociConnection import OCILoadBalancerConnection
# Configure logging
logger = getLogger()
class OCILoadBalancerHosts(OCILoadBalancerConnection):
    """Query facade for the hostnames attached to one OCI load balancer."""

    def __init__(self, config=None, configfile=None, profile=None, compartment_id=None, lb_id=None):
        # Remember the scope before the parent establishes the OCI client.
        self.compartment_id = compartment_id
        self.lb_id = lb_id
        self.lb_hosts_json = []
        self.lb_hosts_obj = []
        super(OCILoadBalancerHosts, self).__init__(config=config, configfile=configfile, profile=profile)

    def list(self, compartment_id=None, filter=None):
        """Fetch, filter, and cache the load balancer's hostnames.

        Returns the filtered list of hostname dicts; also rebuilds the
        parallel ``lb_hosts_obj`` wrapper list as a side effect.
        """
        if compartment_id is None:
            compartment_id = self.compartment_id
        # pagination helper collects every page of results in one call
        lb_hosts = oci.pagination.list_call_get_all_results(self.client.list_hostnames, load_balancer_id=self.lb_id).data
        # Convert to Json object
        lb_hosts_json = self.toJson(lb_hosts)
        logger.debug(str(lb_hosts_json))

        # Filter results
        self.lb_hosts_json = self.filterJsonObjectList(lb_hosts_json, filter)
        logger.debug(str(self.lb_hosts_json))

        # Build List of LoadBalancer Host Objects
        self.lb_hosts_obj = []
        for lb_host in self.lb_hosts_json:
            self.lb_hosts_obj.append(OCILoadBalancerHost(self.config, self.configfile, self.profile, lb_host))
        return self.lb_hosts_json
class OCILoadBalancerHost(object):
    """Thin value object wrapping one load-balancer hostname record."""

    def __init__(self, config=None, configfile=None, profile=None, data=None):
        # Stash the connection settings and the raw hostname json untouched.
        self.config, self.configfile = config, configfile
        self.profile, self.data = profile, data
| 34.606557 | 121 | 0.65514 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.