# mypy: allow-untyped-defs
from typing import Dict
from urllib import parse as urlparse
from . import error
from . import protocol
from . import transport
from .bidi.client import BidiSession
def command(func):
def inner(self, *args, **kwargs):
if hasattr(self, "session"):
session = self.session
else:
session = self
if session.session_id is None:
session.start()
return func(self, *args, **kwargs)
inner.__name__ = func.__name__
inner.__doc__ = func.__doc__
return inner
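# The decorator above lazily starts the session: e.g. calling session.back()
# on a fresh Session first issues "New Session", then the "Back" command.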
class Timeouts:
def __init__(self, session):
self.session = session
def _get(self, key=None):
timeouts = self.session.send_session_command("GET", "timeouts")
if key is not None:
return timeouts[key]
return timeouts
def _set(self, key, secs):
body = {key: secs * 1000}
self.session.send_session_command("POST", "timeouts", body)
return None
@property
def script(self):
return self._get("script")
@script.setter
def script(self, secs):
return self._set("script", secs)
@property
def page_load(self):
return self._get("pageLoad")
@page_load.setter
def page_load(self, secs):
return self._set("pageLoad", secs)
@property
def implicit(self):
return self._get("implicit")
@implicit.setter
def implicit(self, secs):
return self._set("implicit", secs)
def __str__(self):
name = "%s.%s" % (self.__module__, self.__class__.__name__)
return "<%s script=%d, load=%d, implicit=%d>" % \
(name, self.script, self.page_load, self.implicit)
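# Usage sketch (assumes a started Session ``session``; note the asymmetry
# inherited from the wire protocol: the setters take seconds and convert to
# milliseconds, while the getters return the remote end's value as-is):
#
#   session.timeouts.script = 30   # sends {"script": 30000}
#   session.timeouts.script        # value as reported by the remote end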
class ActionSequence:
"""API for creating and performing action sequences.
Each action method adds one or more actions to a queue. When perform()
is called, the queued actions fire in order.
May be chained together as in::
ActionSequence(session, "key", id) \
.key_down("a") \
.key_up("a") \
.perform()
"""
def __init__(self, session, action_type, input_id, pointer_params=None):
"""Represents a sequence of actions of one type for one input source.
:param session: WebDriver session.
:param action_type: Action type; may be "none", "key", or "pointer".
:param input_id: ID of input source.
:param pointer_params: Optional dictionary of pointer parameters.
"""
self.session = session
self._id = input_id
self._type = action_type
self._actions = []
self._pointer_params = pointer_params
@property
def dict(self):
d = {
"type": self._type,
"id": self._id,
"actions": self._actions,
}
if self._pointer_params is not None:
d["parameters"] = self._pointer_params
return d
@command
def perform(self):
"""Perform all queued actions."""
self.session.actions.perform([self.dict])
def _key_action(self, subtype, value):
self._actions.append({"type": subtype, "value": value})
def _pointer_action(self, subtype, button=None, x=None, y=None, duration=None, origin=None, width=None,
height=None, pressure=None, tangential_pressure=None, tilt_x=None,
tilt_y=None, twist=None, altitude_angle=None, azimuth_angle=None):
action = {
"type": subtype
}
if button is not None:
action["button"] = button
if x is not None:
action["x"] = x
if y is not None:
action["y"] = y
if duration is not None:
action["duration"] = duration
if origin is not None:
action["origin"] = origin
if width is not None:
action["width"] = width
if height is not None:
action["height"] = height
if pressure is not None:
action["pressure"] = pressure
if tangential_pressure is not None:
action["tangentialPressure"] = tangential_pressure
if tilt_x is not None:
action["tiltX"] = tilt_x
if tilt_y is not None:
action["tiltY"] = tilt_y
if twist is not None:
action["twist"] = twist
if altitude_angle is not None:
action["altitudeAngle"] = altitude_angle
if azimuth_angle is not None:
action["azimuthAngle"] = azimuth_angle
self._actions.append(action)
def pause(self, duration):
self._actions.append({"type": "pause", "duration": duration})
return self
def pointer_move(self, x, y, duration=None, origin=None, width=None, height=None,
pressure=None, tangential_pressure=None, tilt_x=None, tilt_y=None,
twist=None, altitude_angle=None, azimuth_angle=None):
"""Queue a pointerMove action.
:param x: Destination x-axis coordinate of pointer in CSS pixels.
:param y: Destination y-axis coordinate of pointer in CSS pixels.
:param duration: Number of milliseconds over which to distribute the
move. If None, remote end defaults to 0.
:param origin: Origin of coordinates, either "viewport", "pointer" or
an Element. If None, remote end defaults to "viewport".
"""
self._pointer_action("pointerMove", x=x, y=y, duration=duration, origin=origin,
width=width, height=height, pressure=pressure,
tangential_pressure=tangential_pressure, tilt_x=tilt_x, tilt_y=tilt_y,
twist=twist, altitude_angle=altitude_angle, azimuth_angle=azimuth_angle)
return self
def pointer_up(self, button=0):
"""Queue a pointerUp action for `button`.
:param button: Pointer button to perform action with.
Default: 0, which represents main device button.
"""
self._pointer_action("pointerUp", button=button)
return self
def pointer_down(self, button=0, width=None, height=None, pressure=None,
tangential_pressure=None, tilt_x=None, tilt_y=None,
twist=None, altitude_angle=None, azimuth_angle=None):
"""Queue a pointerDown action for `button`.
:param button: Pointer button to perform action with.
Default: 0, which represents main device button.
"""
self._pointer_action("pointerDown", button=button, width=width, height=height,
pressure=pressure, tangential_pressure=tangential_pressure,
tilt_x=tilt_x, tilt_y=tilt_y, twist=twist, altitude_angle=altitude_angle,
azimuth_angle=azimuth_angle)
return self
def click(self, element=None, button=0):
"""Queue a click with the specified button.
If an element is given, move the pointer to that element first,
otherwise click current pointer coordinates.
:param element: Optional element to click.
:param button: Integer representing pointer button to perform action
with. Default: 0, which represents main device button.
"""
if element:
self.pointer_move(0, 0, origin=element)
return self.pointer_down(button).pointer_up(button)
def key_up(self, value):
"""Queue a keyUp action for `value`.
:param value: Character to perform key action with.
"""
self._key_action("keyUp", value)
return self
def key_down(self, value):
"""Queue a keyDown action for `value`.
:param value: Character to perform key action with.
"""
self._key_action("keyDown", value)
return self
def send_keys(self, keys):
"""Queue a keyDown and keyUp action for each character in `keys`.
:param keys: String of keys to perform key actions with.
"""
for c in keys:
self.key_down(c)
self.key_up(c)
return self
def scroll(self, x, y, delta_x, delta_y, duration=None, origin=None):
"""Queue a scroll action.
:param x: Destination x-axis coordinate of pointer in CSS pixels.
:param y: Destination y-axis coordinate of pointer in CSS pixels.
:param delta_x: scroll delta on x-axis in CSS pixels.
:param delta_y: scroll delta on y-axis in CSS pixels.
:param duration: Number of milliseconds over which to distribute the
scroll. If None, remote end defaults to 0.
:param origin: Origin of coordinates, either "viewport" or an Element.
If None, remote end defaults to "viewport".
"""
action = {
"type": "scroll",
"x": x,
"y": y,
"deltaX": delta_x,
"deltaY": delta_y
}
if duration is not None:
action["duration"] = duration
if origin is not None:
action["origin"] = origin
self._actions.append(action)
return self
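# Usage sketch (assumes a started Session ``session``; "pointer1" is an
# arbitrary input-source id chosen for illustration):
#
#   ActionSequence(session, "pointer", "pointer1") \
#       .pointer_move(100, 100) \
#       .pointer_down() \
#       .pointer_up() \
#       .perform()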
class Actions:
def __init__(self, session):
self.session = session
@command
def perform(self, actions=None):
"""Performs actions by tick from each action sequence in `actions`.
:param actions: List of input source action sequences. A single action
sequence may be created with the help of
``ActionSequence.dict``.
"""
body = {"actions": [] if actions is None else actions}
actions = self.session.send_session_command("POST", "actions", body)
return actions
@command
def release(self):
return self.session.send_session_command("DELETE", "actions")
def sequence(self, *args, **kwargs):
"""Return an empty ActionSequence of the designated type.
See ActionSequence for parameter list.
"""
return ActionSequence(self.session, *args, **kwargs)
class Window:
identifier = "window-fcc6-11e5-b4f8-330a88ab9d7f"
    def __init__(self, session, id=None):
        self.session = session
        self.id = id
@command
def close(self):
handles = self.session.send_session_command("DELETE", "window")
if handles is not None and len(handles) == 0:
# With no more open top-level browsing contexts, the session is closed.
self.session.session_id = None
return handles
# The many "type: ignore" comments here and below are to silence mypy's
# "Decorated property not supported" error, which is due to a limitation
# in mypy, see https://github.com/python/mypy/issues/1362.
@property # type: ignore
@command
def rect(self):
return self.session.send_session_command("GET", "window/rect")
@rect.setter # type: ignore
@command
def rect(self, new_rect):
self.session.send_session_command("POST", "window/rect", new_rect)
@property # type: ignore
@command
def size(self):
"""Gets the window size as a tuple of `(width, height)`."""
rect = self.rect
return (rect["width"], rect["height"])
@size.setter # type: ignore
@command
def size(self, new_size):
"""Set window size by passing a tuple of `(width, height)`."""
try:
width, height = new_size
body = {"width": width, "height": height}
self.session.send_session_command("POST", "window/rect", body)
except (error.UnknownErrorException, error.InvalidArgumentException):
# silently ignore this error as the command is not implemented
# for Android. Revert this once it is implemented.
pass
@property # type: ignore
@command
def position(self):
"""Gets the window position as a tuple of `(x, y)`."""
rect = self.rect
return (rect["x"], rect["y"])
@position.setter # type: ignore
@command
def position(self, new_position):
"""Set window position by passing a tuple of `(x, y)`."""
try:
x, y = new_position
body = {"x": x, "y": y}
self.session.send_session_command("POST", "window/rect", body)
except error.UnknownErrorException:
# silently ignore this error as the command is not implemented
# for Android. Revert this once it is implemented.
pass
@command
def maximize(self):
return self.session.send_session_command("POST", "window/maximize")
@command
def minimize(self):
return self.session.send_session_command("POST", "window/minimize")
@command
def fullscreen(self):
return self.session.send_session_command("POST", "window/fullscreen")
    @classmethod
    def from_json(cls, json, session):
        uuid = json[Window.identifier]
        return cls(session, uuid)
class Frame:
identifier = "frame-075b-4da1-b6ba-e579c2d3230a"
    def __init__(self, session, id=None):
        self.session = session
        self.id = id
    @classmethod
    def from_json(cls, json, session):
        uuid = json[Frame.identifier]
        return cls(session, uuid)
class ShadowRoot:
identifier = "shadow-6066-11e4-a52e-4f735466cecf"
def __init__(self, session, id):
"""
Construct a new shadow root representation.
:param id: Shadow root UUID which must be unique across
all browsing contexts.
:param session: Current ``webdriver.Session``.
"""
self.id = id
self.session = session
@classmethod
def from_json(cls, json, session):
uuid = json[ShadowRoot.identifier]
return cls(session, uuid)
def send_shadow_command(self, method, uri, body=None):
url = f"shadow/{self.id}/{uri}"
return self.session.send_session_command(method, url, body)
@command
def find_element(self, strategy, selector):
body = {"using": strategy,
"value": selector}
return self.send_shadow_command("POST", "element", body)
@command
def find_elements(self, strategy, selector):
body = {"using": strategy,
"value": selector}
return self.send_shadow_command("POST", "elements", body)
class Find:
def __init__(self, session):
self.session = session
@command
def css(self, element_selector, all=True):
elements = self._find_element("css selector", element_selector, all)
return elements
def _find_element(self, strategy, selector, all):
route = "elements" if all else "element"
body = {"using": strategy,
"value": selector}
return self.session.send_session_command("POST", route, body)
class Cookies:
def __init__(self, session):
self.session = session
    def __getitem__(self, name):
        return self.session.send_session_command("GET", "cookie/%s" % name)
    def __setitem__(self, name, value):
        cookie = {"name": name,
                  "value": None}
        if isinstance(value, str):
            cookie["value"] = value
        elif hasattr(value, "value"):
            cookie["value"] = value.value
        self.session.send_session_command("POST", "cookie", {"cookie": cookie})
class UserPrompt:
def __init__(self, session):
self.session = session
@command
def dismiss(self):
self.session.send_session_command("POST", "alert/dismiss")
@command
def accept(self):
self.session.send_session_command("POST", "alert/accept")
@property # type: ignore
@command
def text(self):
return self.session.send_session_command("GET", "alert/text")
@text.setter # type: ignore
@command
def text(self, value):
body = {"text": value}
self.session.send_session_command("POST", "alert/text", body=body)
class Session:
def __init__(self,
host,
port,
url_prefix="/",
enable_bidi=False,
capabilities=None,
extension=None):
if enable_bidi:
if capabilities is not None:
capabilities.setdefault("alwaysMatch", {}).update({"webSocketUrl": True})
else:
capabilities = {"alwaysMatch": {"webSocketUrl": True}}
self.transport = transport.HTTPWireProtocol(host, port, url_prefix)
self.requested_capabilities = capabilities
self.capabilities = None
self.session_id = None
self.timeouts = None
self.window = None
self.find = None
self.enable_bidi = enable_bidi
self.bidi_session = None
self.extension = None
self.extension_cls = extension
self.timeouts = Timeouts(self)
self.window = Window(self)
self.find = Find(self)
self.alert = UserPrompt(self)
self.actions = Actions(self)
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.session_id or "(disconnected)")
def __eq__(self, other):
return (self.session_id is not None and isinstance(other, Session) and
self.session_id == other.session_id)
def __enter__(self):
self.start()
return self
def __exit__(self, *args, **kwargs):
self.end()
def __del__(self):
self.end()
def match(self, capabilities):
return self.requested_capabilities == capabilities
def start(self):
"""Start a new WebDriver session.
:return: Dictionary with `capabilities` and `sessionId`.
:raises error.WebDriverException: If the remote end returns
an error.
"""
if self.session_id is not None:
return
self.transport.close()
body = {"capabilities": {}}
if self.requested_capabilities is not None:
body["capabilities"] = self.requested_capabilities
value = self.send_command("POST", "session", body=body)
assert isinstance(value["sessionId"], str)
assert isinstance(value["capabilities"], Dict)
self.session_id = value["sessionId"]
self.capabilities = value["capabilities"]
if "webSocketUrl" in self.capabilities:
self.bidi_session = BidiSession.from_http(self.session_id,
self.capabilities)
elif self.enable_bidi:
self.end()
raise error.SessionNotCreatedException(
"Requested bidi session, but webSocketUrl capability not found")
if self.extension_cls:
self.extension = self.extension_cls(self)
return value
def end(self):
"""Try to close the active session."""
if self.session_id is None:
return
try:
self.send_command("DELETE", "session/%s" % self.session_id)
except (OSError, error.InvalidSessionIdException):
pass
finally:
self.session_id = None
self.transport.close()
def send_command(self, method, url, body=None, timeout=None):
"""
Send a command to the remote end and validate its success.
:param method: HTTP method to use in request.
        :param url: "Command part" of the HTTP request URL,
e.g. `window/rect`.
:param body: Optional body of the HTTP request.
:return: `None` if the HTTP response body was empty, otherwise
the `value` field returned after parsing the response
body as JSON.
:raises error.WebDriverException: If the remote end returns
an error.
:raises ValueError: If the response body does not contain a
`value` key.
"""
response = self.transport.send(
method, url, body,
encoder=protocol.Encoder, decoder=protocol.Decoder,
session=self, timeout=timeout)
if response.status != 200:
err = error.from_response(response)
if isinstance(err, error.InvalidSessionIdException):
                # The driver could have already deleted the session.
self.session_id = None
raise err
if "value" in response.body:
value = response.body["value"]
"""
Edge does not yet return the w3c session ID.
We want the tests to run in Edge anyway to help with REC.
In order to run the tests in Edge, we need to hack around
bug:
https://developer.microsoft.com/en-us/microsoft-edge/platform/issues/14641972
"""
if url == "session" and method == "POST" and "sessionId" in response.body and "sessionId" not in value:
value["sessionId"] = response.body["sessionId"]
else:
raise ValueError("Expected 'value' key in response body:\n"
"%s" % response)
return value
def send_session_command(self, method, uri, body=None, timeout=None):
"""
Send a command to an established session and validate its success.
:param method: HTTP method to use in request.
        :param uri: "Command part" of the HTTP request URL,
e.g. `window/rect`.
:param body: Optional body of the HTTP request. Must be JSON
serialisable.
:return: `None` if the HTTP response body was empty, otherwise
the result of parsing the body as JSON.
:raises error.WebDriverException: If the remote end returns
an error.
"""
url = urlparse.urljoin("session/%s/" % self.session_id, uri)
return self.send_command(method, url, body, timeout)
@property # type: ignore
@command
def url(self):
return self.send_session_command("GET", "url")
@url.setter # type: ignore
@command
def url(self, url):
body = {"url": url}
return self.send_session_command("POST", "url", body)
@command
def back(self):
return self.send_session_command("POST", "back")
@command
def forward(self):
return self.send_session_command("POST", "forward")
@command
def refresh(self):
return self.send_session_command("POST", "refresh")
@property # type: ignore
@command
def title(self):
return self.send_session_command("GET", "title")
@property # type: ignore
@command
def source(self):
return self.send_session_command("GET", "source")
@command
def new_window(self, type_hint="tab"):
body = {"type": type_hint}
value = self.send_session_command("POST", "window/new", body)
return value["handle"]
@property # type: ignore
@command
def window_handle(self):
return self.send_session_command("GET", "window")
@window_handle.setter # type: ignore
@command
def window_handle(self, handle):
body = {"handle": handle}
return self.send_session_command("POST", "window", body=body)
def switch_frame(self, frame):
if frame == "parent":
url = "frame/parent"
body = None
else:
url = "frame"
body = {"id": frame}
return self.send_session_command("POST", url, body)
@property # type: ignore
@command
def handles(self):
return self.send_session_command("GET", "window/handles")
@property # type: ignore
@command
def active_element(self):
return self.send_session_command("GET", "element/active")
@command
def cookies(self, name=None):
if name is None:
url = "cookie"
else:
url = "cookie/%s" % name
return self.send_session_command("GET", url, {})
@command
def set_cookie(self, name, value, path=None, domain=None,
secure=None, expiry=None, http_only=None):
body = {
"name": name,
"value": value,
}
if domain is not None:
body["domain"] = domain
if expiry is not None:
body["expiry"] = expiry
if http_only is not None:
body["httpOnly"] = http_only
if path is not None:
body["path"] = path
if secure is not None:
body["secure"] = secure
self.send_session_command("POST", "cookie", {"cookie": body})
def delete_cookie(self, name=None):
if name is None:
url = "cookie"
else:
url = "cookie/%s" % name
self.send_session_command("DELETE", url, {})
#[...]
@command
def execute_script(self, script, args=None):
if args is None:
args = []
body = {
"script": script,
"args": args
}
return self.send_session_command("POST", "execute/sync", body)
@command
def execute_async_script(self, script, args=None):
if args is None:
args = []
body = {
"script": script,
"args": args
}
return self.send_session_command("POST", "execute/async", body)
#[...]
@command
def screenshot(self):
return self.send_session_command("GET", "screenshot")
class Element:
"""
Representation of a web element.
A web element is an abstraction used to identify an element when
it is transported via the protocol, between remote- and local ends.
"""
identifier = "element-6066-11e4-a52e-4f735466cecf"
def __init__(self, id, session):
"""
Construct a new web element representation.
:param id: Web element UUID which must be unique across
all browsing contexts.
:param session: Current ``webdriver.Session``.
"""
self.id = id
self.session = session
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.id)
def __eq__(self, other):
return (isinstance(other, Element) and self.id == other.id and
self.session == other.session)
@classmethod
def from_json(cls, json, session):
uuid = json[Element.identifier]
return cls(uuid, session)
def send_element_command(self, method, uri, body=None):
url = "element/%s/%s" % (self.id, uri)
return self.session.send_session_command(method, url, body)
@command
def find_element(self, strategy, selector):
body = {"using": strategy,
"value": selector}
return self.send_element_command("POST", "element", body)
@command
def click(self):
self.send_element_command("POST", "click", {})
@command
def tap(self):
self.send_element_command("POST", "tap", {})
@command
def clear(self):
self.send_element_command("POST", "clear", {})
@command
def send_keys(self, text):
return self.send_element_command("POST", "value", {"text": text})
@property # type: ignore
@command
def text(self):
return self.send_element_command("GET", "text")
@property # type: ignore
@command
def name(self):
return self.send_element_command("GET", "name")
@command
def style(self, property_name):
return self.send_element_command("GET", "css/%s" % property_name)
@property # type: ignore
@command
def rect(self):
return self.send_element_command("GET", "rect")
@property # type: ignore
@command
def selected(self):
return self.send_element_command("GET", "selected")
@command
def screenshot(self):
return self.send_element_command("GET", "screenshot")
@property # type: ignore
@command
def shadow_root(self):
return self.send_element_command("GET", "shadow")
@command
def attribute(self, name):
return self.send_element_command("GET", "attribute/%s" % name)
# This MUST come last because otherwise @property decorators above
# will be overridden by this.
@command
def property(self, name):
return self.send_element_command("GET", "property/%s" % name)
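# End-to-end usage sketch (assumption: a WebDriver-compatible server such as
# geckodriver is listening on localhost:4444; not part of this module):
if __name__ == "__main__":
    with Session("localhost", 4444) as session:
        session.url = "https://example.org/"
        print(session.title)
        heading = session.find.css("h1", all=False)
        print(heading.text)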
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ResNet56 model for Keras adapted from tf.keras.applications.ResNet50.
# Reference:
- [Deep Residual Learning for Image Recognition](
https://arxiv.org/abs/1512.03385)
Adapted from code contributed by BigMoyan.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow as tf
from tensorflow.python.keras import backend
from tensorflow.python.keras import layers
BATCH_NORM_DECAY = 0.997
BATCH_NORM_EPSILON = 1e-5
L2_WEIGHT_DECAY = 2e-4
def identity_building_block(input_tensor,
kernel_size,
filters,
stage,
block,
training=None):
"""The identity block is the block that has no conv layer at shortcut.
Arguments:
input_tensor: input tensor
kernel_size: default 3, the kernel size of
middle conv layer at main path
    filters: list of 2 integers, the filters of the 2 conv layers at the main path
stage: integer, current stage label, used for generating layer names
block: current block label, used for generating layer names
training: Only used if training keras model with Estimator. In other
scenarios it is handled automatically.
Returns:
Output tensor for the block.
"""
filters1, filters2 = filters
if tf.keras.backend.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = tf.keras.layers.Conv2D(filters1, kernel_size,
padding='same',
kernel_initializer='he_normal',
kernel_regularizer=
tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
bias_regularizer=
tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
name=conv_name_base + '2a')(input_tensor)
x = tf.keras.layers.BatchNormalization(axis=bn_axis,
name=bn_name_base + '2a',
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON)(
x, training=training)
x = tf.keras.layers.Activation('relu')(x)
x = tf.keras.layers.Conv2D(filters2, kernel_size,
padding='same',
kernel_initializer='he_normal',
kernel_regularizer=
tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
bias_regularizer=
tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
name=conv_name_base + '2b')(x)
x = tf.keras.layers.BatchNormalization(axis=bn_axis,
name=bn_name_base + '2b',
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON)(
x, training=training)
x = tf.keras.layers.add([x, input_tensor])
x = tf.keras.layers.Activation('relu')(x)
return x
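# Shape note (channels_last assumed): both convs use padding='same' and the
# block ends with add([x, input_tensor]), so the input's channel count must
# already equal filters2; identity blocks preserve the tensor shape.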
def conv_building_block(input_tensor,
kernel_size,
filters,
stage,
block,
strides=(2, 2),
training=None):
"""A block that has a conv layer at shortcut.
Arguments:
input_tensor: input tensor
kernel_size: default 3, the kernel size of
middle conv layer at main path
    filters: list of 2 integers, the filters of the 2 conv layers at the main path
stage: integer, current stage label, used for generating layer names
block: current block label, used for generating layer names
strides: Strides for the first conv layer in the block.
training: Only used if training keras model with Estimator. In other
scenarios it is handled automatically.
Returns:
Output tensor for the block.
    Note that from stage 3, the first conv layer at the main path has
    strides=(2, 2), and the shortcut has strides=(2, 2) as well.
"""
filters1, filters2 = filters
if tf.keras.backend.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = tf.keras.layers.Conv2D(filters1, kernel_size, strides=strides,
padding='same',
kernel_initializer='he_normal',
kernel_regularizer=
tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
bias_regularizer=
tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
name=conv_name_base + '2a')(input_tensor)
x = tf.keras.layers.BatchNormalization(axis=bn_axis,
name=bn_name_base + '2a',
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON)(
x, training=training)
x = tf.keras.layers.Activation('relu')(x)
x = tf.keras.layers.Conv2D(filters2, kernel_size, padding='same',
kernel_initializer='he_normal',
kernel_regularizer=
tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
bias_regularizer=
tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
name=conv_name_base + '2b')(x)
x = tf.keras.layers.BatchNormalization(axis=bn_axis,
name=bn_name_base + '2b',
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON)(
x, training=training)
shortcut = tf.keras.layers.Conv2D(filters2, (1, 1), strides=strides,
kernel_initializer='he_normal',
kernel_regularizer=
tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
bias_regularizer=
tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
name=conv_name_base + '1')(input_tensor)
shortcut = tf.keras.layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '1',
momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON)(
shortcut, training=training)
x = tf.keras.layers.add([x, shortcut])
x = tf.keras.layers.Activation('relu')(x)
return x
def resnet_block(input_tensor,
size,
kernel_size,
filters,
stage,
conv_strides=(2, 2),
training=None):
"""A block which applies conv followed by multiple identity blocks.
Arguments:
input_tensor: input tensor
size: integer, number of constituent conv/identity building blocks.
A conv block is applied once, followed by (size - 1) identity blocks.
kernel_size: default 3, the kernel size of
middle conv layer at main path
    filters: list of 2 integers, the filters of the 2 conv layers at the main path
stage: integer, current stage label, used for generating layer names
conv_strides: Strides for the first conv layer in the block.
training: Only used if training keras model with Estimator. In other
scenarios it is handled automatically.
Returns:
Output tensor after applying conv and identity blocks.
"""
x = conv_building_block(input_tensor, kernel_size, filters, stage=stage,
strides=conv_strides, block='block_0',
training=training)
for i in range(size - 1):
x = identity_building_block(x, kernel_size, filters, stage=stage,
block='block_%d' % (i + 1), training=training)
return x
def resnet(num_blocks, img_input=None, classes=10, training=None):
"""Instantiates the ResNet architecture.
Arguments:
num_blocks: integer, the number of conv/identity blocks in each block.
The ResNet contains 3 blocks with each block containing one conv block
      followed by (num_blocks - 1) identity blocks. Each
      conv/identity block has 2 convolutional layers. With the input
convolutional layer and the pooling layer towards the end, this brings
the total size of the network to (6*num_blocks + 2)
classes: optional number of classes to classify images into
training: Only used if training keras model with Estimator. In other
scenarios it is handled automatically.
Returns:
A Keras model instance.
"""
if backend.image_data_format() == 'channels_first':
x = layers.Lambda(lambda x: backend.permute_dimensions(x, (0, 3, 1, 2)),
name='transpose')(img_input)
bn_axis = 1
  else:  # channels_last
x = img_input
bn_axis = 3
x = tf.keras.layers.ZeroPadding2D(padding=(1, 1), name='conv1_pad')(x)
x = tf.keras.layers.Conv2D(16, (3, 3),
strides=(1, 1),
padding='valid',
kernel_initializer='he_normal',
kernel_regularizer=
tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
bias_regularizer=
tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
name='conv1')(x)
x = tf.keras.layers.BatchNormalization(axis=bn_axis, name='bn_conv1',
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON)(
x, training=training)
x = tf.keras.layers.Activation('relu')(x)
x = resnet_block(x, size=num_blocks, kernel_size=3, filters=[16, 16],
stage=2, conv_strides=(1, 1), training=training)
x = resnet_block(x, size=num_blocks, kernel_size=3, filters=[32, 32],
stage=3, conv_strides=(2, 2), training=training)
x = resnet_block(x, size=num_blocks, kernel_size=3, filters=[64, 64],
stage=4, conv_strides=(2, 2), training=training)
x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
x = tf.keras.layers.Dense(classes, activation='softmax',
kernel_initializer='he_normal',
kernel_regularizer=
tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
bias_regularizer=
tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
name='fc10')(x)
inputs = img_input
# Create model.
model = tf.keras.models.Model(inputs, x, name='resnet56')
return model
resnet20 = functools.partial(resnet, num_blocks=3)
resnet32 = functools.partial(resnet, num_blocks=5)
resnet56 = functools.partial(resnet, num_blocks=9)
resnet110 = functools.partial(resnet, num_blocks=18)
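# Minimal usage sketch (assumption: CIFAR-10-style 32x32 RGB inputs; this
# block is illustrative and not part of the original module):
if __name__ == '__main__':
    _inputs = tf.keras.layers.Input(shape=(32, 32, 3))
    _model = resnet56(img_input=_inputs, classes=10)  # 56 = 6 * 9 + 2 layers
    _model.summary()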
from app.app import create_app
app = create_app(environment='production')
@app.route('/', methods=['GET'], strict_slashes=False)
def lin_slogan():
    return """<h1>OPENVPN</h1>"""
if __name__ == '__main__':
# app.run(debug=True, host="0.0.0.0")
app.run(host="0.0.0.0")
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tkinter as tk
from application import Application
def main():
root = tk.Tk()
root.columnconfigure(0, weight=1)
root.rowconfigure(0, weight=1)
app = Application(parent=root)
app.mainloop()
if __name__ == "__main__":
main()
"""VOC Dataset Classes
Original author: Francisco Massa
https://github.com/fmassa/vision/blob/voc_dataset/torchvision/datasets/voc.py
Updated by: Ellis Brown, Max deGroot
"""
from .config import HOME
import os.path as osp
import sys
import torch
import torch.utils.data as data
import cv2
import numpy as np
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
VOC_CLASSES = ( # always index 0
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
# note: if you used our download scripts, this should be right
VOC_ROOT = osp.join(HOME, "data", "VOCdevkit")
class VOCAnnotationTransform(object):
"""Transforms a VOC annotation into a Tensor of bbox coords and label index
    Initialized with a dictionary lookup of classnames to indexes
Arguments:
class_to_ind (dict, optional): dictionary lookup of classnames -> indexes
(default: alphabetic indexing of VOC's 20 classes)
keep_difficult (bool, optional): keep difficult instances or not
(default: False)
height (int): height
width (int): width
"""
def __init__(self, class_to_ind=None, keep_difficult=False):
self.class_to_ind = class_to_ind or dict(
zip(VOC_CLASSES, range(len(VOC_CLASSES))))
self.keep_difficult = keep_difficult
def __call__(self, target, width, height):
"""
Arguments:
target (annotation) : the target annotation to be made usable
will be an ET.Element
Returns:
a list containing lists of bounding boxes [bbox coords, class name]
"""
res = []
for obj in target.iter('object'):
difficult = int(obj.find('difficult').text) == 1
if not self.keep_difficult and difficult:
continue
name = obj.find('name').text.lower().strip()
bbox = obj.find('bndbox')
pts = ['xmin', 'ymin', 'xmax', 'ymax']
bndbox = []
for i, pt in enumerate(pts):
cur_pt = int(bbox.find(pt).text) - 1
# scale height or width
cur_pt = cur_pt / width if i % 2 == 0 else cur_pt / height
bndbox.append(cur_pt)
label_idx = self.class_to_ind[name]
bndbox.append(label_idx)
res += [bndbox] # [xmin, ymin, xmax, ymax, label_ind]
# img_id = target.find('filename').text[:-4]
return res # [[xmin, ymin, xmax, ymax, label_ind], ... ]
class VOCDetection(data.Dataset):
"""VOC Detection Dataset Object
input is image, target is annotation
Arguments:
root (string): filepath to VOCdevkit folder.
        image_sets (list): list of (year, image set name) tuples to use,
            e.g. [('2012', 'trainval')]
transform (callable, optional): transformation to perform on the
input image
target_transform (callable, optional): transformation to perform on the
target `annotation`
(eg: take in caption string, return tensor of word indices)
dataset_name (string, optional): which dataset to load
(default: 'VOC2007')
"""
def __init__(self, root,
                 image_sets=(('2012', 'trainval'),),
transform=None, target_transform=VOCAnnotationTransform(),
dataset_name='VOC0712'):
        self.root = root
self.image_set = image_sets
self.transform = transform
self.target_transform = target_transform
self.name = dataset_name
self._annopath = osp.join('%s', 'Annotations', '%s.xml')
self._imgpath = osp.join('%s', 'JPEGImages', '%s.jpg')
self.ids = list()
for (year, name) in image_sets:
rootpath = osp.join(self.root, 'VOC' + year)
for line in open(osp.join(rootpath, 'ImageSets', 'Main', name + '.txt')):
self.ids.append((rootpath, line.strip()))
def __getitem__(self, index):
im, gt, h, w = self.pull_item(index)
return im, gt
def __len__(self):
return len(self.ids)
def pull_item(self, index):
img_id = self.ids[index]
target = ET.parse(self._annopath % img_id).getroot()
img = cv2.imread(self._imgpath % img_id)
height, width, channels = img.shape
if self.target_transform is not None:
target = self.target_transform(target, width, height)
if self.transform is not None:
target = np.array(target)
img, boxes, labels = self.transform(img, target[:, :4], target[:, 4])
# to rgb
img = img[:, :, (2, 1, 0)]
# img = img.transpose(2, 0, 1)
target = np.hstack((boxes, np.expand_dims(labels, axis=1)))
return torch.from_numpy(img).permute(2, 0, 1), target, height, width
# return torch.from_numpy(img), target, height, width
def pull_image(self, index):
        '''Returns the original image at index, as loaded by OpenCV (BGR ndarray)
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to show
Return:
            BGR image as a numpy ndarray
'''
img_id = self.ids[index]
return cv2.imread(self._imgpath % img_id, cv2.IMREAD_COLOR)
def pull_anno(self, index):
'''Returns the original annotation of image at index
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to get annotation of
Return:
list: [img_id, [(label, bbox coords),...]]
eg: ('001718', [('dog', (96, 13, 438, 332))])
'''
img_id = self.ids[index]
anno = ET.parse(self._annopath % img_id).getroot()
gt = self.target_transform(anno, 1, 1)
return img_id[1], gt
def pull_tensor(self, index):
'''Returns the original image at an index in tensor form
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to show
Return:
tensorized version of img, squeezed
'''
return torch.Tensor(self.pull_image(index)).unsqueeze_(0)
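# Minimal sketch of VOCAnnotationTransform on a hand-built annotation
# (hypothetical single-object example, not part of the original module;
# coordinates come out normalised by image width/height, class index appended):
if __name__ == '__main__':
    _xml = ('<annotation><object><difficult>0</difficult><name>dog</name>'
            '<bndbox><xmin>49</xmin><ymin>97</ymin>'
            '<xmax>301</xmax><ymax>201</ymax></bndbox></object></annotation>')
    _target = ET.fromstring(_xml)
    print(VOCAnnotationTransform()(_target, width=500, height=375))
    # -> [[0.096, 0.256, 0.6, 0.5333..., 11]]  (11 == index of 'dog')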
#!/usr/bin/env python2
import os
from os.path import abspath, dirname, join
import subprocess
from setuptools import setup, find_packages, Command
# Note: We follow PEP-0440 versioning:
# http://legacy.python.org/dev/peps/pep-0440/
VERSION = '0.1.dev0'
# Note: The dependency versions are chosen to match ooni-backend where they overlap:
TwistedDependency = 'twisted == 13.0' # BUG: Include the hash as per ooni-backend.
def run(*args):
print 'Running: {0!r}'.format(args)
try:
subprocess.check_call(args, shell=False)
except subprocess.CalledProcessError, e:
print 'Process exited with {0!r} exit status.'.format(e.returncode)
raise
class TestWithCoverageAndTrialInAVirtualEnvCommand (Command):
"""Run unit tests with coverage analysis and reporting in a virtualenv."""
# Internal settings:
TestToolRequirements = [
TwistedDependency,
'coverage == 3.7.1',
'mock >= 1.0.1',
]
description = __doc__
user_options = [
]
def __init__(self, dist):
Command.__init__(self, dist)
self.oonisupportdir = dirname(dirname(abspath(__file__)))
self.pkgdir = join(self.oonisupportdir, 'mlab-ns-simulator')
self.testdir = join(self.pkgdir, 'build', 'test')
self.venvdir = join(self.testdir, 'venv')
bindir = join(self.venvdir, 'bin')
self.pip = join(bindir, 'pip')
self.coverage = join(bindir, 'coverage')
self.trial = join(bindir, 'trial')
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
self._initialize_virtualenv()
self._install_testing_tools()
pkgname = 'mlabsim'
pypkg = join(self.pkgdir, pkgname)
if 'PYTHONPATH' in os.environ:
os.environ['PYTHONPATH'] = '{0}:{1}'.format(self.pkgdir, os.environ['PYTHONPATH'])
else:
os.environ['PYTHONPATH'] = self.pkgdir
# Coverage and trial dump things into cwd, so cd:
os.chdir(self.testdir)
run(self.coverage, 'run', '--branch', '--source', pypkg, self.trial, pkgname)
run(self.coverage, 'html')
def _initialize_virtualenv(self):
virtualenvscript = join(self.oonisupportdir, 'virtualenv', 'virtualenv.py')
run('python2', virtualenvscript, '--no-site-packages', self.venvdir)
def _install_testing_tools(self):
reqspath = join(self.testdir, 'test-tool-requirements.txt')
with file(reqspath, 'w') as f:
for req in self.TestToolRequirements:
f.write(req + '\n')
run(self.pip, 'install', '--use-mirrors', '--requirement', reqspath)
setup(
# Humanish metadata:
name='mlab-ns-simulator',
description='A simulator for the mlab-ns service which provides features Ooni needs.',
version=VERSION,
author='LeastAuthority',
author_email='consultancy@leastauthority.com',
license='FIXME',
url='https://github.com/LeastAuthority/ooni-support',
# Python structure for this package:
packages=find_packages(),
entry_points = {
'console_scripts': [
'mlabsim = mlabsim.main:main',
],
},
test_suite='mlabsim.tests',
# Dependencies:
install_requires=[
TwistedDependency,
'argparse == 1.2.1',
],
# Command customization:
cmdclass={
'test': TestWithCoverageAndTrialInAVirtualEnvCommand,
},
)
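# Usage sketch (Python 2 toolchain assumed, matching the shebang above):
#
#   python2 setup.py test
#
# builds a virtualenv under mlab-ns-simulator/build/test/ and runs trial with
# coverage against the mlabsim package.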
"""Initial Migration
Revision ID: c950921cfdc6
Revises:
Create Date: 2019-10-25 18:17:12.455875
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c950921cfdc6'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=255), nullable=True),
sa.Column('email', sa.String(length=255), nullable=True),
sa.Column('pass_secure', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
op.create_table('pitches',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('owner_id', sa.Integer(), nullable=False),
sa.Column('description', sa.String(), nullable=True),
sa.Column('title', sa.String(), nullable=True),
sa.Column('category', sa.String(length=255), nullable=False),
sa.ForeignKeyConstraint(['owner_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_pitches_description'), 'pitches', ['description'], unique=False)
op.create_table('comments',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('description', sa.String(length=1000), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('pitch_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['pitch_id'], ['pitches.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('downvotes',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('downvote', sa.Integer(), nullable=True),
sa.Column('pitch_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['pitch_id'], ['pitches.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('upvotes',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('upvote', sa.Integer(), nullable=True),
sa.Column('pitch_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['pitch_id'], ['pitches.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('upvotes')
op.drop_table('downvotes')
op.drop_table('comments')
op.drop_index(op.f('ix_pitches_description'), table_name='pitches')
op.drop_table('pitches')
op.drop_index(op.f('ix_users_email'), table_name='users')
op.drop_table('users')
# ### end Alembic commands ###
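# Usage sketch: apply with `alembic upgrade head`; since this is the initial
# revision, `alembic downgrade base` (or `alembic downgrade -1`) reverts it.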
import abc
import asyncio
import logging
import os
from collections import deque
from typing import ( # flake8: noqa
Any,
Awaitable,
Callable,
Deque,
List,
Optional,
Tuple,
Type,
)
import aiohttp
import thriftpy2
from aiohttp import hdrs
from aiohttp.client_exceptions import ClientError, ClientResponseError
from thriftpy2.protocol import TBinaryProtocolFactory
import aiojaeger
from .record import Record
from .spancontext import BaseTraceContext, DummyTraceContext
from .spancontext.jaeger import JaegerTraceContext
from .spancontext.zipkin import ZipkinTraceContext
from .utils import random_id
try:
from thriftpy2.transport import TCyMemoryBuffer as TMemoryBuffer
except ImportError:
from thriftpy2.transport import TMemoryBuffer
logger = logging.getLogger(__name__)
DEFAULT_TIMEOUT = aiohttp.ClientTimeout(total=5 * 60)
BATCHES_MAX_COUNT = 10 ** 4
DataList = List[Record]
SndBatches = Deque[Tuple[int, DataList]]
SendDataCoro = Callable[[DataList], Awaitable[bool]]
class TransportABC(abc.ABC):
@abc.abstractmethod
def send(self, record: Record) -> None: # pragma: no cover
"""Sends data to abstract collector."""
pass
@abc.abstractmethod
async def close(self) -> None: # pragma: no cover
"""Performs additional cleanup actions if required."""
pass
@abc.abstractmethod
def generate_trace_id(self) -> int:
pass
@abc.abstractmethod
def generate_span_id(self) -> int:
pass
@property
@abc.abstractmethod
def span_context(self) -> Type[BaseTraceContext]:
pass
class StubTransport(TransportABC):
"""Dummy transport, which logs spans to a limited queue."""
def __init__(self, queue_length: int = 100) -> None:
logger.info("Collector address was not provided, using stub transport")
self.records: Deque[Record] = deque(maxlen=queue_length)
def send(self, record: Record) -> None:
self.records.append(record)
async def close(self) -> None:
pass
def generate_trace_id(self) -> int:
return random_id()
def generate_span_id(self) -> int:
return random_id()
@property
def span_context(self) -> Type[BaseTraceContext]:
return DummyTraceContext
class StubZipkinTransport(StubTransport):
@property
def span_context(self) -> Type[BaseTraceContext]:
return ZipkinTraceContext
class StubJaegerTransport(StubTransport):
@property
def span_context(self) -> Type[BaseTraceContext]:
return JaegerTraceContext
class BatchManager:
def __init__(
self,
max_size: int,
send_interval: float,
attempt_count: int,
send_data: SendDataCoro,
) -> None:
loop = asyncio.get_event_loop()
self._max_size = max_size
self._send_interval = send_interval
self._send_data = send_data
self._attempt_count = attempt_count
self._max = BATCHES_MAX_COUNT
self._sending_batches: SndBatches = deque([], maxlen=self._max)
self._active_batch: Optional[DataList] = None
self._ender = loop.create_future()
self._timer: Optional[asyncio.Future[Any]] = None
self._sender_task = asyncio.ensure_future(self._sender_loop())
def add(self, data: Any) -> None:
if self._active_batch is None:
self._active_batch = []
self._active_batch.append(data)
if len(self._active_batch) >= self._max_size:
self._sending_batches.append((0, self._active_batch))
self._active_batch = None
if self._timer is not None and not self._timer.done():
self._timer.cancel()
async def stop(self) -> None:
if self._ender.done():
return None
self._ender.set_result(None)
await self._sender_task
await self._send()
if self._timer is not None:
self._timer.cancel()
try:
await self._timer
except asyncio.CancelledError:
pass
async def _sender_loop(self) -> None:
while not self._ender.done():
await self._wait()
await self._send()
async def _send(self) -> None:
if self._active_batch is not None:
self._sending_batches.append((0, self._active_batch))
self._active_batch = None
batches = self._sending_batches.copy()
self._sending_batches = deque([], maxlen=self._max)
for attempt, batch in batches:
if not await self._send_data(batch):
attempt += 1
if attempt < self._attempt_count:
self._sending_batches.append((attempt, batch))
async def _wait(self) -> None:
self._timer = asyncio.ensure_future(asyncio.sleep(self._send_interval))
await asyncio.wait(
[self._timer, self._ender],
return_when=asyncio.FIRST_COMPLETED,
)
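# BatchManager flushes when a batch reaches max_size (add() cancels the sleep
# timer so _sender_loop wakes immediately) or when send_interval elapses;
# batches whose send fails are requeued up to attempt_count times.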
class ZipkinTransport(TransportABC):
def __init__(
self,
address: str,
send_interval: float = 5,
*,
send_max_size: int = 100,
send_attempt_count: int = 3,
send_timeout: Optional[aiohttp.ClientTimeout] = None
) -> None:
self._queue: DataList = []
self._closing = False
self._address = address
self._send_interval = send_interval
if send_timeout is None:
send_timeout = DEFAULT_TIMEOUT
self._session = aiohttp.ClientSession(
timeout=send_timeout, headers={"Content-Type": "application/json"}
)
self._batch_manager = BatchManager(
send_max_size, send_interval, send_attempt_count, self._send_data
)
def send(self, record: Record) -> None:
self._batch_manager.add(record)
async def _send_data(self, records: List[Record]) -> bool:
data = [record.asdict() for record in records]
try:
async with self._session.post(self._address, json=data) as resp:
body = await resp.text()
if resp.status >= 300:
msg = "zipkin responded with code: {} and body: {}".format(
resp.status, body
)
raise RuntimeError(msg)
except (asyncio.TimeoutError, ClientError):
return False
        except Exception as exc:  # pylint: disable=broad-except
            # This code must never break the application, so log and swallow
            # unexpected errors instead of propagating them.
            logger.error("Cannot send spans to zipkin", exc_info=exc)
return True
async def close(self) -> None:
if self._closing:
return
self._closing = True
await self._batch_manager.stop()
await self._session.close()
def generate_trace_id(self) -> int:
return random_id(128)
def generate_span_id(self) -> int:
return random_id(64)
@property
def span_context(self) -> Type[BaseTraceContext]:
return ZipkinTraceContext
class ThriftTransport(TransportABC):
MODULE_PATH = os.path.abspath(os.path.dirname(aiojaeger.__file__))
jaeger_thrift = thriftpy2.load(
os.path.join(MODULE_PATH, "jaeger-idl/thrift/jaeger.thrift"),
module_name="jaeger_thrift",
)
def __init__(
self,
address: str,
send_interval: float = 5,
*,
send_max_size: int = 100,
send_attempt_count: int = 3,
send_timeout: Optional[aiohttp.ClientTimeout] = None
) -> None:
        self._closing = False
        if send_timeout is None:
            send_timeout = DEFAULT_TIMEOUT
self._address = address
self._batch_manager = BatchManager(
send_max_size, send_interval, send_attempt_count, self._send_data
)
self._session = aiohttp.ClientSession(
timeout=send_timeout,
headers={hdrs.CONTENT_TYPE: "application/x-thrift"},
)
self._binary = TBinaryProtocolFactory(strict_read=False)
    async def close(self) -> None:
        if self._closing:
            return
        self._closing = True
        await self._batch_manager.stop()
        # The aiohttp session must be closed explicitly, mirroring
        # ZipkinTransport.close(), or its connector leaks.
        await self._session.close()
def send(self, record: Record) -> None:
self._batch_manager.add(record)
async def _send_data(self, data: List[Record]) -> bool:
if not data:
return True
batch = self.jaeger_thrift.Batch()
process = self.jaeger_thrift.Process()
process.serviceName = data[0].service_name
batch.process = process
spans = []
for record in data:
span = record.asthrift(self.jaeger_thrift)
spans.append(span)
batch.spans = spans
otrans = TMemoryBuffer()
self._binary.get_protocol(otrans).write_struct(batch)
        # Use the response as a context manager so the underlying connection
        # is always released back to the pool.
        async with self._session.post(
            self._address, data=otrans.getvalue()
        ) as resp:
            try:
                resp.raise_for_status()
            except ClientResponseError:
                text = await resp.text()
                logger.exception("Cannot send spans to jaeger: %r", text)
                return False
        return True
def generate_trace_id(self) -> int:
# TODO: support 128 bit header
return random_id()
def generate_span_id(self) -> int:
return random_id()
@property
def span_context(self) -> Type[BaseTraceContext]:
return JaegerTraceContext
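# Usage sketch (assumes a Zipkin collector on the standard v2 HTTP endpoint;
# the address is hypothetical):
#
#   transport = ZipkinTransport("http://localhost:9411/api/v2/spans")
#   transport.send(record)   # records are batched and sent in the background
#   await transport.close()  # flushes any remaining batches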
def p_a():
n = int(input())
if n == 1:
print("Hello World")
else:
a = int(input())
b = int(input())
print(a + b)
def p_b():
N, T = map(int, input().split())
ans = 10 ** 9
for _ in range(N):
c, t = map(int, input().split())
if t <= T:
ans = min(ans, c)
print(ans if ans != 10 ** 9 else "TLE")
def p_c():
from itertools import product
N = int(input())
xyh = [tuple(map(int, input().split())) for _ in range(N)]
h = 0
for i in xyh:
if h < i[2]:
x_, y_, h = i
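    # Any point with a positive reading pins down the apex height exactly;
    # every candidate center (x, y) is then verified against all readings.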
for x, y in product(range(101), repeat=2):
high = h + abs(x_ - x) + abs(y_ - y)
if all(h == max(0, high - abs(x - i) - abs(y - j)) for i, j, h in xyh):
print(x, y, high)
def p_d():
n, m = map(int, input().split())
max_candidate = m // n
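    # (m - n * i) % i == 0 is equivalent to m % i == 0, so this finds the
    # largest divisor of m that does not exceed m // n.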
for i in reversed(range(1, max_candidate + 1)):
diff = m - n * i
if diff % i == 0:
print(i)
break
if __name__ == '__main__':
p_d()
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""Vispy configuration functions
"""
import os
from os import path as op
import json
import sys
import platform
import getopt
import traceback
import tempfile
import atexit
from shutil import rmtree
from .event import EmitterGroup, EventEmitter, Event
from .logs import logger, set_log_level, use_log_level
from ..ext.six import string_types, file_types
config = None
_data_path = None
_allowed_config_keys = None
def _init():
""" Create global Config object, parse command flags
"""
global config, _data_path, _allowed_config_keys
app_dir = _get_vispy_app_dir()
if app_dir is not None:
_data_path = op.join(app_dir, 'data')
_test_data_path = op.join(app_dir, 'test_data')
else:
_data_path = _test_data_path = None
# All allowed config keys and the types they may have
_allowed_config_keys = {
'data_path': string_types,
'default_backend': string_types,
'gl_backend': string_types,
'gl_debug': (bool,),
'glir_file': string_types+file_types,
'include_path': list,
'logging_level': string_types,
'qt_lib': string_types,
'dpi': (int, type(None)),
'profile': string_types + (type(None),),
'audit_tests': (bool,),
'test_data_path': string_types + (type(None),),
}
# Default values for all config options
default_config_options = {
'data_path': _data_path,
'default_backend': '',
'gl_backend': 'gl2',
'gl_debug': False,
'glir_file': '',
'include_path': [],
'logging_level': 'info',
'qt_lib': 'any',
'dpi': None,
'profile': None,
'audit_tests': False,
'test_data_path': _test_data_path,
}
config = Config(**default_config_options)
try:
config.update(**_load_config())
except Exception as err:
raise Exception('Error while reading vispy config file "%s":\n %s' %
                        (_get_config_fname(), str(err)))
set_log_level(config['logging_level'])
_parse_command_line_arguments()
###############################################################################
# Command line flag parsing
VISPY_HELP = """
VisPy command line arguments:
--vispy-backend=(qt|pyqt4|pyqt5|pyside|glfw|pyglet|sdl2|wx)
Selects the backend system for VisPy to use. This will override the default
backend selection in your configuration file.
--vispy-log=(debug|info|warning|error|critical)[,search string]
Sets the verbosity of logging output. The default is 'warning'. If a search
string is given, messages will only be displayed if they match the string,
or if their call location (module.class:method(line) or
module:function(line)) matches the string.
--vispy-dpi=resolution
Force the screen resolution to a certain value (in pixels per inch). By
default, the OS is queried to determine the screen DPI.
--vispy-fps
Print the framerate (in Frames Per Second) in the console.
--vispy-gl-debug
Enables error checking for all OpenGL calls.
--vispy-glir-file
Export glir commands to specified file.
--vispy-profile=locations
Measure performance at specific code locations and display results.
*locations* may be "all" or a comma-separated list of method names like
"SceneCanvas.draw_visual".
--vispy-cprofile
Enable profiling using the built-in cProfile module and display results
when the program exits.
--vispy-audit-tests
Enable user auditing of image test results.
--vispy-help
Display this help message.
"""
def _parse_command_line_arguments():
""" Transform vispy specific command line args to vispy config.
    Put into a function so that any variables don't leak into the vispy namespace.
"""
global config
# Get command line args for vispy
argnames = ['vispy-backend=', 'vispy-gl-debug', 'vispy-glir-file=',
'vispy-log=', 'vispy-help', 'vispy-profile=', 'vispy-cprofile',
'vispy-dpi=', 'vispy-audit-tests']
try:
opts, args = getopt.getopt(sys.argv[1:], '', argnames)
except getopt.GetoptError:
opts = []
# Use them to set the config values
for o, a in opts:
if o.startswith('--vispy'):
if o == '--vispy-backend':
config['default_backend'] = a
logger.info('vispy backend: %s', a)
elif o == '--vispy-gl-debug':
config['gl_debug'] = True
elif o == '--vispy-glir-file':
config['glir_file'] = a
elif o == '--vispy-log':
if ',' in a:
verbose, match = a.split(',')
else:
verbose = a
match = None
            config['logging_level'] = verbose
set_log_level(verbose, match)
elif o == '--vispy-profile':
config['profile'] = a
elif o == '--vispy-cprofile':
_enable_profiling()
elif o == '--vispy-help':
print(VISPY_HELP)
elif o == '--vispy-dpi':
config['dpi'] = int(a)
elif o == '--vispy-audit-tests':
config['audit_tests'] = True
else:
logger.warning("Unsupported vispy flag: %s" % o)
###############################################################################
# CONFIG
# Adapted from pyzolib/paths.py:
# https://bitbucket.org/pyzo/pyzolib/src/tip/paths.py
def _get_vispy_app_dir():
"""Helper to get the default directory for storing vispy data"""
# Define default user directory
user_dir = os.path.expanduser('~')
# Get system app data dir
path = None
if sys.platform.startswith('win'):
path1, path2 = os.getenv('LOCALAPPDATA'), os.getenv('APPDATA')
path = path1 or path2
elif sys.platform.startswith('darwin'):
path = os.path.join(user_dir, 'Library', 'Application Support')
# On Linux and as fallback
if not (path and os.path.isdir(path)):
path = user_dir
# Maybe we should store things local to the executable (in case of a
# portable distro or a frozen application that wants to be portable)
prefix = sys.prefix
if getattr(sys, 'frozen', None): # See application_dir() function
prefix = os.path.abspath(os.path.dirname(sys.path[0]))
for reldir in ('settings', '../settings'):
localpath = os.path.abspath(os.path.join(prefix, reldir))
if os.path.isdir(localpath):
try:
open(os.path.join(localpath, 'test.write'), 'wb').close()
os.remove(os.path.join(localpath, 'test.write'))
except IOError:
pass # We cannot write in this directory
else:
path = localpath
break
# Get path specific for this app
appname = '.vispy' if path == user_dir else 'vispy'
path = os.path.join(path, appname)
return path
class ConfigEvent(Event):
""" Event indicating a configuration change.
This class has a 'changes' attribute which is a dict of all name:value
pairs that have changed in the configuration.
"""
def __init__(self, changes):
Event.__init__(self, type='config_change')
self.changes = changes
class Config(object):
""" Container for global settings used application-wide in vispy.
Events:
-------
Config.events.changed - Emits ConfigEvent whenever the configuration
changes.
"""
def __init__(self, **kwargs):
self.events = EmitterGroup(source=self)
self.events['changed'] = EventEmitter(
event_class=ConfigEvent,
source=self)
self._config = {}
self.update(**kwargs)
self._known_keys = get_config_keys()
def __getitem__(self, item):
return self._config[item]
def __setitem__(self, item, val):
self._check_key_val(item, val)
self._config[item] = val
# inform any listeners that a configuration option has changed
self.events.changed(changes={item: val})
def _check_key_val(self, key, val):
global _allowed_config_keys
# check values against acceptable ones
known_keys = _allowed_config_keys
if key not in known_keys:
raise KeyError('key "%s" not in known keys: "%s"'
% (key, known_keys))
if not isinstance(val, known_keys[key]):
raise TypeError('Value for key "%s" must be one of %s, not %s.'
% (key, known_keys[key], type(val)))
def update(self, **kwargs):
for key, val in kwargs.items():
self._check_key_val(key, val)
self._config.update(kwargs)
self.events.changed(changes=kwargs)
def __repr__(self):
return repr(self._config)
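# A minimal usage sketch (assumes 'dpi' is among the allowed config keys with
# an int type, as suggested by the command-line handling above):
#
#     cfg = Config(dpi=96)
#     cfg.events.changed.connect(lambda ev: print(ev.changes))
#     cfg['dpi'] = 120          # emits ConfigEvent with changes={'dpi': 120}
#     cfg.update(dpi=144)       # batch form, also emits a single event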
def get_config_keys():
"""The config keys known by vispy and their allowed data types.
Returns
-------
keys : dict
Dict of {key: (types,)} pairs.
"""
global _allowed_config_keys
return _allowed_config_keys.copy()
def _get_config_fname():
"""Helper for the vispy config file"""
directory = _get_vispy_app_dir()
if directory is None:
return None
fname = op.join(directory, 'vispy.json')
if os.environ.get('_VISPY_CONFIG_TESTING', None) is not None:
fname = op.join(_TempDir(), 'vispy.json')
return fname
def _load_config():
"""Helper to load prefs from ~/.vispy/vispy.json"""
fname = _get_config_fname()
if fname is None or not op.isfile(fname):
return dict()
with open(fname, 'r') as fid:
config = json.load(fid)
return config
def save_config(**kwargs):
"""Save configuration keys to vispy config file
Parameters
----------
**kwargs : keyword arguments
Key/value pairs to save to the config file.
"""
if kwargs == {}:
kwargs = config._config
current_config = _load_config()
current_config.update(**kwargs)
# write to disk
fname = _get_config_fname()
if fname is None:
raise RuntimeError('config filename could not be determined')
if not op.isdir(op.dirname(fname)):
os.mkdir(op.dirname(fname))
with open(fname, 'w') as fid:
json.dump(current_config, fid, sort_keys=True, indent=0)
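# Sketch of a save/load round trip (the file location is platform dependent,
# see _get_config_fname()):
#
#     save_config(dpi=96)                     # merges {'dpi': 96} into vispy.json
#     assert _load_config().get('dpi') == 96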
def set_data_dir(directory=None, create=False, save=False):
"""Set vispy data download directory"""
if directory is None:
directory = _data_path
if _data_path is None:
raise IOError('default path cannot be determined, please '
'set it manually (directory != None)')
if not op.isdir(directory):
if not create:
raise IOError('directory "%s" does not exist, perhaps try '
'create=True to create it?' % directory)
os.mkdir(directory)
config.update(data_path=directory)
if save:
save_config(data_path=directory)
def _enable_profiling():
""" Start profiling and register callback to print stats when the program
exits.
"""
import cProfile
import atexit
global _profiler
_profiler = cProfile.Profile()
_profiler.enable()
atexit.register(_profile_atexit)
_profiler = None
def _profile_atexit():
global _profiler
_profiler.print_stats(sort='cumulative')
def sys_info(fname=None, overwrite=False):
"""Get relevant system and debugging information
Parameters
----------
fname : str | None
Filename to dump info to. Use None to simply print.
overwrite : bool
If True, overwrite file (if it exists).
Returns
-------
out : str
The system information as a string.
"""
if fname is not None and op.isfile(fname) and not overwrite:
raise IOError('file exists, use overwrite=True to overwrite')
out = ''
try:
# Nest all imports here to avoid any circular imports
from ..app import use_app, Canvas
from ..app.backends import BACKEND_NAMES
from ..gloo import gl
from ..testing import has_backend
# get default app
with use_log_level('warning'):
app = use_app(call_reuse=False) # suppress messages
out += 'Platform: %s\n' % platform.platform()
out += 'Python: %s\n' % str(sys.version).replace('\n', ' ')
out += 'Backend: %s\n' % app.backend_name
for backend in BACKEND_NAMES:
if backend.startswith('ipynb_'):
continue
with use_log_level('warning', print_msg=False):
which = has_backend(backend, out=['which'])[1]
out += '{0:<9} {1}\n'.format(backend + ':', which)
out += '\n'
# We need an OpenGL context to get GL info
canvas = Canvas('Test', (10, 10), show=False, app=app)
canvas._backend._vispy_set_current()
out += 'GL version: %r\n' % (gl.glGetParameter(gl.GL_VERSION),)
x_ = gl.GL_MAX_TEXTURE_SIZE
out += 'MAX_TEXTURE_SIZE: %r\n' % (gl.glGetParameter(x_),)
out += 'Extensions: %r\n' % (gl.glGetParameter(gl.GL_EXTENSIONS),)
canvas.close()
except Exception: # don't stop printing info
out += '\nInfo-gathering error:\n%s' % traceback.format_exc()
pass
if fname is not None:
with open(fname, 'w') as fid:
fid.write(out)
return out
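# Usage sketch:
#
#     print(sys_info())                        # gather info and print it
#     sys_info('sysinfo.txt', overwrite=True)  # additionally dump to a file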
class _TempDir(str):
"""Class for creating and auto-destroying temp dir
This is designed to be used with testing modules.
We cannot simply use __del__() method for cleanup here because the rmtree
function may be cleaned up before this object, so we use the atexit module
instead.
"""
def __new__(self):
new = str.__new__(self, tempfile.mkdtemp())
return new
def __init__(self):
self._path = self.__str__()
atexit.register(self.cleanup)
def cleanup(self):
rmtree(self._path, ignore_errors=True)
# initialize config options
_init()
|
from __future__ import annotations
import logging
import re
import p4transfer
def test_edit_delete_readd(source, target, default_transfer_config):
"""Test an edit followed by a delete followed by a re-add."""
inside_file1 = source.local_path("inside/inside_file1")
inside_file1.write_bytes(b"Test content")
source.p4("add", inside_file1)
source.p4("submit", "-d", "inside_file1 added")
source.p4("edit", inside_file1)
inside_file1.write_bytes(inside_file1.read_bytes() + b"More content")
source.p4("submit", "-d", "inside_file1 edited")
p4transfer.test_transfer(default_transfer_config)
changes = target.p4(
"changes",
)
assert len(changes) == 2
assert changes[0]["change"] == "2"
assert target.counter == 2
assert len(target.p4("changes")) == 2
source.p4("delete", inside_file1)
source.p4("submit", "-d", "inside_file1 deleted")
p4transfer.test_transfer(default_transfer_config)
assert target.counter == 3
assert len(target.p4("changes")) == 3
changes = target.p4(
"changes",
)
assert len(changes) == 3
filelog = target.filelog("//depot/import/inside_file1")
assert filelog[0].revisions[0].action == "delete"
inside_file1.write_bytes(b"New content")
source.p4("add", inside_file1)
source.p4("submit", "-d", "Re-added")
p4transfer.test_transfer(default_transfer_config)
assert target.counter == 4
assert len(target.p4("changes")) == 4
filelog = target.filelog("//depot/import/inside_file1")
assert filelog[0].revisions[0].action == "add"
def test_ignored_delete(source, target, default_transfer_config):
"""Test for ignoring a delete and then doing it again."""
file1 = source.local_path("inside/file1")
file2 = source.local_path("inside/file2")
file1.write_bytes(b"Test content")
source.p4("add", file1)
source.p4("submit", "-d", "files added")
source.p4("integ", file1, file2)
source.p4("submit", "-d", "file2 added")
source.p4("delete", file2)
source.p4("submit", "-d", "file2 deleted")
source.p4("integ", "-Rd", file2, file1)
source.p4("resolve", "-ay")
source.p4("submit", "-d", "file2 delete ignored")
source.p4("integ", "-f", file2, file1)
source.p4("resolve", "-at")
source.p4("submit", "-d", "file2 delete integrated")
p4transfer.test_transfer(default_transfer_config)
assert target.counter == 5
assert len(target.p4("changes")) == 5
def test_ignored_delete_integ(source, target, default_transfer_config):
"""Test for ignoring a delete which is being integrated 0360 - and the resulting rev re-numbering."""
# file3
# $ p4 filelog //Ocean/Console-0.12.4/T_Shark.uasset#2
# //Ocean/Console-0.12.4/T_Shark.uasset#2
# ... #2 change 11891294 integrate on 2020/03/03 by casey.spencer@ROBO (binary+l) 'Merging //Ocean/Release-0.13 to'
# ... ... copy from //Ocean/Release-0.12.4/T_Shark.uasset#3
# ... #1 change 11890184 branch on 2020/03/03 by casey.spencer@ROBO (binary+l) 'Merging //Ocean/Release-0.13 to'
# ... ... branch from //Ocean/Release-0.12.4/T_Shark.uasset#2
# file2
# $ p4 filelog //Ocean/Release-0.12.4/T_Shark.uasset#3
# //Ocean/Release-0.12.4/T_Shark.uasset
# ... #3 change 11891280 integrate on 2020/03/03 by Casey.Spencer@CSws (binary+l) 'Merging //Ocean/Release-0.13 to'
# ... ... copy from //Ocean/Release-0.13/T_Shark.uasset#5
# ... #2 change 11890179 branch on 2020/03/03 by Casey.Spencer@CSws (binary+l) 'Merging //Ocean/Release-0.13 to'
# ... ... branch from //Ocean/Release-0.13/T_Shark.uasset#4
# ... #1 change 11887389 delete on 2020/03/03 by Casey.Spencer@CSws (binary+l) '@ocn Cherry picking CL 11882562'
# ... ... ignored //Ocean/Release-0.13/T_Shark.uasset#4
# file1
# $ p4 filelog //Ocean/Release-0.13/T_Shark.uasset#4
# //Ocean/Release-0.13/T_Shark.uasset
# ... #4 change 11885487 edit on 2020/03/03 by emily.solomon@ESws (binary+l) '#jira nojira Updating Shark an'
# ... ... branch into //Ocean/Release-0.12.4/T_Shark.uasset#2
# ... ... ignored by //Ocean/Release-0.12.4/T_Shark.uasset#1
# ... #3 change 11872056 edit on 2020/03/03 by Emily.Solomon@ESws (binary+l) '#jira nojira Updating Elim Obje'
# ... #2 change 11849715 edit on 2020/03/02 by emily.solomon@ESws (binary+l) '#jira nojira Updating Elim Obj'
# ... #1 change 11711950 add on 2020/02/27 by Emily.Solomon@ESws (binary+l) '#jira nojira Updating Snowman t'
file1 = source.local_path("inside/file1")
file2 = source.local_path("inside/file2")
file3 = source.local_path("inside/file3")
# outside_file1 = source.local_path("outside/outside_file1")
file1.write_bytes(b"Test content")
# outside_file1.write_bytes(b"Some content")
source.p4("add", file1)
source.p4("submit", "-d", "files added")
source.p4("edit", file1)
file1.write_bytes(file1.read_bytes() + b"\nmore")
source.p4("submit", "-d", "edited")
source.p4("edit", file1)
file1.write_bytes(file1.read_bytes() + b"\nmore")
source.p4("submit", "-d", "edited")
source.p4("edit", file1)
file1.write_bytes(file1.read_bytes() + b"\nmore")
source.p4("submit", "-d", "edited")
# Generate a first rev which is an ignore of a delete - this will not be transferred.
source.p4("integ", "-Rb", f"{file1}#4,4", file2)
source.p4("resolve", "-ay")
source.p4("submit", "-d", "ignore delete")
source.p4("integ", "-f", f"{file1}#4,4", file2)
source.p4("resolve", "-at")
source.p4("submit", "-d", "branched")
source.p4("integ", file2, file3)
source.p4("submit", "-d", "branched")
source.p4("edit", file1)
file1.write_bytes(file1.read_bytes() + b"\nmore again")
source.p4("submit", "-d", "edited")
source.p4("integ", file1, file2)
source.p4("resolve", "-am")
source.p4("submit", "-d", "branched")
source.p4("integ", file2, file3)
source.p4("resolve", "-am")
source.p4("submit", "-d", "branched")
filelog = source.filelog("//depot/inside/file2")[0]
assert filelog.revisions[0].integrations[0].how == "copy from"
assert filelog.revisions[1].integrations[0].how == "branch from"
assert filelog.revisions[2].integrations[0].how == "ignored"
assert filelog.revisions[0].action == "integrate"
assert filelog.revisions[1].action == "branch"
assert filelog.revisions[2].action == "delete"
p4transfer.test_transfer(default_transfer_config)
assert target.counter == 10
assert len(target.p4("changes")) == 10
def test_delete_delete(source, target, default_transfer_config):
"""Test for a delete on top of a delete."""
inside_file1 = source.local_path("inside/inside_file1")
inside_file2 = source.local_path("inside/inside_file2")
inside_file3 = source.local_path("inside/inside_file3")
inside_file1.write_bytes(b"Test content")
inside_file2.write_bytes(b"Test content")
source.p4("add", inside_file1)
source.p4("add", inside_file2)
source.p4("submit", "-d", "files added")
source.p4("integrate", inside_file2, inside_file3)
source.p4("submit", "-d", "branched file")
source.p4("delete", inside_file1)
source.p4("delete", inside_file2)
source.p4("submit", "-d", "files deleted")
with source.warnings_ignored:
source.p4("sync", "//depot/inside/inside_file1#1")
source.p4("sync", "//depot/inside/inside_file2#1")
source.p4("delete", "//depot/inside/inside_file1")
source.p4("delete", "//depot/inside/inside_file2")
source.p4("opened")
try:
source.p4("submit", "-d", "files deleted again")
except Exception as e:
logging.info(str(e))
err = source.p4.errors[0]
if re.search(r"Submit failed -- fix problems above then", err):
m = re.search(r"p4 submit -c (\d+)", err)
if m:
source.p4("submit", "-c", m.group(1))
filelog = source.filelog("//depot/inside/inside_file1")[0]
logging.debug(filelog)
assert filelog.revisions[0].action == "delete"
assert filelog.revisions[1].action == "delete"
source.p4("integrate", inside_file2, inside_file3)
source.p4("submit", "-d", "integrated delete")
filelog = source.filelog("//depot/inside/inside_file3")[0]
logging.debug(filelog)
assert filelog.revisions[0].action == "delete"
p4transfer.test_transfer(default_transfer_config)
assert target.counter == 5
assert len(target.p4("changes")) == 5
filelog = target.filelog("//depot/import/inside_file1")[0]
logging.debug(filelog)
assert filelog.revisions[0].action == "delete"
assert filelog.revisions[1].action == "delete"
|
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate Node document based on the current settings for GMN."""
import d1_common.type_conversions
import d1_common.xml
import django.conf
import django.urls
import django.urls.base
# Example Node document:
#
# <?xml version="1.0" ?>
# <ns1:node replicate="false" state="up" synchronize="true" type="mn"
# xmlns:ns1="http://ns.dataone.org/service/types/v1">
# <identifier>urn:node:mnDevGMN</identifier>
# <name>GMN Dev</name>
# <description>Test Member Node operated by DataONE</description>
# <baseURL>https://localhost/mn</baseURL>
# <services>
# <service available="true" name="MNCore" version="v1"/>
# <service available="true" name="MNRead" version="v1"/>
# <service available="true" name="MNAuthorization" version="v1"/>
# <service available="true" name="MNStorage" version="v1"/>
# <service available="true" name="MNReplication" version="v1"/>
# </services>
# <synchronization>
# <schedule hour="*" mday="*" min="0/3" mon="*" sec="0" wday="?" year="*"/>
# </synchronization>
# <subject>CN=urn:node:mnDevGMN,DC=dataone,DC=org</subject>
# <contactSubject>CN=MyName,O=Google,C=US,DC=cilogon,DC=org</contactSubject>
# </ns1:node>
# App
def get_pretty_xml(api_major_int=2):
return d1_common.xml.serialize_for_transport(
_get_pyxb(api_major_int), xslt_url=django.urls.base.reverse("home_xslt")
)
def get_xml(api_major_int):
return d1_common.xml.serialize_for_transport(
_get_pyxb(api_major_int),
pretty=False,
xslt_url=django.urls.base.reverse("home_xslt"),
)
def get_pyxb(api_major_int=2):
return _get_pyxb(api_major_int)
# noinspection PyTypeChecker
def _get_pyxb(api_major_int):
if api_major_int == 1:
pyxb_binding = d1_common.type_conversions.get_pyxb_binding_by_api_version(1, 1)
elif api_major_int == 2:
pyxb_binding = d1_common.type_conversions.get_pyxb_binding_by_api_version(2, 0)
else:
assert False
node_pyxb = pyxb_binding.node()
node_pyxb.identifier = django.conf.settings.NODE_IDENTIFIER
node_pyxb.name = django.conf.settings.NODE_NAME
node_pyxb.description = django.conf.settings.NODE_DESCRIPTION
node_pyxb.baseURL = django.conf.settings.NODE_BASEURL
node_pyxb.replicate = django.conf.settings.NODE_REPLICATE
node_pyxb.synchronize = django.conf.settings.NODE_SYNCHRONIZE
node_pyxb.type = "mn"
node_pyxb.state = django.conf.settings.NODE_STATE
node_pyxb.subject.append(pyxb_binding.Subject(django.conf.settings.NODE_SUBJECT))
node_pyxb.contactSubject.append(
pyxb_binding.Subject(django.conf.settings.NODE_CONTACT_SUBJECT)
)
node_pyxb.services = _create_service_list_pyxb(pyxb_binding)
if django.conf.settings.NODE_SYNCHRONIZE:
node_pyxb.synchronization = _create_synchronization_policy_pyxb(pyxb_binding)
if django.conf.settings.NODE_REPLICATE:
node_pyxb.nodeReplicationPolicy = _create_replication_policy_pyxb(pyxb_binding)
return node_pyxb
def _create_synchronization_policy_pyxb(pyxb_binding):
schedule_pyxb = pyxb_binding.Schedule()
schedule_pyxb.year = django.conf.settings.NODE_SYNC_SCHEDULE_YEAR
schedule_pyxb.mon = django.conf.settings.NODE_SYNC_SCHEDULE_MONTH
schedule_pyxb.wday = django.conf.settings.NODE_SYNC_SCHEDULE_WEEKDAY
schedule_pyxb.mday = django.conf.settings.NODE_SYNC_SCHEDULE_MONTHDAY
schedule_pyxb.hour = django.conf.settings.NODE_SYNC_SCHEDULE_HOUR
schedule_pyxb.min = django.conf.settings.NODE_SYNC_SCHEDULE_MINUTE
schedule_pyxb.sec = django.conf.settings.NODE_SYNC_SCHEDULE_SECOND
sync = pyxb_binding.Synchronization()
sync.schedule = schedule_pyxb
return sync
def _create_replication_policy_pyxb(pyxb_binding):
replication_pyxb = pyxb_binding.nodeReplicationPolicy()
if django.conf.settings.REPLICATION_MAXOBJECTSIZE != -1:
replication_pyxb.maxObjectSize = django.conf.settings.REPLICATION_MAXOBJECTSIZE
if django.conf.settings.REPLICATION_SPACEALLOCATED != -1:
replication_pyxb.spaceAllocated = (
django.conf.settings.REPLICATION_SPACEALLOCATED
)
for allowed_node in django.conf.settings.REPLICATION_ALLOWEDNODE:
replication_pyxb.allowedNode.append(pyxb_binding.NodeReference(allowed_node))
for allowed_object in django.conf.settings.REPLICATION_ALLOWEDOBJECTFORMAT:
replication_pyxb.allowedObjectFormat.append(
pyxb_binding.ObjectFormatIdentifier(allowed_object)
)
return replication_pyxb
def _create_service_list_pyxb(pyxb_binding):
# Both v1/node and v2/node list v2 services
service_list_pyxb = pyxb_binding.services()
service_list_pyxb.extend(_create_service_list_for_version_pyxb(pyxb_binding, "v1"))
service_list_pyxb.extend(_create_service_list_for_version_pyxb(pyxb_binding, "v2"))
return service_list_pyxb
def _create_service_list_for_version_pyxb(pyxb_binding, service_version):
return [
_create_service_pyxb(pyxb_binding, "MNCore", service_version),
_create_service_pyxb(pyxb_binding, "MNRead", service_version),
_create_service_pyxb(pyxb_binding, "MNAuthorization", service_version),
_create_service_pyxb(pyxb_binding, "MNStorage", service_version),
_create_service_pyxb(pyxb_binding, "MNReplication", service_version),
]
def _create_service_pyxb(pyxb_binding, service_name, service_version):
service_pyxb = pyxb_binding.Service()
service_pyxb.name = pyxb_binding.ServiceName(service_name)
service_pyxb.version = pyxb_binding.ServiceVersion(service_version)
service_pyxb.available = True
return service_pyxb
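# Usage sketch (assumes the Django settings for GMN are configured): render
# the Node document shown in the example comment above.
#
#     xml_str = get_pretty_xml(api_major_int=2)
#     print(xml_str)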
|
# Foremast - Pipeline Tooling
#
# Copyright 2018 Gogo, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test utils."""
from unittest import mock
import pytest
from foremast.exceptions import *
from foremast.utils import *
@mock.patch('foremast.utils.banners.LOG')
def test_utils_banner(mock_log):
banner('test', border='+', width=10)
mock_log.info.assert_called_with('+' * 10)
def test_utils_deep_chain_map():
first = {
'key1': {
'subkey1': 1,
},
}
second = {
'key1': {
'subkey2': 2,
},
}
result = {
'key1': {
'subkey1': 1,
'subkey2': 2,
}
}
assert DeepChainMap(first, second) == result
with pytest.raises(KeyError):
assert DeepChainMap(first, second)['key2'] == result
def test_utils_pipeline_check_managed():
assert check_managed_pipeline('app [onetime]', 'app') == 'onetime'
assert check_managed_pipeline('app [us-east-1]', 'app') == 'us-east-1'
bad_names = [
'something',
'app',
'app [us-east-1',
'app us-east-1]',
'app [us-east-1',
'app us-east-1]',
'app name',
'app2 [us-east-1]',
'app name [us-east-1]',
]
for name in bad_names:
with pytest.raises(ValueError):
check_managed_pipeline(name=name, app_name='app')
@mock.patch('foremast.utils.pipelines.gate_request')
def test_utils_pipeline_get_all_pipelines(mock_gate_request):
mock_gate_request.return_value.json.return_value = {}
result = get_all_pipelines(app='app')
assert result == {}
@mock.patch('foremast.utils.pipelines.get_all_pipelines')
def test_utils_pipeline_get_pipeline_id(mock_get_pipelines):
"""Verify Pipeline ID response."""
data = [
{
'name': 'app',
'id': 100
},
]
mock_get_pipelines.return_value = data
result = get_pipeline_id(app='test', name='app')
mock_get_pipelines.assert_called_once_with(app='test')
    assert result == 100
result = get_pipeline_id(app='embarrassingly', name='badapp')
mock_get_pipelines.assert_called_with(app='embarrassingly')
    assert result is None
def test_utils_generate_packer_filename():
a = generate_packer_filename('aws', 'us-east-1', 'chroot')
assert a == 'aws_us-east-1_chroot.json'
@mock.patch('foremast.utils.elb.gate_request')
def test_utils_find_elb(gate_request_mock):
results = [{'account': 'dev', 'region': 'us-east-1', 'dnsname': 'appdns'}]
gate_request_mock.return_value.json.return_value = results
a = find_elb('app', 'dev', 'us-east-1')
assert a == 'appdns'
with pytest.raises(SpinnakerElbNotFound):
# we already filter by app, so sending incorrect env/region combo
# will trigger the error
find_elb('app', 'devbad', 'us-east-1')
@mock.patch('foremast.utils.slack.slacker')
def test_utils_post_slack_message(mock_slack):
post_slack_message('test', '#test')
    assert mock_slack.called
@mock.patch('foremast.utils.apps.gate_request')
def test_utils_apps_get_details(mock_gate_request):
data = {'attributes': {'repoProjectKey': 'group', 'repoSlug': 'repo1'}}
mock_gate_request.return_value.json.return_value = data
result = get_details(app='repo1group', env='dev')
assert result.app_name() == 'repo1group'
with pytest.raises(SpinnakerAppNotFound):
mock_gate_request.return_value.ok = False
result = get_details(app='repo1group', env='dev')
assert result.app_name() == 'repo1group'
@mock.patch('foremast.utils.apps.gate_request')
def test_utils_apps_get_all_apps(mock_gate_request):
data = []
mock_gate_request.return_value.json.return_value = data
result = get_all_apps()
assert result == []
with pytest.raises(AssertionError):
mock_gate_request.return_value.ok = False
result = get_all_apps()
@mock.patch('foremast.utils.dns.boto3.Session')
@mock.patch('foremast.utils.dns.DOMAIN', 'test')
def test_utils_dns_get_zone_ids(mock_boto3):
data = {
'HostedZones': [
{
'Name': 'internal.example.com',
'Id': 100,
'Config': {
'PrivateZone': True
}
},
{
'Name': 'external.example.com',
'Id': 101,
'Config': {
'PrivateZone': False
}
},
]
}
data_external = {
'HostedZones': [
{
'Name': 'internal.example.com',
'Id': 100,
'Config': {
'PrivateZone': False
}
},
{
'Name': 'external.example.com',
'Id': 101,
'Config': {
'PrivateZone': False
}
},
]
}
mock_boto3.return_value.client.return_value.list_hosted_zones_by_name.return_value = data
# default case
result = get_dns_zone_ids()
assert result == [100]
# all zones
result = get_dns_zone_ids(facing='external')
assert result == [100, 101]
# all internal
result = get_dns_zone_ids(facing='internal')
assert result == [100]
    # unknown param - mixed zones
result = get_dns_zone_ids(facing='wrong_param')
assert result == [100]
# no internal zones
mock_boto3.return_value.client.return_value.list_hosted_zones_by_name.return_value = data_external
result = get_dns_zone_ids(facing='internal')
assert result == []
# unknown param - no internal zones
result = get_dns_zone_ids(facing='wrong_param')
assert result == []
@mock.patch('foremast.utils.dns.boto3.Session')
def test_find_existing_record(mock_session):
"""Check that a record is found correctly"""
dns_values = {'env': 'dev', 'zone_id': '/hostedzone/TESTTESTS279', 'dns_name': 'test.example.com'}
test_records = [{
'ResourceRecordSets': [{
'Name': 'test.example.com.',
'Type': 'CNAME'
}]
}, {
'ResourceRecordSets': [{
'Name': 'test.example.com.',
'Failover': 'PRIMARY'
}]
}, {
'ResourceRecordSets': [{
'Name': 'test.example.com.',
'Type': 'A'
}]
}]
client = mock_session.return_value.client.return_value
client.get_paginator.return_value.paginate.return_value = test_records
assert find_existing_record(
dns_values['env'], dns_values['zone_id'], dns_values['dns_name'], check_key='Type', check_value='CNAME') == {
'Name': 'test.example.com.',
'Type': 'CNAME'
}
assert find_existing_record(
dns_values['env'], dns_values['zone_id'], dns_values['dns_name'], check_key='Failover',
check_value='PRIMARY') == {
'Name': 'test.example.com.',
'Failover': 'PRIMARY'
}
assert find_existing_record(
        dns_values['env'], dns_values['zone_id'], 'bad.example.com', check_key='Type', check_value='CNAME') is None
@mock.patch('foremast.utils.security_group.gate_request')
@mock.patch('foremast.utils.security_group.get_vpc_id')
def test_utils_sg_get_security_group_id(mock_vpc_id, mock_gate_request):
data = {'id': 100}
mock_gate_request.return_value.json.return_value = data
# default - happy path
result = get_security_group_id()
assert result == 100
# security group not found
with pytest.raises(SpinnakerSecurityGroupError):
mock_gate_request.return_value.json.return_value = {}
result = get_security_group_id()
# error getting details
with pytest.raises(AssertionError):
mock_gate_request.return_value.ok = False
result = get_security_group_id()
@mock.patch('foremast.utils.vpc.gate_request')
def test_utils_vpc_get_vpc_id(mock_gate_request):
data = [
{
'id': 100,
'name': 'vpc',
'account': 'dev',
'region': 'us-east-1'
},
]
mock_gate_request.return_value.json.return_value = data
# default - happy path
result = get_vpc_id(account='dev', region='us-east-1')
assert result == 100
# vpc not found
with pytest.raises(SpinnakerVPCIDNotFound):
result = get_vpc_id(account='dev', region='us-west-2')
assert result == 100
# error getting details
with pytest.raises(SpinnakerVPCNotFound):
mock_gate_request.return_value.ok = False
result = get_vpc_id(account='dev', region='us-east-1')
SUBNET_DATA = [
{
'vpcId': 100,
'account': 'dev',
'id': 1,
'purpose': 'internal',
'region': 'us-east-1',
'target': 'ec2',
'availabilityZone': []
},
{
'vpcId': 101,
'account': 'dev',
'id': 2,
'purpose': 'other',
'region': 'us-west-2',
'target': 'ec2',
'availabilityZone': ['us-west-2a', 'us-west-2b']
},
]
@mock.patch('foremast.utils.subnets.gate_request')
def test_utils_subnets_get_subnets(mock_gate_request):
"""Find one subnet."""
mock_gate_request.return_value.json.return_value = SUBNET_DATA
# default - happy path
result = get_subnets(env='dev', region='us-east-1')
assert result == {
'subnet_ids': {
'us-east-1': [SUBNET_DATA[0]['id']],
},
'us-east-1': [[]],
}
@mock.patch('foremast.utils.subnets.gate_request')
def test_utils_subnets_get_subnets_multiple_az(mock_gate_request):
"""Find multiple Availability Zones."""
mock_gate_request.return_value.json.return_value = SUBNET_DATA
# default - happy path w/multiple az
result = get_subnets(env='dev', region='')
assert result == {'dev': {'us-west-2': [['us-west-2a', 'us-west-2b']], 'us-east-1': [[]]}}
@mock.patch('foremast.utils.subnets.gate_request')
def test_utils_subnets_get_subnets_subnet_not_found(mock_gate_request):
"""Trigger SpinnakerSubnetError when no subnets found."""
mock_gate_request.return_value.json.return_value = SUBNET_DATA
# subnet not found
with pytest.raises(SpinnakerSubnetError):
result = get_subnets(env='dev', region='us-west-1')
assert result == {'us-west-1': [[]]}
@mock.patch('foremast.utils.subnets.gate_request')
def test_utils_subnets_get_subnets_api_error(mock_gate_request):
"""Trigger SpinnakerTimeout when API has error."""
mock_gate_request.return_value.json.return_value = SUBNET_DATA
# error getting details
with pytest.raises(SpinnakerTimeout):
mock_gate_request.return_value.ok = False
result = get_subnets()
@mock.patch('foremast.utils.tasks.check_task')
@mock.patch('foremast.utils.tasks.post_task')
@mock.patch('foremast.utils.tasks.TASK_TIMEOUTS')
def test_utils_timeout_per_env(mock_timeouts, mock_post_task, mock_check_task):
    """Verify a custom per-environment timeout propagates to check_task."""
    # mock.patch decorators apply bottom-up, so the first parameter is the
    # innermost patch (TASK_TIMEOUTS) and the last one is check_task.
    mock_post_task.return_value = 5
    task_data = {"job": [{"credentials": "dev", "type": "fake_task"}]}
    mock_timeouts.side_effect = {"dev": {"fake_task": "240"}}
    tasks.wait_for_task(task_data)
    # The original assertions used Mock.called_with(), which is not a real
    # assertion helper and always evaluates truthy; assert on .called instead.
    assert mock_check_task.called
@mock.patch('foremast.utils.tasks.check_task')
@mock.patch('foremast.utils.tasks.post_task')
@mock.patch('foremast.utils.tasks.TASK_TIMEOUTS')
def test_utils_default_timeout(mock_timeouts, mock_post_task, mock_check_task):
    """The default timeout is applied when a task type is missing from the
    timeout data."""
    mock_post_task.return_value = 5
    task_data = {"job": [{"credentials": "dev", "type": "really_fake_task"}]}
    mock_timeouts.side_effect = {"dev": {"fake_task": "240"}}
    tasks.wait_for_task(task_data)
    # See the note above: assert on .called rather than the no-op called_with().
    assert mock_check_task.called
|
from esphomeyaml.const import CONF_INVERTED, CONF_MODE, CONF_NUMBER, CONF_PCF8574, \
CONF_SETUP_PRIORITY
from esphomeyaml.core import CORE, EsphomeyamlError
from esphomeyaml.cpp_generator import IntLiteral, RawExpression
from esphomeyaml.cpp_types import GPIOInputPin, GPIOOutputPin
def generic_gpio_pin_expression_(conf, mock_obj, default_mode):
if conf is None:
return
number = conf[CONF_NUMBER]
inverted = conf.get(CONF_INVERTED)
if CONF_PCF8574 in conf:
from esphomeyaml.components import pcf8574
for hub in CORE.get_variable(conf[CONF_PCF8574]):
yield None
if default_mode == u'INPUT':
            mode = pcf8574.PCF8574_GPIO_MODES[conf.get(CONF_MODE, u'INPUT')]
yield hub.make_input_pin(number, mode, inverted)
return
elif default_mode == u'OUTPUT':
yield hub.make_output_pin(number, inverted)
return
else:
raise EsphomeyamlError(u"Unknown default mode {}".format(default_mode))
if len(conf) == 1:
yield IntLiteral(number)
return
mode = RawExpression(conf.get(CONF_MODE, default_mode))
yield mock_obj(number, mode, inverted)
def gpio_output_pin_expression(conf):
for exp in generic_gpio_pin_expression_(conf, GPIOOutputPin, 'OUTPUT'):
yield None
yield exp
def gpio_input_pin_expression(conf):
for exp in generic_gpio_pin_expression_(conf, GPIOInputPin, 'INPUT'):
yield None
yield exp
def setup_component(obj, config):
if CONF_SETUP_PRIORITY in config:
CORE.add(obj.set_setup_priority(config[CONF_SETUP_PRIORITY]))
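# Note on the generator protocol above: these helpers appear to follow
# esphomeyaml's coroutine convention, where each 'yield None' suspends the
# expression builder until CORE.get_variable() can supply the referenced
# component, and the final yielded value is the generated pin expression.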
|
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 7, transform = "Anscombe", sigma = 0.0, exog_count = 20, ar_order = 12);
|
from psutil import process_iter, virtual_memory, cpu_percent, Process # NOT STDLIB
import source.pyprompt_common_func as pyprompt_common_func
def _append_(All, pID, CTime, UsedCPU, UsedMem, UsedMemPer, i):
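    # Append one row of process data to the table columns and return the
    # current widest entry of each column (used later for aligned printing).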
A_, p_, ct_, uC_, uM_, uMP_ = 0, 0, 0, 0, 0, 0
if i.pid != 0:
All.append(i.name())
A_ = pyprompt_common_func.getLongestWord(All)
pID.append(str(i.pid))
p_ = pyprompt_common_func.getLongestWord(pID)
CTime.append(pyprompt_common_func.formatTime(i.create_time()))
ct_ = pyprompt_common_func.getLongestWord(CTime)
UsedCPU.append(str(i.cpu_percent()))
uC_ = pyprompt_common_func.getLongestWord(UsedCPU)
UsedMem.append(str(i.memory_info().rss))
uM_ = pyprompt_common_func.getLongestWord(UsedMem)
UsedMemPer.append(
str(round(i.memory_info().rss / virtual_memory().total * 100, 4))
)
uMP_ = pyprompt_common_func.getLongestWord(UsedMemPer)
return A_, p_, ct_, uC_, uM_, uMP_
def getMemInfo():
"""
Get information about the use of active memory on the level of the entire system
Examples:
root> meminfo
ACTIVE MEMORY: Used/Available: 5451214848(32.426745%)/11359645696(67.573255%) (Total: 16810860544)
"""
VM = virtual_memory()
pcAllMem, pcUsedMem, pcAvaMem = VM.total, VM.used, VM.available
print(
"ACTIVE MEMORY: Used/Available: %i(%f%%)/%i(%f%%) (Total: %i)"
% (
pcUsedMem,
pcUsedMem / pcAllMem * 100,
pcAvaMem,
pcAvaMem / pcAllMem * 100,
pcAllMem,
)
)
def getCPUInfo():
"""
Get information about the use of the CPU on the level of the entire system
Examples:
root> cpuinfo
CPU: Used/Available: 34.400000/65.600000
"""
CPUUsed = cpu_percent()
CPUAva = 100 - CPUUsed
print("CPU: Used/Available: %f/%f" % (CPUUsed, CPUAva))
def getActiveProc(args):
"""
Iterate over all running processes
Arguments:
-s Search through all running processes
-e Specify if the search argument must be explicit
-t Return only the process table, without the additional processes
Examples:
root> proc -s host.exe -e -t
Name | pID | Time Created | CPU | Memory | Memory %
        svchost.exe | 240 | 2020-06-26 16:30:01 | 0.0 | 7868416 | 0.0468
        svchost.exe | 428 | 2020-06-24 08:18:17 | 0.0 | 12623872 | 0.0751
        svchost.exe | 868 | 2020-06-24 08:18:17 | 0.0 | 4444160 | 0.0264
        svchost.exe | 964 | 2020-06-24 08:18:16 | 0.0 | 991232 | 0.0059
fontdrvhost.exe | 984 | 2020-06-24 08:18:16 | 0.0 | 380928 | 0.0023
"""
finding_name = []
IsExplicit = False
IsOnlyTable = False
argCount = 0
for arg in args:
argCount += 1
if arg.lower() == "-s":
try:
finding_name.append(args[argCount])
except Exception:
return "Argument corresponding to -s not specified"
if arg.lower() == "-e":
IsExplicit = True
if arg.lower() == "-t":
IsOnlyTable = True
if not IsOnlyTable:
getMemInfo()
getCPUInfo()
All, pID, CTime, UsedCPU, UsedMem, UsedMemPer = (
["Name"],
["pID"],
["Time Created"],
["CPU"],
["Memory"],
["Memory %"],
)
out = ""
try:
for i in process_iter():
IsOkay = True
if len(finding_name) > 0:
for N in finding_name:
if N not in i.name() and IsExplicit:
IsOkay = False
if N.lower() not in i.name().lower() and not IsExplicit:
IsOkay = False
if IsOkay:
A_, p_, ct_, uC_, uM_, uMP_ = _append_(
All, pID, CTime, UsedCPU, UsedMem, UsedMemPer, i
)
else:
A_, p_, ct_, uC_, uM_, uMP_ = _append_(
All, pID, CTime, UsedCPU, UsedMem, UsedMemPer, i
)
if len(All) != 1:
for i in range(len(All)):
out += (
f"%-{A_}s | %-{p_}s | %-{ct_}s | %-{uC_}s | %-{uM_}s | %-{uMP_}s\n"
% (All[i], pID[i], CTime[i], UsedCPU[i], UsedMem[i], UsedMemPer[i])
)
except Exception:
out = "Could not get running processes"
return out
def killProcess(args):
"""
Kill a process or list of processes by their PID or name
Inserting an integer searches for process PID's
Inserting a non-integer type searches for the process name
Arguments:
-e Specify if the command argument must be explicit
Options:
Y Yes
N No
YA Yes All
NA No All
Examples:
root> kill python
8 processes found.
python.exe, 4036
python.exe, 4604
python.exe, 11112
python.exe, 13836
python.exe, 15324
python.exe, 17196
python.exe, 27328
python.exe, 27700
Do you wish to kill python.exe (PID: 4036)? [Y/N/YA/NA]
...
root> kill 7412
7412 corresponds to python.exe. Do you wish to kill this process? [Y/N]
"""
IsExplicit = False
argCount = 0
for arg in args:
argCount += 1
if arg.lower() == "-e":
IsExplicit = True
args.remove(arg)
for proc in args:
procList = []
try:
proc = int(proc)
try:
p = Process(proc)
if (
input(
"%i corresponds to %s. Do you wish to kill this process? [Y/N] "
% (proc, p.name())
)
.lower()
.strip()
== "y"
):
try:
p.kill()
print("Successfully ended the given process")
except Exception:
print("Could not kill process")
except Exception:
print("Could not find a process corresponding to the PID %i" % proc)
except Exception:
for i in process_iter():
if proc.lower() in i.name().lower() and not IsExplicit:
procList.append([i.name(), i.pid])
if proc in i.name() and IsExplicit:
procList.append([i.name(), i.pid])
if len(procList) > 0:
print("%i processes found." % len(procList))
for i in procList:
print(f"{i[0]}, {i[1]}")
while len(procList):
i = procList[0]
inp = (
input(
"Do you wish to kill %s (PID: %i)? [Y/N/YA/NA] " % (i[0], i[1])
)
.lower()
.strip()
)
if inp == "y":
try:
Process(i[1]).kill()
print("Successfully ended the given process")
except Exception:
print("Could not kill process")
elif inp == "n":
print("Skipping current process")
elif inp == "ya":
for j in procList:
try:
Process(j[1]).kill()
print("Successfully ended: %s (%i)" % (j[0], j[1]))
except Exception:
print("Could not end %s (%i)" % (j[0], j[1]))
return
elif inp == "na":
return "Stopping..."
else:
continue
procList = procList[1:]
else:
print("No processes correspoding to: %s" % proc)
|
"""
Manage Chocolatey package installs
.. versionadded:: 2016.3.0
.. note::
Chocolatey pulls data from the Chocolatey internet database to determine
current versions, find available versions, etc. This is normally a slow
operation and may be optimized by specifying a local, smaller chocolatey
repo.
"""
import salt.utils.data
import salt.utils.versions
from salt.exceptions import SaltInvocationError
def __virtual__():
"""
Load only if chocolatey is loaded
"""
if "chocolatey.install" in __salt__:
return "chocolatey"
return (False, "chocolatey module could not be loaded")
def installed(
name,
version=None,
source=None,
force=False,
pre_versions=False,
install_args=None,
override_args=False,
force_x86=False,
package_args=None,
allow_multiple=False,
execution_timeout=None,
):
"""
Installs a package if not already installed
Args:
name (str):
The name of the package to be installed. Required.
version (str):
Install a specific version of the package. Defaults to latest
version. If the version is different to the one installed then the
specified version will be installed. Default is None.
source (str):
Chocolatey repository (directory, share or remote URL, feed).
Defaults to the official Chocolatey feed. Default is None.
force (bool):
Reinstall the current version of an existing package. Do not use
with ``allow_multiple``. Default is False.
pre_versions (bool):
Include pre-release packages. Default is False.
install_args (str):
Install arguments you want to pass to the installation process, i.e
product key or feature list. Default is None.
override_args (bool):
Set to True if you want to override the original install arguments
(for the native installer) in the package and use your own. When
this is set to False install_args will be appended to the end of the
default arguments. Default is False.
force_x86 (bool):
Force x86 (32bit) installation on 64 bit systems. Default is False.
package_args (str):
Arguments you want to pass to the package. Default is None.
allow_multiple (bool):
            Allow multiple versions of the package to be installed. Do not use
with ``force``. Does not work with all packages. Default is False.
.. versionadded:: 2017.7.0
execution_timeout (str):
Chocolatey execution timeout value you want to pass to the
installation process. Default is None.
.. code-block:: yaml
Installsomepackage:
chocolatey.installed:
- name: packagename
- version: '12.04'
- source: 'mychocolatey/source'
- force: True
"""
if force and allow_multiple:
raise SaltInvocationError(
"Cannot use 'force' in conjunction with 'allow_multiple'"
)
ret = {"name": name, "result": True, "changes": {}, "comment": ""}
# Get list of currently installed packages
pre_install = __salt__["chocolatey.list"](local_only=True)
# Determine action
# Package not installed
if name.lower() not in [package.lower() for package in pre_install.keys()]:
if version:
ret["changes"] = {name: "Version {} will be installed".format(version)}
else:
ret["changes"] = {name: "Latest version will be installed"}
# Package installed
else:
version_info = __salt__["chocolatey.version"](
name=name, check_remote=True, source=source
)
full_name = name
for pkg in version_info:
if name.lower() == pkg.lower():
full_name = pkg
installed_version = version_info[full_name]["installed"][0]
if version:
if salt.utils.versions.compare(
ver1=installed_version, oper="==", ver2=version
):
if force:
ret["changes"] = {
name: "Version {} will be reinstalled".format(version)
}
ret["comment"] = "Reinstall {} {}".format(full_name, version)
else:
ret["comment"] = "{} {} is already installed".format(name, version)
if __opts__["test"]:
ret["result"] = None
return ret
else:
if allow_multiple:
ret["changes"] = {
name: (
"Version {} will be installed side by side with "
"Version {} if supported".format(version, installed_version)
)
}
ret["comment"] = "Install {0} {1} side-by-side with {0} {2}".format(
full_name, version, installed_version
)
else:
ret["changes"] = {
name: "Version {} will be installed over Version {}".format(
version, installed_version
)
}
ret["comment"] = "Install {0} {1} over {0} {2}".format(
full_name, version, installed_version
)
force = True
else:
version = installed_version
if force:
ret["changes"] = {
name: "Version {} will be reinstalled".format(version)
}
ret["comment"] = "Reinstall {} {}".format(full_name, version)
else:
ret["comment"] = "{} {} is already installed".format(name, version)
if __opts__["test"]:
ret["result"] = None
return ret
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "The installation was tested"
return ret
# Install the package
result = __salt__["chocolatey.install"](
name=name,
version=version,
source=source,
force=force,
pre_versions=pre_versions,
install_args=install_args,
override_args=override_args,
force_x86=force_x86,
package_args=package_args,
allow_multiple=allow_multiple,
execution_timeout=execution_timeout,
)
if "Running chocolatey failed" not in result:
ret["result"] = True
else:
ret["result"] = False
if not ret["result"]:
ret["comment"] = "Failed to install the package {}".format(name)
# Get list of installed packages after 'chocolatey.install'
post_install = __salt__["chocolatey.list"](local_only=True)
ret["changes"] = salt.utils.data.compare_dicts(pre_install, post_install)
return ret
def uninstalled(name, version=None, uninstall_args=None, override_args=False):
"""
Uninstalls a package
name
The name of the package to be uninstalled
version
Uninstalls a specific version of the package. Defaults to latest
version installed.
uninstall_args
A list of uninstall arguments you want to pass to the uninstallation
        process, i.e. product key or feature list
override_args
        Set to true if you want to override the original uninstall arguments
        (for the native uninstaller) in the package and use your own.
When this is set to False uninstall_args will be appended to the end of
the default arguments
.. code-block:: yaml
Removemypackage:
chocolatey.uninstalled:
- name: mypackage
- version: '21.5'
"""
ret = {"name": name, "result": True, "changes": {}, "comment": ""}
# Get list of currently installed packages
pre_uninstall = __salt__["chocolatey.list"](local_only=True)
# Determine if package is installed
if name.lower() in [package.lower() for package in pre_uninstall.keys()]:
try:
ret["changes"] = {
name: "{} version {} will be removed".format(
name, pre_uninstall[name][0]
)
}
except KeyError:
ret["changes"] = {name: "{} will be removed".format(name)}
else:
ret["comment"] = "The package {} is not installed".format(name)
return ret
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "The uninstall was tested"
return ret
# Uninstall the package
result = __salt__["chocolatey.uninstall"](
name, version, uninstall_args, override_args
)
if "Running chocolatey failed" not in result:
ret["result"] = True
else:
ret["result"] = False
if not ret["result"]:
ret["comment"] = "Failed to uninstall the package {}".format(name)
# Get list of installed packages after 'chocolatey.uninstall'
post_uninstall = __salt__["chocolatey.list"](local_only=True)
ret["changes"] = salt.utils.data.compare_dicts(pre_uninstall, post_uninstall)
return ret
def upgraded(
name,
version=None,
source=None,
force=False,
pre_versions=False,
install_args=None,
override_args=False,
force_x86=False,
package_args=None,
):
"""
Upgrades a package. Will install the package if not installed.
.. versionadded:: 2018.3.0
Args:
name (str):
The name of the package to be installed. Required.
version (str):
Install a specific version of the package. Defaults to latest
version. If the version is greater than the one installed then the
specified version will be installed. Default is ``None``.
source (str):
Chocolatey repository (directory, share or remote URL, feed).
Defaults to the official Chocolatey feed. Default is ``None``.
force (bool):
``True`` will reinstall an existing package with the same version.
Default is ``False``.
pre_versions (bool):
            ``True`` will include pre-release packages. Default is ``False``.
install_args (str):
Install arguments you want to pass to the installation process, i.e
product key or feature list. Default is ``None``.
override_args (bool):
``True`` will override the original install arguments (for the
native installer) in the package and use those specified in
``install_args``. ``False`` will append install_args to the end of
the default arguments. Default is ``False``.
force_x86 (bool):
``True`` forces 32bit installation on 64 bit systems. Default is
``False``.
package_args (str):
Arguments you want to pass to the package. Default is ``None``.
.. code-block:: yaml
upgrade_some_package:
chocolatey.upgraded:
- name: packagename
- version: '12.04'
- source: 'mychocolatey/source'
"""
ret = {"name": name, "result": True, "changes": {}, "comment": ""}
# Get list of currently installed packages
pre_install = __salt__["chocolatey.list"](local_only=True)
# Determine if there are changes
# Package not installed
if name.lower() not in [package.lower() for package in pre_install.keys()]:
if version:
ret["changes"][name] = "Version {} will be installed".format(version)
ret["comment"] = "Install version {}".format(version)
else:
ret["changes"][name] = "Latest version will be installed"
ret["comment"] = "Install latest version"
# Package installed
else:
version_info = __salt__["chocolatey.version"](name, check_remote=True)
# Get the actual full name out of version_info
full_name = name
for pkg in version_info:
if name.lower() == pkg.lower():
full_name = pkg
installed_version = version_info[full_name]["installed"][0]
# If version is not passed, use available... if available is available
if not version:
if "available" in version_info[full_name]:
version = version_info[full_name]["available"][0]
if version:
# If installed version and new version are the same
if salt.utils.versions.compare(
ver1=installed_version, oper="==", ver2=version
):
if force:
ret["changes"][name] = "Version {} will be reinstalled".format(
version
)
ret["comment"] = "Reinstall {} {}".format(full_name, version)
else:
ret["comment"] = "{} {} is already installed".format(
name, installed_version
)
else:
# If installed version is older than new version
if salt.utils.versions.compare(
ver1=installed_version, oper="<", ver2=version
):
ret["changes"][
name
] = "Version {} will be upgraded to Version {}".format(
installed_version, version
)
ret["comment"] = "Upgrade {} {} to {}".format(
full_name, installed_version, version
)
# If installed version is newer than new version
else:
ret["comment"] = "{} {} (newer) is already installed".format(
name, installed_version
)
# Catch all for a condition where version is not passed and there is no
# available version
else:
ret["comment"] = "No version found to install"
# Return if there are no changes to be made
if not ret["changes"]:
return ret
# Return if running in test mode
if __opts__["test"]:
ret["result"] = None
return ret
# Install the package
result = __salt__["chocolatey.upgrade"](
name=name,
version=version,
source=source,
force=force,
pre_versions=pre_versions,
install_args=install_args,
override_args=override_args,
force_x86=force_x86,
package_args=package_args,
)
if "Running chocolatey failed" not in result:
ret["comment"] = "Package {} upgraded successfully".format(name)
ret["result"] = True
else:
ret["comment"] = "Failed to upgrade the package {}".format(name)
ret["result"] = False
# Get list of installed packages after 'chocolatey.install'
post_install = __salt__["chocolatey.list"](local_only=True)
# Prior to this, ret['changes'] would have contained expected changes,
# replace them with the actual changes now that we have completed the
# installation.
ret["changes"] = salt.utils.data.compare_dicts(pre_install, post_install)
return ret
def source_present(name, source_location, username=None, password=None, force=False):
"""
Instructs Chocolatey to add a source if not already present.
name
The name of the source to be added as a chocolatey repository.
    source_location
        Location of the source you want to work with.
username
Provide username for chocolatey sources that need authentication
credentials.
password
Provide password for chocolatey sources that need authentication
credentials.
force
        Salt will not modify an existing repository with the same name. Set this
        option to true to update an existing repository.
CLI Example:
.. code-block:: yaml
add_some_source:
chocolatey.source_present:
- name: reponame
            - source_location: https://repo.example.com
- username: myuser
- password: mypassword
"""
ret = {"name": name, "result": True, "changes": {}, "comment": ""}
# Get list of currently present sources
pre_install = __salt__["chocolatey.list_sources"]()
# Determine action
# Source with same name not present
if name.lower() not in [present.lower() for present in pre_install.keys()]:
ret["comment"] = "Add the source {}".format(name)
# Source with same name already present
else:
if force:
ret["comment"] = "Update the source {}".format(name)
else:
ret["comment"] = "A source with the name {} is already present".format(name)
if __opts__["test"]:
ret["result"] = None
return ret
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "The installation was tested"
return ret
# Add the source
result = __salt__["chocolatey.add_source"](
name=name, source_location=source_location, username=username, password=password
)
if "Running chocolatey failed" not in result:
ret["result"] = True
ret["comment"] = "Source {} added successfully".format(name)
else:
ret["result"] = False
ret["comment"] = "Failed to add the source {}".format(name)
# Get list of present sources after 'chocolatey.add_source'
post_install = __salt__["chocolatey.list_sources"]()
ret["changes"] = salt.utils.data.compare_dicts(pre_install, post_install)
return ret
|
# Algorithm 1, polynomial regression for Q_l + explicit formula + truncation
import numpy as np
from scipy.special import comb, hermitenorm  # comb moved from scipy.misc to scipy.special
from tqdm import tqdm
from joblib import Parallel, delayed
from itertools import product
from sklearn.preprocessing import PolynomialFeatures
import math
def H(k, x):
if k==0:
return 1.0
if k ==1:
return x
if k==2:
return (x**2 - 1)/np.sqrt(2)
h = hermitenorm(k)(x) / np.sqrt(math.factorial(k))
return h
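# H(k, x) above evaluates the normalized probabilists' Hermite polynomial
# He_k(x) / sqrt(k!), orthonormal with respect to the standard Gaussian; the
# k <= 2 branches are just hard-coded shortcuts of the same formula.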
def Hermite_val(k_vec,x_vec):
P = 1.0
d = x_vec.shape[0]
for i in range(d):
P = P * H(k_vec[i],x_vec[i])
return P
def generate_X_poly(train_traj, l, max_deg):
N_train = train_traj.shape[0]
N = train_traj.shape[1]
d = train_traj.shape[2]
poly = PolynomialFeatures(max_deg)
# all_points = train_traj[:, :l+1].reshape(-1,d)
# X = poly.fit_transform(all_points)
X = poly.fit_transform(train_traj[:, l])
return X, poly.powers_
def generate_y_sum(train_traj, l, f_target, n_tilde):
N_train = train_traj.shape[0]
N = train_traj.shape[1]
d = train_traj.shape[2]
y = np.zeros(N_train)
# y = np.zeros(N_train*(l+1))
# for s in range(N_train):
# for i in range(l+1):
# if f_target == "sum":
# y[s*l + i] = train_traj[s, i:i + N - l].sum()/N
# # y[s*l + i] = train_traj[s, i:i + n_tilde].sum()/N
# elif f_target == "sum_squared":
# y[s*l + i] = np.square(train_traj[s, i:i + N - l]).sum()/N
# # y[s*l + i] = np.square(train_traj[s, i:i + n_tilde]).sum()/N
# elif f_target == "sum_4th":
# y[s*l + i] = (train_traj[s, i:i + N - l]**4).sum()/N
# elif f_target == "exp_sum":
# y[s*l + i] = np.exp(train_traj[s, i:i + N - l].sum(axis =1)).sum()/N
# else:
# raise Exception('unrecognized target function')
# return y
for s in range(N_train):
if f_target == "sum":
y[s] = train_traj[s,l:].sum()/N
# y[s] = train_traj[s,l:l+n_tilde].sum()/N
elif f_target == "sum_squared":
y[s] = np.square(train_traj[s, l:]).sum()/N
elif f_target == "sum_4th":
y[s] = (train_traj[s,l:]**4).sum()/N
elif f_target == "exp_sum":
y[s] = np.exp(train_traj[s, l:].sum(axis =1)).sum()/N
else:
raise Exception('unrecognized target function')
return y
def Q_l_fit(train_traj, f_target="sum", max_deg = 1, n_tilde = 100):
N_train = train_traj.shape[0]
N = train_traj.shape[1]
d = train_traj.shape[2]
Betas = np.zeros((N, d+ 1 + (max_deg-1) * int(d*(d+1)/2)))
for l in tqdm(range(N)):
# Linear Regression
        # Betas is sized for degree <= 2 feature sets, so restrict max_deg
        if 0 < max_deg < 3:
X, degrees = generate_X_poly(train_traj, l, max_deg)
else:
raise Exception('max_deg should be 1 or 2')
y = generate_y_sum(train_traj, l, f_target, n_tilde)
beta = np.linalg.inv(X.T @ X) @ X.T @ y
Betas[l] = beta
return Betas, degrees
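# Usage sketch with placeholder data (shapes are hypothetical: N_train
# trajectories, N time steps, dimension d):
#
#     train_traj = np.random.randn(100, 50, 2)
#     Betas, degrees = Q_l_fit(train_traj, f_target="sum", max_deg=2)
#     # Betas[l] holds the regression coefficients for Q_l at step l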
def a_lk(traj, traj_grad, l, k_vec, step, degrees, Betas):
dim = traj.shape[1]
S = 0
x_hat = traj[l-1] - step/2 *traj_grad[l-1]
Small_s = np.zeros(dim)
for ind,deg in enumerate(degrees):
Small_s[:] = 0
for d, i in enumerate(deg):
for t in range (i+1):
for s in range (int(t/2 +1)):
if (k_vec[d] == t - 2*s):
Small_s[d] = Small_s[d] + comb(N=i, k = t, exact = True) * x_hat[0]**(i-t) * math.factorial(t)*1/math.factorial(s)*1 / np.sqrt(math.factorial(t-2*s)) *np.sqrt(step)**t /2**s
else:
pass
S = S + Betas[l,ind] * Small_s.prod()
return S
def M_bias(k_vec, traj, traj_grad, traj_noise, step, degrees, Betas):
N = traj.shape[0]
S = 0
for l in range (N):
s = a_lk(traj,traj_grad,l, k_vec, step, degrees, Betas)* Hermite_val(k_vec,traj_noise[l])
S = S + s
return S
def estimator_bias(k_vec, test_traj, test_traj_grad, test_traj_noise, step, degrees, Betas, n_jobs = -1):
N_test = test_traj.shape[0]
M_results = Parallel(n_jobs = n_jobs)(delayed(M_bias)(k_vec, test_traj[i], test_traj_grad[i], test_traj_noise[i], step, degrees, Betas)for i in range(N_test))
return np.array(M_results).reshape(-1)
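# estimator_bias evaluates M_bias independently for each test trajectory via
# joblib. A usage sketch (hypothetical shapes, consistent with Q_l_fit above):
#
#     bias = estimator_bias(k_vec, test_traj, test_traj_grad,
#                           test_traj_noise, step, degrees, Betas)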
|
"""
ParallelCluster
ParallelCluster API # noqa: E501
The version of the OpenAPI document: 3.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from pcluster_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class Tag(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'value': (str,), # noqa: E501
'key': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'value': 'value', # noqa: E501
'key': 'key', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""Tag - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
value (str): Tag value. [optional] # noqa: E501
key (str): Tag name. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
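# A minimal usage sketch (attribute names taken from attribute_map above):
#   tag = Tag(key="team", value="hpc")
#   tag.key    # -> "team"
#   tag.value  # -> "hpc"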
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Umap(CMakePackage):
"""Umap is a library that provides an mmap()-like interface to a
simple, user-space page fault handler based on the userfaultfd Linux
feature (starting with 4.3 linux kernel)."""
homepage = "https://github.com/LLNL/umap"
url = "https://github.com/LLNL/umap/archive/v2.1.0.tar.gz"
git = "https://github.com/LLNL/umap.git"
version('develop', branch='develop')
version('2.1.0', sha256='dfdc5b717aecdbfbb0da22e8567b9f2ffbc3607000a31122bf7c5ab3b85cecd9')
version('2.0.0', sha256='85c4bc68e8790393847a84eb54eaf6fc321acade382a399a2679d541b0e34150')
version('1.0.0', sha256='c746de3fae5bfc5bbf36234d5e888ea45eeba374c26cd8b5a817d0c08e454ed5')
version('0.0.4', sha256='bffaa03668c95b608406269cba6543f5e0ba37b04ac08a3fc4593976996bc273')
version('0.0.3', sha256='8e80835a85ad69fcd95f963822b1616c782114077d79c350017db4d82871455c')
version('0.0.2', sha256='eccc987b414bc568bd33b569ab6e18c328409499f11e65ac5cd5c3e1a8b47509')
version('0.0.1', sha256='49020adf55aa3f8f03757373b21ff229d2e8cf4155d54835019cd4745c1291ef')
variant('logging', default=False, description='Build with logging enabled.')
variant('tests', default=False, description='Build test programs.')
def cmake_args(self):
spec = self.spec
args = [
"-DENABLE_LOGGING=%s" % ('On' if '+logging' in spec else 'Off'),
"-DENABLE_TESTS=%s" % ('On' if '+tests' in spec else 'Off'),
]
return args
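    # For example, a spec like `umap+logging~tests` (hypothetical invocation)
    # would yield ["-DENABLE_LOGGING=On", "-DENABLE_TESTS=Off"].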
|
"""
Django settings for covid19_site project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = [
'dslab-covid19-backend.herokuapp.com',
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'rest_framework',
'model_api.apps.ModelApiConfig',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ORIGIN_ALLOW_ALL = True
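# NOTE: with CORS_ORIGIN_ALLOW_ALL = True, the whitelist below is effectively
# ignored; it only takes effect if CORS_ORIGIN_ALLOW_ALL is set to False.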
CORS_ORIGIN_WHITELIST = [
# React dev server.
'http://localhost:3000',
'http://127.0.0.1:3000',
]
ROOT_URLCONF = 'covid19_site.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'covid19_site.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
|
"""
random synonym insertation Transformation
============================================
"""
import random
from nltk.corpus import wordnet
from textattack.transformations import Transformation
class RandomSynonymInsertion(Transformation):
"""Transformation that inserts synonyms of words that are already in the
sequence."""
def _get_synonyms(self, word):
synonyms = set()
for syn in wordnet.synsets(word):
for lemma in syn.lemmas():
if lemma.name() != word and check_if_one_word(lemma.name()):
synonyms.add(lemma.name())
return list(synonyms)
def _get_transformations(self, current_text, indices_to_modify):
transformed_texts = []
for idx in indices_to_modify:
synonyms = []
            # Try to find a word that has synonyms; if none is found after a
            # few attempts, return the text unchanged.
            for _ in range(7):
                synonyms = self._get_synonyms(random.choice(current_text.words))
                if synonyms:
                    break
            else:
                return [current_text]
random_synonym = random.choice(synonyms)
transformed_texts.append(
current_text.insert_text_after_word_index(idx, random_synonym))
return transformed_texts
@property
def deterministic(self):
return False
def check_if_one_word(word):
for c in word:
if not c.isalpha():
return False
return True
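# A minimal usage sketch (assumes the nltk 'wordnet' corpus is downloaded and
# `attacked_text` is a textattack AttackedText instance):
#   transformation = RandomSynonymInsertion()
#   candidates = transformation._get_transformations(attacked_text, indices_to_modify=[0])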
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core import AzCommandsLoader
from azure.cli.core.commands import CliCommandType
from azure.cli.command_modules.profile._completers import get_subscription_id_list
from azure.cli.command_modules.profile._format import transform_account_list
import azure.cli.command_modules.profile._help # pylint: disable=unused-import
class ProfileCommandsLoader(AzCommandsLoader):
def __init__(self, cli_ctx=None):
super(ProfileCommandsLoader, self).__init__(cli_ctx=cli_ctx)
def load_command_table(self, args):
profile_custom = CliCommandType(
operations_tmpl='azure.cli.command_modules.profile.custom#{}'
)
with self.command_group('', profile_custom) as g:
g.command('login', 'login')
g.command('logout', 'logout')
with self.command_group('account', profile_custom) as g:
g.command('list', 'list_subscriptions', table_transformer=transform_account_list)
g.command('set', 'set_active_subscription')
g.command('show', 'show_subscription')
g.command('clear', 'account_clear')
g.command('list-locations', 'list_locations')
g.command('get-access-token', 'get_access_token')
return self.command_table
# pylint: disable=line-too-long
def load_arguments(self, command):
with self.argument_context('login') as c:
c.argument('password', options_list=('--password', '-p'), help="Credentials like user password, or for a service principal, provide client secret or a pem file with key and public certificate. Will prompt if not given.")
c.argument('service_principal', action='store_true', help='The credential representing a service principal.')
c.argument('username', options_list=('--username', '-u'), help='user name, service principal, or managed service identity ID')
c.argument('tenant', options_list=('--tenant', '-t'), help='The AAD tenant, must provide when using service principals.')
c.argument('allow_no_subscriptions', action='store_true', help="Support access tenants without subscriptions. It's uncommon but useful to run tenant level commands, such as 'az ad'")
c.argument('identity', options_list=('-i', '--identity'), action='store_true', help="Log in using the Virtual Machine's identity", arg_group='Managed Service Identity')
with self.argument_context('logout') as c:
c.argument('username', help='account user, if missing, logout the current active account')
with self.argument_context('account') as c:
c.argument('subscription', options_list=('--subscription', '-s'), help='Name or ID of subscription.', completer=get_subscription_id_list)
with self.argument_context('account list') as c:
            c.argument('all', help="List all subscriptions, rather than just 'Enabled' ones", action='store_true')
c.argument('refresh', help="retrieve up-to-date subscriptions from server", action='store_true')
with self.argument_context('account show') as c:
            c.argument('show_auth_for_sdk', options_list=('--sdk-auth',), action='store_true', help='output result in a format compatible with Azure SDK auth files')
COMMAND_LOADER_CLS = ProfileCommandsLoader
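# Example invocations wired up by this loader (hypothetical values):
#   az login --service-principal -u <app-id> -p <secret> -t <tenant>
#   az account list --all
#   az account set --subscription "My Subscription"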
|
#!/usr/bin/env python
# coding: utf-8
import numpy as np
from PIL import Image
from tensorflow.keras.utils import Sequence
from ..utils.image_processing import preprocess, center_crop
from ..utils.generic_utils import labels2indexes
class DMLGenerator(Sequence):
""" Keras Sequence for Deep Metric Learning with real-time data augmentation (multi-resolution, random crop,
horizontal flip, ...) given with 'data_augmentation' param.
Arguments:
images: Array of filename or opened images.
labels: Image labels
n_class: Number of class per batch.
image_per_class: Number of image per class per batch.
pre_process_method: The pre-process method applied on the images before feeding them to the network.
prediction_dimensions: size of embeddings for all outputs.
steps_per_epoch: Number of batch computed per epoch.
im_crop_size: Size that will be feed into the network.
n_channel: Default 3. Allows to handle Grayscale images such as MNIST.
shuffle: boolean to randomly sample or not the images per batch.
bounding_boxes: Should be a tuple with all x_left, x_right, y_top, y_bottom. Standard left/bottom orientation is
used.
data_augmentation: list of tuple with (proba, data augmentation object).
Returns:
Instance of DMLGenerator which shall be feed into model.fit_generator
"""
def __init__(self, images: np.ndarray,
labels: np.ndarray,
n_class: int,
image_per_class: int,
pre_process_method: str,
prediction_dimensions: int or list,
steps_per_epoch: int,
im_crop_size: tuple = (224, 224),
n_channel=3,
shuffle: bool = True,
bounding_boxes: list = None,
data_augmentation: list = None):
assert np.shape(images)[0] == np.shape(labels)[0], \
"'images' and 'labels' do not have the same number of images ({} != {}).".format(np.shape(images)[0],
np.shape(labels)[0])
self.images = images
self.n_class = n_class
self.shuffle = shuffle
self.n_channel = n_channel
self.original_labels = labels
self.steps_per_epoch = steps_per_epoch
self.im_crop_size = list(im_crop_size)
self.image_per_class = image_per_class
self.data_augmentation = data_augmentation
self.pre_process_method = pre_process_method
self.prediction_dimensions = prediction_dimensions
self.unique_original_labels = list(set(self.original_labels))
self.unique_original_labels.sort()
self.lbl2index, self.index2lbl = labels2indexes(self.unique_original_labels)
self.labels = np.array([self.lbl2index[o_lbl] for o_lbl in self.original_labels], dtype=np.int32)
if bounding_boxes is not None:
self.x1, self.x2, self.y1, self.y2 = bounding_boxes
self.use_bounding_boxes = True
else:
self.use_bounding_boxes = False
self.ind = self.gen_idx2()
self.n_images = len(self.ind) # N pairs in case of training set, or N images in other cases.
def __len__(self):
return self.steps_per_epoch
def __getitem__(self, idx):
ind = self.ind[idx]
im_list = self.images[ind] # get images
# Labels for DML: as keras losses enforce that y_pred.shape == y_true.shape, we build full of zeros matrices.
if isinstance(self.prediction_dimensions, list):
y_true = []
for pred_dim in self.prediction_dimensions:
y_true.append(np.zeros((len(ind), pred_dim), dtype=np.float32))
y_true[-1][:, 0] = self.labels[ind]
else:
y_true = np.zeros((len(ind), self.prediction_dimensions), dtype=np.float32)
            y_true[:, 0] = self.labels[ind]  # get labels and set them into the first column
batch_images = np.zeros((len(ind),) + tuple(self.im_crop_size) + (self.n_channel,), dtype=np.float32)
# Generate batch
for i, image in enumerate(im_list):
if isinstance(image, str):
im = Image.open(image)
if self.n_channel == 3:
im = im.convert("RGB")
elif self.n_channel == 1:
im = im.convert("L")
else:
raise ValueError("Unexpected channel number. Expected '1' or '3'"
"but got {}".format(self.n_channel))
elif isinstance(image, np.ndarray):
if image.shape[-1] == 3:
im = Image.fromarray(image, mode="RGB")
elif image.shape[-1] == 1:
im = Image.fromarray(image[..., 0], mode="L")
else:
raise ValueError("Unexpected image size. Expected (h, w, 1) or (h, w, 3.) "
"but got {}".format(image.shape))
            else:
                raise ValueError('Image format not understood. '
                                 'Expected ndarray or str, but got {}.'.format(type(image)))
if self.use_bounding_boxes:
im = im.crop((self.x1[ind[i]], self.y1[ind[i]], self.x2[ind[i]], self.y2[ind[i]]))
if self.data_augmentation is not None:
for proba, data_aug in self.data_augmentation:
if np.random.rand() < proba:
im = data_aug.compute(im)
im = center_crop(im, self.im_crop_size)
            if im.mode == "RGB":
im = np.array(im, dtype=np.float32)
elif im.mode == "L":
im = np.array(im, dtype=np.float32)[..., None]
batch_images[i, :, :, :] = im
batch_images = preprocess(batch_images, pre_process_method=self.pre_process_method)
return batch_images, y_true
def on_epoch_end(self):
self.ind = self.gen_idx2()
def gen_idx2(self):
""" Pre-computation of indexes to build the batches in __getitem__.
:return: nd-array: all indexes for each training batch from a given epoch.
"""
unique_instances = list(set(self.labels))
unique_instances.sort()
        # Adapt the number of images per class: this avoids having the same image from a given class many times.
im_list = []
to_pop = []
        min_im_per_instance = np.inf
for ul in unique_instances:
ind = np.where(self.labels == ul)[0]
if len(ind) > 1:
min_im_per_instance = min(min_im_per_instance, len(ind))
if self.shuffle:
np.random.shuffle(ind)
im_list.append(ind)
else:
to_pop.append(ul)
for tp in to_pop:
unique_instances.remove(tp)
if self.shuffle:
np.random.shuffle(im_list)
        # Each image is used at least once as a query; some may occur several
        # times to ensure exclusive batch construction.
idx_im = np.zeros(len(im_list), dtype=np.int32)
out_list_of_n_pairs = []
idx_cls = np.arange(len(im_list))
if self.shuffle:
np.random.shuffle(idx_cls)
i = 0
for _ in range(self.steps_per_epoch):
# Get all classes for the current batch:
idx = idx_cls[np.arange(i, i+self.n_class) % len(im_list)]
i += self.n_class
if i >= len(im_list):
i = 0
if self.shuffle:
np.random.shuffle(idx_cls)
# Get images from the given classes to build the batch:
tmp_list = []
for j in idx:
l_j = im_list[j]
for k in range(self.image_per_class):
tmp_list.append(l_j[idx_im[j] % len(l_j)])
idx_im[j] += 1
if idx_im[j] >= len(l_j) and self.shuffle:
np.random.shuffle(l_j)
out_list_of_n_pairs.append(tmp_list)
out_list_of_n_pairs = np.array(out_list_of_n_pairs, dtype=np.int32)
return out_list_of_n_pairs
def train_classes_to_index(self):
""" Convert labels into indexes.
:return: two dictionaries for the forward conversion and the backward conversion.
"""
unique_classes = np.sort(list(set(self.labels)))
new_labels = np.zeros((len(self.labels),), dtype=np.int32)
index2class = np.zeros(len(unique_classes), np.int32)
for i, c in enumerate(unique_classes):
new_labels[np.where(self.labels == c)] = i
index2class[i] = c
# class2index: the new label
# index2class: the correspondence between the new (index) and the old (value) labels.
return new_labels, index2class
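# A minimal usage sketch (hypothetical data and parameter values; assumes a
# compiled Keras model `model` with a DML loss):
#   gen = DMLGenerator(images=filenames, labels=labels, n_class=8,
#                      image_per_class=4, pre_process_method="imagenet",
#                      prediction_dimensions=128, steps_per_epoch=100)
#   model.fit(gen, epochs=10)  # each batch holds 8 classes x 4 images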
|
from typing import List

# Assumed imports: the @snapshot/@ensure decorators match the icontract API.
from icontract import ensure, snapshot


class Something:
    @snapshot(lambda lst: lst.copy(), "lst")
    @ensure(lambda lst, OLD: lst == OLD.lst)
    def do_something(self, lst: List[int]) -> None:
        pass
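# Usage note: Something().do_something([1, 2, 3]) passes, since the list is
# unchanged; a body that mutated lst would violate the @ensure postcondition,
# which compares lst against the snapshot captured in OLD.lst.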
__book_url__ = "dummy"
__book_version__ = "dummy"
|
from abc import ABC, abstractmethod
class Display(ABC):
@abstractmethod
def __init__(self):
pass
@abstractmethod
def show(self):
pass
@abstractmethod
def start(self):
pass
@abstractmethod
def finish(self):
pass
|
# pylint: disable=C0111,R0903
"""Test module
"""
import core.widget
import core.module
class Module(core.module.Module):
def __init__(self, config, theme):
super().__init__(config=config, theme=theme, widgets=core.widget.Widget("test"))
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
from django.contrib import admin
from things.admin import ThingAdmin, PrivateListFilter
from .models import Page
class PageAdmin(ThingAdmin):
list_filter = [PrivateListFilter]
admin.site.register(Page, PageAdmin)
|
import unittest
from pyalink.alink import *
import numpy as np
import pandas as pd
class TestVectorInteractionBatchOp(unittest.TestCase):
def test_vectorinteractionbatchop(self):
df = pd.DataFrame([
["$8$1:3,2:4,4:7", "$8$1:3,2:4,4:7"],
["$8$0:3,5:5", "$8$1:2,2:4,4:7"],
["$8$2:4,4:5", "$5$1:3,2:3,4:7"]
])
data = BatchOperator.fromDataframe(df, schemaStr="vec1 string, vec2 string")
vecInter = VectorInteractionBatchOp().setSelectedCols(["vec1","vec2"]).setOutputCol("vec_product")
vecInter.linkFrom(data).print()
|
"""Offer reusable conditions."""
from __future__ import annotations
import asyncio
from collections import deque
from collections.abc import Container, Generator
from contextlib import contextmanager
from datetime import datetime, timedelta
import functools as ft
import logging
import re
import sys
from typing import Any, Callable, cast
from homeassistant.components import zone as zone_cmp
from homeassistant.components.device_automation import (
async_get_device_automation_platform,
)
from homeassistant.components.sensor import DEVICE_CLASS_TIMESTAMP
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_GPS_ACCURACY,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_ABOVE,
CONF_AFTER,
CONF_ATTRIBUTE,
CONF_BEFORE,
CONF_BELOW,
CONF_CONDITION,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_ID,
CONF_STATE,
CONF_VALUE_TEMPLATE,
CONF_WEEKDAY,
CONF_ZONE,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
SUN_EVENT_SUNRISE,
SUN_EVENT_SUNSET,
WEEKDAYS,
)
from homeassistant.core import HomeAssistant, State, callback
from homeassistant.exceptions import (
ConditionError,
ConditionErrorContainer,
ConditionErrorIndex,
ConditionErrorMessage,
HomeAssistantError,
TemplateError,
)
from homeassistant.helpers import config_validation as cv, entity_registry as er
from homeassistant.helpers.sun import get_astral_event_date
from homeassistant.helpers.template import Template
from homeassistant.helpers.typing import ConfigType, TemplateVarsType
from homeassistant.util.async_ import run_callback_threadsafe
import homeassistant.util.dt as dt_util
from .trace import (
TraceElement,
trace_append_element,
trace_path,
trace_path_get,
trace_stack_cv,
trace_stack_pop,
trace_stack_push,
trace_stack_top,
)
# mypy: disallow-any-generics
ASYNC_FROM_CONFIG_FORMAT = "async_{}_from_config"
FROM_CONFIG_FORMAT = "{}_from_config"
VALIDATE_CONFIG_FORMAT = "{}_validate_config"
_LOGGER = logging.getLogger(__name__)
INPUT_ENTITY_ID = re.compile(
r"^input_(?:select|text|number|boolean|datetime)\.(?!.+__)(?!_)[\da-z_]+(?<!_)$"
)
ConditionCheckerType = Callable[[HomeAssistant, TemplateVarsType], bool]
def condition_trace_append(variables: TemplateVarsType, path: str) -> TraceElement:
"""Append a TraceElement to trace[path]."""
trace_element = TraceElement(variables, path)
trace_append_element(trace_element)
return trace_element
def condition_trace_set_result(result: bool, **kwargs: Any) -> None:
"""Set the result of TraceElement at the top of the stack."""
node = trace_stack_top(trace_stack_cv)
# The condition function may be called directly, in which case tracing
    # is not set up
if not node:
return
node.set_result(result=result, **kwargs)
def condition_trace_update_result(**kwargs: Any) -> None:
"""Update the result of TraceElement at the top of the stack."""
node = trace_stack_top(trace_stack_cv)
# The condition function may be called directly, in which case tracing
    # is not set up
if not node:
return
node.update_result(**kwargs)
@contextmanager
def trace_condition(variables: TemplateVarsType) -> Generator[TraceElement, None, None]:
"""Trace condition evaluation."""
should_pop = True
trace_element = trace_stack_top(trace_stack_cv)
if trace_element and trace_element.reuse_by_child:
should_pop = False
trace_element.reuse_by_child = False
else:
trace_element = condition_trace_append(variables, trace_path_get())
trace_stack_push(trace_stack_cv, trace_element)
try:
yield trace_element
except Exception as ex:
trace_element.set_error(ex)
raise ex
finally:
if should_pop:
trace_stack_pop(trace_stack_cv)
def trace_condition_function(condition: ConditionCheckerType) -> ConditionCheckerType:
"""Wrap a condition function to enable basic tracing."""
@ft.wraps(condition)
def wrapper(hass: HomeAssistant, variables: TemplateVarsType = None) -> bool:
"""Trace condition."""
with trace_condition(variables):
result = condition(hass, variables)
condition_trace_update_result(result=result)
return result
return wrapper
async def async_from_config(
hass: HomeAssistant,
config: ConfigType | Template,
) -> ConditionCheckerType:
"""Turn a condition configuration into a method.
Should be run on the event loop.
"""
if isinstance(config, Template):
# We got a condition template, wrap it in a configuration to pass along.
config = {
CONF_CONDITION: "template",
CONF_VALUE_TEMPLATE: config,
}
condition = config.get(CONF_CONDITION)
for fmt in (ASYNC_FROM_CONFIG_FORMAT, FROM_CONFIG_FORMAT):
factory = getattr(sys.modules[__name__], fmt.format(condition), None)
if factory:
break
if factory is None:
raise HomeAssistantError(f'Invalid condition "{condition}" specified {config}')
# Check for partials to properly determine if coroutine function
check_factory = factory
while isinstance(check_factory, ft.partial):
check_factory = check_factory.func
if asyncio.iscoroutinefunction(check_factory):
return cast(ConditionCheckerType, await factory(hass, config))
return cast(ConditionCheckerType, factory(config))
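# A minimal usage sketch (hypothetical config; run on the event loop with a
# HomeAssistant instance `hass`):
#   config = {CONF_CONDITION: "state", CONF_ENTITY_ID: ["light.kitchen"],
#             CONF_STATE: "on"}
#   checker = await async_from_config(hass, config)
#   checker(hass, None)  # -> True while light.kitchen is "on"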
async def async_and_from_config(
hass: HomeAssistant, config: ConfigType
) -> ConditionCheckerType:
"""Create multi condition matcher using 'AND'."""
checks = [await async_from_config(hass, entry) for entry in config["conditions"]]
@trace_condition_function
def if_and_condition(
hass: HomeAssistant, variables: TemplateVarsType = None
) -> bool:
"""Test and condition."""
errors = []
for index, check in enumerate(checks):
try:
with trace_path(["conditions", str(index)]):
if not check(hass, variables):
return False
except ConditionError as ex:
errors.append(
ConditionErrorIndex("and", index=index, total=len(checks), error=ex)
)
# Raise the errors if no check was false
if errors:
raise ConditionErrorContainer("and", errors=errors)
return True
return if_and_condition
async def async_or_from_config(
hass: HomeAssistant, config: ConfigType
) -> ConditionCheckerType:
"""Create multi condition matcher using 'OR'."""
checks = [await async_from_config(hass, entry) for entry in config["conditions"]]
@trace_condition_function
def if_or_condition(
hass: HomeAssistant, variables: TemplateVarsType = None
) -> bool:
"""Test or condition."""
errors = []
for index, check in enumerate(checks):
try:
with trace_path(["conditions", str(index)]):
if check(hass, variables):
return True
except ConditionError as ex:
errors.append(
ConditionErrorIndex("or", index=index, total=len(checks), error=ex)
)
# Raise the errors if no check was true
if errors:
raise ConditionErrorContainer("or", errors=errors)
return False
return if_or_condition
async def async_not_from_config(
hass: HomeAssistant, config: ConfigType
) -> ConditionCheckerType:
"""Create multi condition matcher using 'NOT'."""
checks = [await async_from_config(hass, entry) for entry in config["conditions"]]
@trace_condition_function
def if_not_condition(
hass: HomeAssistant, variables: TemplateVarsType = None
) -> bool:
"""Test not condition."""
errors = []
for index, check in enumerate(checks):
try:
with trace_path(["conditions", str(index)]):
if check(hass, variables):
return False
except ConditionError as ex:
errors.append(
ConditionErrorIndex("not", index=index, total=len(checks), error=ex)
)
# Raise the errors if no check was true
if errors:
raise ConditionErrorContainer("not", errors=errors)
return True
return if_not_condition
def numeric_state(
hass: HomeAssistant,
entity: None | str | State,
below: float | str | None = None,
above: float | str | None = None,
value_template: Template | None = None,
variables: TemplateVarsType = None,
) -> bool:
"""Test a numeric state condition."""
return run_callback_threadsafe(
hass.loop,
async_numeric_state,
hass,
entity,
below,
above,
value_template,
variables,
).result()
def async_numeric_state( # noqa: C901
hass: HomeAssistant,
entity: None | str | State,
below: float | str | None = None,
above: float | str | None = None,
value_template: Template | None = None,
variables: TemplateVarsType = None,
attribute: str | None = None,
) -> bool:
"""Test a numeric state condition."""
if entity is None:
raise ConditionErrorMessage("numeric_state", "no entity specified")
if isinstance(entity, str):
entity_id = entity
if (entity := hass.states.get(entity)) is None:
raise ConditionErrorMessage("numeric_state", f"unknown entity {entity_id}")
else:
entity_id = entity.entity_id
if attribute is not None and attribute not in entity.attributes:
condition_trace_set_result(
False,
message=f"attribute '{attribute}' of entity {entity_id} does not exist",
)
return False
value: Any = None
if value_template is None:
if attribute is None:
value = entity.state
else:
value = entity.attributes.get(attribute)
else:
variables = dict(variables or {})
variables["state"] = entity
try:
value = value_template.async_render(variables)
except TemplateError as ex:
raise ConditionErrorMessage(
"numeric_state", f"template error: {ex}"
) from ex
# Known states or attribute values that never match the numeric condition
if value in (None, STATE_UNAVAILABLE, STATE_UNKNOWN):
condition_trace_set_result(
False,
message=f"value '{value}' is non-numeric and treated as False",
)
return False
try:
fvalue = float(value)
except (ValueError, TypeError) as ex:
raise ConditionErrorMessage(
"numeric_state",
f"entity {entity_id} state '{value}' cannot be processed as a number",
) from ex
if below is not None:
if isinstance(below, str):
if not (below_entity := hass.states.get(below)):
raise ConditionErrorMessage(
"numeric_state", f"unknown 'below' entity {below}"
)
if below_entity.state in (
STATE_UNAVAILABLE,
STATE_UNKNOWN,
):
return False
try:
if fvalue >= float(below_entity.state):
condition_trace_set_result(
False,
state=fvalue,
wanted_state_below=float(below_entity.state),
)
return False
except (ValueError, TypeError) as ex:
raise ConditionErrorMessage(
"numeric_state",
f"the 'below' entity {below} state '{below_entity.state}' cannot be processed as a number",
) from ex
elif fvalue >= below:
condition_trace_set_result(False, state=fvalue, wanted_state_below=below)
return False
if above is not None:
if isinstance(above, str):
if not (above_entity := hass.states.get(above)):
raise ConditionErrorMessage(
"numeric_state", f"unknown 'above' entity {above}"
)
if above_entity.state in (
STATE_UNAVAILABLE,
STATE_UNKNOWN,
):
return False
try:
if fvalue <= float(above_entity.state):
condition_trace_set_result(
False,
state=fvalue,
wanted_state_above=float(above_entity.state),
)
return False
except (ValueError, TypeError) as ex:
raise ConditionErrorMessage(
"numeric_state",
f"the 'above' entity {above} state '{above_entity.state}' cannot be processed as a number",
) from ex
elif fvalue <= above:
condition_trace_set_result(False, state=fvalue, wanted_state_above=above)
return False
condition_trace_set_result(True, state=fvalue)
return True
def async_numeric_state_from_config(config: ConfigType) -> ConditionCheckerType:
"""Wrap action method with state based condition."""
entity_ids = config.get(CONF_ENTITY_ID, [])
attribute = config.get(CONF_ATTRIBUTE)
below = config.get(CONF_BELOW)
above = config.get(CONF_ABOVE)
value_template = config.get(CONF_VALUE_TEMPLATE)
@trace_condition_function
def if_numeric_state(
hass: HomeAssistant, variables: TemplateVarsType = None
) -> bool:
"""Test numeric state condition."""
if value_template is not None:
value_template.hass = hass
errors = []
for index, entity_id in enumerate(entity_ids):
try:
with trace_path(["entity_id", str(index)]), trace_condition(variables):
if not async_numeric_state(
hass,
entity_id,
below,
above,
value_template,
variables,
attribute,
):
return False
except ConditionError as ex:
errors.append(
ConditionErrorIndex(
"numeric_state", index=index, total=len(entity_ids), error=ex
)
)
# Raise the errors if no check was false
if errors:
raise ConditionErrorContainer("numeric_state", errors=errors)
return True
return if_numeric_state
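# Example config handled by the checker above (hypothetical values):
#   {"condition": "numeric_state", "entity_id": ["sensor.temperature"],
#    "above": 17, "below": 25}
# The checker returns True only if every listed entity's value is strictly
# above 17 and strictly below 25.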
def state(
hass: HomeAssistant,
entity: None | str | State,
req_state: Any,
for_period: timedelta | None = None,
attribute: str | None = None,
) -> bool:
"""Test if state matches requirements.
Async friendly.
"""
if entity is None:
raise ConditionErrorMessage("state", "no entity specified")
if isinstance(entity, str):
entity_id = entity
if (entity := hass.states.get(entity)) is None:
raise ConditionErrorMessage("state", f"unknown entity {entity_id}")
else:
entity_id = entity.entity_id
if attribute is not None and attribute not in entity.attributes:
condition_trace_set_result(
False,
message=f"attribute '{attribute}' of entity {entity_id} does not exist",
)
return False
assert isinstance(entity, State)
if attribute is None:
value: Any = entity.state
else:
value = entity.attributes.get(attribute)
if not isinstance(req_state, list):
req_state = [req_state]
is_state = False
for req_state_value in req_state:
state_value = req_state_value
if (
isinstance(req_state_value, str)
and INPUT_ENTITY_ID.match(req_state_value) is not None
):
if not (state_entity := hass.states.get(req_state_value)):
raise ConditionErrorMessage(
"state", f"the 'state' entity {req_state_value} is unavailable"
)
state_value = state_entity.state
is_state = value == state_value
if is_state:
break
if for_period is None or not is_state:
condition_trace_set_result(is_state, state=value, wanted_state=state_value)
return is_state
duration = dt_util.utcnow() - for_period
duration_ok = duration > entity.last_changed
condition_trace_set_result(duration_ok, state=value, duration=duration)
return duration_ok
def state_from_config(config: ConfigType) -> ConditionCheckerType:
"""Wrap action method with state based condition."""
entity_ids = config.get(CONF_ENTITY_ID, [])
req_states: str | list[str] = config.get(CONF_STATE, [])
for_period = config.get("for")
attribute = config.get(CONF_ATTRIBUTE)
if not isinstance(req_states, list):
req_states = [req_states]
@trace_condition_function
def if_state(hass: HomeAssistant, variables: TemplateVarsType = None) -> bool:
"""Test if condition."""
errors = []
for index, entity_id in enumerate(entity_ids):
try:
with trace_path(["entity_id", str(index)]), trace_condition(variables):
if not state(hass, entity_id, req_states, for_period, attribute):
return False
except ConditionError as ex:
errors.append(
ConditionErrorIndex(
"state", index=index, total=len(entity_ids), error=ex
)
)
# Raise the errors if no check was false
if errors:
raise ConditionErrorContainer("state", errors=errors)
return True
return if_state
def sun(
hass: HomeAssistant,
before: str | None = None,
after: str | None = None,
before_offset: timedelta | None = None,
after_offset: timedelta | None = None,
) -> bool:
"""Test if current time matches sun requirements."""
utcnow = dt_util.utcnow()
today = dt_util.as_local(utcnow).date()
before_offset = before_offset or timedelta(0)
after_offset = after_offset or timedelta(0)
sunrise_today = get_astral_event_date(hass, SUN_EVENT_SUNRISE, today)
sunset_today = get_astral_event_date(hass, SUN_EVENT_SUNSET, today)
sunrise = sunrise_today
sunset = sunset_today
if today > dt_util.as_local(
cast(datetime, sunrise_today)
).date() and SUN_EVENT_SUNRISE in (before, after):
tomorrow = dt_util.as_local(utcnow + timedelta(days=1)).date()
sunrise_tomorrow = get_astral_event_date(hass, SUN_EVENT_SUNRISE, tomorrow)
sunrise = sunrise_tomorrow
if today > dt_util.as_local(
cast(datetime, sunset_today)
).date() and SUN_EVENT_SUNSET in (before, after):
tomorrow = dt_util.as_local(utcnow + timedelta(days=1)).date()
sunset_tomorrow = get_astral_event_date(hass, SUN_EVENT_SUNSET, tomorrow)
sunset = sunset_tomorrow
if sunrise is None and SUN_EVENT_SUNRISE in (before, after):
# There is no sunrise today
condition_trace_set_result(False, message="no sunrise today")
return False
if sunset is None and SUN_EVENT_SUNSET in (before, after):
# There is no sunset today
condition_trace_set_result(False, message="no sunset today")
return False
if before == SUN_EVENT_SUNRISE:
wanted_time_before = cast(datetime, sunrise) + before_offset
condition_trace_update_result(wanted_time_before=wanted_time_before)
if utcnow > wanted_time_before:
return False
if before == SUN_EVENT_SUNSET:
wanted_time_before = cast(datetime, sunset) + before_offset
condition_trace_update_result(wanted_time_before=wanted_time_before)
if utcnow > wanted_time_before:
return False
if after == SUN_EVENT_SUNRISE:
wanted_time_after = cast(datetime, sunrise) + after_offset
condition_trace_update_result(wanted_time_after=wanted_time_after)
if utcnow < wanted_time_after:
return False
if after == SUN_EVENT_SUNSET:
wanted_time_after = cast(datetime, sunset) + after_offset
condition_trace_update_result(wanted_time_after=wanted_time_after)
if utcnow < wanted_time_after:
return False
return True
def sun_from_config(config: ConfigType) -> ConditionCheckerType:
"""Wrap action method with sun based condition."""
before = config.get("before")
after = config.get("after")
before_offset = config.get("before_offset")
after_offset = config.get("after_offset")
@trace_condition_function
def sun_if(hass: HomeAssistant, variables: TemplateVarsType = None) -> bool:
"""Validate time based if-condition."""
return sun(hass, before, after, before_offset, after_offset)
return sun_if
def template(
hass: HomeAssistant, value_template: Template, variables: TemplateVarsType = None
) -> bool:
"""Test if template condition matches."""
return run_callback_threadsafe(
hass.loop, async_template, hass, value_template, variables
).result()
def async_template(
hass: HomeAssistant,
value_template: Template,
variables: TemplateVarsType = None,
trace_result: bool = True,
) -> bool:
"""Test if template condition matches."""
try:
info = value_template.async_render_to_info(variables, parse_result=False)
value = info.result()
except TemplateError as ex:
raise ConditionErrorMessage("template", str(ex)) from ex
result = value.lower() == "true"
if trace_result:
condition_trace_set_result(result, entities=list(info.entities))
return result
def async_template_from_config(config: ConfigType) -> ConditionCheckerType:
"""Wrap action method with state based condition."""
value_template = cast(Template, config.get(CONF_VALUE_TEMPLATE))
@trace_condition_function
def template_if(hass: HomeAssistant, variables: TemplateVarsType = None) -> bool:
"""Validate template based if-condition."""
value_template.hass = hass
return async_template(hass, value_template, variables)
return template_if
def time(
hass: HomeAssistant,
before: dt_util.dt.time | str | None = None,
after: dt_util.dt.time | str | None = None,
weekday: None | str | Container[str] = None,
) -> bool:
"""Test if local time condition matches.
Handle the fact that time is continuous and we may be testing for
a period that crosses midnight. In that case it is easier to test
for the opposite. "(23:59 <= now < 00:01)" would be the same as
"not (00:01 <= now < 23:59)".
"""
now = dt_util.now()
now_time = now.time()
if after is None:
after = dt_util.dt.time(0)
elif isinstance(after, str):
if not (after_entity := hass.states.get(after)):
raise ConditionErrorMessage("time", f"unknown 'after' entity {after}")
if after_entity.domain == "input_datetime":
after = dt_util.dt.time(
after_entity.attributes.get("hour", 23),
after_entity.attributes.get("minute", 59),
after_entity.attributes.get("second", 59),
)
elif after_entity.attributes.get(
ATTR_DEVICE_CLASS
) == DEVICE_CLASS_TIMESTAMP and after_entity.state not in (
STATE_UNAVAILABLE,
STATE_UNKNOWN,
):
after_datetime = dt_util.parse_datetime(after_entity.state)
if after_datetime is None:
return False
after = dt_util.as_local(after_datetime).time()
else:
return False
if before is None:
before = dt_util.dt.time(23, 59, 59, 999999)
elif isinstance(before, str):
if not (before_entity := hass.states.get(before)):
raise ConditionErrorMessage("time", f"unknown 'before' entity {before}")
if before_entity.domain == "input_datetime":
before = dt_util.dt.time(
before_entity.attributes.get("hour", 23),
before_entity.attributes.get("minute", 59),
before_entity.attributes.get("second", 59),
)
elif before_entity.attributes.get(
ATTR_DEVICE_CLASS
) == DEVICE_CLASS_TIMESTAMP and before_entity.state not in (
STATE_UNAVAILABLE,
STATE_UNKNOWN,
):
            before_datetime = dt_util.parse_datetime(before_entity.state)
            if before_datetime is None:
                return False
            before = dt_util.as_local(before_datetime).time()
else:
return False
if after < before:
condition_trace_update_result(after=after, now_time=now_time, before=before)
if not after <= now_time < before:
return False
else:
condition_trace_update_result(after=after, now_time=now_time, before=before)
if before <= now_time < after:
return False
if weekday is not None:
now_weekday = WEEKDAYS[now.weekday()]
condition_trace_update_result(weekday=weekday, now_weekday=now_weekday)
if (
isinstance(weekday, str)
and weekday != now_weekday
or now_weekday not in weekday
):
return False
return True
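# Example of the midnight-crossing rule from the docstring: with after=23:00
# and before=02:00, now=23:30 and now=01:00 both match, because the test
# inverts to "not (02:00 <= now < 23:00)".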
def time_from_config(config: ConfigType) -> ConditionCheckerType:
"""Wrap action method with time based condition."""
before = config.get(CONF_BEFORE)
after = config.get(CONF_AFTER)
weekday = config.get(CONF_WEEKDAY)
@trace_condition_function
def time_if(hass: HomeAssistant, variables: TemplateVarsType = None) -> bool:
"""Validate time based if-condition."""
return time(hass, before, after, weekday)
return time_if
def zone(
hass: HomeAssistant,
zone_ent: None | str | State,
entity: None | str | State,
) -> bool:
"""Test if zone-condition matches.
Async friendly.
"""
if zone_ent is None:
raise ConditionErrorMessage("zone", "no zone specified")
if isinstance(zone_ent, str):
zone_ent_id = zone_ent
if (zone_ent := hass.states.get(zone_ent)) is None:
raise ConditionErrorMessage("zone", f"unknown zone {zone_ent_id}")
if entity is None:
raise ConditionErrorMessage("zone", "no entity specified")
if isinstance(entity, str):
entity_id = entity
if (entity := hass.states.get(entity)) is None:
raise ConditionErrorMessage("zone", f"unknown entity {entity_id}")
else:
entity_id = entity.entity_id
latitude = entity.attributes.get(ATTR_LATITUDE)
longitude = entity.attributes.get(ATTR_LONGITUDE)
if latitude is None:
raise ConditionErrorMessage(
"zone", f"entity {entity_id} has no 'latitude' attribute"
)
if longitude is None:
raise ConditionErrorMessage(
"zone", f"entity {entity_id} has no 'longitude' attribute"
)
return zone_cmp.in_zone(
zone_ent, latitude, longitude, entity.attributes.get(ATTR_GPS_ACCURACY, 0)
)
def zone_from_config(config: ConfigType) -> ConditionCheckerType:
"""Wrap action method with zone based condition."""
entity_ids = config.get(CONF_ENTITY_ID, [])
zone_entity_ids = config.get(CONF_ZONE, [])
@trace_condition_function
def if_in_zone(hass: HomeAssistant, variables: TemplateVarsType = None) -> bool:
"""Test if condition."""
errors = []
all_ok = True
for entity_id in entity_ids:
entity_ok = False
for zone_entity_id in zone_entity_ids:
try:
if zone(hass, zone_entity_id, entity_id):
entity_ok = True
except ConditionErrorMessage as ex:
errors.append(
ConditionErrorMessage(
"zone",
f"error matching {entity_id} with {zone_entity_id}: {ex.message}",
)
)
if not entity_ok:
all_ok = False
# Raise the errors only if no definitive result was found
if errors and not all_ok:
raise ConditionErrorContainer("zone", errors=errors)
return all_ok
return if_in_zone
async def async_device_from_config(
hass: HomeAssistant, config: ConfigType
) -> ConditionCheckerType:
"""Test a device condition."""
platform = await async_get_device_automation_platform(
hass, config[CONF_DOMAIN], "condition"
)
return trace_condition_function(
cast(
ConditionCheckerType,
platform.async_condition_from_config(hass, config), # type: ignore
)
)
async def async_trigger_from_config(
hass: HomeAssistant, config: ConfigType
) -> ConditionCheckerType:
"""Test a trigger condition."""
trigger_id = config[CONF_ID]
@trace_condition_function
def trigger_if(hass: HomeAssistant, variables: TemplateVarsType = None) -> bool:
"""Validate trigger based if-condition."""
return (
variables is not None
and "trigger" in variables
and variables["trigger"].get("id") in trigger_id
)
return trigger_if
def numeric_state_validate_config(
hass: HomeAssistant, config: ConfigType
) -> ConfigType:
"""Validate numeric_state condition config."""
registry = er.async_get(hass)
config = dict(config)
config[CONF_ENTITY_ID] = er.async_resolve_entity_ids(
registry, cv.entity_ids_or_uuids(config[CONF_ENTITY_ID])
)
return config
def state_validate_config(hass: HomeAssistant, config: ConfigType) -> ConfigType:
"""Validate state condition config."""
registry = er.async_get(hass)
config = dict(config)
config[CONF_ENTITY_ID] = er.async_resolve_entity_ids(
registry, cv.entity_ids_or_uuids(config[CONF_ENTITY_ID])
)
return config
async def async_validate_condition_config(
hass: HomeAssistant, config: ConfigType | Template
) -> ConfigType | Template:
"""Validate config."""
if isinstance(config, Template):
return config
condition = config[CONF_CONDITION]
if condition in ("and", "not", "or"):
conditions = []
for sub_cond in config["conditions"]:
sub_cond = await async_validate_condition_config(hass, sub_cond)
conditions.append(sub_cond)
config["conditions"] = conditions
if condition == "device":
config = cv.DEVICE_CONDITION_SCHEMA(config)
assert not isinstance(config, Template)
platform = await async_get_device_automation_platform(
hass, config[CONF_DOMAIN], "condition"
)
if hasattr(platform, "async_validate_condition_config"):
return await platform.async_validate_condition_config(hass, config) # type: ignore
return cast(ConfigType, platform.CONDITION_SCHEMA(config)) # type: ignore
if condition in ("numeric_state", "state"):
validator = getattr(
sys.modules[__name__], VALIDATE_CONFIG_FORMAT.format(condition)
)
return validator(hass, config) # type: ignore
return config
async def async_validate_conditions_config(
hass: HomeAssistant, conditions: list[ConfigType | Template]
) -> list[ConfigType | Template]:
"""Validate config."""
return await asyncio.gather(
*(async_validate_condition_config(hass, cond) for cond in conditions)
)
@callback
def async_extract_entities(config: ConfigType | Template) -> set[str]:
"""Extract entities from a condition."""
referenced: set[str] = set()
to_process = deque([config])
while to_process:
config = to_process.popleft()
if isinstance(config, Template):
continue
condition = config[CONF_CONDITION]
if condition in ("and", "not", "or"):
to_process.extend(config["conditions"])
continue
entity_ids = config.get(CONF_ENTITY_ID)
if isinstance(entity_ids, str):
entity_ids = [entity_ids]
if entity_ids is not None:
referenced.update(entity_ids)
return referenced
@callback
def async_extract_devices(config: ConfigType | Template) -> set[str]:
"""Extract devices from a condition."""
referenced = set()
to_process = deque([config])
while to_process:
config = to_process.popleft()
if isinstance(config, Template):
continue
condition = config[CONF_CONDITION]
if condition in ("and", "not", "or"):
to_process.extend(config["conditions"])
continue
if condition != "device":
continue
if (device_id := config.get(CONF_DEVICE_ID)) is not None:
referenced.add(device_id)
return referenced
|
import sys
sys.path.append('../tytus/parser/team27/G-27/execution/abstract')
sys.path.append('../tytus/storage')
from querie import *
from storageManager import jsonMode as admin
from prettytable import PrettyTable
class drop_database(Querie):
    '''
    row = row number (int)
    column = column number (int)
    '''
    def __init__(self, column, row):
        Querie.__init__(self, column, row)

    # Return value: a list of strings with the database names; if an error
    # occurred or there are no databases, an empty list [] is returned.
    def execute(self, environment):
        # showDatabases() comes from storageManager.jsonMode, imported as admin
        result = admin.showDatabases()
        if not isinstance(result, list):
            return {'Error': 'The function showDatabases() did not return a list', 'Fila': self.row, 'Columna': self.column}
        if len(result) == 0:
            return 'There are no databases to show, either because no database exists or because an unknown error occurred'
        else:
            x = PrettyTable()
            x.add_column("bases", result)
            return x.get_string()
|
import sys
from setuptools import setup
args = ' '.join(sys.argv).strip()
if not any(args.endswith(suffix) for suffix in ['setup.py check -r -s', 'setup.py sdist']):
raise ImportError('This is a unique description. Locked by pypi-parker at example-url.co.net.',)
setup(
author='pypi-parker',
author_email='park-email@example-url.co.net',
classifiers=['Development Status :: 7 - Inactive'],
description='This is a unique description. Locked by pypi-parker at example-url.co.net.',
long_description='This is a unique description. Locked by pypi-parker at example-url.co.net.',
name='testpackage-reloaded',
url='example-url.co.net',
version='3.1.4'
)
|
from flask import request, jsonify
from flask_restful import Resource
from klap4.services.song_services import change_single_fcc, change_album_fcc
class SongAPI(Resource):
def put(self, ref, typ):
json_data = request.get_json(force=True)
try:
fcc = json_data['fcc']
if typ == 'single':
song_number = json_data['song_number']
change_single_fcc(ref, song_number, fcc)
elif typ == 'all':
                change_album_fcc(ref, fcc)
return "Updated"
        except Exception:
return jsonify({"error": "Bad request"}), 400
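# Example request (hypothetical route mounting, e.g. /song/<ref>/<typ>):
#   PUT /song/<ref>/single  with JSON body {"fcc": true, "song_number": 3}
#   PUT /song/<ref>/all     with JSON body {"fcc": false}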
|
from GlobalObjs.Graph import SpaceTimeNode
from Benchmark import Warehouse
import numpy as np
import os
from queue import PriorityQueue
from Visualisations.Vis import Vis
# Based on pseudocode taken from
# https://www.geeksforgeeks.org/a-search-algorithm/
class AStarNode(SpaceTimeNode):
def __init__(self, parent, x, y, t=-1):
super(AStarNode, self).__init__(parent, x, y, t)
self.h = 0 # heuristic
self.g = 0 # distance to node from start
self.f = np.inf # combined distance and heuristic
# so it can be sorted
def __lt__(self, other):
return self.f < other.f
def __gt__(self, other):
return self.f > other.f
def __repr__(self):
return f"Node({self.x},{self.y},{self.t},f={self.f})"
def __str__(self):
return self.__repr__()
def man_dist(self, end):
return abs(self.x - end.x) + abs(self.y - end.y)
def __eq__(self, other):
return self.x == other.x and self.y == other.y
# def calc_f(self, end):
# self.f = self.g + self.man_dist(end)
# Normal AStar - I.e. ignores time dimension
class AStar:
def __init__(self):
pass
@staticmethod
def man_dist(node_1, node_2):
return abs(node_1.x - node_2.x) + abs(node_1.y - node_2.y)
# Assuming space region not space-time i.e. no time dimension
@staticmethod
def is_node_valid(grid, node):
max_y_bound = len(grid) - 1
max_x_bound = len(grid[0]) - 1
x = node.x
y = node.y
if 0 <= x <= max_x_bound and 0 <= y <= max_y_bound and not grid[y][x]:
return True
else:
return False
@staticmethod
def is_coord_valid(grid, x, y):
max_y_bound = len(grid) - 1
max_x_bound = len(grid[0]) - 1
if 0 <= x <= max_x_bound and 0 <= y <= max_y_bound and not grid[y][x]:
return True
else:
return False
@staticmethod
def get_path_list(end_node):
path_list = [end_node]
if end_node.parent:
curr_node = end_node.parent
path_list.append(curr_node)
while curr_node.parent:
curr_node = curr_node.parent
path_list.append(curr_node)
else:
# Path not found
raise Exception("end_node has no parent - no path exists.")
path_list = list(reversed(path_list))
return path_list
@staticmethod
def find_path(grid, start_pos, end_pos):
if start_pos == end_pos: # I.e. already at end
print("Already at end position")
return [AStarNode(None, start_pos[0], start_pos[1])]
start_node = AStarNode(None, start_pos[0], start_pos[1])
start_node.f = 0
end_node = AStarNode(None, end_pos[0], end_pos[1])
# start_node.calc_f(end_node) # Not required?
# Using a priority queue would be faster -> do later
open_list = []
closed_list = []
open_list.append(start_node)
# open_q.put(start_node)
while open_list:
            open_list = sorted(open_list)  # ascending by f, so pop(0) yields the lowest-cost node
curr_node = open_list.pop(0)
# curr_node = open_q.get()
# closed_list.append(curr_node) # I think?
x = curr_node.x
y = curr_node.y
children = []
# GENERATE CHILDREN
# get neighbours/children of current node
# left
if AStar.is_coord_valid(grid, x - 1, y):
children.append(AStarNode(curr_node, x - 1, y))
# right
if AStar.is_coord_valid(grid, x + 1, y):
children.append(AStarNode(curr_node, x + 1, y))
# top
if AStar.is_coord_valid(grid, x, y+1):
children.append(AStarNode(curr_node, x, y+1))
# bottom
if AStar.is_coord_valid(grid, x, y - 1):
children.append(AStarNode(curr_node, x, y - 1))
curr_node.children = children # Not sure if required since each node has a specified parent
for child in children:
# 1) If end node found
if child == end_node:
# path found
end_node = child # So it has correct parent
return AStar.get_path_list(end_node)
# tmp_g = curr_node.g + 1
child.g = curr_node.g + 1
child.h = AStar.man_dist(child, end_node)
child.f = child.g + child.h
# tmp_f = (curr_node.g + 1) + AStar.man_dist(curr_node, end_node)
# 2) If a node with the same position as child is in the open list and has a lower f
# than child, skip this child
tmp_list = list(filter(lambda el: el == child, open_list))
if tmp_list and tmp_list[0].f < child.f:
continue
# 3) If a node with the same position as child is in the closed list and has a lower f
# than child, skip this child, otherwise, add the node to the open list.
tmp_list = list(filter(lambda el: el == child, closed_list))
if tmp_list and tmp_list[0].f < child.f:
continue
else:
open_list.append(child)
closed_list.append(curr_node)
return None # I.e. path not found
@staticmethod
def path_to_coords(path):
return [(node.x, node.y) for node in path]
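    # A heapq-based open list (sketch) would avoid re-sorting the whole list on
    # every iteration; AStarNode.__lt__ already orders nodes by f:
    #   import heapq
    #   heapq.heapify(open_list)
    #   curr_node = heapq.heappop(open_list)
    #   heapq.heappush(open_list, child)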
def example():
start_pos = [1, 1]
end_pos = [6, 9] # [5, 8]
# ex_grid = [
# [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
# [0, 0, 1, 1, 1, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# ]
ex_grid = [
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
path = AStar.find_path(ex_grid, start_pos, end_pos)
print_grid(ex_grid, path, AStarNode(None, start_pos[0], start_pos[1]),
AStarNode(None, end_pos[0], end_pos[1]))
def warehouse_example():
workspace_path = "\\".join(os.getcwd().split("\\")[:-1])
# print(workspace_path)
grid = Warehouse.txt_to_grid(workspace_path + "/Benchmark/maps/map_warehouse.txt", simple_layout=True)
# start_pos = Warehouse.get_rand_valid_point(grid)
start_pos = (1, 6)
goal_pos = (32, 15)
# goal_pos = Warehouse.get_rand_valid_point(grid)
print(f"start_pos: {start_pos}")
print(f"goal_pos: {goal_pos}")
path_nodes = AStar.find_path(grid, start_pos, goal_pos)
path = AStar.path_to_coords(path_nodes)
print(f"Path length: {len(path)}")
vis = Vis(grid, (1100, 500))
vis.draw_start(start_pos)
vis.draw_goal(goal_pos)
vis.draw_path(path)
vis.save_to_png("AStarWarehouse")
vis.window.getMouse()
def print_grid(grid, path, start_node, end_node):
for j in range(len(grid)):
curr_str = "|"
for i in range(len(grid[0])):
curr_cell = " "
if AStarNode(None, i, j) in path:
curr_cell = "X"
elif grid[j][i]:
curr_cell = "#"
if AStarNode(None, i, j) == start_node:
curr_cell="S"
elif AStarNode(None, i, j) == end_node:
curr_cell="E"
curr_str += curr_cell
print(curr_str + "|")
if __name__ == "__main__":
# example()
warehouse_example()
|
from collections import deque
regex = input()[1:-1]
far_rooms = 0  # rooms whose shortest path passes through at least 1000 doors
stack = deque([((0, 0), 0)])
visited = set()
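# Stack protocol: each loop iteration pops the current room. '(' pushes the
# current room twice (a saved branch point plus a working copy); '|' re-pushes
# a copy of the saved point so the next branch restarts from it; ')' pushes
# nothing, leaving the saved point on top as the new current room.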
for t in regex:
(x, y), d = stack.pop()
    if (x, y) not in visited and d >= 1000:
        far_rooms += 1
    visited |= {(x, y)}
if t == 'S':
stack.append(((x, y-1), d+1))
elif t == 'N':
stack.append(((x, y+1), d+1))
elif t == 'W':
stack.append(((x-1, y), d+1))
elif t == 'E':
stack.append(((x+1, y), d+1))
elif t == '(':
stack.append(((x, y), d))
stack.append(((x, y), d))
elif t == '|':
stack.append(stack[-1])
    elif t == ')':
        # the branch-end state was popped above; by not re-appending it, the
        # next pop resumes from the copy saved at the matching '('
        continue
print(far_rooms)
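# Illustrative only: the same stack walk packaged as a function, computing the
# distance to the farthest room instead of the >= 1000-door count above. The
# example regex and its expected answer (10 doors) come from the puzzle's
# public description.
def max_door_distance(full_regex):
    pos, d = (0, 0), 0
    saved = []  # branch points pushed at '('
    dist = {pos: 0}  # shortest distance seen per room
    step = {'N': (0, 1), 'S': (0, -1), 'W': (-1, 0), 'E': (1, 0)}
    for t in full_regex.strip('^$'):
        if t in step:
            dx, dy = step[t]
            pos = (pos[0] + dx, pos[1] + dy)
            d += 1
            dist[pos] = min(dist.get(pos, d), d)
        elif t == '(':
            saved.append((pos, d))
        elif t == '|':
            pos, d = saved[-1]  # next alternative restarts at the branch point
        elif t == ')':
            pos, d = saved.pop()  # leave the group from the branch point
    return max(dist.values())
# max_door_distance('^ENWWW(NEEE|SSE(EE|N))$') == 10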
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
from functools import reduce
from operator import attrgetter, add
import sys
import pytest
from ..util.arithmeticdict import ArithmeticDict
from ..util.testing import requires
from ..util.parsing import parsing_library
from ..units import default_units, units_library, to_unitless, allclose
from ..chemistry import (
equilibrium_quotient, Substance, Species, Reaction,
Equilibrium, balance_stoichiometry
)
if sys.version_info < (3, 6, 0):
class ModuleNotFoundError(ImportError):
pass
@requires('numpy')
def test_equilibrium_quotient():
assert abs(equilibrium_quotient([2.3, 3.7, 5.1], (-1, -1, 1)) -
5.1/2.3/3.7) < 1e-14
@requires(parsing_library)
def test_Substance():
s = Substance.from_formula('H+')
assert s.composition == {0: 1, 1: 1}
assert s.charge == 1
assert abs(s.mass - 1.008) < 1e-3
def test_Substance__2():
H2O = Substance(name='H2O', charge=0, latex_name=r'\mathrm{H_{2}O}',
data={'pKa': 14}) # will_be_missing_in='0.8.0', use data=...
OH_m = Substance(name='OH-', charge=-1, latex_name=r'\mathrm{OH^{-}}')
assert sorted([OH_m, H2O], key=attrgetter('name')) == [H2O, OH_m]
@requires(parsing_library)
def test_Substance__from_formula():
H2O = Substance.from_formula('H2O')
assert H2O.composition == {1: 2, 8: 1}
assert H2O.latex_name == 'H_{2}O'
assert H2O.unicode_name == u'H₂O'
assert H2O.html_name == u'H<sub>2</sub>O'
@requires(parsing_library)
def test_Species():
s = Species.from_formula('H2O')
assert s.phase_idx == 0
mapping = {'(aq)': 0, '(s)': 1, '(g)': 2}
assert Species.from_formula('CO2(g)').phase_idx == 3
assert Species.from_formula('CO2(g)', mapping).phase_idx == 2
assert Species.from_formula('CO2(aq)', mapping).phase_idx == 0
assert Species.from_formula('NaCl(s)').phase_idx == 1
assert Species.from_formula('NaCl(s)', phase_idx=7).phase_idx == 7
assert Species.from_formula('CO2(aq)', mapping, phase_idx=7).phase_idx == 7
uranyl_ads = Species.from_formula('UO2+2(ads)', phases={'(aq)': 0, '(ads)': 1})
assert uranyl_ads.composition == {0: 2, 92: 1, 8: 2}
assert uranyl_ads.phase_idx == 1
def test_Solute():
from ..chemistry import Solute
from ..util.pyutil import ChemPyDeprecationWarning
with pytest.warns(ChemPyDeprecationWarning):
w = Solute('H2O')
assert w.name == 'H2O'
def test_Reaction():
substances = s_Hp, s_OHm, s_H2O = (
Substance('H+', composition={0: 1, 1: 1}),
Substance('OH-', composition={0: -1, 1: 1, 8: 1}),
Substance('H2O', composition={0: 0, 1: 2, 8: 1}),
)
substance_names = Hp, OHm, H2O = [s.name for s in substances]
substance_dict = {n: s for n, s in zip(substance_names, substances)}
r1 = Reaction({Hp: 1, OHm: 1}, {H2O: 1})
assert sum(r1.composition_violation(substance_dict)) == 0
assert r1.composition_violation(substance_dict, ['H+']) == [0]
viol, cks = r1.composition_violation(substance_dict, True)
assert viol == [0]*3 and sorted(cks) == [0, 1, 8]
assert r1.charge_neutrality_violation(substance_dict) == 0
r2 = Reaction({Hp: 1, OHm: 1}, {H2O: 2})
assert sum(r2.composition_violation(substance_dict)) != 0
assert r2.charge_neutrality_violation(substance_dict) == 0
r3 = Reaction({Hp: 2, OHm: 1}, {H2O: 2})
assert sum(r3.composition_violation(substance_dict)) != 0
assert r3.charge_neutrality_violation(substance_dict) != 0
assert r3.keys() == {Hp, OHm, H2O}
with pytest.raises(ValueError):
Reaction({Hp: -1, OHm: -1}, {H2O: -1})
assert r1 == Reaction({'H+', 'OH-'}, {'H2O'})
r4 = Reaction({Hp, OHm}, {H2O}, 7)
ref = {Hp: -3*5*7, OHm: -3*5*7, H2O: 3*5*7}
    assert r4.rate({Hp: 5, OHm: 3}) == ref
r5 = r4.copy()
assert r5 == r4
assert r5 != r1
lhs5, rhs5 = {'H+': 1, 'OH-': 1}, {'H2O': 1}
r5 = Reaction(lhs5, rhs5)
assert r5.reac == lhs5 and r5.prod == rhs5
def test_Reaction__copy():
r1 = Reaction({'H2O'}, {'H2O'}, checks=())
r2 = r1.copy()
assert r1 == r2
r2.reac['H2O2'] = r2.reac.pop('H2O') # 1
r2.prod['H2O2'] = r2.prod.pop('H2O') # 1
assert r1.reac == {'H2O': 1} and r1.prod == {'H2O': 1}
@requires(parsing_library)
def test_Reaction__from_string():
r = Reaction.from_string("H2O -> H+ + OH-; 1e-4", 'H2O H+ OH-'.split())
assert r.reac == {'H2O': 1} and r.prod == {'H+': 1, 'OH-': 1}
with pytest.raises(ValueError):
Reaction.from_string("H2O -> H+ + OH-; 1e-4", 'H2O H OH-'.split())
r2 = Reaction.from_string("H2O -> H+ + OH-; 1e-4; ref='important_paper'")
assert r2.ref == 'important_paper'
with pytest.raises(ValueError):
Reaction.from_string("H2O -> H2O")
Reaction.from_string("H2O -> H2O; None; checks=()")
with pytest.raises(ValueError):
Reaction({'H2O': 2}, {'H2O2': 2, 'O2': -2})
r4 = Reaction({'H+': 2, 'OH-': 1}, {'H2O': 2}, 42.0)
assert Reaction.from_string(str(r4), 'H+ OH- H2O') == r4
assert Reaction.from_string(str(r4), None) == r4
r5 = Reaction.from_string("H2O2 -> 0.5 O2 + H2O", checks=[
c for c in Reaction.default_checks if c != 'all_integral'])
r6 = r5.copy()
assert r5 == r6
r7 = Reaction.from_string("H2O -> H + OH; None; data=dict(ref='foo; bar; baz;') # foobar")
assert r7.data['ref'] == 'foo; bar; baz;'
@requires(parsing_library, units_library)
def test_Reaction_from_string__units():
r5 = Reaction.from_string('2 H2O2 -> O2 + 2 H2O; 1e-7/molar/second', 'H2O O2 H2O2')
assert to_unitless(r5.param, 1/default_units.molar/default_units.second) == 1e-7
r6 = Reaction.from_string('->', checks=())
assert r6.reac == {} and r6.prod == {}
r7 = Reaction.from_string('2 A -> B; exp(log(2e-3))*metre**3/mol/hour', None)
assert r7.reac == {'A': 2} and r7.prod == {'B': 1}
assert allclose(r7.param, 2e-3*default_units.metre**3/default_units.mol/default_units.hour)
with pytest.raises(ValueError):
Reaction.from_string('2 A -> B; 2e-3/hour', None)
r8 = Reaction.from_string('A -> B; "k"')
assert r8.rate_expr().args is None
assert r8.rate_expr().unique_keys == ('k',)
r9 = Reaction.from_string('A -> B; 42.0')
assert r9.rate_expr().args == [42.0]
assert r9.rate_expr().unique_keys is None
Reaction.from_string("H+ + OH- -> H2O; 1e10/M/s", 'H2O H+ OH-'.split())
with pytest.raises(ValueError):
Reaction.from_string("H2O -> H+ + OH-; 1e-4/M/s", 'H2O H+ OH-'.split())
@requires(parsing_library, units_library)
def test_Substance__molar_mass():
mw_water = Substance.from_formula('H2O').molar_mass(default_units)
q = mw_water / ((15.9994 + 2*1.008)*default_units.gram/default_units.mol)
assert abs(q - 1) < 1e-3
@requires(units_library)
def test_Equilibrium__as_reactions():
s = default_units.second
M = default_units.molar
H2O, Hp, OHm = map(Substance, 'H2O H+ OH-'.split())
eq = Equilibrium({'H2O': 1}, {'H+': 1, 'OH-': 1}, 1e-14)
rate = 1.31e11/M/s
fw, bw = eq.as_reactions(kb=rate, units=default_units)
assert abs((bw.param - rate)/rate) < 1e-15
assert abs((fw.param / M)/bw.param - 1e-14)/1e-14 < 1e-15
@requires(parsing_library)
def test_Reaction__latex():
keys = 'H2O H2 O2'.split()
subst = {k: Substance.from_formula(k) for k in keys}
r2 = Reaction.from_string("2 H2O -> 2 H2 + O2", subst)
assert r2.latex(subst) == r'2 H_{2}O \rightarrow 2 H_{2} + O_{2}'
r3 = Reaction.from_string("2 H2O -> 2 H2 + O2; 42; name='split'", subst)
assert r3.latex(subst, with_param=True, with_name=True) == r'2 H_{2}O \rightarrow 2 H_{2} + O_{2}; 42; split'
assert r3.latex(subst, with_name=True) == r'2 H_{2}O \rightarrow 2 H_{2} + O_{2}; split'
assert r3.latex(subst, with_param=True) == r'2 H_{2}O \rightarrow 2 H_{2} + O_{2}; 42'
assert r3.latex(subst) == r'2 H_{2}O \rightarrow 2 H_{2} + O_{2}'
@requires(parsing_library)
def test_Reaction__unicode():
keys = u'H2O H2 O2'.split()
subst = {k: Substance.from_formula(k) for k in keys}
r2 = Reaction.from_string("2 H2O -> 2 H2 + O2", subst)
assert r2.unicode(subst) == u'2 H₂O → 2 H₂ + O₂'
r3 = Reaction.from_string("2 H2O -> 2 H2 + O2; 42; name='split'", subst)
assert r3.unicode(subst) == u'2 H₂O → 2 H₂ + O₂'
assert r3.unicode(subst, with_name=True) == u'2 H₂O → 2 H₂ + O₂; split'
assert r3.unicode(subst, with_name=True, with_param=True) == u'2 H₂O → 2 H₂ + O₂; 42; split'
assert r3.unicode(subst, with_param=True) == u'2 H₂O → 2 H₂ + O₂; 42'
@requires(parsing_library)
def test_Reaction__html():
keys = 'H2O H2 O2'.split()
subst = {k: Substance.from_formula(k) for k in keys}
r2 = Reaction.from_string("2 H2O -> 2 H2 + O2", subst)
assert r2.html(subst) == \
'2 H<sub>2</sub>O → 2 H<sub>2</sub> + O<sub>2</sub>'
assert r2.html(subst, Reaction_coeff_fmt=lambda s: '<b>{0}</b>'.format(s)) == \
'<b>2</b> H<sub>2</sub>O → <b>2</b> H<sub>2</sub> + O<sub>2</sub>'
assert r2.html(subst, Reaction_formula_fmt=lambda s: '<b>{0}</b>'.format(s)) == \
'2 <b>H<sub>2</sub>O</b> → 2 <b>H<sub>2</sub></b> + <b>O<sub>2</sub></b>'
def test_Reaction__idempotency():
with pytest.raises(ValueError):
Reaction({'A': 1}, {'A': 1})
with pytest.raises(ValueError):
Reaction({}, {})
with pytest.raises(ValueError):
Reaction({'A': 1}, {'B': 1}, inact_reac={'B': 1}, inact_prod={'A': 1})
@requires('sympy')
def test_Equilibrium__eliminate():
e1 = Equilibrium({'A': 1, 'B': 2}, {'C': 3})
e2 = Equilibrium({'D': 5, 'B': 7}, {'E': 11})
coeff = Equilibrium.eliminate([e1, e2], 'B')
assert coeff == [7, -2]
e3 = coeff[0]*e1 + coeff[1]*e2
assert e3.net_stoich('B') == (0,)
e4 = e1*coeff[0] + coeff[1]*e2
assert e4.net_stoich('B') == (0,)
assert (-e1).reac == {'C': 3}
assert (e2*-3).reac == {'E': 33}
@requires(parsing_library, units_library)
def test_Equilibrium__from_string():
assert Equilibrium.from_string('H2O = H+ + OH-').param is None
assert Equilibrium.from_string('H2O = H+ + OH-; 1e-14').param == 1e-14
assert Equilibrium.from_string('H2O = H+ + OH-; 1e-14*molar').param ** 0 == 1
with pytest.raises(ValueError):
Equilibrium.from_string('H+ + OH- = H2O; 1e-14*molar')
eq5 = Equilibrium.from_string("CO2(aq) = CO2(g);"
"chempy.henry.HenryWithUnits(3.3e-4 * molar / Pa, 2400 * K)")
assert eq5.reac == {'CO2(aq)': 1}
def test_Equilibrium__cancel():
# 2B + C -> E
e1 = Equilibrium({'A': 26, 'B': 20, 'C': 7}, {'D': 4, 'E': 7})
e2 = Equilibrium({'A': 13, 'B': 3}, {'D': 2})
coeff = e1.cancel(e2)
assert coeff == -2
@requires('sympy')
def test_balance_stoichiometry():
# 4 NH4ClO4 -> 2 N2 + 4 HCl + 6H2O + 5O2
# 4 Al + 3O2 -> 2Al2O3
# ---------------------------------------
# 6 NH4ClO4 + 10 Al + -> 3 N2 + 6 HCl + 9 H2O + 5 Al2O3
reac, prod = balance_stoichiometry({'NH4ClO4', 'Al'},
{'Al2O3', 'HCl', 'H2O', 'N2'})
assert reac == {'NH4ClO4': 6, 'Al': 10}
assert prod == {'Al2O3': 5, 'HCl': 6, 'H2O': 9, 'N2': 3}
r3, p3 = balance_stoichiometry({'C2H6', 'O2'}, {'H2O', 'CO2'})
assert r3 == {'C2H6': 2, 'O2': 7}
assert p3 == {'CO2': 4, 'H2O': 6}
r4, p4 = balance_stoichiometry({'C7H5(NO2)3', 'NH4NO3'}, {'CO', 'H2O', 'N2'})
assert r4 == {'C7H5(NO2)3': 2, 'NH4NO3': 7}
assert p4 == {'CO': 14, 'H2O': 19, 'N2': 10}
a5, b5 = {"C3H5NO", "CH4", "NH3", "H2O"}, {"C2H6", "CH4O", "CH5N", "CH3N"}
formulas = list(set.union(a5, b5))
substances = dict(zip(formulas, map(Substance.from_formula, formulas)))
compositions = {k: ArithmeticDict(int, substances[k].composition) for k in formulas}
r5, p5 = balance_stoichiometry(a5, b5)
compo_reac = dict(reduce(add, [compositions[k]*v for k, v in r5.items()]))
compo_prod = dict(reduce(add, [compositions[k]*v for k, v in p5.items()]))
assert compo_reac == compo_prod
a6, b6 = map(lambda x: set(x.split()), 'CuSCN KIO3 HCl;CuSO4 KCl HCN ICl H2O'.split(';'))
r6, p6 = balance_stoichiometry(a6, b6)
assert r6 == dict(CuSCN=4, KIO3=7, HCl=14)
assert p6 == dict(CuSO4=4, KCl=7, HCN=4, ICl=7, H2O=5)
r7, p7 = balance_stoichiometry({'Zn+2', 'e-'}, {'Zn'})
assert r7 == {'Zn+2': 1, 'e-': 2}
assert p7 == {'Zn': 1}
r8, p8 = balance_stoichiometry({'Zn'}, {'Zn+2', 'e-'})
assert r8 == {'Zn': 1}
assert p8 == {'Zn+2': 1, 'e-': 2}
@requires('sympy')
def test_balance_stoichiometry__ordering():
reac, prod = 'CuSCN KIO3 HCl'.split(), 'CuSO4 KCl HCN ICl H2O'.split()
rxn = Reaction(*balance_stoichiometry(reac, prod))
res = rxn.string()
ref = '4 CuSCN + 7 KIO3 + 14 HCl -> 4 CuSO4 + 7 KCl + 4 HCN + 7 ICl + 5 H2O'
assert res == ref
@requires('sympy')
def test_balance_stoichiometry__simple():
r2, p2 = balance_stoichiometry({'Na2CO3'}, {'Na2O', 'CO2'})
assert r2 == {'Na2CO3': 1}
assert p2 == {'Na2O': 1, 'CO2': 1}
@requires('sympy', 'pulp')
@pytest.mark.parametrize('underdet', [False, None, True])
def test_balance_stoichiometry__impossible(underdet):
try:
from pulp import PulpSolverError
except ModuleNotFoundError:
from pulp.solvers import PulpSolverError # older version of PuLP
with pytest.raises((ValueError, PulpSolverError)):
r1, p1 = balance_stoichiometry({'CO'}, {'CO2'}, underdetermined=underdet)
@requires('sympy', 'pulp')
def test_balance_stoichiometry__underdetermined():
try:
from pulp import PulpSolverError
except ModuleNotFoundError:
from pulp.solvers import PulpSolverError # older version of PuLP
with pytest.raises(ValueError):
balance_stoichiometry({'C2H6', 'O2'}, {'H2O', 'CO2', 'CO'}, underdetermined=False)
reac, prod = balance_stoichiometry({'C2H6', 'O2'}, {'H2O', 'CO2', 'CO'})
r1 = {'C7H5O3-', 'O2', 'C21H27N7O14P2-2', 'H+'}
p1 = {'C7H5O4-', 'C21H26N7O14P2-', 'H2O'} # see https://github.com/bjodah/chempy/issues/67
bal1 = balance_stoichiometry(r1, p1, underdetermined=None)
assert bal1 == ({'C21H27N7O14P2-2': 1, 'H+': 1, 'C7H5O3-': 1, 'O2': 1},
{'C21H26N7O14P2-': 1, 'H2O': 1, 'C7H5O4-': 1})
with pytest.raises(ValueError):
balance_stoichiometry({'C3H4O3', 'H3PO4'}, {'C3H6O3'}, underdetermined=None)
for underdet in [False, True, None]:
with pytest.raises((ValueError, PulpSolverError)):
balance_stoichiometry({'C3H6O3'}, {'C3H4O3'}, underdetermined=underdet)
with pytest.raises(ValueError): # https://github.com/bjodah/chempy/pull/86#issuecomment-375421609
balance_stoichiometry({'C21H36N7O16P3S', 'C3H4O3'}, {'H2O', 'C5H8O3', 'C24H38N7O18P3S'})
@requires('sympy', 'pulp')
def test_balance_stoichiometry__very_underdetermined():
r3 = set('O2 Fe Al Cr'.split())
p3 = set('FeO Fe2O3 Fe3O4 Al2O3 Cr2O3 CrO3'.split())
bal3 = balance_stoichiometry(r3, p3, underdetermined=None)
ref3 = {'Fe': 7, 'Al': 2, 'Cr': 3, 'O2': 9}, {k: 2 if k == 'FeO' else 1 for k in p3}
substances = {k: Substance.from_formula(k) for k in r3 | p3}
assert all(viol == 0 for viol in Reaction(*ref3).composition_violation(substances))
assert sum(bal3[0].values()) + sum(bal3[1].values()) <= sum(ref3[0].values()) + sum(ref3[1].values())
assert bal3 == ref3
@requires('sympy', 'pulp')
def test_balance_stoichiometry__underdetermined__canonical():
    # This tests for a canonical representation of the underdetermined system
    # where all coefficients are integer and >= 1. It is of limited practical
    # use since underdetermined systems have an infinite number of solutions.
    # It should however be possible to rewrite the logic so that such
    # canonical results are returned from balance_stoichiometry
r2 = {'O2', 'O3', 'C', 'NO', 'N2O', 'NO2', 'N2O4'}
p2 = {'CO', 'CO2', 'N2'}
bal2 = balance_stoichiometry(r2, p2, underdetermined=None)
ref2 = ({'O2': 1, 'O3': 1, 'C': 7, 'NO': 1, 'N2O': 1, 'NO2': 1, 'N2O4': 1},
{'CO': 1, 'CO2': 6, 'N2': 3})
substances = {k: Substance.from_formula(k) for k in r2 | p2}
assert all(viol == 0 for viol in Reaction(*ref2).composition_violation(substances))
assert sum(bal2[0].values()) + sum(bal2[1].values()) <= sum(ref2[0].values()) + sum(ref2[1].values())
assert bal2 == ref2
@requires('sympy', 'pulp')
def test_balance_stoichiometry__substances__underdetermined():
substances = {s.name: s for s in [
Substance('eggs_6pack', composition=dict(eggs=6)),
Substance('milk_carton', composition=dict(cups_of_milk=4)),
Substance('flour_bag', composition=dict(spoons_of_flour=30)),
Substance('pancake', composition=dict(eggs=1, cups_of_milk=1, spoons_of_flour=2)),
Substance('waffle', composition=dict(eggs=2, cups_of_milk=2, spoons_of_flour=3)),
]}
ur1 = {'eggs_6pack', 'milk_carton', 'flour_bag'}
up1 = {'pancake', 'waffle'}
br1, bp1 = balance_stoichiometry(ur1, up1, substances=substances, underdetermined=None)
ref_r1 = {'eggs_6pack': 6, 'flour_bag': 2, 'milk_carton': 9}
ref_p1 = {'pancake': 12, 'waffle': 12}
assert all(viol == 0 for viol in Reaction(ref_r1, ref_p1).composition_violation(substances))
assert all(v > 0 for v in br1.values()) and all(v > 0 for v in bp1.values())
assert bp1 == ref_p1
assert br1 == ref_r1
@requires('sympy')
def test_balance_stoichiometry__missing_product_atom():
with pytest.raises(ValueError): # No Al on product side
balance_stoichiometry({'C7H5(NO2)3', 'Al', 'NH4NO3'}, {'CO', 'H2O', 'N2'})
@requires('sympy')
def test_balance_stoichiometry__duplicates():
cases = """
C + CO + CO2 -> C + CO # suggested solution: C + CO2 -> 2 CO
C + CO + CO2 -> C + CO2 # suggested solution: 2 CO -> C + CO2
C + CO + CO2 -> CO + CO2 # suggested solution: C + CO2 -> 2 CO
C + CO -> C + CO + CO2 # suggested solution: 2 CO -> C + CO2
C + CO2 -> C + CO + CO2 # suggested solution: C + CO2 -> 2 CO
CO + CO2 -> C + CO + CO2 # suggested solution: 2 CO -> C + CO2
"""
for prob, sol in [l.split('#') for l in cases.strip().splitlines()]:
tst_r = Reaction.from_string(prob)
ref_r = Reaction.from_string(sol.split(':')[1])
tst_bal = balance_stoichiometry(tst_r.reac, tst_r.prod,
allow_duplicates=True, underdetermined=None)
assert Reaction(*tst_bal) == ref_r
with pytest.raises(ValueError):
balance_stoichiometry({'C', 'CO', 'CO2'}, {'C', 'CO', 'CO2'},
allow_duplicates=True, underdetermined=None)
gh120 = {'H4P2O7', 'HPO3', 'H2O'}, {'H4P2O7', 'HPO3'}
bal120 = balance_stoichiometry(*gh120, allow_duplicates=True, underdetermined=None)
assert bal120 == ({'HPO3': 2, 'H2O': 1}, {'H4P2O7': 1})
with pytest.raises(ValueError):
balance_stoichiometry(*gh120)
# https://github.com/bjodah/chempy/issues/120#issuecomment-434453703
bal_Mn = balance_stoichiometry({'H2O2', 'Mn1', 'H1'}, {'Mn1', 'H2O1'}, allow_duplicates=True, underdetermined=None)
assert bal_Mn == ({'H2O2': 1, 'H1': 2}, {'H2O1': 2})
bal_Mn_COx = balance_stoichiometry({'C', 'CO', 'CO2', 'Mn'}, {'C', 'CO2', 'Mn'},
allow_duplicates=True, underdetermined=None)
assert bal_Mn_COx == ({'CO': 2}, {'C': 1, 'CO2': 1})
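# Illustrative only: the linear-algebra idea behind balance_stoichiometry for
# simple, fully determined reactions -- elemental (and charge) conservation as
# a nullspace problem. chempy's real implementation is far more general
# (underdetermined systems, pulp fallback); naive_balance is a hypothetical
# helper that reuses the Substance import at the top of this module.
def naive_balance(reactants, products):
    import sympy  # deferred so the sketch does not affect test collection
    species = list(reactants) + list(products)
    comps = [Substance.from_formula(s).composition for s in species]
    keys = sorted({k for comp in comps for k in comp})  # atomic numbers; 0 is charge
    # One column per species, products negated, so A * coeffs == 0 encodes
    # conservation of every element and of charge.
    A = sympy.Matrix([[comps[j].get(k, 0) * (1 if j < len(reactants) else -1)
                       for j in range(len(species))]
                      for k in keys])
    vec = A.nullspace()[0]  # assumes a one-dimensional nullspace
    vec = vec * sympy.ilcm(*[entry.q for entry in vec])  # clear denominators
    return dict(zip(species, (int(c) for c in vec)))
# naive_balance(['C2H6', 'O2'], ['CO2', 'H2O'])
# -> {'C2H6': 2, 'O2': 7, 'CO2': 4, 'H2O': 6}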
|
from django.core.exceptions import ValidationError
from django.test import TestCase
from django.utils.translation import gettext_lazy as _
from oscar.apps.offer import custom
from oscar.test.factories import create_product
class CustomRange(object):
name = "Custom range"
def contains_product(self, product):
return product.title.startswith("A")
def num_products(self):
return None
class CustomRangeLazy(object):
name = _("Custom range with gettext_lazy")
def contains_product(self, product):
return product.title.startswith("B")
def num_products(self):
return None
class TestACustomRange(TestCase):
def test_creating_duplicate_range_fails(self):
custom.create_range(CustomRange)
self.assertRaises(ValueError, custom.create_range, CustomRange)
def test_must_have_a_text_name(self):
try:
custom.create_range(CustomRangeLazy)
except ValidationError:
pass
else:
self.fail("Range can't have gettext titles")
def test_correctly_includes_match(self):
rng = custom.create_range(CustomRange)
test_product = create_product(title="A tale")
self.assertTrue(rng.contains_product(test_product))
def test_correctly_excludes_nonmatch(self):
rng = custom.create_range(CustomRange)
test_product = create_product(title="B tale")
self.assertFalse(rng.contains_product(test_product))
|
# flake8: noqa
# disable flake check on this file because some constructs are strange
# or redundant on purpose and can't be disabled on a line-by-line basis
import ast
import inspect
import sys
from types import CodeType
from typing import Any
from typing import Dict
from typing import Optional
import py
import _pytest._code
import pytest
from _pytest._code import Source
def test_source_str_function() -> None:
x = Source("3")
assert str(x) == "3"
x = Source(" 3")
assert str(x) == "3"
x = Source(
"""
3
"""
)
assert str(x) == "\n3"
def test_unicode() -> None:
x = Source("4")
assert str(x) == "4"
co = _pytest._code.compile('"å"', mode="eval")
val = eval(co)
assert isinstance(val, str)
def test_source_from_function() -> None:
source = _pytest._code.Source(test_source_str_function)
assert str(source).startswith("def test_source_str_function() -> None:")
def test_source_from_method() -> None:
class TestClass:
def test_method(self):
pass
source = _pytest._code.Source(TestClass().test_method)
assert source.lines == ["def test_method(self):", " pass"]
def test_source_from_lines() -> None:
lines = ["a \n", "b\n", "c"]
source = _pytest._code.Source(lines)
assert source.lines == ["a ", "b", "c"]
def test_source_from_inner_function() -> None:
def f():
pass
source = _pytest._code.Source(f, deindent=False)
assert str(source).startswith(" def f():")
source = _pytest._code.Source(f)
assert str(source).startswith("def f():")
def test_source_putaround_simple() -> None:
source = Source("raise ValueError")
source = source.putaround(
"try:",
"""\
except ValueError:
x = 42
else:
x = 23""",
)
assert (
str(source)
== """\
try:
raise ValueError
except ValueError:
x = 42
else:
x = 23"""
)
def test_source_putaround() -> None:
source = Source()
source = source.putaround(
"""
if 1:
x=1
"""
)
assert str(source).strip() == "if 1:\n x=1"
def test_source_strips() -> None:
source = Source("")
assert source == Source()
assert str(source) == ""
assert source.strip() == source
def test_source_strip_multiline() -> None:
source = Source()
source.lines = ["", " hello", " "]
source2 = source.strip()
assert source2.lines == [" hello"]
def test_syntaxerror_rerepresentation() -> None:
ex = pytest.raises(SyntaxError, _pytest._code.compile, "xyz xyz")
assert ex is not None
assert ex.value.lineno == 1
assert ex.value.offset in {5, 7} # cpython: 7, pypy3.6 7.1.1: 5
assert ex.value.text == "xyz xyz\n"
def test_isparseable() -> None:
assert Source("hello").isparseable()
assert Source("if 1:\n pass").isparseable()
assert Source(" \nif 1:\n pass").isparseable()
assert not Source("if 1:\n").isparseable()
assert not Source(" \nif 1:\npass").isparseable()
assert not Source(chr(0)).isparseable()
class TestAccesses:
def setup_class(self) -> None:
self.source = Source(
"""\
def f(x):
pass
def g(x):
pass
"""
)
def test_getrange(self) -> None:
x = self.source[0:2]
assert x.isparseable()
assert len(x.lines) == 2
assert str(x) == "def f(x):\n pass"
def test_getrange_step_not_supported(self) -> None:
with pytest.raises(IndexError, match=r"step"):
self.source[::2]
def test_getline(self) -> None:
x = self.source[0]
assert x == "def f(x):"
def test_len(self) -> None:
assert len(self.source) == 4
def test_iter(self) -> None:
values = [x for x in self.source]
assert len(values) == 4
class TestSourceParsingAndCompiling:
def setup_class(self) -> None:
self.source = Source(
"""\
def f(x):
assert (x ==
3 +
4)
"""
).strip()
def test_compile(self) -> None:
co = _pytest._code.compile("x=3")
d = {} # type: Dict[str, Any]
exec(co, d)
assert d["x"] == 3
def test_compile_and_getsource_simple(self) -> None:
co = _pytest._code.compile("x=3")
exec(co)
source = _pytest._code.Source(co)
assert str(source) == "x=3"
def test_compile_and_getsource_through_same_function(self) -> None:
def gensource(source):
return _pytest._code.compile(source)
co1 = gensource(
"""
def f():
raise KeyError()
"""
)
co2 = gensource(
"""
def f():
raise ValueError()
"""
)
source1 = inspect.getsource(co1)
assert "KeyError" in source1
source2 = inspect.getsource(co2)
assert "ValueError" in source2
def test_getstatement(self) -> None:
# print str(self.source)
ass = str(self.source[1:])
for i in range(1, 4):
# print "trying start in line %r" % self.source[i]
s = self.source.getstatement(i)
# x = s.deindent()
assert str(s) == ass
def test_getstatementrange_triple_quoted(self) -> None:
# print str(self.source)
source = Source(
"""hello('''
''')"""
)
s = source.getstatement(0)
assert s == str(source)
s = source.getstatement(1)
assert s == str(source)
def test_getstatementrange_within_constructs(self) -> None:
source = Source(
"""\
try:
try:
raise ValueError
except SomeThing:
pass
finally:
42
"""
)
assert len(source) == 7
# check all lineno's that could occur in a traceback
# assert source.getstatementrange(0) == (0, 7)
# assert source.getstatementrange(1) == (1, 5)
assert source.getstatementrange(2) == (2, 3)
assert source.getstatementrange(3) == (3, 4)
assert source.getstatementrange(4) == (4, 5)
# assert source.getstatementrange(5) == (0, 7)
assert source.getstatementrange(6) == (6, 7)
def test_getstatementrange_bug(self) -> None:
source = Source(
"""\
try:
x = (
y +
z)
except:
pass
"""
)
assert len(source) == 6
assert source.getstatementrange(2) == (1, 4)
def test_getstatementrange_bug2(self) -> None:
source = Source(
"""\
assert (
33
==
[
X(3,
b=1, c=2
),
]
)
"""
)
assert len(source) == 9
assert source.getstatementrange(5) == (0, 9)
def test_getstatementrange_ast_issue58(self) -> None:
source = Source(
"""\
def test_some():
for a in [a for a in
CAUSE_ERROR]: pass
x = 3
"""
)
assert getstatement(2, source).lines == source.lines[2:3]
assert getstatement(3, source).lines == source.lines[3:4]
def test_getstatementrange_out_of_bounds_py3(self) -> None:
source = Source("if xxx:\n from .collections import something")
r = source.getstatementrange(1)
assert r == (1, 2)
def test_getstatementrange_with_syntaxerror_issue7(self) -> None:
source = Source(":")
pytest.raises(SyntaxError, lambda: source.getstatementrange(0))
def test_compile_to_ast(self) -> None:
source = Source("x = 4")
mod = source.compile(flag=ast.PyCF_ONLY_AST)
assert isinstance(mod, ast.Module)
compile(mod, "<filename>", "exec")
def test_compile_and_getsource(self) -> None:
co = self.source.compile()
exec(co, globals())
f(7) # type: ignore
excinfo = pytest.raises(AssertionError, f, 6) # type: ignore
assert excinfo is not None
frame = excinfo.traceback[-1].frame
assert isinstance(frame.code.fullsource, Source)
stmt = frame.code.fullsource.getstatement(frame.lineno)
assert str(stmt).strip().startswith("assert")
@pytest.mark.parametrize("name", ["", None, "my"])
def test_compilefuncs_and_path_sanity(self, name: Optional[str]) -> None:
def check(comp, name) -> None:
co = comp(self.source, name)
if not name:
expected = "codegen %s:%d>" % (mypath, mylineno + 2 + 2) # type: ignore
else:
expected = "codegen %r %s:%d>" % (name, mypath, mylineno + 2 + 2) # type: ignore
fn = co.co_filename
assert fn.endswith(expected)
mycode = _pytest._code.Code(self.test_compilefuncs_and_path_sanity)
mylineno = mycode.firstlineno
mypath = mycode.path
for comp in _pytest._code.compile, _pytest._code.Source.compile:
check(comp, name)
def test_offsetless_synerr(self):
pytest.raises(SyntaxError, _pytest._code.compile, "lambda a,a: 0", mode="eval")
def test_getstartingblock_singleline() -> None:
class A:
def __init__(self, *args) -> None:
frame = sys._getframe(1)
self.source = _pytest._code.Frame(frame).statement
x = A("x", "y")
values = [i for i in x.source.lines if i.strip()]
assert len(values) == 1
def test_getline_finally() -> None:
def c() -> None:
pass
with pytest.raises(TypeError) as excinfo:
teardown = None
try:
c(1) # type: ignore
finally:
if teardown:
teardown()
source = excinfo.traceback[-1].statement
assert str(source).strip() == "c(1) # type: ignore"
def test_getfuncsource_dynamic() -> None:
source = """
def f():
raise ValueError
def g(): pass
"""
co = _pytest._code.compile(source)
exec(co, globals())
f_source = _pytest._code.Source(f) # type: ignore
g_source = _pytest._code.Source(g) # type: ignore
assert str(f_source).strip() == "def f():\n raise ValueError"
assert str(g_source).strip() == "def g(): pass"
def test_getfuncsource_with_multiline_string() -> None:
def f():
c = """while True:
pass
"""
expected = '''\
def f():
c = """while True:
pass
"""
'''
assert str(_pytest._code.Source(f)) == expected.rstrip()
def test_deindent() -> None:
from _pytest._code.source import deindent as deindent
assert deindent(["\tfoo", "\tbar"]) == ["foo", "bar"]
source = """\
def f():
def g():
pass
"""
lines = deindent(source.splitlines())
assert lines == ["def f():", " def g():", " pass"]
def test_source_of_class_at_eof_without_newline(tmpdir, _sys_snapshot) -> None:
# this test fails because the implicit inspect.getsource(A) below
# does not return the "x = 1" last line.
source = _pytest._code.Source(
"""
class A(object):
def method(self):
x = 1
"""
)
path = tmpdir.join("a.py")
path.write(source)
s2 = _pytest._code.Source(tmpdir.join("a.py").pyimport().A)
assert str(source).strip() == str(s2).strip()
if True:
def x():
pass
def test_getsource_fallback() -> None:
from _pytest._code.source import getsource
expected = """def x():
pass"""
src = getsource(x)
assert src == expected
def test_idem_compile_and_getsource() -> None:
from _pytest._code.source import getsource
expected = "def x(): pass"
co = _pytest._code.compile(expected)
src = getsource(co)
assert src == expected
def test_compile_ast() -> None:
# We don't necessarily want to support this.
# This test was added just for coverage.
stmt = ast.parse("def x(): pass")
co = _pytest._code.compile(stmt, filename="foo.py")
assert isinstance(co, CodeType)
def test_findsource_fallback() -> None:
from _pytest._code.source import findsource
src, lineno = findsource(x)
assert src is not None
assert "test_findsource_simple" in str(src)
assert src[lineno] == " def x():"
def test_findsource() -> None:
from _pytest._code.source import findsource
co = _pytest._code.compile(
"""if 1:
def x():
pass
"""
)
src, lineno = findsource(co)
assert src is not None
assert "if 1:" in str(src)
d = {} # type: Dict[str, Any]
eval(co, d)
src, lineno = findsource(d["x"])
assert src is not None
assert "if 1:" in str(src)
assert src[lineno] == " def x():"
def test_getfslineno() -> None:
from _pytest._code import getfslineno
def f(x) -> None:
pass
fspath, lineno = getfslineno(f)
assert isinstance(fspath, py.path.local)
assert fspath.basename == "test_source.py"
assert lineno == f.__code__.co_firstlineno - 1 # see findsource
class A:
pass
fspath, lineno = getfslineno(A)
_, A_lineno = inspect.findsource(A)
assert fspath.basename == "test_source.py"
assert lineno == A_lineno
assert getfslineno(3) == ("", -1)
class B:
pass
B.__name__ = "B2"
assert getfslineno(B)[1] == -1
def test_code_of_object_instance_with_call() -> None:
class A:
pass
pytest.raises(TypeError, lambda: _pytest._code.Source(A()))
class WithCall:
def __call__(self) -> None:
pass
code = _pytest._code.Code(WithCall())
assert "pass" in str(code.source())
class Hello:
def __call__(self) -> None:
pass
pytest.raises(TypeError, lambda: _pytest._code.Code(Hello))
def getstatement(lineno: int, source) -> Source:
from _pytest._code.source import getstatementrange_ast
src = _pytest._code.Source(source, deindent=False)
ast, start, end = getstatementrange_ast(lineno, src)
return src[start:end]
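# For instance, as test_multiline below shows, requesting line 0 of
# "raise ValueError(\n    23\n)\nx = 3\n" returns the whole three-line
# raise statement rather than just its first physical line.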
def test_oneline() -> None:
source = getstatement(0, "raise ValueError")
assert str(source) == "raise ValueError"
def test_comment_and_no_newline_at_end() -> None:
from _pytest._code.source import getstatementrange_ast
source = Source(
[
"def test_basic_complex():",
" assert 1 == 2",
"# vim: filetype=pyopencl:fdm=marker",
]
)
ast, start, end = getstatementrange_ast(1, source)
assert end == 2
def test_oneline_and_comment() -> None:
source = getstatement(0, "raise ValueError\n#hello")
assert str(source) == "raise ValueError"
def test_comments() -> None:
source = '''def test():
"comment 1"
x = 1
# comment 2
# comment 3
assert False
"""
comment 4
"""
'''
for line in range(2, 6):
assert str(getstatement(line, source)) == " x = 1"
if sys.version_info >= (3, 8) or hasattr(sys, "pypy_version_info"):
tqs_start = 8
else:
tqs_start = 10
assert str(getstatement(10, source)) == '"""'
for line in range(6, tqs_start):
assert str(getstatement(line, source)) == " assert False"
for line in range(tqs_start, 10):
assert str(getstatement(line, source)) == '"""\ncomment 4\n"""'
def test_comment_in_statement() -> None:
source = """test(foo=1,
# comment 1
bar=2)
"""
for line in range(1, 3):
assert (
str(getstatement(line, source))
== "test(foo=1,\n # comment 1\n bar=2)"
)
def test_single_line_else() -> None:
source = getstatement(1, "if False: 2\nelse: 3")
assert str(source) == "else: 3"
def test_single_line_finally() -> None:
source = getstatement(1, "try: 1\nfinally: 3")
assert str(source) == "finally: 3"
def test_issue55() -> None:
source = (
"def round_trip(dinp):\n assert 1 == dinp\n"
'def test_rt():\n round_trip("""\n""")\n'
)
s = getstatement(3, source)
assert str(s) == ' round_trip("""\n""")'
def test_multiline() -> None:
source = getstatement(
0,
"""\
raise ValueError(
23
)
x = 3
""",
)
assert str(source) == "raise ValueError(\n 23\n)"
class TestTry:
def setup_class(self) -> None:
self.source = """\
try:
raise ValueError
except Something:
raise IndexError(1)
else:
raise KeyError()
"""
def test_body(self) -> None:
source = getstatement(1, self.source)
assert str(source) == " raise ValueError"
def test_except_line(self) -> None:
source = getstatement(2, self.source)
assert str(source) == "except Something:"
def test_except_body(self) -> None:
source = getstatement(3, self.source)
assert str(source) == " raise IndexError(1)"
def test_else(self) -> None:
source = getstatement(5, self.source)
assert str(source) == " raise KeyError()"
class TestTryFinally:
def setup_class(self) -> None:
self.source = """\
try:
raise ValueError
finally:
raise IndexError(1)
"""
def test_body(self) -> None:
source = getstatement(1, self.source)
assert str(source) == " raise ValueError"
def test_finally(self) -> None:
source = getstatement(3, self.source)
assert str(source) == " raise IndexError(1)"
class TestIf:
def setup_class(self) -> None:
self.source = """\
if 1:
y = 3
elif False:
y = 5
else:
y = 7
"""
def test_body(self) -> None:
source = getstatement(1, self.source)
assert str(source) == " y = 3"
def test_elif_clause(self) -> None:
source = getstatement(2, self.source)
assert str(source) == "elif False:"
def test_elif(self) -> None:
source = getstatement(3, self.source)
assert str(source) == " y = 5"
def test_else(self) -> None:
source = getstatement(5, self.source)
assert str(source) == " y = 7"
def test_semicolon() -> None:
s = """\
hello ; pytest.skip()
"""
source = getstatement(0, s)
assert str(source) == s.strip()
def test_def_oneline() -> None:
s = """\
def func(): raise ValueError(42)
def something():
pass
"""
source = getstatement(0, s)
assert str(source) == "def func(): raise ValueError(42)"
def XXX_test_expression_multiline() -> None:
source = """\
something
'''
'''"""
result = getstatement(1, source)
assert str(result) == "'''\n'''"
def test_getstartingblock_multiline() -> None:
class A:
def __init__(self, *args):
frame = sys._getframe(1)
self.source = _pytest._code.Frame(frame).statement
# fmt: off
x = A('x',
'y'
,
'z')
# fmt: on
values = [i for i in x.source.lines if i.strip()]
assert len(values) == 4
|
from collections import namedtuple
Context = namedtuple('Context', ['query', 'json', 'headers', 'cookies'])
class BasePlugin:
"""
Base plugin for SpecTree plugin classes.
:param spectree: :class:`spectree.SpecTree` instance
"""
def __init__(self, spectree):
self.spectree = spectree
self.config = spectree.config
def register_route(self, app):
"""
:param app: backend framework application
register document API routes to application
"""
raise NotImplementedError
def validate(self, *args, **kwargs):
"""
validate the request and response
"""
raise NotImplementedError
def find_routes(self):
"""
        find the routes from the application
"""
raise NotImplementedError
def bypass(self, func, method):
"""
:param func: route function (endpoint)
:param method: HTTP method for this route function
bypass some routes that shouldn't be shown in document
"""
raise NotImplementedError
def parse_path(self, route):
"""
:param route: API routes
parse URI path to get the variables in path
"""
raise NotImplementedError
def parse_func(self, route):
"""
:param route: API routes
get the endpoint function from routes
"""
raise NotImplementedError
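# Illustrative only: a toy plugin satisfying the interface above. The
# (path, func, methods) route tuple and all return shapes here are
# hypothetical choices for the sketch, not SpecTree's actual contract.
class TuplePlugin(BasePlugin):
    def __init__(self, spectree, routes=None):
        super().__init__(spectree)
        self.routes = routes or []  # list of (path, func, methods) tuples
    def register_route(self, app):
        pass  # nothing to mount for this toy backend
    def find_routes(self):
        return list(self.routes)
    def bypass(self, func, method):
        # hide HEAD/OPTIONS handlers from the generated document
        return method.upper() in ('HEAD', 'OPTIONS')
    def parse_path(self, route):
        path = route[0]
        # pull '<name>'-style variables out of the URI path
        variables = [seg[1:-1] for seg in path.split('/')
                     if seg.startswith('<') and seg.endswith('>')]
        return path, variables
    def parse_func(self, route):
        _path, func, methods = route
        for method in methods:
            yield method, func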
|
# -*- coding: utf-8 -*-
# pylint: disable=wildcard-import,redefined-builtin,unused-wildcard-import
from __future__ import absolute_import, division, print_function
from builtins import *
# pylint: enable=wildcard-import,redefined-builtin,unused-wildcard-import
import pytest
import pandas as pd
# pylint: disable=E0401
from genemap.mappers.util import drop_duplicates
# pylint: enable=E0401
@pytest.fixture()
def mapping():
"""Example mapping."""
return pd.DataFrame({
'from': ['a', 'b', 'c', 'c', 'd'],
'to': ['1', '1', '2', '3', '4']
})
# pylint: disable=R0201,W0621
class TestDropDuplicates(object):
"""Unit tests for the drop_duplicates function."""
    # TODO: add mto (many-to-one) test.
def test_none(self, mapping):
"""Test no dropping."""
deduped = drop_duplicates(mapping, how='none')
assert list(mapping.index) == list(deduped.index)
def test_otm(self, mapping):
"""Test dropping with from column."""
deduped = drop_duplicates(mapping, how='otm')
assert list(deduped['from']) == ['a', 'b', 'd']
def test_both(self, mapping):
"""Test dropping from both columns."""
deduped = drop_duplicates(mapping, how='both')
assert list(deduped['from']) == ['d']
def test_invalid_how(self, mapping):
"""Testing invalid how option."""
with pytest.raises(ValueError):
drop_duplicates(mapping, how='invalid')
|
import smart_imports
smart_imports.all()
class UseAbilityTasksTests(utils_testcase.TestCase):
def setUp(self):
super(UseAbilityTasksTests, self).setUp()
game_logic.create_test_map()
self.account = self.accounts_factory.create_account()
self.storage = game_logic_storage.LogicStorage()
self.storage.load_account_data(self.account)
self.hero = self.storage.accounts_to_heroes[self.account.id]
self.task = postponed_tasks.UseAbilityTask(processor_id=relations.ABILITY_TYPE.HELP.value,
hero_id=self.hero.id,
data={'hero_id': self.hero.id,
'transaction_id': None})
def test_create(self):
self.assertEqual(self.task.state, game_postponed_tasks.ComplexChangeTask.STATE.UNPROCESSED)
def test_serialization(self):
self.assertEqual(self.task.serialize(), postponed_tasks.UseAbilityTask.deserialize(self.task.serialize()).serialize())
def test_response_data(self):
self.assertEqual(self.task.processed_data, {'message': None})
def test_banned(self):
self.hero.ban_state_end_at = datetime.datetime.now() + datetime.timedelta(days=1)
heroes_logic.save_hero(self.hero)
self.assertEqual(self.task.process(postponed_tasks_helpers.FakePostpondTaskPrototype(), self.storage), POSTPONED_TASK_LOGIC_RESULT.ERROR)
self.assertEqual(self.task.state, game_postponed_tasks.ComplexChangeTask.STATE.BANNED)
def test_process_can_not_process(self):
with mock.patch('the_tale.game.abilities.deck.help.Help.use', lambda self, task, storage, pvp_balancer, highlevel: (game_postponed_tasks.ComplexChangeTask.RESULT.FAILED, None, ())):
self.assertEqual(self.task.process(postponed_tasks_helpers.FakePostpondTaskPrototype(), self.storage), POSTPONED_TASK_LOGIC_RESULT.ERROR)
self.assertEqual(self.task.state, game_postponed_tasks.ComplexChangeTask.STATE.CAN_NOT_PROCESS)
def test_process_success(self):
self.assertEqual(self.task.process(postponed_tasks_helpers.FakePostpondTaskPrototype(), self.storage), POSTPONED_TASK_LOGIC_RESULT.SUCCESS)
self.assertEqual(self.task.state, game_postponed_tasks.ComplexChangeTask.STATE.PROCESSED)
def test_process_success__has_transaction(self):
energy = game_tt_services.energy.cmd_balance(self.account.id)
status, transaction_id = game_tt_services.energy.cmd_change_balance(account_id=self.account.id,
type='test',
energy=1) # test, that changes will be applied on commit (but not on start)
self.task.data['transaction_id'] = transaction_id
self.assertEqual(game_tt_services.energy.cmd_balance(self.account.id), energy)
self.assertEqual(self.task.process(postponed_tasks_helpers.FakePostpondTaskPrototype(), self.storage), POSTPONED_TASK_LOGIC_RESULT.SUCCESS)
self.assertEqual(self.task.state, game_postponed_tasks.ComplexChangeTask.STATE.PROCESSED)
time.sleep(0.1)
self.assertEqual(game_tt_services.energy.cmd_balance(self.account.id), energy + 1)
def test_process_second_step_success(self):
with mock.patch('the_tale.game.abilities.deck.help.Help.use', lambda self, task, storage, pvp_balancer, highlevel: (game_postponed_tasks.ComplexChangeTask.RESULT.CONTINUE, game_postponed_tasks.ComplexChangeTask.STEP.HIGHLEVEL, ())):
self.assertEqual(self.task.process(postponed_tasks_helpers.FakePostpondTaskPrototype(), self.storage), POSTPONED_TASK_LOGIC_RESULT.CONTINUE)
self.assertTrue(self.task.step.is_HIGHLEVEL)
self.assertEqual(self.task.state, game_postponed_tasks.ComplexChangeTask.STATE.UNPROCESSED)
self.assertEqual(self.task.process(postponed_tasks_helpers.FakePostpondTaskPrototype(), self.storage), POSTPONED_TASK_LOGIC_RESULT.SUCCESS)
self.assertEqual(self.task.state, game_postponed_tasks.ComplexChangeTask.STATE.PROCESSED)
def test_process_second_step_error(self):
with mock.patch('the_tale.game.abilities.deck.help.Help.use', lambda self, task, storage, pvp_balancer, highlevel: (game_postponed_tasks.ComplexChangeTask.RESULT.CONTINUE, game_postponed_tasks.ComplexChangeTask.STEP.HIGHLEVEL, ())):
self.assertEqual(self.task.process(postponed_tasks_helpers.FakePostpondTaskPrototype(), self.storage), POSTPONED_TASK_LOGIC_RESULT.CONTINUE)
self.assertTrue(self.task.step.is_HIGHLEVEL)
self.assertEqual(self.task.state, game_postponed_tasks.ComplexChangeTask.STATE.UNPROCESSED)
with mock.patch('the_tale.game.abilities.deck.help.Help.use', lambda self, task, storage, pvp_balancer, highlevel: (game_postponed_tasks.ComplexChangeTask.RESULT.FAILED, None, ())):
self.assertEqual(self.task.process(postponed_tasks_helpers.FakePostpondTaskPrototype(), self.storage), POSTPONED_TASK_LOGIC_RESULT.ERROR)
self.assertEqual(self.task.state, game_postponed_tasks.ComplexChangeTask.STATE.CAN_NOT_PROCESS)
|
"""ACME protocol messages."""
import json
import josepy as jose
import six
from acme import challenges
from acme import errors
from acme import fields
from acme import jws
from acme import util
from acme.mixins import ResourceMixin
try:
from collections.abc import Hashable
except ImportError: # pragma: no cover
from collections import Hashable
OLD_ERROR_PREFIX = "urn:acme:error:"
ERROR_PREFIX = "urn:ietf:params:acme:error:"
ERROR_CODES = {
'accountDoesNotExist': 'The request specified an account that does not exist',
'alreadyRevoked': 'The request specified a certificate to be revoked that has' \
' already been revoked',
'badCSR': 'The CSR is unacceptable (e.g., due to a short key)',
'badNonce': 'The client sent an unacceptable anti-replay nonce',
'badPublicKey': 'The JWS was signed by a public key the server does not support',
'badRevocationReason': 'The revocation reason provided is not allowed by the server',
'badSignatureAlgorithm': 'The JWS was signed with an algorithm the server does not support',
'caa': 'Certification Authority Authorization (CAA) records forbid the CA from issuing' \
' a certificate',
'compound': 'Specific error conditions are indicated in the "subproblems" array',
'connection': ('The server could not connect to the client to verify the'
' domain'),
'dns': 'There was a problem with a DNS query during identifier validation',
'dnssec': 'The server could not validate a DNSSEC signed domain',
'incorrectResponse': 'Response received didn\'t match the challenge\'s requirements',
# deprecate invalidEmail
'invalidEmail': 'The provided email for a registration was invalid',
'invalidContact': 'The provided contact URI was invalid',
'malformed': 'The request message was malformed',
'rejectedIdentifier': 'The server will not issue certificates for the identifier',
'orderNotReady': 'The request attempted to finalize an order that is not ready to be finalized',
'rateLimited': 'There were too many requests of a given type',
'serverInternal': 'The server experienced an internal error',
'tls': 'The server experienced a TLS error during domain verification',
'unauthorized': 'The client lacks sufficient authorization',
'unsupportedContact': 'A contact URL for an account used an unsupported protocol scheme',
'unknownHost': 'The server could not resolve a domain name',
'unsupportedIdentifier': 'An identifier is of an unsupported type',
'externalAccountRequired': 'The server requires external account binding',
}
ERROR_TYPE_DESCRIPTIONS = dict(
(ERROR_PREFIX + name, desc) for name, desc in ERROR_CODES.items())
ERROR_TYPE_DESCRIPTIONS.update(dict( # add errors with old prefix, deprecate me
(OLD_ERROR_PREFIX + name, desc) for name, desc in ERROR_CODES.items()))
def is_acme_error(err):
"""Check if argument is an ACME error."""
if isinstance(err, Error) and (err.typ is not None):
return (ERROR_PREFIX in err.typ) or (OLD_ERROR_PREFIX in err.typ)
return False
@six.python_2_unicode_compatible
class Error(jose.JSONObjectWithFields, errors.Error):
"""ACME error.
https://tools.ietf.org/html/draft-ietf-appsawg-http-problem-00
:ivar unicode typ:
:ivar unicode title:
:ivar unicode detail:
"""
typ = jose.Field('type', omitempty=True, default='about:blank')
title = jose.Field('title', omitempty=True)
detail = jose.Field('detail', omitempty=True)
@classmethod
def with_code(cls, code, **kwargs):
"""Create an Error instance with an ACME Error code.
:unicode code: An ACME error code, like 'dnssec'.
:kwargs: kwargs to pass to Error.
"""
if code not in ERROR_CODES:
raise ValueError("The supplied code: %s is not a known ACME error"
" code" % code)
typ = ERROR_PREFIX + code
return cls(typ=typ, **kwargs)
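    # e.g. Error.with_code('badNonce', detail='stale nonce') gives an Error
    # whose typ is 'urn:ietf:params:acme:error:badNonce'; the `description`
    # and `code` properties below recover the human-readable text and the
    # short code.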
@property
def description(self):
"""Hardcoded error description based on its type.
:returns: Description if standard ACME error or ``None``.
:rtype: unicode
"""
return ERROR_TYPE_DESCRIPTIONS.get(self.typ)
@property
def code(self):
"""ACME error code.
Basically self.typ without the ERROR_PREFIX.
:returns: error code if standard ACME code or ``None``.
:rtype: unicode
"""
code = str(self.typ).split(':')[-1]
if code in ERROR_CODES:
return code
return None
def __str__(self):
return b' :: '.join(
part.encode('ascii', 'backslashreplace') for part in
(self.typ, self.description, self.detail, self.title)
if part is not None).decode()
class _Constant(jose.JSONDeSerializable, Hashable): # type: ignore
"""ACME constant."""
__slots__ = ('name',)
POSSIBLE_NAMES = NotImplemented
def __init__(self, name):
super(_Constant, self).__init__()
self.POSSIBLE_NAMES[name] = self # pylint: disable=unsupported-assignment-operation
self.name = name
def to_partial_json(self):
return self.name
@classmethod
def from_json(cls, jobj):
if jobj not in cls.POSSIBLE_NAMES: # pylint: disable=unsupported-membership-test
raise jose.DeserializationError(
'{0} not recognized'.format(cls.__name__))
return cls.POSSIBLE_NAMES[jobj]
def __repr__(self):
return '{0}({1})'.format(self.__class__.__name__, self.name)
def __eq__(self, other):
return isinstance(other, type(self)) and other.name == self.name
def __hash__(self):
return hash((self.__class__, self.name))
def __ne__(self, other):
return not self == other
class Status(_Constant):
"""ACME "status" field."""
POSSIBLE_NAMES = {} # type: dict
STATUS_UNKNOWN = Status('unknown')
STATUS_PENDING = Status('pending')
STATUS_PROCESSING = Status('processing')
STATUS_VALID = Status('valid')
STATUS_INVALID = Status('invalid')
STATUS_REVOKED = Status('revoked')
STATUS_READY = Status('ready')
STATUS_DEACTIVATED = Status('deactivated')
class IdentifierType(_Constant):
"""ACME identifier type."""
POSSIBLE_NAMES = {} # type: dict
IDENTIFIER_FQDN = IdentifierType('dns') # IdentifierDNS in Boulder
class Identifier(jose.JSONObjectWithFields):
"""ACME identifier.
:ivar IdentifierType typ:
:ivar unicode value:
"""
typ = jose.Field('type', decoder=IdentifierType.from_json)
value = jose.Field('value')
class Directory(jose.JSONDeSerializable):
"""Directory."""
_REGISTERED_TYPES = {} # type: dict
class Meta(jose.JSONObjectWithFields):
"""Directory Meta."""
_terms_of_service = jose.Field('terms-of-service', omitempty=True)
_terms_of_service_v2 = jose.Field('termsOfService', omitempty=True)
website = jose.Field('website', omitempty=True)
caa_identities = jose.Field('caaIdentities', omitempty=True)
external_account_required = jose.Field('externalAccountRequired', omitempty=True)
def __init__(self, **kwargs):
kwargs = dict((self._internal_name(k), v) for k, v in kwargs.items())
super(Directory.Meta, self).__init__(**kwargs)
@property
def terms_of_service(self):
"""URL for the CA TOS"""
return self._terms_of_service or self._terms_of_service_v2
def __iter__(self):
# When iterating over fields, use the external name 'terms_of_service' instead of
# the internal '_terms_of_service'.
for name in super(Directory.Meta, self).__iter__():
yield name[1:] if name == '_terms_of_service' else name
def _internal_name(self, name):
return '_' + name if name == 'terms_of_service' else name
@classmethod
def _canon_key(cls, key):
return getattr(key, 'resource_type', key)
@classmethod
def register(cls, resource_body_cls):
"""Register resource."""
resource_type = resource_body_cls.resource_type
assert resource_type not in cls._REGISTERED_TYPES
cls._REGISTERED_TYPES[resource_type] = resource_body_cls
return resource_body_cls
def __init__(self, jobj):
canon_jobj = util.map_keys(jobj, self._canon_key)
# TODO: check that everything is an absolute URL; acme-spec is
# not clear on that
self._jobj = canon_jobj
def __getattr__(self, name):
try:
return self[name.replace('_', '-')]
except KeyError as error:
raise AttributeError(str(error))
def __getitem__(self, name):
try:
return self._jobj[self._canon_key(name)]
except KeyError:
raise KeyError('Directory field "' + self._canon_key(name) + '" not found')
def to_partial_json(self):
return self._jobj
@classmethod
def from_json(cls, jobj):
jobj['meta'] = cls.Meta.from_json(jobj.pop('meta', {}))
return cls(jobj)
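# Usage sketch: for d = Directory.from_json(jobj), d['new-reg'] and d.new_reg
# resolve to the same entry -- __getattr__ above maps '_' to '-' before the
# key lookup, and unknown keys raise a descriptive KeyError.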
class Resource(jose.JSONObjectWithFields):
"""ACME Resource.
:ivar acme.messages.ResourceBody body: Resource body.
"""
body = jose.Field('body')
class ResourceWithURI(Resource):
"""ACME Resource with URI.
:ivar unicode uri: Location of the resource.
"""
uri = jose.Field('uri') # no ChallengeResource.uri
class ResourceBody(jose.JSONObjectWithFields):
"""ACME Resource Body."""
class ExternalAccountBinding(object):
"""ACME External Account Binding"""
@classmethod
def from_data(cls, account_public_key, kid, hmac_key, directory):
"""Create External Account Binding Resource from contact details, kid and hmac."""
key_json = json.dumps(account_public_key.to_partial_json()).encode()
decoded_hmac_key = jose.b64.b64decode(hmac_key)
url = directory["newAccount"]
eab = jws.JWS.sign(key_json, jose.jwk.JWKOct(key=decoded_hmac_key),
jose.jwa.HS256, None,
url, kid)
return eab.to_partial_json()
class Registration(ResourceBody):
"""Registration Resource Body.
:ivar josepy.jwk.JWK key: Public key.
:ivar tuple contact: Contact information following ACME spec,
`tuple` of `unicode`.
:ivar unicode agreement:
"""
# on new-reg key server ignores 'key' and populates it based on
# JWS.signature.combined.jwk
key = jose.Field('key', omitempty=True, decoder=jose.JWK.from_json)
contact = jose.Field('contact', omitempty=True, default=())
agreement = jose.Field('agreement', omitempty=True)
status = jose.Field('status', omitempty=True)
terms_of_service_agreed = jose.Field('termsOfServiceAgreed', omitempty=True)
only_return_existing = jose.Field('onlyReturnExisting', omitempty=True)
external_account_binding = jose.Field('externalAccountBinding', omitempty=True)
phone_prefix = 'tel:'
email_prefix = 'mailto:'
@classmethod
def from_data(cls, phone=None, email=None, external_account_binding=None, **kwargs):
"""Create registration resource from contact details."""
details = list(kwargs.pop('contact', ()))
if phone is not None:
details.append(cls.phone_prefix + phone)
if email is not None:
details.extend([cls.email_prefix + mail for mail in email.split(',')])
kwargs['contact'] = tuple(details)
if external_account_binding:
kwargs['external_account_binding'] = external_account_binding
return cls(**kwargs)
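    # e.g. from_data(email='a@x.org,b@x.org') yields
    # contact == ('mailto:a@x.org', 'mailto:b@x.org'); the `emails` property
    # below strips the prefix back off.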
def _filter_contact(self, prefix):
return tuple(
detail[len(prefix):] for detail in self.contact # pylint: disable=not-an-iterable
if detail.startswith(prefix))
@property
def phones(self):
"""All phones found in the ``contact`` field."""
return self._filter_contact(self.phone_prefix)
@property
def emails(self):
"""All emails found in the ``contact`` field."""
return self._filter_contact(self.email_prefix)
@Directory.register
class NewRegistration(ResourceMixin, Registration):
"""New registration."""
resource_type = 'new-reg'
resource = fields.Resource(resource_type)
class UpdateRegistration(ResourceMixin, Registration):
"""Update registration."""
resource_type = 'reg'
resource = fields.Resource(resource_type)
class RegistrationResource(ResourceWithURI):
"""Registration Resource.
:ivar acme.messages.Registration body:
:ivar unicode new_authzr_uri: Deprecated. Do not use.
:ivar unicode terms_of_service: URL for the CA TOS.
"""
body = jose.Field('body', decoder=Registration.from_json)
new_authzr_uri = jose.Field('new_authzr_uri', omitempty=True)
terms_of_service = jose.Field('terms_of_service', omitempty=True)
class ChallengeBody(ResourceBody):
"""Challenge Resource Body.
.. todo::
Confusingly, this has a similar name to `.challenges.Challenge`,
as well as `.achallenges.AnnotatedChallenge`. Please use names
such as ``challb`` to distinguish instances of this class from
``achall``.
:ivar acme.challenges.Challenge: Wrapped challenge.
Conveniently, all challenge fields are proxied, i.e. you can
call ``challb.x`` to get ``challb.chall.x`` contents.
:ivar acme.messages.Status status:
:ivar datetime.datetime validated:
:ivar messages.Error error:
"""
__slots__ = ('chall',)
# ACMEv1 has a "uri" field in challenges. ACMEv2 has a "url" field. This
# challenge object supports either one, but should be accessed through the
# name "uri". In Client.answer_challenge, whichever one is set will be
# used.
_uri = jose.Field('uri', omitempty=True, default=None)
_url = jose.Field('url', omitempty=True, default=None)
status = jose.Field('status', decoder=Status.from_json,
omitempty=True, default=STATUS_PENDING)
validated = fields.RFC3339Field('validated', omitempty=True)
error = jose.Field('error', decoder=Error.from_json,
omitempty=True, default=None)
def __init__(self, **kwargs):
kwargs = dict((self._internal_name(k), v) for k, v in kwargs.items())
super(ChallengeBody, self).__init__(**kwargs)
def encode(self, name):
return super(ChallengeBody, self).encode(self._internal_name(name))
def to_partial_json(self):
jobj = super(ChallengeBody, self).to_partial_json()
jobj.update(self.chall.to_partial_json())
return jobj
@classmethod
def fields_from_json(cls, jobj):
jobj_fields = super(ChallengeBody, cls).fields_from_json(jobj)
jobj_fields['chall'] = challenges.Challenge.from_json(jobj)
return jobj_fields
@property
def uri(self):
"""The URL of this challenge."""
return self._url or self._uri
def __getattr__(self, name):
return getattr(self.chall, name)
def __iter__(self):
# When iterating over fields, use the external name 'uri' instead of
# the internal '_uri'.
for name in super(ChallengeBody, self).__iter__():
yield name[1:] if name == '_uri' else name
def _internal_name(self, name):
return '_' + name if name == 'uri' else name
class ChallengeResource(Resource):
"""Challenge Resource.
:ivar acme.messages.ChallengeBody body:
:ivar unicode authzr_uri: URI found in the 'up' ``Link`` header.
"""
body = jose.Field('body', decoder=ChallengeBody.from_json)
authzr_uri = jose.Field('authzr_uri')
@property
def uri(self):
"""The URL of the challenge body."""
return self.body.uri
class Authorization(ResourceBody):
"""Authorization Resource Body.
:ivar acme.messages.Identifier identifier:
:ivar list challenges: `list` of `.ChallengeBody`
:ivar tuple combinations: Challenge combinations (`tuple` of `tuple`
of `int`, as opposed to `list` of `list` from the spec).
:ivar acme.messages.Status status:
:ivar datetime.datetime expires:
"""
identifier = jose.Field('identifier', decoder=Identifier.from_json, omitempty=True)
challenges = jose.Field('challenges', omitempty=True)
combinations = jose.Field('combinations', omitempty=True)
status = jose.Field('status', omitempty=True, decoder=Status.from_json)
# TODO: 'expires' is allowed for Authorization Resources in
# general, but for Key Authorization '[t]he "expires" field MUST
# be absent'... then acme-spec gives example with 'expires'
# present... That's confusing!
expires = fields.RFC3339Field('expires', omitempty=True)
wildcard = jose.Field('wildcard', omitempty=True)
@challenges.decoder
def challenges(value): # pylint: disable=no-self-argument,missing-function-docstring
return tuple(ChallengeBody.from_json(chall) for chall in value)
@property
def resolved_combinations(self):
"""Combinations with challenges instead of indices."""
return tuple(tuple(self.challenges[idx] for idx in combo)
for combo in self.combinations) # pylint: disable=not-an-iterable
@Directory.register
class NewAuthorization(ResourceMixin, Authorization):
"""New authorization."""
resource_type = 'new-authz'
resource = fields.Resource(resource_type)
class UpdateAuthorization(ResourceMixin, Authorization):
"""Update authorization."""
resource_type = 'authz'
resource = fields.Resource(resource_type)
class AuthorizationResource(ResourceWithURI):
"""Authorization Resource.
:ivar acme.messages.Authorization body:
:ivar unicode new_cert_uri: Deprecated. Do not use.
"""
body = jose.Field('body', decoder=Authorization.from_json)
new_cert_uri = jose.Field('new_cert_uri', omitempty=True)
@Directory.register
class CertificateRequest(ResourceMixin, jose.JSONObjectWithFields):
"""ACME new-cert request.
:ivar josepy.util.ComparableX509 csr:
`OpenSSL.crypto.X509Req` wrapped in `.ComparableX509`
"""
resource_type = 'new-cert'
resource = fields.Resource(resource_type)
csr = jose.Field('csr', decoder=jose.decode_csr, encoder=jose.encode_csr)
class CertificateResource(ResourceWithURI):
"""Certificate Resource.
:ivar josepy.util.ComparableX509 body:
`OpenSSL.crypto.X509` wrapped in `.ComparableX509`
:ivar unicode cert_chain_uri: URI found in the 'up' ``Link`` header
:ivar tuple authzrs: `tuple` of `AuthorizationResource`.
"""
cert_chain_uri = jose.Field('cert_chain_uri')
authzrs = jose.Field('authzrs')
@Directory.register
class Revocation(ResourceMixin, jose.JSONObjectWithFields):
"""Revocation message.
:ivar .ComparableX509 certificate: `OpenSSL.crypto.X509` wrapped in
`.ComparableX509`
"""
resource_type = 'revoke-cert'
resource = fields.Resource(resource_type)
certificate = jose.Field(
'certificate', decoder=jose.decode_cert, encoder=jose.encode_cert)
reason = jose.Field('reason')
class Order(ResourceBody):
"""Order Resource Body.
:ivar identifiers: List of identifiers for the certificate.
:vartype identifiers: `list` of `.Identifier`
:ivar acme.messages.Status status:
:ivar authorizations: URLs of authorizations.
:vartype authorizations: `list` of `str`
:ivar str certificate: URL to download certificate as a fullchain PEM.
:ivar str finalize: URL to POST to, in order to request issuance once
all authorizations have "valid" status.
:ivar datetime.datetime expires: When the order expires.
:ivar .Error error: Any error that occurred during finalization, if applicable.
"""
identifiers = jose.Field('identifiers', omitempty=True)
status = jose.Field('status', decoder=Status.from_json,
omitempty=True)
authorizations = jose.Field('authorizations', omitempty=True)
certificate = jose.Field('certificate', omitempty=True)
finalize = jose.Field('finalize', omitempty=True)
expires = fields.RFC3339Field('expires', omitempty=True)
error = jose.Field('error', omitempty=True, decoder=Error.from_json)
@identifiers.decoder
def identifiers(value): # pylint: disable=no-self-argument,missing-function-docstring
return tuple(Identifier.from_json(identifier) for identifier in value)
class OrderResource(ResourceWithURI):
"""Order Resource.
:ivar acme.messages.Order body:
:ivar str csr_pem: The CSR this Order will be finalized with.
:ivar authorizations: Fully-fetched AuthorizationResource objects.
:vartype authorizations: `list` of `acme.messages.AuthorizationResource`
:ivar str fullchain_pem: The fetched contents of the certificate URL
produced once the order was finalized, if it's present.
:ivar alternative_fullchains_pem: The fetched contents of alternative certificate
chain URLs produced once the order was finalized, if present and requested during
finalization.
:vartype alternative_fullchains_pem: `list` of `str`
"""
body = jose.Field('body', decoder=Order.from_json)
csr_pem = jose.Field('csr_pem', omitempty=True)
authorizations = jose.Field('authorizations')
fullchain_pem = jose.Field('fullchain_pem', omitempty=True)
alternative_fullchains_pem = jose.Field('alternative_fullchains_pem', omitempty=True)
@Directory.register
class NewOrder(Order):
"""New order."""
resource_type = 'new-order'
|
#!/usr/bin/env python
#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Delete all config data for model Cisco-IOS-XR-ip-rsvp-cfg.
usage: gn-delete-xr-ip-rsvp-cfg-10-ydk.py [-h] [-v] device
positional arguments:
device gNMI device (http://user:password@host:port)
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.path import Repository
from ydk.services import CRUDService
from ydk.gnmi.providers import gNMIServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_ip_rsvp_cfg \
as xr_ip_rsvp_cfg
import os
import logging
YDK_REPO_DIR = os.path.expanduser("~/.ydk/")
if __name__ == "__main__":
"""Execute main program."""
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", help="print debugging messages",
action="store_true")
parser.add_argument("device",
help="gNMI device (http://user:password@host:port)")
args = parser.parse_args()
device = urlparse(args.device)
# log debug messages if verbose argument specified
if args.verbose:
logger = logging.getLogger("ydk")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter(("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
handler.setFormatter(formatter)
logger.addHandler(handler)
# create gNMI provider
repository = Repository(YDK_REPO_DIR+device.hostname)
provider = gNMIServiceProvider(repo=repository,
address=device.hostname,
port=device.port,
username=device.username,
password=device.password)
# create CRUD service
crud = CRUDService()
rsvp = xr_ip_rsvp_cfg.Rsvp() # create object
# delete configuration on gNMI device
# crud.delete(provider, rsvp)
exit()
# End of script
|
#!/usr/bin/python3.3
# -*- coding: utf-8 -*-
"""dllfinder
"""
from . import _wapi
import collections
from importlib.machinery import EXTENSION_SUFFIXES
import os
import sys
from . mf3 import ModuleFinder
from . import hooks
################################
# XXX Move these into _wapi???
_buf = _wapi.create_unicode_buffer(260)
_wapi.GetWindowsDirectoryW(_buf, len(_buf))
windir = _buf.value.lower()
_wapi.GetSystemDirectoryW(_buf, len(_buf))
sysdir = _buf.value.lower()
_wapi.GetModuleFileNameW(sys.dllhandle, _buf, len(_buf))
pydll = _buf.value.lower()
def SearchPath(imagename, path=None):
pfile = _wapi.c_wchar_p()
if _wapi.SearchPathW(path,
imagename,
None,
len(_buf),
_buf,
pfile):
return _buf.value
return None
################################
class DllFinder:
def __init__(self):
# _loaded_dlls contains ALL dlls that are bound, this includes
# the loaded extension modules; maps lower case basename to
# full pathname.
self._loaded_dlls = {}
# _dlls contains the full pathname of the dlls that
# are NOT considered system dlls.
#
# The pathname is mapped to a set of modules/dlls that require
# this dll. This makes it possible to find out WHY a certain dll has to
# be included.
self._dlls = collections.defaultdict(set)
def _add_dll(self, path):
self._dlls[path] # touch the defaultdict entry so the dll is recorded even with no dependents
self.import_extension(path)
def import_extension(self, pyd, callers=None):
"""Add an extension module and scan it for dependencies.
"""
todo = {pyd} # todo contains the dlls that we have to examine
while todo:
dll = todo.pop() # get one and check it
if dll in self._loaded_dlls:
continue
for dep_dll in self.bind_image(dll):
if dep_dll in self._loaded_dlls:
continue
dll_type = self.determine_dll_type(dep_dll)
if dll_type is None:
continue
## if dll_type == "EXT":
## print("EXT", dep_dll)
## elif dll_type == "DLL":
## print("DLL", dep_dll)
todo.add(dep_dll)
self._dlls[dep_dll].add(dll)
def bind_image(self, imagename):
"""Call BindImageEx and collect all dlls that are bound.
"""
# XXX Should cache results!
import platform
if platform.architecture()[0]=="32bit":
pth = ";".join([p for p in os.environ["PATH"].split(';') if not "intel64_win" in p])
elif platform.architecture()[0]=="64bit":
pth = ";".join([p for p in os.environ["PATH"].split(';') if not "ia32_win" in p])
else:
pth = os.environ["PATH"]
#import pdb;pdb.set_trace()
path = ";".join([os.path.dirname(imagename),
os.path.dirname(sys.executable),
pth])
result = set()
@_wapi.PIMAGEHLP_STATUS_ROUTINE
def status_routine(reason, img_name, dllname, va, parameter):
if reason == _wapi.BindImportModule: # 5
assert img_name.decode("mbcs") == imagename
# imagename binds to dllname
dllname = self.search_path(dllname.decode("mbcs"), path)
result.add(dllname)
return True
# BindImageEx uses the PATH environment variable to find
# dependent dlls; set it to our changed PATH:
old_path = os.environ["PATH"]
assert isinstance(path, str)
os.environ["PATH"] = path
self._loaded_dlls[os.path.basename(imagename).lower()] = imagename
_wapi.BindImageEx(_wapi.BIND_ALL_IMAGES
| _wapi.BIND_CACHE_IMPORT_DLLS
| _wapi.BIND_NO_UPDATE,
imagename.encode("mbcs"),
None,
##path.encode("mbcs"),
None,
status_routine)
# Be a good citizen and cleanup:
os.environ["PATH"] = old_path
return result
def determine_dll_type(self, imagename):
"""determine_dll_type must be called with a full pathname.
For any dll in the Windows or System directory or any
subdirectory thereof return None, except when the dll binds to
or IS the current python dll.
Return "DLL" when the image binds to the python dll, return
None when the image is in the windows or system directory,
return "EXT" otherwise.
"""
fnm = imagename.lower()
if fnm == pydll.lower():
return "DLL"
deps = self.bind_image(imagename)
if pydll in [d.lower() for d in deps]:
return "EXT"
if fnm.startswith(windir + os.sep) or fnm.startswith(sysdir + os.sep):
return None
return "DLL"
def search_path(self, imagename, path):
"""Find an image (exe or dll) on the PATH."""
if imagename.lower() in self._loaded_dlls:
return self._loaded_dlls[imagename.lower()]
# SxS files (like msvcr90.dll or msvcr100.dll) are only found in
# the SxS directory when the PATH is NULL.
if path is not None:
found = SearchPath(imagename)
if found is not None:
return found
return SearchPath(imagename, path)
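# Illustrative note: the two-step lookup above means that for a
# hypothetical SxS dll such as "msvcr90.dll", SearchPath(imagename) is
# tried first with a NULL path (the only way the SxS directory is
# consulted), and the explicit `path` is only used as a fallback.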
def all_dlls(self):
"""Return a set containing all dlls that are needed,
except the python dll.
"""
return {dll for dll in self._dlls
if dll.lower() != pydll.lower()}
def extension_dlls(self):
"""Return a set containing only the extension dlls that are
needed.
"""
return {dll for dll in self._dlls
if "EXT" == self.determine_dll_type(dll)}
def real_dlls(self):
"""Return a set containing only the dlls that do not bind to
the python dll.
"""
return {dll for dll in self._dlls
if "DLL" == self.determine_dll_type(dll)
and dll.lower() != pydll.lower()}
################################################################
class Scanner(ModuleFinder):
"""A ModuleFinder subclass which allows to find binary
dependencies.
"""
def __init__(self, path=None, verbose=0, excludes=[], optimize=0):
super().__init__(path, verbose, excludes, optimize)
self.dllfinder = DllFinder()
self._data_directories = {}
self._data_files = {}
self._min_bundle = {}
self._import_package_later = []
self._safe_import_hook_later = []
self._boot_code = []
hooks.init_finder(self)
def add_bootcode(self, code):
"""Add some code that the exe will execute when bootstrapping."""
self._boot_code.append(code)
def set_min_bundle(self, name, value):
self._min_bundle[name] = value
def get_min_bundle(self):
return self._min_bundle
def hook(self, mod):
hookname = "hook_%s" % mod.__name__.replace(".", "_")
mth = getattr(hooks, hookname, None)
if mth:
mth(self, mod)
def _add_module(self, name, mod):
self.hook(mod)
super()._add_module(name, mod)
if hasattr(mod, "__file__") \
and mod.__file__.endswith(tuple(EXTENSION_SUFFIXES)):
callers = {self.modules[n]
for n in self._depgraph[name]
# self._depgraph can contain '-' entries!
if n in self.modules}
self._add_pyd(mod.__file__, callers)
def _add_pyd(self, name, callers):
self.dllfinder.import_extension(name, callers)
## def required_dlls(self):
## return self.dllfinder.required_dlls()
def all_dlls(self):
return self.dllfinder.all_dlls()
def real_dlls(self):
return self.dllfinder.real_dlls()
def extension_dlls(self):
return self.dllfinder.extension_dlls()
def add_datadirectory(self, name, path, recursive):
self._data_directories[name] = (path, recursive)
def add_datafile(self, name, path):
self._data_files[name] = path
def add_dll(self, path):
self.dllfinder._add_dll(path)
## def report_dlls(self):
## import pprint
## pprint.pprint(set(self.dllfinder.required_dlls()))
## pprint.pprint(set(self.dllfinder.system_dlls()))
def import_package_later(self, package):
# This method can be called from hooks to add additional
# packages. It is called BEFORE a module is imported
# completely!
self._import_package_later.append(package)
def safe_import_hook_later(self, name,
caller=None,
fromlist=(),
level=0):
# This method can be called from hooks to add additional
# packages. It is called BEFORE a module is imported
# completely!
self._safe_import_hook_later.append((name, caller, fromlist, level))
def finish(self):
while self._import_package_later:
pkg = self._import_package_later.pop()
self.import_package(pkg)
while self._safe_import_hook_later:
args = self._safe_import_hook_later.pop()
name, caller, fromlist, level = args
self.safe_import_hook(name,
caller=caller,
fromlist=fromlist,
level=level)
################################################################
if __name__ == "__main__":
# test script and usage example
#
# Should we introduce an 'official' subclass of ModuleFinder
# and DllFinder?
scanner = Scanner()
scanner.import_package("numpy")
print(scanner.all_dlls())
|
import boshC3
#import boshExcel
import docSql
import os
import json
import sys
def psql_run(d_host, d_port, d_user, d_pass, d_db):
import getpass
import os
host = raw_input(">>> host [" + d_host + "] : " )
if host != "":
d_host = host
port = raw_input(">>> port [" + str(d_port) + "] : " )
if port != "":
d_port = port
username = raw_input(">>> username [" + d_user + "] : " )
if username != "":
d_user = username
password = getpass.getpass(">>> Password (hidden) : ")
if password != "":
d_pass = password
dbname = raw_input(">>> database name [" + d_db + "] : " )
if dbname != "":
d_db = dbname
psql_str = "PGPASSWORD=" + d_pass + " psql -h " + d_host + " -p " + str(d_port) + " -U " + d_user + " " + d_db
os.system(psql_str)
#def split_var(statement)
# firstword = statement.split()[0]
# if firstword.find("=") != -1:
# var_name = firstword.split('=')[0]
# command_str = firstword.split('=')[1:]
#def parse_associate_string_for_display(line):
# command = line.split(':=')
# if len(command) >= 2:
# assoc_name=command[0]
# assoc_part=command[1].split()
#
# input_attr = ""
# output_attr = ""
# source_data = ""
# related_attr = ""
# where_str = ""
# flag = 0
#
# for word_index in range(len(assoc_part)):
#
# if assoc_part[word_index] != 'associate' and assoc_part[word_index] != 'with' and assoc_part[word_index] != 'from' and assoc_part[word_index] != 'by' and assoc_part[word_index] != 'where':
# if assoc_part[word_index - 1] == 'associate':
# input_attr = assoc_part[word_index]
# elif assoc_part[word_index - 1] == 'with':
# output_attr = assoc_part[word_index]
# elif assoc_part[word_index - 1] == 'from':
# source_data = assoc_part[word_index]
# elif assoc_part[word_index - 1] == 'by':
# flag = 0
# related_attr = assoc_part[word_index]
# elif assoc_part[word_index - 1] == 'where':
# flag = 1
# where_str += assoc_part[word_index] + " "
# else:
# if flag == 0:
# related_attr += assoc_part[word_index] + " "
# else:
# where_str += assoc_part[word_index] + " "
# print "\nCounting the occurrences of \"" + related_attr + "\" among all \"" + input_attr + "\" X \"" + output_attr + "\" combinations from source \"" + source_data + "\""
# if where_str != "":
# print " with the filter " + where_str
# else:
# print "Syntax error: use ':=' to assign the associate name"
def print_help_string(func_name):
switch_dict = {
'sql_select': docSql.Select,
'sql_create': docSql.Create,
'sql_insert': docSql.Insert,
'sql_update': docSql.Update,
'sql_copy': docSql.Copy,
'sql_load': docSql.Load,
'sql_show': docSql.Show,
'sql_desc': docSql.Desc,
'sql_starschema':docSql.Starschema,
'sql_syntaxout':docSql.Syntaxout,
'assoc_associate': docAssocAssociate,
'assoc_query': docAssocQuery,
'assoc_analyze': docAssocAnalyze,
'assoc_find': docFindAnalyze,
}
func = switch_dict[func_name]
result = func()
return result
def docFindAnalyze():
print '\n'.join(["find top 10 Product.brand by sum(Data) from sales"
,"find top 10 Product.brand, Customer.gender by sum(Data) from sales"
,"find Product.id by sum(Data) from sales"
,"find Product.brand, Customer.gender, sum(Data) from sales"
,"find Product.brand sum(Data) from sales"
,"find distinct Product.id from sales where Product.brand='Fanta'"
,"find distinct Product.brand, Customer.gender by Customer.gender from sales where Product.brandOwner='Pepsico'"
,"find top 5 Product.brand,Customer.gender, Date by Date from sales"
,"find id from Product where brandOwner='Pepsico'"
,"find name, brand from Product where brandOwner='Pepsico'"
])
def docAssocAnalyze():
print '\n'.join(["\tanalyze sum(fact_attribute)"
,"\tfrom tablename"
,"\tgroup by attribute1, attribute2, ..."
,"\n\tex."
,"\tanalyze sum(sales)"
,"\tfrom Fact.bt"
,"\twhere date >= '2013-01-01' AND date <= '2013-12-31' AND Product.category = 'electronics'"
,"\tgroup by Customer.gender, date.quarter"
])
def docAssocQuery():
print '\n'.join(["\t query <query attribute value> from <assoc_name> [by <post_processing1>, <post_processing2>, ...]"
,"\t\t post_processing:"
,"\t\t\tsort_asc : sort the result in ascending order"
,"\t\t\tsort_desc : sort the result in descending order"
,"\t\t\tlimit n: limit the size (n) of query result"
,"\t\t\ttop n: show the top n results"
,"\t\t\tnorm : normalized by the query attribute value's counts in the dataset"
,"\t\t\tthres n: use n as a threshold to filter results"
,"\t\t\tcosine : implementation of cosine similarity"
])
def docAssocAssociate():
print '\n'.join(["\t "
,"\t\tcreate association <association_name>(<query attribute> to <result attribute>) from <bt name> \n\t\t\tby <related attribute> [, <related attribute>,...] [where <where statement>]"
,"\t\t ex."
,"\t\t create association test(Product.brand to Product.brand) from sales by Customer.id"
])
def docSqlCopy():
print '\n'.join(["\tcopy table_name_in_database from \"data_source_string\""
,"\twhere condition"
,"\tlink column1=reference_attribute1 and column2=reference_attribute2 ..."
,"\tfact columni"
,"\tkey (column1, ...)"
,"\n\tex."
,"\tcopy sales from \"type=postgresql host=127.0.0.1 port=5432 user=test password=pass db=testdb\""
,"\twhere date > '2013-01-01'"
,"\tlink cid=Customer.id and pid=Product.id"
,"\tfact sales"
,"\n\tcopy Product from \"type=mysql host=127.0.0.1 port=3306 user=root password=test db=testdb\" key (id)"
])
def docSqlLoad():
print "\tload table_url from \"data_source_string\" \n\tby \"select_statment\"\n\tex.\n\tload sales.bt from \"type=postgresql host=127.0.0.1 port=5432 user=test password=pass db=testdb\" by 'SELECT \"cid\", \"pid\", \"date\", \"sales\" from \"sales\"'"
def docSqlUpdate():
print "\tUPDATE tablename SET column1=value1, column2=value2, ...\n\tWHERE filter condition\n\n\tex.\n\tUPDATE customber.bt SET state=Ohio WHERE name='Kelly George'"
def docSqlSelect():
print '\n'.join(["\tselect sum(fact_attribute)"
,"\tfrom tablename"
,"\tgroup by attribute1, attribute2, ..."
,"\n\tex."
,"\tselect sum(sales)"
,"\tfrom Fact.bt"
,"\twhere date >= '2013-01-01' AND date <= '2013-12-31' AND Product.category = 'electronics'"
,"\tgroup by Customer.gender, date.quarter"
,"\n"
,"\tselect distinct attr from tree"
])
def docSqlCreate():
print '\n'.join(["\tcreate table tablename (column1 data-type, [column2 data-type], ...)"
,"\n\tex."
,"\tcreate table log.bt (session_id INT64, url STRING)"
,"\tcreate table Fact.bt (Customer.id STRING, Product.id STRING,"
,"\tdate DATETIME64, fact sales FLOAT,"
,"\tdim (Customer Customer.bt, Product Product.bt))"
,"\n"
,"\tcreate tree bo_name from source_bt group by attr1,attr2,... where cond1,cond2,..."
,"\tcreate fulltree bo_name from source_bt group by attr1,attr2,... where cond1,cond2,..."
])
def docSqlInsert():
print '\n'.join(["\tinsert into tablename values (value1, value2, ...)"
,"\n\tex."
,"\ninsert into customber.bt values ('6', 'Lois Bennett', 'Norwegian', 'Maryland', 'Mydo', 'Female')"
,"\tinsert into Fact.bt values ('2731', '3083', '2013-01-03 02:09:42', 16.85),"
,"\t('4241', '2472', '2013-01-03 02:09:45', 21.05)"
])
def dump2Csv(result_table,outfile,line_count,status="append"):
outfile=outfile if outfile!=None else "dump_result.csv"
if ".csv" not in outfile:
outfile+=".csv"
import csv
writemode="w" if status=="init" else "a"
local_count=line_count
with open(outfile,writemode) as csvfile:
writer=csv.writer(csvfile)
for row in result_table:
writer.writerow([unicode(s).encode("utf-8") for s in row])
local_count+=1
sys.stdout.write("\r")
sys.stdout.write("dump "+str(local_count)+" lines")
sys.stdout.flush()
return outfile
def redirectFiles(result_table,outfile):
if ".html" in outfile:
outfile=boshC3.Default_output(result_table,outfile)
elif ".xlsx" in outfile:
template=outfile if os.path.exists(outfile) else None
boshExcel.Default_output(result_table,template=template,output=outfile)
elif ".csv" in outfile:
import csv
with open(outfile,"w") as csvfile:
writer=csv.writer(csvfile)
for row in result_table:
writer.writerow([unicode(s).encode("utf-8") for s in row])
else:
fd=open(outfile,"w")
jsonstr=json.dumps(result_table)
fd.write(jsonstr)
fd.close()
print "Write into file: "+os.path.abspath(outfile)
return outfile
def invokeFiles(outfile):
from subprocess import Popen
import platform
try:
# saverr=os.dup(2)
# os.close(2)
if "Darwin" in platform.system():
Popen(["open",outfile])
elif os.name=="posix":
Popen(["xdg-open",outfile])
elif os.name=="nt":
Popen(["open",outfile])
else:
print "Invoke function is not supported under your os environment."
finally:
# os.dup2(saverr,2)
pass
def postactionParser(command_str):
parsedict={"command":None,"out_command":None,"pipes_command":None,"ERR_msg":None,"doInvoke":False}
parsedict["command"]=command_str
splits=command_str.split(">>")
if len(splits)>=2:
if len(splits[1])==0:
parsedict["ERR_msg"]="*** FILENAME required after \">>\""
return parsedict
elif "|" in splits[1]:
parsedict["ERR_msg"]="*** Operations via PIPES should be put before \">>\""
return parsedict
elif "@" in splits[1]:
outfile=splits[1][splits[1].find("@")+1:].strip()
if len(outfile)==0:
parsedict["ERR_msg"]="*** FILENAME required after \">>@\""
return parsedict
parsedict["doInvoke"]=True
parsedict["out_command"]=outfile
else:
parsedict["out_command"]=splits[1].strip()
parsedict["command"]=splits[0].strip()
splits=parsedict["command"].split("|")
if len(splits)>=2:
if len(splits[1])==0:
parsedict["ERR_msg"]="*** PROGRAM/COMMAND required after \"|\""
return parsedict
strBegin=parsedict["command"].find("|")
parsedict["pipes_command"]=parsedict["command"][strBegin+1:]
parsedict["command"]=splits[0]
return parsedict
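# Illustrative sketch (hypothetical command string):
#   postactionParser("select a from t | sort >>@ out.html")
# returns
#   {"command": "select a from t ", "pipes_command": " sort",
#    "out_command": "out.html", "doInvoke": True, "ERR_msg": None}
# i.e. text after "|" is piped to an external program via runPipes, and
# ">>@" both writes the result to out.html and opens it via invokeFiles.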
def runPipes(result_table,pipes_command):
from subprocess import Popen, PIPE
jsonstr=json.dumps(result_table)
command=pipes_command.split()
process=Popen(command,stdin=PIPE) #use STDIN to send data, could use linux build-in commands easily
process.stdin.write(jsonstr)
#command.append(jsonstr) # append data (jsonstr) as the last arguments
#process=Popen(command)
process.communicate()
|
import math
from typing import Mapping, List, Optional, Union, Callable, Text
import tensorflow as tf
from ... import InfoNode
from ....utils import keys
from ....utils import types as ts
class LocNode(InfoNode):
"""Manages a location in rectangular space encoded in binary
as a [D, ceil(lg2(L))]-shaped tensor where D is the number
of dimensions, and L is the length of the longest dimension.
Its latent is structured [D, ceil(lg2(L))]
During `top_down` biasing, `LocNode` properly interpolates the
location proportional to the difference in their rectangular
space representations.
TODO: make location use velocity and acceleration."""
def __init__(self,
grid_shape: ts.Shape):
self.grid_shape = tf.constant(grid_shape)
num_dimensions = self.grid_shape.shape[0]
max_length = tf.reduce_max(self.grid_shape)
self.coefs = 2. ** tf.range(max_length, dtype=tf.keras.backend.floatx())
# a vector [1, 2, 4, 8, ...] of successive powers of two, broadcast so it acts like the matrix:
# [[1, 2, 4, 8, ...] # for first grid spatial dimension
# [1, 2, 4, 8, ...] # for second grid spatial dimension
# .
# .
# .
# [1, 2, 4, 8, ...]] # for final grid spatial dimension
super(LocNode, self).__init__(
state_spec_extras=dict(),
parent_names=[],
latent_spec=tf.TensorSpec(self.grid_shape),
name='LocNode')
def bottom_up(self, states: Mapping[Text, ts.NestedTensor]) -> Mapping[Text, ts.NestedTensor]:
return states
def top_down(self, states: Mapping[Text, ts.NestedTensor]) -> Mapping[Text, ts.NestedTensor]:
energy, target = self.f_child(targets=states[self.name][keys.STATES.TARGET_LATENTS])
old_loc = self.get_real_loc(states)
target_loc = self.decode_loc(target)
beta = 0.5 + tf.exp(energy)
new_loc = (1. - beta) * old_loc + beta * target_loc
new_loc = tf.clip_by_value(new_loc, 0, self.grid_shape)
new_loc_base_2_encoded = self.encode_loc(new_loc)
states[self.name][keys.STATES.LATENT] = new_loc_base_2_encoded
return states
def get_real_loc(self, states) -> ts.Tensor:
return self.decode_loc(states[self.name][keys.STATES.LATENT])
def decode_loc(self, base_two_valued: ts.Tensor) -> ts.Tensor:
"""converts [..., N] base-2 encoded tensor into
[...] real-valued tensor.
NOTE: being `base-2 encoded` simply means the tensor's
values are multiplied by successive powers of two. The
actual values may be floating point."""
return base_two_valued @ self.coefs[:, tf.newaxis]
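# Illustrative sketch (hypothetical values): with max_length = 3 the
# coefficient vector is [1., 2., 4.], so a base-2 encoded row
# [1., 0., 1.] decodes to 1*1 + 0*2 + 1*4 = 5. Note that the matmul
# with coefs[:, tf.newaxis] leaves a trailing axis of size 1.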
def encode_loc(self, real_valued: ts.Tensor) -> ts.Tensor:
"""converts a [...] real-valued tensor into the equivalent
[..., N] base-2 encoded tensor."""
raise NotImplementedError('I need to make a square wave function')
# Unreachable draft kept for reference; note that `self.grid_matrix` is
# never defined on this class, so this line would fail even if reached:
# return tf.nn.sigmoid(math.pi * (2 * (real_valued[..., tf.newaxis] / self.grid_matrix[:, 0] - 1) - 1))
|
from mayan.apps.testing.tests.base import (
BaseTestCase, BaseTransactionTestCase, GenericViewTestCase,
GenericTransactionViewTestCase
)
from .mixins.document_mixins import DocumentTestMixin
class GenericDocumentTestCase(DocumentTestMixin, BaseTestCase):
"""Base test case when testing models or classes"""
class GenericTransactionDocumentTestCase(
DocumentTestMixin, BaseTransactionTestCase
):
"""Base test case when testing models or classes with transactions"""
class GenericDocumentViewTestCase(DocumentTestMixin, GenericViewTestCase):
"""Base test case when testing views"""
class GenericTransactionDocumentViewTestCase(
DocumentTestMixin, GenericTransactionViewTestCase
):
"""Base test case when testing views with transactions"""
|
#############################################################################
# Copyright (c) 2018, Johan Mabille, Sylvain Corlay and Loic Gouarin #
# #
# Distributed under the terms of the BSD 3-Clause License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
#############################################################################
version_info = (0, 0, 1)
__version__ = '.'.join(map(str, version_info))
|
#!/usr/bin/env python
from twisted.web import http
from twisted.internet import protocol
from twisted.internet import reactor, threads
from ConfigParser import ConfigParser
from nx_parser import signature_parser
import urllib
import pprint
import socket
import MySQLConnector
import MySQLdb
import getopt
import sys
import re
class InterceptHandler(http.Request):
def process(self):
if self.getHeader('Orig_args'):
args = {'GET' : self.getHeader('Orig_args')}
method = 'GET'
elif self.args:
args = {'POST': self.args}
method = 'POST'
else:
method = 'GET'
args = {}
args['Cookie'] = self.getHeader('Cookie')
args['Referer'] = self.getHeader('Referer')
sig = self.getHeader("naxsi_sig")
if sig is None:
print "no naxsi_sig header."
return
url = sig.split('&uri=')[1].split('&')[0]
fullstr = method + ' ' + url + ' ' + ','.join([x + ' : ' + str(args.get(x, 'No Value !')) for x in args.keys()])
threads.deferToThread(self.background, fullstr, sig)
self.finish()
return
def background(self, fullstr, sig):
self.db = MySQLConnector.MySQLConnector().connect()
if self.db is None:
raise ValueError("Cannot connect to db.")
self.cursor = self.db.cursor()
if self.cursor is None:
raise ValueError("Cannot connect to db.")
parser = signature_parser(self.cursor)
parser.sig_to_db(fullstr, sig)
self.db.close()
class InterceptProtocol(http.HTTPChannel):
requestFactory = InterceptHandler
class InterceptFactory(http.HTTPFactory):
protocol = InterceptProtocol
def usage():
print 'Usage: python nx_intercept [-h,--help] [-a,--add-monitoring ip:1.2.3.4|md5:af794f5e532d7a4fa59c49845af7947e] [-q,--quiet] [-l,--log-file /path/to/logfile]'
def add_monitoring(arg, conf_path):
l = arg.split('|')
ip = None
md5 = None
for i in l:
if i.startswith('ip:'):
ip = i[3:]
elif i.startswith('md5:'):
md5 = i[4:]
if md5 is not None and len(md5) != 32:
print 'md5 is not valid ! Nothing will be inserted in db !'
return
if ip is not None:
try:
socket.inet_aton(ip)
except socket.error:
print 'ip is not valid ! Nothing will be inserted in db !'
return
db = MySQLConnector.MySQLConnector(conf_path).connect()
cursor = db.cursor()
if md5 is not None and ip is not None:
cursor.execute("INSERT INTO http_monitor (peer_ip, md5) VALUES (%s, %s)", (ip, md5))
return
if md5 is not None:
cursor.execute("INSERT INTO http_monitor (md5) VALUES (%s)", (md5))
return
if ip is not None:
cursor.execute("INSERT INTO http_monitor (peer_ip) VALUES (%s)", (ip))
return
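# Example argument formats (matching the usage string above):
#   add_monitoring("ip:1.2.3.4", conf_path)
#   add_monitoring("md5:af794f5e532d7a4fa59c49845af7947e", conf_path)
#   add_monitoring("ip:1.2.3.4|md5:af794f5e532d7a4fa59c49845af7947e", conf_path)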
def fill_db(filename, conf_path):
fd = open(filename, 'r')
mysqlh = MySQLConnector.MySQLConnector(conf_path)
db = mysqlh.connect()
sig = ''
if db is None:
raise ValueError('Cannot connect to db')
cursor = db.cursor()
if cursor is None:
raise ValueError('Cannot connect to db')
if re.match("[a-z0-9]+$", mysqlh.dbname) == False:
print 'bad db name :)'
exit(-2)
cursor.execute("DROP DATABASE IF EXISTS %s;" % mysqlh.dbname)
cursor.execute("CREATE DATABASE %s;" % mysqlh.dbname)
db.select_db(mysqlh.dbname)
for line in fd:
fullstr = ''
if 'NAXSI_FMT' in line:
l = line.split(", ")
date = ' '.join(l[0].split()[:2])
sig = l[0].split('NAXSI_FMT:')[1][1:]
l = l[1:]
request_args = {}
for i in l:
s = i.split(':')
request_args[s[0]] = urllib.unquote(''.join(s[1:]))
# print 'args are ', request_args
if request_args:
fullstr = request_args['request'][2:-1] + ' Referer : ' + request_args.get('referrer', ' "None"')[2:-1].strip('"\n') + ',Cookie : ' + request_args.get('cookie', ' "None"')[2:-1]
if sig != '' and fullstr != '':
# print "adding %s (%s) " % (sig, fullstr)
parser = signature_parser(cursor)
parser.sig_to_db(fullstr, sig, date=date)
fd.close()
db.close()
if __name__ == '__main__':
try:
opts, args = getopt.getopt(sys.argv[1:], 'c:ha:l:', ['conf-file=', 'help', 'add-monitoring=', 'log-file=']) # long options taking a value need a trailing '='
except getopt.GetoptError, err:
print str(err)
usage()
sys.exit(42)
has_conf = False
conf_path = ''
for o, a in opts:
if o in ('-h', '--help'):
usage()
sys.exit(0)
if o in ('-a', '--add-monitoring'):
if has_conf is False:
print "Conf File must be specified first !"
exit(42)
add_monitoring(a, conf_path)
exit(42)
if o in ('-l', '--log-file'):
if has_conf is False:
print "Conf File must be specified first !"
exit(42)
print "Filling database with %s. ALL PREVIOUS CONTENT WILL BE DROPPED !!!!!"
fill_db(a, conf_path)
print "Done."
exit(42)
if o in ('-c', '--conf-file'):
has_conf = True
conf_path = a
if has_conf is False:
print 'Conf file is mandatory !'
exit(-42)
fd = open(conf_path, 'r')
conf = ConfigParser()
conf.readfp(fd)
try:
port = int(conf.get('nx_intercept', 'port'))
except:
print "No port in conf file ! Using default port (8080)"
port = 8080
fd.close()
reactor.listenTCP(port, InterceptFactory())
reactor.run()
|
#!/usr/bin/env python3
# Invoked by: Cloudformation custom actions
# Returns: Error or status message
#
# deletes the resources associated with the lambda, e.g. its ENIs
import boto3
import http.client
import urllib.parse # urllib.parse is used in send_response below; plain `import urllib` does not guarantee it
import json
import uuid
import threading
from time import sleep
def handler(event, context):
print(event)
response = {
'StackId': event['StackId'],
'RequestId': event['RequestId'],
'LogicalResourceId': event['LogicalResourceId'],
'Status': 'SUCCESS'
}
print('Performing Action %s' % (event['RequestType']))
if 'PhysicalResourceId' in event:
response['PhysicalResourceId'] = event['PhysicalResourceId']
else:
response['PhysicalResourceId'] = str(uuid.uuid4())
try:
if event['RequestType'] == 'Delete':
user_data = event['ResourceProperties']
# setup ec2 client
ec2client = boto3.client('ec2')
# loop through all ENI's in the Security Group send by CloudFormation
enis = ec2client.describe_network_interfaces(Filters=[{'Name': 'group-id','Values': [user_data['SecurityGroup']]}])
for eni in enis['NetworkInterfaces']:
print('ENI description : '+eni['Description'])
# We only care about ENI's created by Lambda
if eni['Description'].startswith('AWS Lambda VPC ENI: '):
# Check if the eni is still attached and attempt to detach
if 'Attachment' in eni.keys():
print('Detaching ENI...')
ec2client.detach_network_interface(AttachmentId=eni['Attachment']['AttachmentId'])
print(ec2client.describe_network_interfaces(NetworkInterfaceIds=[eni['NetworkInterfaceId']])['NetworkInterfaces'][0].keys())
# Max wait for 5 minutes
retry_attempts = 0
while (retry_attempts < 30) and 'Attachment' in ec2client.describe_network_interfaces(NetworkInterfaceIds=[eni['NetworkInterfaceId']])['NetworkInterfaces'][0].keys():
print('eni still attached, waiting 10 seconds...')
sleep(10)
retry_attempts += 1
# Delete the eni
print('Deleting ENI %s' % eni['NetworkInterfaceId'])
ec2client.delete_network_interface(NetworkInterfaceId=eni['NetworkInterfaceId'])
return send_response(event, response, status='SUCCESS', reason='Successfully deleted the resources associated with lambda')
else:
return send_response(event, response, status='SUCCESS', reason="Nothing to do for request type other than delete")
except Exception as e:
print(str(e))
return send_response(event, response, status='SUCCESS', reason="Failed to delete resources associated with lambda")
def send_response(request, response, status=None, reason=None):
if status is not None:
response['Status'] = status
if reason is not None:
response['Reason'] = reason
if 'ResponseURL' in request and request['ResponseURL']:
try:
url= urllib.parse.urlparse(request['ResponseURL'])
body = json.dumps(response)
https = http.client.HTTPSConnection(url.hostname)
https.request('PUT', url.path + '?' + url.query, body)
except Exception as e:
print(str(e))
print("Failed to send the response to the provdided URL")
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable
from functools import partial
import numpy as np
import jax
import jax.numpy as jnp
from jax import core
from jax.util import unzip2
from jax import ad_util
from jax.tree_util import (register_pytree_node, tree_structure,
treedef_is_leaf, tree_flatten, tree_unflatten)
import jax.linear_util as lu
from jax.interpreters import xla
from jax.custom_derivatives import custom_jvp_call_jaxpr_p
from jax._src.lax import lax
from jax._src.lax import control_flow as lax_control_flow
from jax._src.lax import fft as lax_fft
def jet(fun, primals, series):
try:
order, = set(map(len, series))
except ValueError:
msg = "jet terms have inconsistent lengths for different arguments"
raise ValueError(msg) from None
# TODO(mattjj): consider supporting pytree inputs
for i, (x, terms) in enumerate(zip(primals, series)):
treedef = tree_structure(x)
if not treedef_is_leaf(treedef):
raise ValueError("primal value at position {} is not an array".format(i))
for j, t in enumerate(terms):
treedef = tree_structure(t)
if not treedef_is_leaf(treedef):
raise ValueError("term {} for argument {} is not an array".format(j, i))
@lu.transformation_with_aux
def flatten_fun_output(*args):
ans = yield args, {}
yield tree_flatten(ans)
f, out_tree = flatten_fun_output(lu.wrap_init(fun))
out_primals, out_terms = jet_fun(jet_subtrace(f), order).call_wrapped(primals, series)
return tree_unflatten(out_tree(), out_primals), tree_unflatten(out_tree(), out_terms)
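# Illustrative sketch (hypothetical inputs, not part of the original
# module): jet propagates a truncated Taylor series through a function.
# With primal x0 and input series (1., 0., 0.), i.e. x(t) = x0 + t, the
# output terms correspond to the higher-order Taylor terms of the
# function at x0 (up to the series-coefficient convention used here):
#
#   import jax.numpy as jnp
#   primal_out, terms_out = jet(jnp.sin, (0.1,), ((1., 0., 0.),))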
@lu.transformation
def jet_fun(order, primals, series):
with core.new_main(JetTrace) as main:
main.order = order
out_primals, out_terms = yield (main, primals, series), {}
del main
out_terms = [[np.zeros_like(p)] * order if s is zero_series else s
for p, s in zip(out_primals, out_terms)]
yield out_primals, out_terms
@lu.transformation
def jet_subtrace(main, primals, series):
trace = JetTrace(main, core.cur_sublevel())
in_tracers = map(partial(JetTracer, trace), primals, series)
ans = yield in_tracers, {}
out_tracers = map(trace.full_raise, ans)
out_primals, out_terms = unzip2((t.primal, t.terms) for t in out_tracers)
yield out_primals, out_terms
@lu.transformation_with_aux
def traceable(in_tree_def, *primals_and_series):
primals_in, series_in = tree_unflatten(in_tree_def, primals_and_series)
primals_out, series_out = yield (primals_in, series_in), {}
out_flat, out_tree_def = tree_flatten((primals_out, series_out))
yield out_flat, out_tree_def
class JetTracer(core.Tracer):
__slots__ = ["primal", "terms"]
def __init__(self, trace, primal, terms):
assert type(terms) in (ZeroSeries, list, tuple)
self._trace = trace
self.primal = primal
self.terms = terms
@property
def aval(self):
return core.get_aval(self.primal)
def full_lower(self):
if self.terms is zero_series or all(t is zero_term for t in self.terms):
return core.full_lower(self.primal)
else:
return self
class JetTrace(core.Trace):
def pure(self, val):
return JetTracer(self, val, zero_series)
def lift(self, val):
return JetTracer(self, val, zero_series)
def sublift(self, val):
return JetTracer(self, val.primal, val.terms)
def process_primitive(self, primitive, tracers, params):
order = self.main.order # pytype: disable=attribute-error
primals_in, series_in = unzip2((t.primal, t.terms) for t in tracers)
series_in = [[zero_term] * order if s is zero_series else s
for s in series_in]
# TODO(mattjj): avoid always instantiating zeros
series_in = [[np.zeros(np.shape(x), dtype=np.result_type(x))
if t is zero_term else t for t in series]
for x, series in zip(primals_in, series_in)]
rule = jet_rules[primitive]
primal_out, terms_out = rule(primals_in, series_in, **params)
if not primitive.multiple_results:
return JetTracer(self, primal_out, terms_out)
else:
return [JetTracer(self, p, ts) for p, ts in zip(primal_out, terms_out)]
def process_call(self, call_primitive, f, tracers, params):
primals_in, series_in = unzip2((t.primal, t.terms) for t in tracers)
primals_and_series, in_tree_def = tree_flatten((primals_in, series_in))
f_jet, out_tree_def = traceable(jet_subtrace(f, self.main), in_tree_def)
update_params = call_param_updaters.get(call_primitive)
new_params = (update_params(params, len(primals_and_series))
if update_params else params)
result = call_primitive.bind(f_jet, *primals_and_series, **new_params)
primals_out, series_out = tree_unflatten(out_tree_def(), result)
return [JetTracer(self, p, ts) for p, ts in zip(primals_out, series_out)]
def post_process_call(self, call_primitive, out_tracers, params):
primals, series = unzip2((t.primal, t.terms) for t in out_tracers)
out, treedef = tree_flatten((primals, series))
del primals, series
main = self.main
def todo(x):
primals, series = tree_unflatten(treedef, x)
trace = JetTrace(main, core.cur_sublevel())
return map(partial(JetTracer, trace), primals, series)
return out, todo
def process_custom_jvp_call(self, primitive, fun, jvp, tracers):
# TODO(mattjj): don't just ignore custom jvp rules?
del primitive, jvp # Unused.
return fun.call_wrapped(*tracers)
def process_custom_vjp_call(self, primitive, fun, fwd, bwd, tracers, out_trees):
del primitive, fwd, bwd, out_trees # Unused.
return fun.call_wrapped(*tracers)
class ZeroTerm(object): pass
zero_term = ZeroTerm()
register_pytree_node(ZeroTerm, lambda z: ((), None), lambda _, xs: zero_term)
class ZeroSeries(object): pass
zero_series = ZeroSeries()
register_pytree_node(ZeroSeries, lambda z: ((), None), lambda _, xs: zero_series)
call_param_updaters = {}
def _xla_call_param_updater(params, num_inputs):
donated_invars = params['donated_invars']
if any(donated_invars):
raise NotImplementedError("donated_invars not supported with jet")
return dict(params, donated_invars=(False,) * num_inputs)
call_param_updaters[xla.xla_call_p] = _xla_call_param_updater
### rule definitions
jet_rules = {}
def defzero(prim):
jet_rules[prim] = partial(zero_prop, prim)
def zero_prop(prim, primals_in, series_in, **params):
primal_out = prim.bind(*primals_in, **params)
return primal_out, zero_series
defzero(lax.le_p)
defzero(lax.lt_p)
defzero(lax.gt_p)
defzero(lax.ge_p)
defzero(lax.eq_p)
defzero(lax.ne_p)
defzero(lax.not_p)
defzero(lax.and_p)
defzero(lax.or_p)
defzero(lax.xor_p)
defzero(lax.floor_p)
defzero(lax.ceil_p)
defzero(lax.round_p)
defzero(lax.sign_p)
defzero(ad_util.stop_gradient_p)
defzero(lax.is_finite_p)
defzero(lax.shift_left_p)
defzero(lax.shift_right_arithmetic_p)
defzero(lax.shift_right_logical_p)
defzero(lax.bitcast_convert_type_p)
def deflinear(prim):
jet_rules[prim] = partial(linear_prop, prim)
def linear_prop(prim, primals_in, series_in, **params):
primal_out = prim.bind(*primals_in, **params)
series_out = [prim.bind(*terms_in, **params) for terms_in in zip(*series_in)]
return primal_out, series_out
deflinear(lax.neg_p)
deflinear(lax.real_p)
deflinear(lax.complex_p)
deflinear(lax.conj_p)
deflinear(lax.imag_p)
deflinear(lax.add_p)
deflinear(lax.sub_p)
deflinear(lax.convert_element_type_p)
deflinear(lax.broadcast_p)
deflinear(lax.broadcast_in_dim_p)
deflinear(lax.concatenate_p)
deflinear(lax.pad_p)
deflinear(lax.reshape_p)
deflinear(lax.rev_p)
deflinear(lax.transpose_p)
deflinear(lax.slice_p)
deflinear(lax.reduce_sum_p)
deflinear(lax.reduce_window_sum_p)
deflinear(lax_fft.fft_p)
deflinear(xla.device_put_p)
def _cumulative_jet_rule(primals_in, series_in, *, axis: int, reverse: bool,
combine_fn: Callable):
# Irrespective of backend, we always use the parallel prefix scan
# implementation when differentiating because reduce_window is not
# arbitrarily differentiable.
return jet(partial(lax_control_flow.associative_scan, combine_fn, axis=axis,
reverse=reverse),
primals_in, series_in)
deflinear(lax_control_flow.cumsum_p)
jet_rules[lax_control_flow.cumprod_p] = partial(_cumulative_jet_rule,
combine_fn=lax.mul)
jet_rules[lax_control_flow.cummax_p] = partial(_cumulative_jet_rule,
combine_fn=lax.max)
jet_rules[lax_control_flow.cummin_p] = partial(_cumulative_jet_rule,
combine_fn=lax.min)
def def_deriv(prim, deriv):
"""
Define the jet rule for a primitive in terms of its first derivative.
"""
jet_rules[prim] = partial(deriv_prop, prim, deriv)
def deriv_prop(prim, deriv, primals_in, series_in):
x, = primals_in
series, = series_in
primal_out = prim.bind(x)
c0, cs = jet(deriv, primals_in, series_in)
c = [c0] + cs
u = [x] + series
v = [primal_out] + [None] * len(series)
for k in range(1, len(v)):
v[k] = fact(k-1) * sum(_scale(k, j) * c[k-j] * u[j] for j in range(1, k + 1))
primal_out, *series_out = v
return primal_out, series_out
def_deriv(lax.erf_p, lambda x: lax.mul(lax._const(x, 2. / np.sqrt(np.pi)), lax.exp(lax.neg(lax.square(x)))))
def def_comp(prim, comp):
"""
Define the jet rule for a primitive in terms of a composition of simpler primitives.
"""
jet_rules[prim] = partial(jet, comp)
def_comp(lax.expm1_p, lambda x: lax.exp(x) - 1)
def_comp(lax.log1p_p, lambda x: lax.log(1 + x))
def_comp(lax.sqrt_p, lambda x: x ** 0.5)
def_comp(lax.rsqrt_p, lambda x: x ** -0.5)
def_comp(lax.asinh_p, lambda x: lax.log(x + lax.sqrt(lax.square(x) + 1)))
def_comp(lax.acosh_p, lambda x: lax.log(x + lax.sqrt(lax.square(x) - 1)))
def_comp(lax.atanh_p, lambda x: 0.5 * lax.log(lax.div(1 + x, 1 - x)))
def_comp(lax.erfc_p, lambda x: 1 - lax.erf(x))
def_comp(lax.rem_p, lambda x, y: x - y * lax.floor(x / y))
def_comp(lax.clamp_p, lambda a, x, b: lax.min(lax.max(a, x), b))
def _erf_inv_rule(primals_in, series_in):
x, = primals_in
series, = series_in
u = [x] + series
primal_out = lax.erf_inv(x)
v = [primal_out] + [None] * len(series)
# derivative on co-domain for caching purposes
deriv_const = np.sqrt(np.pi) / 2.
deriv_y = lambda y: lax.mul(deriv_const, lax.exp(lax.square(y)))
# manually propagate through deriv_y since we don't have lazy evaluation of sensitivities
c = [deriv_y(primal_out)] + [None] * (len(series) - 1)
tmp_sq = [lax.square(v[0])] + [None] * (len(series) - 1)
tmp_exp = [lax.exp(tmp_sq[0])] + [None] * (len(series) - 1)
for k in range(1, len(series)):
# we know c[:k], we compute c[k]
# propagate c to get v
v[k] = fact(k-1) * sum(_scale(k, j) * c[k-j] * u[j] for j in range(1, k + 1))
# propagate v to get next c
# square
tmp_sq[k] = fact(k) * sum(_scale2(k, j) * v[k-j] * v[j] for j in range(k + 1))
# exp
tmp_exp[k] = fact(k-1) * sum(_scale(k, j) * tmp_exp[k-j] * tmp_sq[j] for j in range(1, k + 1))
# const
c[k] = deriv_const * tmp_exp[k]
# we can't, and don't need, to compute c[k+1], just need to get the last v[k]
k = len(series)
v[k] = fact(k-1) * sum(_scale(k, j) * c[k-j] * u[j] for j in range(1, k + 1))
primal_out, *series_out = v
return primal_out, series_out
jet_rules[lax.erf_inv_p] = _erf_inv_rule
### More complicated rules
def fact(n):
return lax.exp(lax.lgamma(n+1.))
def _scale(k, j):
return 1. / (fact(k - j) * fact(j - 1))
def _scale2(k, j):
return 1. / (fact(k - j) * fact(j))
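# Illustrative note: fact(n) evaluates n! via exp(lgamma(n+1)), so
#   _scale(k, j)  = 1 / ((k-j)! * (j-1)!)
#   _scale2(k, j) = 1 / ((k-j)! * j!)
# are the reciprocal-factorial weights used by the truncated-series
# convolutions (Leibniz-style products) in the rules above and below.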
def _exp_taylor(primals_in, series_in):
x, = primals_in
series, = series_in
u = [x] + series
v = [lax.exp(x)] + [None] * len(series)
for k in range(1,len(v)):
v[k] = fact(k-1) * sum([_scale(k, j)* v[k-j] * u[j] for j in range(1, k+1)])
primal_out, *series_out = v
return primal_out, series_out
jet_rules[lax.exp_p] = _exp_taylor
def _pow_taylor(primals_in, series_in):
u_, r_ = primals_in
x, series = jet(lambda x, y: lax.mul(y, lax.log(x)), primals_in, series_in)
u = [x] + series
v = [u_ ** r_] + [None] * len(series)
for k in range(1, len(v)):
v[k] = fact(k-1) * sum([_scale(k, j)* v[k-j] * u[j] for j in range(1, k+1)])
primal_out, *series_out = v
return primal_out, series_out
jet_rules[lax.pow_p] = _pow_taylor
def _integer_pow_taylor(primals_in, series_in, *, y):
if y == 0:
return jet(jnp.ones_like, primals_in, series_in)
elif y == 1:
return jet(lambda x: x, primals_in, series_in)
elif y == 2:
return jet(lambda x: x * x, primals_in, series_in)
x, = primals_in
series, = series_in
u = [x] + series
v = [lax.integer_pow(x, y)] + [None] * len(series)
for k in range(1, len(v)):
vu = sum(_scale(k, j) * v[k-j] * u[j] for j in range(1, k + 1))
uv = sum(_scale(k, j) * u[k-j] * v[j] for j in range(1, k))
v[k] = jnp.where(x == 0, 0, fact(k-1) * (y * vu - uv) / x)
primal_out, *series_out = v
return primal_out, series_out
jet_rules[lax.integer_pow_p] = _integer_pow_taylor
def _expit_taylor(primals_in, series_in):
x, = primals_in
series, = series_in
u = [x] + series
v = [jax.scipy.special.expit(x)] + [None] * len(series)
e = [v[0] * (1 - v[0])] + [None] * len(series) # terms for sigmoid' = sigmoid * (1 - sigmoid)
for k in range(1, len(v)):
v[k] = fact(k-1) * sum([_scale(k, j) * e[k-j] * u[j] for j in range(1, k+1)])
e[k] = (1 - v[0]) * v[k] - fact(k) * sum([_scale2(k, j)* v[j] * v[k-j] for j in range(1, k+1)])
primal_out, *series_out = v
return primal_out, series_out
def _tanh_taylor(primals_in, series_in):
x, = primals_in
series, = series_in
u = [2*x] + [2 * series_ for series_ in series]
primals_in, *series_in = u
primal_out, series_out = _expit_taylor((primals_in, ), (series_in, ))
series_out = [2 * series_ for series_ in series_out]
return 2 * primal_out - 1, series_out
jet_rules[lax.tanh_p] = _tanh_taylor
def _log_taylor(primals_in, series_in):
x, = primals_in
series, = series_in
u = [x] + series
v = [lax.log(x)] + [None] * len(series)
for k in range(1, len(v)):
conv = sum([_scale(k, j) * v[j] * u[k-j] for j in range(1, k)])
v[k] = (u[k] - fact(k - 1) * conv) / u[0]
primal_out, *series_out = v
return primal_out, series_out
jet_rules[lax.log_p] = _log_taylor
def _atan2_taylor(primals_in, series_in):
x, y = primals_in
primal_out = lax.atan2(x, y)
x, series = jet(lax.div, primals_in, series_in)
c0, cs = jet(lambda x: lax.div(1, 1 + lax.square(x)), (x, ), (series, ))
c = [c0] + cs
u = [x] + series
v = [primal_out] + [None] * len(series)
for k in range(1, len(v)):
v[k] = fact(k-1) * sum(_scale(k, j) * c[k-j] * u[j] for j in range(1, k + 1))
primal_out, *series_out = v
return primal_out, series_out
jet_rules[lax.atan2_p] = _atan2_taylor
def _div_taylor_rule(primals_in, series_in):
x, y = primals_in
x_terms, y_terms = series_in
u = [x] + x_terms
w = [y] + y_terms
v = [None] * len(u)
def scale(k, j): return 1. / (fact(k - j) * fact(j))
for k in range(0, len(v)):
conv = sum([scale(k, j) * v[j] * w[k-j] for j in range(0, k)])
v[k] = (u[k] - fact(k) * conv) / w[0]
primal_out, *series_out = v
return primal_out, series_out
jet_rules[lax.div_p] = _div_taylor_rule
def _sinusoidal_rule(sign, prims, primals_in, series_in):
x, = primals_in
series, = series_in
u = [x] + series
s, c = prims
s = [s(x)] + [None] * len(series)
c = [c(x)] + [None] * len(series)
for k in range(1, len(s)):
s[k] = fact(k-1) * sum(_scale(k, j) * u[j] * c[k-j] for j in range(1, k + 1))
c[k] = fact(k-1) * sum(_scale(k, j) * u[j] * s[k-j] for j in range(1, k + 1)) * sign
return (s[0], s[1:]), (c[0], c[1:])
def _get_ind(f, ind):
return lambda *args: f(*args)[ind]
jet_rules[lax.sin_p] = _get_ind(partial(_sinusoidal_rule, -1, (lax.sin, lax.cos)), 0)
jet_rules[lax.cos_p] = _get_ind(partial(_sinusoidal_rule, -1, (lax.sin, lax.cos)), 1)
jet_rules[lax.sinh_p] = _get_ind(partial(_sinusoidal_rule, 1, (lax.sinh, lax.cosh)), 0)
jet_rules[lax.cosh_p] = _get_ind(partial(_sinusoidal_rule, 1, (lax.sinh, lax.cosh)), 1)
def _bilinear_taylor_rule(prim, primals_in, series_in, **params):
x, y = primals_in
x_terms, y_terms = series_in
u = [x] + x_terms
w = [y] + y_terms
v = [None] * len(u)
op = partial(prim.bind, **params)
def scale(k, j): return 1. / (fact(k - j) * fact(j))
for k in range(0, len(v)):
v[k] = fact(k) * sum([scale(k, j) * op(u[j], w[k-j]) for j in range(0, k+1)])
primal_out, *series_out = v
return primal_out, series_out
jet_rules[lax.dot_general_p] = partial(_bilinear_taylor_rule, lax.dot_general_p)
jet_rules[lax.mul_p] = partial(_bilinear_taylor_rule, lax.mul_p)
jet_rules[lax.conv_general_dilated_p] = partial(_bilinear_taylor_rule, lax.conv_general_dilated_p)
def _gather_taylor_rule(primals_in, series_in, **params):
operand, start_indices = primals_in
gs, _ = series_in
primal_out = lax.gather_p.bind(operand, start_indices, **params)
series_out = [lax.gather_p.bind(g, start_indices, **params) for g in gs]
return primal_out, series_out
jet_rules[lax.gather_p] = _gather_taylor_rule
def _gen_reduce_choose_taylor_rule(chooser_fun):
def chooser_taylor_rule(primals_in, series_in, **params):
operand, = primals_in
gs, = series_in
primal_out = chooser_fun(operand, **params)
axes = params.pop("axes", None)
primal_dtype = gs[0].dtype
shape = [1 if i in axes else d for i, d in enumerate(operand.shape)]
location_indicators = lax.convert_element_type(
lax._eq_meet(operand, lax.reshape(primal_out, shape)), primal_dtype)
counts = lax._reduce_sum(location_indicators, axes)
def _reduce_chooser_taylor_rule(g):
return lax.div(lax._reduce_sum(lax.mul(g, location_indicators), axes), counts)
series_out = [_reduce_chooser_taylor_rule(g) for g in gs]
return primal_out, series_out
return chooser_taylor_rule
jet_rules[lax.reduce_max_p] = _gen_reduce_choose_taylor_rule(lax._reduce_max)
jet_rules[lax.reduce_min_p] = _gen_reduce_choose_taylor_rule(lax._reduce_min)
def _abs_taylor_rule(x, series_in, **params):
x, = x
zero = lax.full_like(x, 0, shape=())
primal_out = lax.abs_p.bind(x, **params)
negs = lax.select(lax.lt(x, zero), lax.full_like(x, -1), lax.full_like(x, 1.0))
fix_sign = lambda y: negs * y
series_out = [fix_sign(*terms_in, **params) for terms_in in zip(*series_in)]
return primal_out, series_out
jet_rules[lax.abs_p] = _abs_taylor_rule
def _select_taylor_rule(primal_in, series_in, **params):
b, x, y = primal_in
primal_out = lax.select_p.bind(b, x, y, **params)
sel = lambda _, x, y: lax.select(b, x, y)
series_out = [sel(*terms_in, **params) for terms_in in zip(*series_in)]
return primal_out, series_out
jet_rules[lax.select_p] = _select_taylor_rule
def _lax_max_taylor_rule(primal_in, series_in):
x, y = primal_in
xgy = x > y # greater than mask
xey = x == y # equal to mask
primal_out = lax.select(xgy, x, y)
def select_max_and_avg_eq(x_i, y_i):
"""Select x where x>y or average when x==y"""
max_i = lax.select(xgy, x_i, y_i)
max_i = lax.select(xey, (x_i + y_i)/2, max_i)
return max_i
series_out = [select_max_and_avg_eq(*terms_in) for terms_in in zip(*series_in)]
return primal_out, series_out
jet_rules[lax.max_p] = _lax_max_taylor_rule
def _lax_min_taylor_rule(primal_in, series_in):
x, y = primal_in
xgy = x < y # less than mask
xey = x == y # equal to mask
primal_out = lax.select(xgy, x, y)
def select_min_and_avg_eq(x_i, y_i):
"""Select x where x>y or average when x==y"""
min_i = lax.select(xgy, x_i, y_i)
min_i = lax.select(xey, (x_i + y_i)/2, min_i)
return min_i
series_out = [select_min_and_avg_eq(*terms_in) for terms_in in zip(*series_in)]
return primal_out, series_out
jet_rules[lax.min_p] = _lax_min_taylor_rule
def _custom_jvp_call_jaxpr_rule(primals_in, series_in, *, fun_jaxpr,
jvp_jaxpr_thunk):
# TODO(mattjj): do something better than ignoring custom jvp rules for jet?
del jvp_jaxpr_thunk
return jet(core.jaxpr_as_fun(fun_jaxpr), primals_in, series_in)
jet_rules[custom_jvp_call_jaxpr_p] = _custom_jvp_call_jaxpr_rule
deflinear(lax.tie_in_p)
|
#!/usr/bin/env python3
import argparse
def parse_file(filename):
return open(filename, "rb")
def wc(fp):
"""Returns newline, word, and byte counts for the file.
Args:
fp: file object opened in "rb" mode.
"""
lines, words, read_bytes = 0, 0, 0
is_word = False
block = fp.read1(4096)
while block != b'':
for i in range(len(block)):
c = block[i:i + 1]
if c == b'\n':
lines += 1
if c.isspace():
is_word = False
elif is_word is False:
words += 1
is_word = True
read_bytes += len(block)
block = fp.read1(4096)
return lines, words, read_bytes
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="print newline, word, and byte counts for each file")
parser.add_argument("filename", type=str)
args = parser.parse_args()
fp = parse_file(args.filename)
lines, words, read_bytes = wc(fp)
print("{}\t{}\t{}\t{}".format(lines, words, read_bytes, args.filename))
|
from pyitab.results.simulations import get_results, purge_dataframe, \
calculate_metrics, find_best_k, calculate_centroids, state_errors, \
dynamics_errors
from pyitab.results.base import filter_dataframe
from pyitab.results.dataframe import apply_function
from pyitab.utils import make_dict_product
import pandas as pd
import numpy as np
import matplotlib.pyplot as pl # used below as `pl`
import seaborn as sns # used below as `sns`
from tqdm import tqdm
path = "/media/robbis/DATA/fmri/c2b/derivatives/"
pipeline = "c2b+chieti"
data = get_results(path,
pipeline=pipeline,
field_list=['sample_slicer',
'n_clusters',
'n_components',
'ds.a.snr',
'ds.a.time',
'ds.a.states',
'fetch',
'algorithm'],
#filter={'algorithm':['KMeans']}
)
df = purge_dataframe(data)
conditions = {
'time': [1.5, 2., 2.5, 3.],
#'num': [str(j) for j in np.arange(1, 480)],
'snr': [3, 5, 10],
'algorithm': ['GaussianMixture',
'KMeans',
'AgglomerativeClustering',
'SpectralClustering',
'MiniBatchKMeans'],
'subject': [str(i) for i in np.arange(1, 26)]
}
combinations = make_dict_product(**conditions)
metrics = []
best_k = []
for options in combinations:
df_ = filter_dataframe(df, **options)
options = {k: v[0] for k, v in options.items()}
df_metric = calculate_metrics(df_, fixed_variables=options)
df_metric = df_metric.sort_values('k')
df_k = find_best_k(df_metric)
metrics.append(df_metric)
best_k.append(df_k)
df_metrics = pd.concat(metrics)
df_guess = pd.concat(best_k)
df_guess['hit'] = np.int_(df_guess['guess'].values == 6)
df_guess['abshit'] = np.abs(df_guess['guess'].values - 6)
df_abshit_mean = apply_function(df_guess, keys=['name', 'algorithm'], attr='abshit', fx=np.mean)
df_great_mean = apply_function(df_guess, keys=['name', 'algorithm'], attr='hit', fx=np.mean)
# Plot of metrics
df_mean = apply_function(df_guess, keys=['name'], attr='hit', fx=np.mean)
arg_sort = np.argsort(df_mean['hit'].values)[::-1]
for alg in np.unique(df_great_mean['algorithm']):
df_a = filter_dataframe(df_great_mean, algorithm=[alg])
values = df_a['hit'].values[arg_sort]
pl.plot(values, '-o')
pl.xticks(np.arange(len(values)), df_a['name'].values[arg_sort])
# State similarity
df = calculate_centroids(df)
df = state_errors(df)
df = dynamics_errors(df)
#################################
##### Plot of hits by algorithm #####
#df_guess = pd.read_csv("/home/robbis/Dropbox/simulation_guess.csv")
_, maskg = filter_dataframe(df_guess, return_mask=True, algorithm=['GaussianMixture'])
_, maski = filter_dataframe(df_guess, return_mask=True, name=['Index I'])
mask = np.logical_or(maskg, maski)
df_guess = df_guess.loc[np.logical_not(mask)]
df_great_mean = apply_function(df_guess, keys=['name', 'algorithm', 'snr', 'time'], attr='hit', fx=np.mean)
df_sort = apply_function(df_guess, keys=['name'], attr='hit', fx=np.mean).sort_values(by='hit')
encoding = dict(zip(df_sort['name'].values, np.arange(7)[::-1]))
df_great_mean['metric'] = [encoding[name] for name in df_great_mean['name'].values]
df_guess['metric'] = [encoding[name] for name in df_guess['name'].values]
# hardcoded labels override the encoding-derived ordering
xlabels = ['SIL', 'GEV', 'WGSS', "CV", "EV", "KL"]
#### Total #####
palette = sns.color_palette("magma", 6)[::-1][::2]
f = sns.relplot(x="metric", y="hit", row="time", col="algorithm", hue="snr", data=df_great_mean,
kind='line', height=6, aspect=.75, palette=palette,
legend="full", marker='o', lw=3.5, markersize=15, markeredgecolor='none')
for ax in f.axes[-1]:
ax.set_xticks(np.arange(len(xlabels)))
ax.set_xticklabels(xlabels)
### Best metric ###
f = sns.relplot(x="metric", y="hit", hue="algorithm", data=df_guess,
kind='line', marker='o',
lw=3.5, markersize=15, markeredgecolor='none')
for ax in f.axes[-1]:
ax.set_xticks(1+np.arange(len(xlabels)))
ax.set_xticklabels(xlabels)
### Metric vs snr ###
palette = sns.color_palette("magma", 6)[::-1][::2]
f = sns.relplot(x="metric", y="hit", hue="snr", data=df_guess, palette=palette,
kind='line', marker='o',
lw=3.5, markersize=15, markeredgecolor='none')
for ax in f.axes[-1]:
ax.set_xticks(1+np.arange(len(xlabels)))
ax.set_xticklabels(xlabels)
### Metric vs time ###
df_sort = apply_function(df_guess, keys=['name', 'snr'], attr='hit', fx=np.mean).sort_values(by='hit')
df_sort['metric'] = [encoding[name] for name in df_sort['name'].values]
palette = sns.color_palette("magma", 8)[::-1][::2]
f = sns.relplot(x="metric", y="hit", hue="time", data=df_guess, palette=palette,
kind='line', marker='o', lw=3.5, markersize=15, markeredgecolor='none')
for ax in f.axes[-1]:
ax.set_xticks(1+np.arange(len(xlabels)))
ax.set_xticklabels(xlabels)
############################
fontsize = 15
params = {'axes.labelsize': fontsize-3,
'axes.titlesize': fontsize-2,
'font.size': fontsize,
'legend.fontsize':fontsize-3 ,
'xtick.labelsize':fontsize-2 ,
'ytick.labelsize':fontsize-2}
pl.rcParams.update(params)
full_df = df
palette = sns.color_palette("magma", 6)[::-1]
f = sns.relplot(x="time", y="dynamics_errors", hue="snr", col="algorithm",
height=5, aspect=.75, facet_kws=dict(sharex=False),
kind="line", legend="full", data=full_df, palette=palette[::2],
marker='o', lw=3.5, markersize=15, markeredgecolor='none')
f = sns.relplot(x="time", y="centroid_similarity", hue="snr", col="algorithm",
height=5, aspect=.75, facet_kws=dict(sharex=False),
kind="line", legend="full", data=full_df, palette=palette[::2],
marker='o', lw=3.5, markersize=15, markeredgecolor='none')
flatui = ["#9b59b6", "#3498db", "#e74c3c", "#34495e", "#2ecc71"]
palette = sns.color_palette(flatui)
f = sns.relplot(x="time", y="dynamics_errors", col="snr", hue="algorithm",
height=5, aspect=.75, facet_kws=dict(sharex=False),
kind="line", legend="full", data=full_df, palette=palette,
marker='o', lw=3.5, markersize=15, markeredgecolor='none')
f = sns.relplot(x="time", y="centroid_similarity", col="snr", hue="algorithm",
height=5, aspect=.75, facet_kws=dict(sharex=False),
kind="line", legend="full", data=full_df, palette=palette,
marker='o', lw=3.5, markersize=15, markeredgecolor='none')
##############################################
f = sns.relplot(x="algorithm", y="dynamics_errors",
height=5, aspect=.95, facet_kws=dict(sharex=False),
kind="line", data=full_df,
marker='o', lw=3.5, markersize=15, markeredgecolor='none')
f = sns.relplot(x="algorithm", y="centroid_similarity",
height=5, aspect=.95, facet_kws=dict(sharex=False),
kind="line", data=full_df,
marker='o', lw=3.5, markersize=15, markeredgecolor='none')
########################################
from pyitab.plot.connectivity import plot_connectivity_lines
from matplotlib import animation
from mvpa2.base.hdf5 import h5load
path = "/home/robbis/mount/aalto-work/data/simulations/meg/ds-min_time_1.5-snr_10000.gzip"
ds = h5load(path)
samples = ds.samples
matrices = np.array([copy_matrix(array_to_matrix(m)) for m in samples[::50]])
names = ["node_%02d"%(i+1) for i in range(10)]
def animate(i, fig):
names = ["node_%s" % (str(j+1)) for j in range(10)]
#pl.imshow(matrix[i*100])
pl.clf()
plot_connectivity_lines(matrices[i], facecolor='white',
node_names=names, con_thresh=0.,
kind='circle', fig=fig)
fig = pl.figure(figsize=(8, 8))
anim = animation.FuncAnimation(fig, animate, fargs=[fig],
frames=45, interval=20)
anim.save('/home/robbis/animation.gif', writer='imagemagick', fps=10)
|
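# The original snippet calls partition() but never defines it; below is a
# standard Lomuto partition sketch (a hypothetical completion matching the
# call sites, which expect the final pivot index to be returned).
def partition(li, low, high):
    pivot = li[high]
    i = low - 1
    for j in range(low, high):
        if li[j] <= pivot:
            i += 1
            li[i], li[j] = li[j], li[i]
    li[i + 1], li[high] = li[high], li[i + 1]
    return i + 1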
def quickSort(li):
arr = [] # explicit stack of (low, high) index pairs; avoids recursion
low = 0
high = len(li) - 1
if low < high:
mid = partition(li, low, high)
if low < mid - 1:
arr.append(low)
arr.append(mid - 1)
if mid + 1 < high:
arr.append(mid + 1)
arr.append(high)
while arr:
r = arr.pop()
l = arr.pop()
mid = partition(li, l, r)
if l < mid - 1:
arr.append(l)
arr.append(mid - 1)
if mid + 1 < r:
arr.append(mid + 1)
arr.append(r)
return li
a = quickSort([3,1,6,4,9,5,7,8])
print(a)
|
#!/usr/bin/python
# Copyright 2010 by BBN Technologies Corp.
# All Rights Reserved.
USAGE="""\
Usage: %%prog [RELEASE_DIR] PIPELINE_DIR [options...]
PIPELINE_DIR: The directory created by this script. This directory
will contain the files necessary to run SERIF.
RELEASE_DIR: The SERIF release directory. Defaults to:
%s"""
import os, sys, optparse, textwrap, shutil
#----------------------------------------------------------------------
# Configuration
BYBLOS_DIR = 'Cube2/'
SERVER_BYBLOS_DTD = os.path.join(BYBLOS_DIR, 'install-optimize-static',
'etc', 'server_byblos.dtd')
SERVER_BYBLOS_DRIVER = os.path.join(BYBLOS_DIR, 'install-optimize-static',
"bin", "server_byblos_driver")
CONFIG_FILES = [
'pipeline_config_server_english.xml',
'pipeline_config_english.xml',
'pipeline_config_fbf_english.xml']
CONFIG_PLACEHOLDERS = [
(r'***PATH***', '%(working_dir)s'),
(r'***DATA***', '%(release_dir)s/data'),
(r'***BUILD***', '%(release_dir)s/bin'),
(r'***SCORE***', '%(release_dir)s/scoring'),
# not sure about what this one should point to:
#(r'***REGRESSION**', '%(working_dir)s/regression'),
]
FBF_CONFIG_PLACEHOLDERS = [
(r'***PATH***', '%(working_dir)s'),
(r'***DATA***', '%(release_dir)s/data'),
(r'***BUILD***', '%(release_dir)s/bin/%(language)s'),
(r'***SCORE***', '%(release_dir)s/scoring'),
# not sure about what this one should point to:
#(r'***REGRESSION**', '%(working_dir)s/regression'),
]
SERIF_SERVER_START_PLACEHOLDERS = [
(r'$BYBLOS_DIST', '%(byblos_dist)s'),
]
SERIF_PROCESS_SGM_PLACEHOLDERS = [
(r'$SERIF_PIPELINES', '%(serif_pipelines)s'),
]
DEFAULT_RELEASE_DIR = os.path.split(
os.path.split(os.path.abspath(sys.argv[0]))[0])[0]
#----------------------------------------------------------------------
# Installer.
class PipelineInstaller(object):
def __init__(self, release_dir, working_dir, verbosity=0, dryrun=False):
if release_dir is None:
release_dir = DEFAULT_RELEASE_DIR
self.release_dir = release_dir
self.working_dir = working_dir
self.verbosity = verbosity
self.dryrun = dryrun
#----------------------------------------------------------------------
# Helper functions
#
# These all assume that we're copying things from self.release_dir
# and writing them to self.working_dir
def copy_dir(self, dirname, dirname2=None):
src = os.path.join(self.release_dir, dirname)
dst = os.path.join(self.working_dir, dirname2 or dirname)
parent_dir = os.path.split(dst)[0]
if not os.path.exists(parent_dir): self.make_dir(parent_dir)
if self.verbosity > 0:
self.log('copy dir %s -> %s' % (src, dst))
if not self.dryrun:
shutil.copytree(src, dst)
def make_dir(self, dirname):
dst = os.path.join(self.working_dir, dirname)
if self.verbosity > 0:
self.log('make dir %s' % dst)
if not self.dryrun:
os.makedirs(dst)
def copy_file(self, filename, filename2=None):
src = os.path.join(self.release_dir, filename)
dst = os.path.join(self.working_dir, filename2 or filename)
parent_dir = os.path.split(dst)[0]
if not os.path.exists(parent_dir): self.make_dir(parent_dir)
if self.verbosity > 0:
self.log('copy file %s -> %s' % (src, dst))
if not self.dryrun:
shutil.copy(src, dst)
def replace_placeholders(self, filename, placeholders, **repl_vars):
filename = os.path.join(self.working_dir, filename)
if self.verbosity > 0:
self.log('Replacing placeholders in %s' % filename)
if not self.dryrun:
s = open(filename, 'rU').read()
for (placeholder, repl) in placeholders:
repl = repl % repl_vars
s = s.replace(placeholder, repl)
out = open(filename, 'w')
out.write(s)
out.close()
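# For example (hypothetical paths), given CONFIG_PLACEHOLDERS above,
#   self.replace_placeholders('pipeline_config_english.xml', CONFIG_PLACEHOLDERS,
#                             working_dir='/work', release_dir='/release')
# rewrites every ***PATH*** in the file to /work and ***DATA*** to /release/data.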
def log(self, msg):
if self.dryrun: msg = '[dryrun] %s' % msg
print textwrap.fill(msg, subsequent_indent=' ')
#----------------------------------------------------------------------
# Main Script
def install(self):
if self.verbosity >= 0:
print 'Creating a SERIF pipeline in:'
print ' %r' % self.working_dir
# Create the working directory
if not os.path.exists(self.working_dir):
self.make_dir('')
# Create a templates directory and put the parameter files there.
self.copy_dir('par', 'templates')
# Copy over the server_byblos.dtd file.
self.copy_file(SERVER_BYBLOS_DTD, 'server_byblos.dtd')
# Copy the configuration files
for config_file in CONFIG_FILES:
self.copy_file(os.path.join('config', config_file), config_file)
# Replace placeholders in the config files.
self.replace_placeholders('pipeline_config_server_english.xml',
CONFIG_PLACEHOLDERS,
working_dir=self.working_dir,
release_dir=self.release_dir)
self.replace_placeholders('pipeline_config_english.xml',
CONFIG_PLACEHOLDERS,
working_dir=self.working_dir,
release_dir=self.release_dir)
self.replace_placeholders('pipeline_config_fbf_english.xml',
FBF_CONFIG_PLACEHOLDERS,
working_dir=self.working_dir,
release_dir=self.release_dir,
language='English')
# Copy the server script and edit it to point to server_byblos_driver.
self.copy_file('bin/serif_server_start.sh', 'serif_server_start.sh')
self.replace_placeholders('serif_server_start.sh',
SERIF_SERVER_START_PLACEHOLDERS,
byblos_dist=os.path.join(self.release_dir,
BYBLOS_DIR))
# Copy the process script and edit it to point to the working dir
for script in ['serif_process_sgm_file.sh']:
self.copy_file(os.path.join('bin', script), script)
self.replace_placeholders(script, SERIF_PROCESS_SGM_PLACEHOLDERS,
serif_pipelines=self.working_dir)
def print_instructions(self):
print
print ' SERIF INSTRUCTIONS '.center(75, '=')
print
print 'To start a SERIF server:'
print ' $ cd %s' % self.working_dir
print ' $ serif_server_start.sh pipeline_config_english.xml'
print
print 'To queue a job to process a single SGM file:'
print ' $ cd %s' % self.working_dir
print ' $ serif_process_sgm_file.sh /ABSOLUTE/PATH/TO/FILE.SGM'
print
print 'To queue a job to process multiple SGM files:'
print ' $ cd %s' % self.working_dir
print (' $ cp /PATH/TO/FILE/LIST '
'pipeline-1000/input_dir/JOB_NAME.source_files')
print ' $ touch pipeline-1000/input_dir/JOB_NAME.go'
if __name__ == '__main__':
parser = optparse.OptionParser(usage=USAGE % DEFAULT_RELEASE_DIR)
parser.add_option("-v", action='count', dest='verbose', default=0,
help="Generate more verbose output")
parser.add_option("-q", action='count', dest='quiet', default=0,
help="Generate less verbose output")
parser.add_option('--dryrun', dest='dryrun', action='store_true',
default=False)
parser.add_option('-f', '--force', dest='force', action='store_true',
default=False)
(options, args) = parser.parse_args()
if len(args) == 2:
(release_dir, working_dir) = args
elif len(args) == 1:
(release_dir, working_dir) = (None, args[0])
elif len(args) == 0:
parser.print_help()
sys.exit(-1)
else:
parser.error('Too many arguments')
if release_dir is None:
release_dir = DEFAULT_RELEASE_DIR
release_dir = os.path.abspath(release_dir)
working_dir = os.path.abspath(working_dir)
if not os.path.exists(release_dir):
parser.error('Release dir %r not found' % release_dir)
if not os.path.isdir(release_dir):
parser.error('Release dir %r is not a directory' % release_dir)
if os.path.exists(working_dir):
if options.force:
print 'Deleting old pipeline dir: %r' % working_dir
shutil.rmtree(working_dir)
else:
parser.error('Pipeline dir %r already exists' % working_dir)
#verbosity = (1 + options.verbose - options.quiet)
verbosity = (options.verbose - options.quiet)
installer = PipelineInstaller(release_dir, working_dir, verbosity,
options.dryrun)
installer.install()
installer.print_instructions()
|
from utils import *
from pprint import pprint as pp
import requests, json, sys, decimal, pytz
from datetime import datetime, timedelta
timezone = pytz.timezone('Europe/Oslo')
if (len(sys.argv) != 13 and len(sys.argv) != 6) \
or (len(sys.argv) == 6 and sys.argv[4] != '-'):
sys.stderr.write('Usage: %s <offset> <days> <previous amount> <500> <200> <100> <50> <20> <10> <5> <1> <removed>\n' % sys.argv[0])
sys.stderr.write(' %s <offset> <days> <previous amount> - <removed>\n' % sys.argv[0])
sys.exit(1)
access_token = get_access_token()
if len(sys.argv) == 13:
sys_argv = sys.argv
else:
sys_argv = sys.argv[:4]
for line in sys.stdin.readlines():
line = line.strip()
if len(line) != 0:
sys_argv.append(line)
sys_argv.append(sys.argv[5])
offset = int(sys_argv[1])
days_number = int(sys_argv[2])
prev_amount = decimal.Decimal(sys_argv[3])
removed = decimal.Decimal(sys_argv[12])
cash_types = [500, 200, 100, 50, 20, 10, 5, 1, 1] # trailing 1 consumes the <removed> argument, which sys_argv[4:] also includes
cash_amount = decimal.Decimal(0)
for cash_str in sys_argv[4:]:
cash_type = cash_types.pop(0)
if '=' in cash_str:
cash_type, cash_str = cash_str.split('=')
cash_type = int(cash_type)
cash_amount += decimal.Decimal(cash_str) * cash_type
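# Example (hypothetical values): a plain count "3" at the first position adds
# 3*500 to cash_amount; an argument like "200=2" overrides the implicit
# denomination and adds 2*200 instead.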
current_ts = datetime.now()
reference_ts = datetime(current_ts.year, current_ts.month, current_ts.day, 0, 0).replace(tzinfo = timezone)
start_date = (reference_ts - timedelta(days = offset)).strftime('%Y-%m-%d')
stop_date = reference_ts - timedelta(days = offset) + timedelta(days = days_number)
method = 'get'
server = 'purchase'
path = 'purchases/v2'
url = 'https://%s.izettle.com/%s' % (server, path)
res = getattr(requests, method)(url,
headers = {'Authorization': 'Bearer %s' % access_token},
params = {'startDate': start_date})
data = json.loads(res.text)
last_date_created = None
full_amount = decimal.Decimal(0)
to_break = False
for purchase in data['purchases']:
cash = False
try: timestamp = datetime.strptime(purchase['created'], '%Y-%m-%dT%H:%M:%S.%f%z')
except ValueError: timestamp = datetime.strptime(purchase['created'], '%Y-%m-%dT%H:%M:%S%z')
if timestamp > stop_date: continue
timestamp = timestamp.astimezone(timezone)
date_created = timestamp.strftime('%Y-%m-%d')
time_created = timestamp.strftime('%H:%M')
amount = decimal.Decimal(purchase['amount'])
handout = decimal.Decimal(0)
change = decimal.Decimal(0)
for payment in purchase['payments']:
if payment['type'] == 'IZETTLE_CASH':
cash = True
handout += payment['attributes']['handedAmount']
if 'changeAmount' in payment['attributes']:
change += payment['attributes']['changeAmount']
if cash and amount > 0:
if last_date_created != date_created and (full_amount > 0 or last_date_created is None):
if last_date_created is not None:
print("SUM AMOUNT:", "%7.0f" % (full_amount / 100))
print("OUT KASSE: ", "%7.0f" % (prev_amount + (full_amount / 100)), "\n")
prev_amount = prev_amount + (full_amount/100)
print(date_created, "IN KASSE:", "%7.0f" % prev_amount)
full_amount = decimal.Decimal(0)
last_date_created = date_created
full_amount += amount
print("%s %s % 8s % 8s % 8s" % (date_created, time_created,
"%7.0f" % (amount / 100),
"%7.0f" % (handout / 100),
"%7.0f" % (change / 100)))
print("SUM AMOUNT:", "%7.0f" % (full_amount / 100))
new_amount = prev_amount + (full_amount / 100)
print("OUT KASSE: ", "%7.0f" % new_amount)
print("\nREAL STATE:", "%7.0f-%7.0f=%7.0f" % (cash_amount, removed, cash_amount-removed))
print("DIFFERENCE:", "%7.0f" % (cash_amount - new_amount))
|
# -*- coding: utf-8 -*-
import requests
from base import Base
from constants import API_BASE_URL
from image import Image
from line import Line
class Step(Base):
'''One step in a guide.
:var int guideid: The id of the :class:`pyfixit.guide.Guide` owning this
step. Ex: ``5``.
:var int stepid: This step's id. Ex: ``14``.
:var int orderby: This step's location in its guide relative to the other
steps. Lower numbers come first. Counting starts at 0 and
should not duplicate or skip over any natural numbers. Ex:
``0``.
:var int revision: The revisionid associated with this version of the step
in the database, suitable for determining equality of
objects not modified after being pulled from the API. Ex:
``33880``.
:var string title: The title of this step. May be an empty string.
:var iterable lines: An iterable of the :class:`pyfixit.line.Line` objects
composing this step.
:var iterable media: *(Optional)* A list of :class:`pyfixit.image.Image`
objects illustrating the step.
'''
def __init__(self, guideid, stepid, data=None):
self.guideid = guideid
self.stepid = stepid
# Usually we're going to have a blob of data, since a GET of a guide
# includes all the data of its steps.
if data:
self._update(data)
def refresh(self):
'''Refetch instance data from the API.
'''
# There's no GET endpoint for steps, so get the parent guide and loop
# through its steps until we find the right one.
response = requests.get('%s/guides/%s' % (API_BASE_URL, self.guideid))
attributes = response.json()
for step in attributes['steps']:
if step['stepid'] == self.stepid:
self._update(step)
return
raise Exception('Step with id %s not found in guide %s.' \
% (self.stepid, self.guideid))
def _update(self, data):
'''Update the step using the blob of json-parsed data directly from the
API.
'''
self.orderby = data['orderby']
self.revision = data['revisionid']
self.title = data['title']
self.lines = [Line(self.guideid, self.stepid, line['lineid'], data=line)
for line in data['lines']]
# TODO: Support video.
if data['media']['type'] == 'image':
self.media = []
for image in data['media']['data']:
self.media.append(Image(image['id']))
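# Usage sketch (ids taken from the docstring examples above):
#   step = Step(guideid=5, stepid=14)
#   step.refresh()  # fetches the parent guide and fills in this step's data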
|
pragma solidity =0.7.3;
interface IERC20 {
function totalSupply() external view returns (uint256);
function balanceOf(address account) external view returns (uint256);
function allowance(address owner, address spender) external view returns (uint256);
function transfer(address recipient, uint256 amount) external returns (bool);
function approve(address spender, uint256 amount) external returns (bool);
function transferFrom(address sender, address recipient, uint256 amount) external returns (bool);
event Transfer(address indexed from, address indexed to, uint256 value);
event Approval(address indexed owner, address indexed spender, uint256 value);
}
contract ERC20Basic is IERC20 {
string public constant name = "ERC20Basic";
string public constant symbol = "ERC";
uint8 public constant decimals = 18;
//event Approval(address indexed tokenOwner, address indexed spender, uint tokens);
//event Transfer(address indexed from, address indexed to, uint tokens);
mapping(address => uint256) balances;
mapping(address => mapping (address => uint256)) allowed;
uint256 totalSupply_;
using SafeMath for uint256;
// visibility specifiers on constructors are disallowed as of Solidity 0.7
constructor(uint256 total) {
totalSupply_ = total;
balances[msg.sender] = totalSupply_;
}
function totalSupply() public override view returns (uint256) {
return totalSupply_;
}
function balanceOf(address tokenOwner) public override view returns (uint256) {
return balances[tokenOwner];
}
function transfer(address receiver, uint256 numTokens) public override returns (bool) {
require(numTokens <= balances[msg.sender]);
balances[msg.sender] = balances[msg.sender].sub(numTokens);
balances[receiver] = balances[receiver].add(numTokens);
emit Transfer(msg.sender, receiver, numTokens);
return true;
}
function approve(address delegate, uint256 numTokens) public override returns (bool) {
allowed[msg.sender][delegate] = numTokens;
emit Approval(msg.sender, delegate, numTokens);
return true;
}
function allowance(address owner, address delegate) public override view returns (uint) {
return allowed[owner][delegate];
}
function transferFrom(address owner, address buyer, uint256 numTokens) public override returns (bool) {
require(numTokens <= balances[owner]);
require(numTokens <= allowed[owner][msg.sender]);
balances[owner] = balances[owner].sub(numTokens);
allowed[owner][msg.sender] = allowed[owner][msg.sender].sub(numTokens);
balances[buyer] = balances[buyer].add(numTokens);
emit Transfer(owner, buyer, numTokens);
return true;
}
}
library SafeMath {
function sub(uint256 a, uint256 b) internal pure returns (uint256) {
assert(b <= a);
return a - b;
}
function add(uint256 a, uint256 b) internal pure returns (uint256) {
uint256 c = a + b;
assert(c >= a);
return c;
}
}
|
from __future__ import print_function
import torch
import torch.optim as optim
from torch.autograd import Variable
# torch.backends.cudnn.benchmark = True
import os
import sys
import datetime
import math
import argparse
import numpy as np
from torchtools import TorchTools, Tensor, cuda
import net_sphere
from data_loader import get_train_loader, get_val_loader
ptypes = ['bb', 'fd', 'fs', 'md', 'ms', 'sibs', 'ss'] # FIW pair types
csv_base_path = 'pairs/{}_val.csv'
def printoneline(*argv):
s = ''
for arg in argv: s += str(arg) + ' '
s = s[:-1]
sys.stdout.write('\r' + s)
sys.stdout.flush()
def save_model(model, filename):
state = model.state_dict()
for key in state: state[key] = state[key].clone().cpu()
torch.save(state, filename)
def dt():
return datetime.datetime.now().strftime('%H:%M:%S')
def KFold(n, n_folds=5, shuffle=False):
folds = []
base = list(range(n))
for i in range(n_folds):
test = base[int(i * n / n_folds):int(math.ceil((i + 1) * n / n_folds))]
train = list(set(base) - set(test))
folds.append([train, test])
return folds
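# e.g. KFold(10, n_folds=5) yields (train, test) index splits like
# ([2..9], [0, 1]), ([0, 1, 4..9], [2, 3]), ...; the order of the train
# indices is not guaranteed, since it comes from a set difference.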
def eval_acc(threshold, diff):
y_true = []
y_predict = []
for d in diff:
same = True if float(d[0]) > threshold else False
y_predict.append(same)
y_true.append(bool(d[1]))
y_true = np.array(y_true)
y_predict = np.array(y_predict)
tp = np.sum(np.logical_and(y_predict, y_true))
fp = np.sum(np.logical_and(y_predict, np.logical_not(y_true)))
tn = np.sum(np.logical_and(np.logical_not(y_predict), np.logical_not(y_true)))
fn = np.sum(np.logical_and(np.logical_not(y_predict), y_true))
tpr = 0 if (tp + fn == 0) else float(tp) / float(tp + fn)
fpr = 0 if (fp + tn == 0) else float(fp) / float(fp + tn)
acc = float(tp + tn) / len(y_true)
return tpr, fpr, acc
def find_best_threshold(thresholds, predicts):
best_threshold = best_acc = 0
for threshold in thresholds:
_, _, accuracy = eval_acc(threshold, predicts)
if accuracy >= best_acc:
best_acc = accuracy
best_threshold = threshold
return best_threshold
def validate(net, base_dir='val'):
print('Begin validation')
net.eval()
accuracy = []
for i, ptype in enumerate(ptypes):
if i > 0:
# only the first pair type is evaluated; return its mean fold accuracy
return np.mean(accuracy)
csv_path = os.path.join(base_dir, csv_base_path.format(ptype))
loader = get_val_loader(base_dir, csv_path)
dists = []
for pairs, labels in iter(loader):
img_a = Variable(pairs[0]).type(Tensor)
img_b = Variable(pairs[1]).type(Tensor)
_, embs_a = net(img_a)
_, embs_b = net(img_b)
embs_a = embs_a.data
embs_b = embs_b.data
for i in range(len(embs_a)):
cos_dis = embs_a[i].dot(embs_b[i]) / (embs_a[i].norm() * embs_b[i].norm() + 1e-5)
dists.append([cos_dis, int(labels[i])])
dists = np.array(dists)
tprs = []
fprs = []
accuracy = []
thd = []
folds = KFold(n=len(loader), n_folds=5, shuffle=False)
thresh = np.arange(-1.0, 1.0, 0.005)
for idx, (train, test) in enumerate(folds):
best_thresh = find_best_threshold(thresh, dists[train])
tpr, fpr, acc = eval_acc(best_thresh, dists[test])
tprs += [tpr]
fprs += [fpr]
accuracy += [acc]
thd.append(best_thresh)
# Compute ROC curve and ROC area for each class
# fpr = dict()
# tpr = dict()
# roc_auc = dict()
# for i in range(n_classes):
# fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
# roc_auc[i] = auc(fpr[i], tpr[i])
print('PTYPE={} TPR={:.4f} FPR={:.4f} ACC={:.4f} std={:.4f} thd={:.4f}'.format(ptype,
np.mean(tprs),
np.mean(fprs),
np.mean(accuracy),
np.std(accuracy),
np.mean(thd)))
def train(net, optimizer, epoch, loader):
net.train()
train_loss = 0
correct = 0
total = 0
batch_idx = 0
for inputs, targets in iter(loader):
if cuda: inputs, targets = inputs.cuda(), targets.cuda()
optimizer.zero_grad()
inputs, targets = Variable(inputs), Variable(targets)
outputs, _ = net(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
lossd = loss.data[0]
del loss
train_loss += lossd
outputs = outputs[0] # 0=cos_theta 1=phi_theta
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
printoneline(dt(), 'Te=%d Loss=%.4f | AccT=%.4f%% (%d/%d) %.4f %.2f %d'
% (epoch, train_loss / (batch_idx + 1), 100.0 * correct / total, correct, total,
lossd, criterion.lamb, criterion.it))
batch_idx += 1
print('')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='FIW Sphereface Baseline')
parser.add_argument('--net', '-n', default='sphere20a', type=str)
parser.add_argument('--lr', default=0.01, type=float, help='initial learning rate')
parser.add_argument('--n_epochs', default=10, type=int, help='number of training epochs')
parser.add_argument('--batch_size', default=64, type=int, help='training batch size')
parser.add_argument('--train', action='store_true', help='set to skip training')
parser.add_argument('--finetune', action='store_true', help='set to fine-tune the pretrained model')
parser.add_argument('--pretrained', default='model/sphere20a_20171020.7z', type=str,
help='the pretrained model to point to')
parser.add_argument('--data_dir', '-d', type=str, default='/home/jrobby/datasets/FIW/RFIW/',
help='Root directory of data (assumed to contain traindata and valdata)')
args = parser.parse_args()
net = getattr(net_sphere, args.net)(classnum=300)
model_state = net.state_dict()
if args.finetune:
print('Fine-tuning pretrained model at {}'.format(args.pretrained))
# freeze all conv/relu layers except block 4
for name, param in net.named_parameters():
if (name[:4] == 'conv' or name[:4] == 'relu') and name[4] != '4':
param.requires_grad = False
pretrained_state = torch.load(args.pretrained)
pretrained_state = {k: v for k, v in pretrained_state.items() if
k in model_state and v.size() == model_state[k].size()}
model_state.update(pretrained_state)
net.load_state_dict(model_state)
if cuda:
net.cuda()
criterion = net_sphere.AngleLoss()
# train_dir = '/Users/josephrobinson/Downloads/'
train_dir = args.data_dir + '/train/'
val_dir = args.data_dir + '/val/'
train_loader = get_train_loader(train_dir, batch_size=args.batch_size)
print('start: time={}'.format(dt()))
# optimizer = optim.Adam(net.parameters(), lr=args.lr)
best_acc = 0
if not args.train:
print('Begin train')
for epoch in range(args.n_epochs):
if epoch in [0, 2, 4, 6, 8]:
if epoch != 0: args.lr *= 0.1 # decay the learning rate every two epochs
params = [x for x in net.parameters() if x.requires_grad]
optimizer = optim.SGD(params, lr=args.lr, momentum=0.9, weight_decay=5e-4)
train(net, optimizer, epoch, train_loader)
acc = validate(net, val_dir)
TorchTools.save_checkpoint({'epoch': epoch + 1,
'state_dict': net.state_dict(),
'optimizer': optimizer.state_dict(),
'best_acc': acc}, is_best=acc > best_acc,
checkpoint_dir='/home/jrobby/FIW_KRT/sphereface_rfiw_baseline/finetuned/')
best_acc = acc if acc > best_acc else best_acc
# if best_acc < acc:
# save_model(net, '{}_{}.pth'.format(args.net, epoch))
# best_acc = acc
print('finish: time={}\n'.format(dt()))
|
## Issue related to time resolution/smoothness
# http://bulletphysics.org/mediawiki-1.5.8/index.php/Stepping_The_World
from gibson.core.physics.scene_building import SinglePlayerBuildingScene
from gibson.core.physics.scene_stadium import SinglePlayerStadiumScene
import pybullet as p
import time
import random
import zmq
import math
import argparse
import os
import json
import numpy as np
from transforms3d import euler, quaternions
from gibson.core.physics.physics_object import PhysicsObject
from gibson.core.render.profiler import Profiler
import gym, gym.spaces, gym.utils, gym.utils.seeding
import sys
import yaml
class BaseEnv(gym.Env):
"""
Base class for loading environments in a Scene.
Handles scene loading, starting physical simulation
These environments create single-player scenes and behave like normal Gym environments.
Multiplayer is not yet supported
"""
def __init__(self, config, scene_type, tracking_camera):
## Properties already instantiated from SensorEnv/CameraEnv
# @self.robot
self.gui = config["mode"] == "gui"
self.model_id = config["model_id"]
self.timestep = config["speed"]["timestep"]
self.frame_skip = config["speed"]["frameskip"]
self.resolution = config["resolution"]
self.tracking_camera = tracking_camera
self.robot = None
#target_orn, target_pos = config["target_orn"], config["target_pos"]
#initial_orn, initial_pos = config["initial_orn"], config["initial_pos"]
if config["display_ui"]:
#self.physicsClientId = p.connect(p.DIRECT)
self.physicsClientId = p.connect(p.GUI, "--opengl2")
p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
elif (self.gui):
self.physicsClientId = p.connect(p.GUI, "--opengl2")
else:
self.physicsClientId = p.connect(p.DIRECT)
self.camera = Camera()
self._seed()
self._cam_dist = 3
self._cam_yaw = 0
self._cam_pitch = -30
self.scene_type = scene_type
self.scene = None
def parse_config(self, config):
with open(config, 'r') as f:
config_data = yaml.load(f, Loader=yaml.FullLoader)
return config_data
def create_scene(self):
if self.scene is not None:
return
if self.scene_type == "stadium":
self.scene = self.create_single_player_stadium_scene()
elif self.scene_type == "building":
self.scene = self.create_single_player_building_scene()
else:
raise AssertionError()
self.robot.scene = self.scene
def create_single_player_building_scene(self):
return SinglePlayerBuildingScene(self.robot, model_id=self.model_id, gravity=9.8, timestep=self.timestep, frame_skip=self.frame_skip, env=self)
def create_single_player_stadium_scene(self):
return SinglePlayerStadiumScene(self.robot, gravity=9.8, timestep=self.timestep, frame_skip=self.frame_skip, env=self)
def configure(self, args):
self.robot.args = args
def _seed(self, seed=None):
self.np_random, seed = gym.utils.seeding.np_random(seed)
return [seed]
def _reset(self):
assert self.robot is not None, "Please introduce a robot to the environment before resetting."
p.configureDebugVisualizer(p.COV_ENABLE_GUI,0)
p.configureDebugVisualizer(p.COV_ENABLE_KEYBOARD_SHORTCUTS, 0)
p.configureDebugVisualizer(p.COV_ENABLE_MOUSE_PICKING, 0)
p.configureDebugVisualizer(p.COV_ENABLE_SHADOWS, 1)
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)
self.frame = 0
self.done = 0
self.reward = 0
state = self.robot.reset()
self.scene.episode_restart()
return state
def _render(self, mode, close):
base_pos=[0,0,0]
if (hasattr(self,'robot')):
if (hasattr(self.robot,'body_xyz')):
base_pos = self.robot.body_xyz
view_matrix = p.computeViewMatrixFromYawPitchRoll(
cameraTargetPosition=base_pos,
distance=self._cam_dist,
yaw=self._cam_yaw,
pitch=self._cam_pitch,
roll=0,
upAxisIndex=2)
proj_matrix = p.computeProjectionMatrixFOV(
fov=60, aspect=float(self._render_width)/self._render_height,
nearVal=0.1, farVal=100.0)
(_, _, px, _, _) = p.getCameraImage(
width=self._render_width, height=self._render_height, viewMatrix=view_matrix,
projectionMatrix=proj_matrix,
renderer=p.ER_BULLET_HARDWARE_OPENGL
)
rgb_array = np.array(px).reshape((self._render_width, self._render_height, -1))
if close: return None
rgb_array = rgb_array[:, :, :3]
return rgb_array
def render_physics(self):
robot_pos, _ = p.getBasePositionAndOrientation(self.robot_tracking_id)
view_matrix = p.computeViewMatrixFromYawPitchRoll(
cameraTargetPosition=robot_pos,
distance=self.tracking_camera["distance"],
yaw=self.tracking_camera["yaw"],
pitch=self.tracking_camera["pitch"],
roll=0,
upAxisIndex=2)
proj_matrix = p.computeProjectionMatrixFOV(
fov=60, aspect=float(self._render_width)/self._render_height,
nearVal=0.1, farVal=100.0)
with Profiler("render physics: Get camera image"):
(_, _, px, _, _) = p.getCameraImage(
width=self._render_width, height=self._render_height, viewMatrix=view_matrix,
projectionMatrix=proj_matrix,
renderer=p.ER_TINY_RENDERER
)
rgb_array = np.array(px).reshape((self._render_width, self._render_height, -1))
rgb_array = rgb_array[:, :, :3]
return rgb_array
def render_map(self):
base_pos=[0, 0, -3]
if (hasattr(self,'robot')):
if (hasattr(self.robot,'body_xyz')):
base_pos[0] = self.robot.body_xyz[0]
base_pos[1] = self.robot.body_xyz[1]
view_matrix = p.computeViewMatrixFromYawPitchRoll(
cameraTargetPosition=base_pos,
distance=35,
yaw=0,
pitch=-89,
roll=0,
upAxisIndex=2)
proj_matrix = p.computeProjectionMatrixFOV(
fov=60, aspect=float(self._render_width)/self._render_height,
nearVal=0.1, farVal=100.0)
(_, _, px, _, _) = p.getCameraImage(
width=self._render_width, height=self._render_height, viewMatrix=view_matrix,
projectionMatrix=proj_matrix,
renderer=p.ER_BULLET_HARDWARE_OPENGL
)
rgb_array = np.array(px).reshape((self._render_width, self._render_height, -1))
rgb_array = rgb_array[:, :, :3]
return rgb_array
def get_action_dim(self):
return len(self.robot.ordered_joints)
def get_observation_dim(self):
return 1
def _close(self):
if (self.physicsClientId>=0):
p.disconnect(self.physicsClientId)
self.physicsClientId = -1
def set_window(self, posX, posY, sizeX, sizeY):
values = {
'name': "Robot",
'gravity': 0,
'posX': int(posX),
'posY': int(posY),
'sizeX': int(sizeX),
'sizeY': int(sizeY)
}
cmd = 'wmctrl -r \"Bullet Physics\" -e {gravity},{posX},{posY},{sizeX},{sizeY}'.format(**values)
os.system(cmd)
cmd = "xdotool search --name \"Bullet Physics\" set_window --name \"Robot's world\""
os.system(cmd)
class Camera:
def __init__(self):
#p.startStateLogging(p.STATE_LOGGING_VIDEO_MP4, "GUI_VID.mp4")
pass
def move_and_look_at(self,i,j,k,x,y,z):
lookat = [x,y,z]
distance = 10
yaw = 10
# the original snippet is truncated here; completing with the standard
# pybullet debug-camera call this helper normally makes (pitch of -20 is
# an assumption)
p.resetDebugVisualizerCamera(distance, yaw, -20, lookat)
|
#!/usr/bin/python
import os
import math
import yaml
import sys
STATS_LOG = "/usr/local/var/log/suricata/stats.log"
def get_seconds(tm):
hms = tm.split(":")
return (int(hms[0]) * 3600) + (int(hms[1]) * 60) + int(hms[2])
def get_difference(values):
return [x - values[i - 1] for i, x in enumerate(values)][1:]
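# e.g. get_difference([10, 14, 21]) -> [4, 7] (per-interval increments)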
def get_median(values):
tmp = []
for value in values:
tmp.append(value)
tmp.sort()
length = len(tmp)
if length == 0:
return 0.0
median = length / 2
if length % 2 == 1:
return tmp[median]
else:
return ((tmp[median - 1] + tmp[median]) * 1.0) / 2
def get_stdev(values):
tmp = []
for value in values:
tmp.append(value)
tmp.sort()
length = len(tmp)
if length == 0:
return 0.0
avg = (sum(tmp) * 1.0) / length
dev = []
for x in tmp:
dev.append(x - avg)
sqr = []
for x in dev:
sqr.append(x * x)
if len(sqr) <= 1:
return 0.0
return math.sqrt(sum(sqr) / (len(sqr) - 1))
def get_median_filtered(values):
if len(values) == 0:
return 0.0
tmp = []
for value in values:
tmp.append(value)
tmp.sort()
med_tmp = get_median(tmp)
std_tmp = get_stdev(tmp)
# iterate over a copy so removals don't skip elements
for t in list(tmp):
if t < (med_tmp - (std_tmp * 0.34)) or t > (med_tmp + (std_tmp * 0.34)):
tmp.remove(t)
length = len(tmp)
if length == 0:
return 0.0
median = length / 2
if length % 2 == 1:
return tmp[median]
else:
return ((tmp[median - 1] + tmp[median]) * 1.0) / 2
os.system("sync")
os.system("sync")
os.system("sync")
print "Started result processing ..."
source = open(STATS_LOG, "r")
lines = source.read().splitlines()
source.close()
raw_times_inc = []
raw_frames_inc = []
raw_drops_inc = []
raw_packets_inc = []
raw_bytes_inc = []
raw_times = 0.0
raw_frames = 0.0
raw_drops = 0.0
raw_packets = 0.0
raw_bytes = 0.0
flag = 0
for index in range(len(lines)):
line = lines[index].split()
if line[0] == "Date:":
raw_times = get_seconds(line[3])
flag += 1
if line[0] == "capture.kernel_packets":
raw_frames = int(line[4]) * 1.0
flag += 1
elif line[0] == "capture.kernel_drops":
raw_drops = int(line[4]) * 1.0
flag += 1
elif line[0] == "decoder.pkts":
raw_packets = int(line[4]) * 1.0
flag += 1
elif line[0] == "decoder.bytes":
raw_bytes = int(line[4]) * 1.0
flag += 1
if flag == 5:
raw_times_inc.append(raw_times)
raw_frames_inc.append(raw_frames)
raw_drops_inc.append(raw_drops)
raw_packets_inc.append(raw_packets)
raw_bytes_inc.append(raw_bytes)
raw_times = 0.0
raw_frames = 0.0
raw_drops = 0.0
raw_packets = 0.0
raw_bytes = 0.0
flag = 0
time_diff = get_difference(raw_times_inc)
frames_diff = get_difference(raw_frames_inc)
drops_diff = get_difference(raw_drops_inc)
packets_diff = get_difference(raw_packets_inc)
bytes_diff = get_difference(raw_bytes_inc)
frames_end = []
drops_end = []
packets_end = []
bytes_end = []
delete = 0
for index in range(len(frames_diff)):
if delete > 2 and frames_diff[index] > 0:
try:
frames_end.append(frames_diff[index] / time_diff[index])
drops_end.append(drops_diff[index] / time_diff[index])
packets_end.append(packets_diff[index] / time_diff[index])
bytes_end.append(bytes_diff[index] / time_diff[index])
except BaseException as ex:
print("Exception at {}: {}".format(index, ex))
delete += 1
f = get_median_filtered(frames_end)
d = get_median_filtered(drops_end)
p = get_median_filtered(packets_end)
b = get_median_filtered(bytes_end)
if f == 0.0:
print 0.0, 0.0, 0.0
print "zero frames"
else:
print p, b, d / f
OUTPUT = sys.argv[1]
result = dict()
result["suricata_packets"] = p
result["suricata_bytes"] = b
result["suricata_dropped"] = d
if f > 0:
result["suricata_drops"] = d/f
else:
result["suricata_drops"] = "error f=0"
# write yml
print "Writing %r" % OUTPUT
with open(OUTPUT, "w") as out_file:
yaml.dump(result, out_file, default_flow_style=False)
print "done."
|
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import urllib
from unittest.mock import Mock, patch
from twisted.trial import unittest
from sydent.util.emailutils import sendEmail
from tests.utils import make_sydent
class TestTemplate(unittest.TestCase):
def setUp(self):
# Create a new sydent
config = {
"general": {
"templates.path": os.path.join(
os.path.dirname(os.path.dirname(__file__)), "res"
),
},
}
self.sydent = make_sydent(test_config=config)
def test_jinja_vector_invite(self):
substitutions = {
"address": "foo@example.com",
"medium": "email",
"room_alias": "#somewhere:exmaple.org",
"room_avatar_url": "mxc://example.org/s0meM3dia",
"room_id": "!something:example.org",
"room_name": "Bob's Emporium of Messages",
"sender": "@bob:example.com",
"sender_avatar_url": "mxc://example.org/an0th3rM3dia",
"sender_display_name": "<Bob Smith>",
"bracketed_verified_sender": "Bob Smith",
"bracketed_room_name": "Bob's Emporium of Messages",
"to": "person@test.test",
"token": "a_token",
"ephemeral_private_key": "mystery_key",
"web_client_location": "https://app.element.io",
"room_type": "",
}
# self.sydent.config.email.invite_template is deprecated
if self.sydent.config.email.invite_template is None:
templateFile = self.sydent.get_branded_template(
"vector-im",
"invite_template.eml",
)
else:
templateFile = self.sydent.config.email.invite_template
with patch("sydent.util.emailutils.smtplib") as smtplib:
sendEmail(self.sydent, templateFile, "test@test.com", substitutions)
smtp = smtplib.SMTP.return_value
email_contents = smtp.sendmail.call_args[0][2].decode("utf-8")
# test url input is encoded
self.assertIn(urllib.parse.quote("mxc://example.org/s0meM3dia"), email_contents)
# test html input is escaped
self.assertIn("Bob&#39;s Emporium of Messages", email_contents)
# test safe values are not escaped
self.assertIn("<Bob Smith>", email_contents)
# test our link is as expected
expected_url = (
"https://app.element.io/#/room/"
+ urllib.parse.quote("!something:example.org")
+ "?email="
+ urllib.parse.quote("test@test.com")
+ "&signurl=https%3A%2F%2Fvector.im%2F_matrix%2Fidentity%2Fapi%2Fv1%2Fsign-ed25519%3Ftoken%3D"
+ urllib.parse.quote("a_token")
+ "%26private_key%3D"
+ urllib.parse.quote("mystery_key")
+ "&room_name="
+ urllib.parse.quote("Bob's Emporium of Messages")
+ "&room_avatar_url="
+ urllib.parse.quote("mxc://example.org/s0meM3dia")
+ "&inviter_name="
+ urllib.parse.quote("<Bob Smith>")
+ "&guest_access_token=&guest_user_id=&room_type="
)
text = email_contents.splitlines()
link = text[19]
self.assertEqual(link, expected_url)
def test_jinja_matrix_invite(self):
substitutions = {
"address": "foo@example.com",
"medium": "email",
"room_alias": "#somewhere:exmaple.org",
"room_avatar_url": "mxc://example.org/s0meM3dia",
"room_id": "!something:example.org",
"room_name": "Bob's Emporium of Messages",
"sender": "@bob:example.com",
"sender_avatar_url": "mxc://example.org/an0th3rM3dia",
"sender_display_name": "<Bob Smith>",
"bracketed_verified_sender": "Bob Smith",
"bracketed_room_name": "Bob's Emporium of Messages",
"to": "person@test.test",
"token": "a_token",
"ephemeral_private_key": "mystery_key",
"web_client_location": "https://matrix.org",
"room_type": "",
}
# self.sydent.config.email.invite_template is deprecated
if self.sydent.config.email.invite_template is None:
templateFile = self.sydent.get_branded_template(
"matrix-org",
"invite_template.eml",
)
else:
templateFile = self.sydent.config.email.invite_template
with patch("sydent.util.emailutils.smtplib") as smtplib:
sendEmail(self.sydent, templateFile, "test@test.com", substitutions)
smtp = smtplib.SMTP.return_value
email_contents = smtp.sendmail.call_args[0][2].decode("utf-8")
# test url input is encoded
self.assertIn(urllib.parse.quote("mxc://example.org/s0meM3dia"), email_contents)
# test html input is escaped
self.assertIn("Bob&#39;s Emporium of Messages", email_contents)
# test safe values are not escaped
self.assertIn("<Bob Smith>", email_contents)
# test our link is as expected
expected_url = (
"https://matrix.org/#/room/"
+ urllib.parse.quote("!something:example.org")
+ "?email="
+ urllib.parse.quote("test@test.com")
+ "&signurl=https%3A%2F%2Fmatrix.org%2F_matrix%2Fidentity%2Fapi%2Fv1%2Fsign-ed25519%3Ftoken%3D"
+ urllib.parse.quote("a_token")
+ "%26private_key%3D"
+ urllib.parse.quote("mystery_key")
+ "&room_name="
+ urllib.parse.quote("Bob's Emporium of Messages")
+ "&room_avatar_url="
+ urllib.parse.quote("mxc://example.org/s0meM3dia")
+ "&inviter_name="
+ urllib.parse.quote("<Bob Smith>")
+ "&guest_access_token=&guest_user_id=&room_type="
)
text = email_contents.splitlines()
link = text[22]
self.assertEqual(link, expected_url)
def test_jinja_matrix_verification(self):
substitutions = {
"address": "foo@example.com",
"medium": "email",
"to": "person@test.test",
"token": "<<token>>",
"link": "https://link_test.com",
}
templateFile = self.sydent.get_branded_template(
"matrix-org",
"verification_template.eml",
)
with patch("sydent.util.emailutils.smtplib") as smtplib:
sendEmail(self.sydent, templateFile, "test@test.com", substitutions)
smtp = smtplib.SMTP.return_value
email_contents = smtp.sendmail.call_args[0][2].decode("utf-8")
# test html input is escaped
self.assertIn("&lt;&lt;token&gt;&gt;", email_contents)
# test safe values are not escaped
self.assertIn("<<token>>", email_contents)
@patch(
"sydent.util.emailutils.generateAlphanumericTokenOfLength",
Mock(return_value="aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
)
def test_jinja_vector_verification(self):
substitutions = {
"address": "foo@example.com",
"medium": "email",
"to": "person@test.test",
"link": "https://link_test.com",
}
templateFile = self.sydent.get_branded_template(
"vector-im",
"verification_template.eml",
)
with patch("sydent.util.emailutils.smtplib") as smtplib:
sendEmail(self.sydent, templateFile, "test@test.com", substitutions)
smtp = smtplib.SMTP.return_value
email_contents = smtp.sendmail.call_args[0][2].decode("utf-8")
path = os.path.join(
self.sydent.config.general.templates_path,
"vector_verification_sample.txt",
)
with open(path, "r") as file:
expected_text = file.read()
# remove the email headers as they are variable
email_contents = email_contents[email_contents.index("Hello") :]
# test all output is as expected
self.assertEqual(email_contents, expected_text)
|
# -*- coding: utf-8 -*-
"""
Quantarhei job launcher
This script is meant to launch Quantarhei jobs on remote machines.
The script transfers simulation inputs to the remote machine, launches
the simulation, monitors it, and transfers the results back to the machine
from which the job was launched. The simulation inputs are stored in a
single directory denoted by a suffix .in. This directory includes input
data and configuration settings. It is transferred to the target machine
and after the simulation is done it is returned with the suffix .out.
"""
import argparse
import subprocess
import os
import fnmatch
import traceback
import pkg_resources
import sys
import quantarhei as qr
def do_launch():
# check for default configuration file: qlaunch.conf
# check for configuration file within launch directory: DIR/qlaunch.conf
# at least one of them has to be provided
pass
def main():
parser = argparse.ArgumentParser(
description='Quantarhei Remote Launcher')
parser.add_argument("directory", metavar='directory', type=str,
help='job directory to launch', nargs='?')
#
# Driver options
#
parser.add_argument("-v", "--version", action="store_true",
help="shows Quantarhei package version")
parser.add_argument("-i", "--info", action='store_true',
help="shows detailed information about Quantarhei"+
" installation")
parser.set_defaults(func=do_launch)
#
# Parsing all arguments
#
args = parser.parse_args()
if len(sys.argv) < 2:
parser.print_usage()
qr.exit()
#
# show longer info
#
if args.info:
qr.printlog("\n"
+"qrhei: Quantarhei Package Driver\n",
verbose=True, loglevel=1)
# +"\n"
# +"MPI parallelization enabled: ", flag_parallel,
# verbose=True, loglevel=0)
if not args.version:
qr.printlog("Package version: ", qr.Manager().version, "\n",
verbose=True, loglevel=1)
return
#
# show just Quantarhei version number
#
if args.version:
qr.printlog("Quantarhei package version: ", qr.Manager().version, "\n",
verbose=True, loglevel=1)
return
|
#!/usr/bin/python
import py_ball
from nba_api.stats.endpoints import teamgamelog, playerdashboardbygeneralsplits, boxscoreadvancedv2, playerdashboardbylastngames, playerdashboardbyclutch, playerdashboardbyopponent, boxscoresummaryv2, teamplayeronoffsummary, commonallplayers, commonplayerinfo
import csv
import time
import socket
import requests
from datetime import datetime, timedelta, date
# from lxml.html import fromstring
# from torrequest import TorRequest
import urllib.request
import random
import numpy as np
import pandas as pd
# from pytrends.request import TrendReq
from bs4 import BeautifulSoup
PLAYERS_PER_TEAM = 20
name2ID = {'Hawks': '1610612737','Celtics': '1610612738','Nets': '1610612751','Hornets': '1610612766','Bulls': '1610612741','Cavaliers': '1610612739','Mavericks': '1610612742','Nuggets': '1610612743','Pistons': '1610612765','Warriors': '1610612744','Rockets': '1610612745','Pacers': '1610612754','Clippers': '1610612746','Lakers': '1610612747','Grizzlies': '1610612763','Heat': '1610612748','Bucks': '1610612749','Timberwolves': '1610612750','Pelicans': '1610612740','Knicks': '1610612752','Thunder': '1610612760','Magic': '1610612753','76ers': '1610612755','Suns': '1610612756','Trail Blazers': '1610612757', 'Trailblazers': '1610612757','Kings': '1610612758','Spurs': '1610612759','Raptors': '1610612761','Jazz': '1610612762','Wizards': '1610612764'}
ID2name = {'1610612737':'Atlanta Hawks','1610612738':'Boston Celtics','1610612751':'Brooklyn Nets','1610612766':'Charlotte Hornets','1610612741':'Chicago Bulls','1610612739':'Cleveland Cavaliers','1610612742':'Dallas Mavericks','1610612743':'Denver Nuggets','1610612765':'Detroit Pistons','1610612744':'Golden State Warriors','1610612745':'Houston Rockets','1610612754':'Indiana Pacers','1610612746':'Los Angeles Clippers','1610612747':'Los Angeles Lakers','1610612763':'Memphis Grizzlies','1610612748':'Miami Heat','1610612749':'Milwaukee Bucks','1610612750':'Minnesota Timberwolves','1610612740':'New Orleans Pelicans','1610612752':'New York Knicks','1610612760':'Oklahoma City Thunder','1610612753':'Orlando Magic','1610612755':'Philadelphia 76ers','1610612756':'Phoenix Suns','1610612757':'Portland Trail Blazers','1610612758':'Sacramento Kings','1610612759':'San Antonio Spurs','1610612761':'Toronto Raptors','1610612762':'Utah Jazz','1610612764':'Washington Wizards'}
Mon2MM = {'DEC':12, 'NOV':11, 'OCT': 10, 'JAN':1, 'FEB':2, 'MAR':3, 'APR':4, 'MAY':5, 'JUN':6, 'JUL':7, 'AUG':8, 'SEP':9}
ESPN2ID = {'Atlanta': '1610612737','Boston': '1610612738','Brooklyn': '1610612751','Charlotte': '1610612766','Chicago': '1610612741','Cleveland': '1610612739','Dallas': '1610612742','Denver': '1610612743','Detroit': '1610612765','Golden State': '1610612744','Houston': '1610612745','Indiana': '1610612754','LA': '1610612746','Los Angeles': '1610612747','Memphis': '1610612763','Miami': '1610612748','Milwaukee': '1610612749','Minnesota': '1610612750','New Orleans': '1610612740','New York': '1610612752','Oklahoma City': '1610612760','Orlando': '1610612753','Philadelphia': '1610612755','Phoenix': '1610612756','Portland': '1610612757','Sacramento': '1610612758','San Antonio': '1610612759','Toronto': '1610612761','Utah': '1610612762','Washington': '1610612764'}
#################################################################################################
## set delay between url requests #####
#################################################################################################
def delayPing():
delays = [1.5]
delay = np.random.choice(delays)
time.sleep(delay)
#################################################################################################
## pulls current injuries from CBS #####
#################################################################################################
def createInjuredList():
answer = []
url = 'https://www.cbssports.com/nba/injuries/'
response = requests.get(url)
html_soup = BeautifulSoup(response.text, 'html.parser')
type(html_soup)
player_containers = html_soup.find_all('span', class_ = 'CellPlayerName--long')
for container in player_containers:
answer.append(container.a.text)
# print(answer)
return answer
#################################################################################################
## pulls tomorrow's games from ESPN and returns [homeTeam, visitingTeam, dateTime] #####
#################################################################################################
def createTodayGames():
answer = []
url = 'https://www.espn.com/nba/schedule'
response = requests.get(url)
html_soup = BeautifulSoup(response.text, 'html.parser')
# print(html_soup)
upcomingDays = html_soup.find_all('table', class_ = 'schedule has-team-logos align-left')
times = []
for upcomingDay in upcomingDays:
# print(upcomingDay)
curTimes = upcomingDay.find_all('td', attrs = {'data-behavior':'date_time'})
if len(curTimes) > 0:
times = curTimes
# print("found next day")
break
# while len(times) == 0:
# print("getting next day")
# upcomingDay = html_soup.find('table', class_ = 'schedule has-team-logos align-left')
# times = upcomingDay.find_all('td', attrs = {'data-behavior':'date_time'})
teams = upcomingDay.find_all('a', class_ = 'team-name')
allTeams = []
allTimes = []
for team in teams:
curTeam = team.span.text
allTeams.append(ESPN2ID[curTeam])
    for timeCell in times:  # avoid shadowing the time module
        gameTime = timeCell['data-date']
        allTimes.append(gameTime)
for i in range(len(allTimes)):
currentRow = []
currentRow.append(allTeams[(2*i)+1]) # home
currentRow.append(allTeams[2*i]) # visitor
currentRow.append(allTimes[i]) # time
answer.append(currentRow)
# print(allTeams)
return answer
#################################################################################################
## returns a dict of all active players this year
#################################################################################################
def createPlayerDict():
rawData = commonallplayers.CommonAllPlayers(is_only_current_season=1)
# rawData = commonplayerinfo.CommonPlayerInfo()
data = rawData.nba_response.get_dict()
data = data["resultSets"]
data = data[0]["rowSet"]
playerDict = {}
for row in data:
playerID = row[0]
playerFLname = row[2]
teamID = row[7]
teamName = row[9]
playerDict[playerID] = [teamID,teamName, playerID, playerFLname]
return playerDict
#################################################################################################
## takes dict of all active players and returns a dict of players by team
#################################################################################################
def createTeamPlayerDict(playerDict):
teamPlayerDict = {}
for teamName in name2ID:
teamID = name2ID[teamName]
teamPlayers = []
for playerID in playerDict:
curTeamID = str(playerDict[playerID][0])
if teamID == curTeamID:
curTeamName = playerDict[playerID][1]
curPlayerID = str(playerDict[playerID][2])
curPlayerName = playerDict[playerID][3]
teamPlayers.append([curTeamID, curTeamName, curPlayerID, curPlayerName])
teamPlayerDict[teamID] = teamPlayers
return teamPlayerDict
#################################################################################################
## grabs the relevant teams' players #####
#################################################################################################
def findRelevantTeams(team1ID, team2ID, teamPlayerDict):
team1PlayerList = teamPlayerDict[team1ID]
team2PlayerList = teamPlayerDict[team2ID]
return team1PlayerList, team2PlayerList
#################################################################################################
## builds the eligible-player pool as a dict of playerID:teamID (injured-list filtering is not wired in yet) #####
#################################################################################################
def idEligiblePlayers(team1PlayerList, team2PlayerList):
answer = {}
team1ID = team1PlayerList[0][0]
team2ID = team2PlayerList[0][0]
for i in range(len(team1PlayerList)):
answer[team1PlayerList[i][2]] = team1ID
for i in range(len(team2PlayerList)):
answer[team2PlayerList[i][2]] = team2ID
return answer
#########################################################################################################
#################### finds the game date for later use ##################################################
#########################################################################################################
def getGameDate(team1Gamelog, gameID):
notfound = []
for i in range(len(team1Gamelog)):
if team1Gamelog[i][1] == gameID:
return team1Gamelog[i][2]
# print("wrong season")
return notfound
#########################################################################################################
#####################takes list of all players, looks up basic stats, appends teamID, returns dict of playerID: [stats..., teamID]
#########################################################################################################
def overallBasePlayerData(players):#, proxies):
answer = {}
for playerID in players:
# headers = randUserAgent()
# proxy = randProxy(proxies)
try:
rawPlayerData = playerdashboardbygeneralsplits.PlayerDashboardByGeneralSplits(player_id= playerID, measure_type_detailed= "Base", per_mode_detailed="PerGame")#, season_type_playoffs= "Pre Season")
playerData = rawPlayerData.overall_player_dashboard.get_dict()
# print(playerData)
answer[playerID] = playerData['data'][0][6:27]
teamID = players[playerID]
answer[playerID].append(teamID)
        except Exception:
print("no data for " + str(playerID))
# print(answer[playerID])
# if len(answer) > 0:
# break
delayPing()
return answer
#########################################################################################################
#####################takes list of all players, looks up advanced stats, returns dict of player:[stats]
#########################################################################################################
def overallAdvPlayerData(playerIDs):#, proxies):
answer = {}
for playerID in playerIDs:
rawPlayerData = playerdashboardbygeneralsplits.PlayerDashboardByGeneralSplits(player_id= playerID, measure_type_detailed= "Advanced", per_mode_detailed="PerGame")#, season_type_playoffs= "Pre Season")
playerData = rawPlayerData.overall_player_dashboard.get_dict()
answer[playerID] = playerData['data'][0][6:39]
# print(answer[playerID])
delayPing()
return answer
#########################################################################################################
######takes list of relevant playerIDs (team1's PLAYERS_PER_TEAM players first, then team2's), tries to look up stats in games 0-10,
###### and games 10-20, then returns dicts of playerID: [past10] and playerID: [past10-20] entries. If the player hasn't played 20 games,
###### uses overall stats instead
#########################################################################################################
def past10gamesPlayerData(playerIDs, BasePlayerData):
answerCur10 = {}
answerPast10 = {}
for playerID in playerIDs:
rawPlayerData = playerdashboardbylastngames.PlayerDashboardByLastNGames(player_id= playerID, measure_type_detailed= "Base", per_mode_detailed="PerGame")#, season_type_playoffs= "Pre Season")#
playerData = rawPlayerData.data_sets[5].get_dict()
answerCur10[playerID] = playerData['data'][-1][6:27]
try:
answerPast10[playerID] = playerData['data'][-2][6:27]
        except Exception:
answerPast10[playerID] = BasePlayerData[playerID][:-1]
print("overall stats instead of mom")
delayPing()
return answerCur10, answerPast10
#########################################################################################################
######takes list of relevant playerIDs (team1's PLAYERS_PER_TEAM players first, then team2's), looks up basic stats for
###### performance in close games, then returns dict of playerID: [clutchBasicData].
######
#########################################################################################################
def clutchPlayerData(playerIDs, BasePlayerData):
answer3Min = {}
answer30Sec = {}
for playerID in playerIDs:
rawPlayerData = playerdashboardbyclutch.PlayerDashboardByClutch(player_id= playerID, measure_type_detailed= "Base")#, season_type_playoffs= "Pre Season")
playerData3Min = rawPlayerData.data_sets[2].get_dict() #Last3Min5PointPlayerDashboard
try:
answer3Min[playerID] = playerData3Min['data'][0][6:27]
        except Exception:
answer3Min[playerID] = BasePlayerData[playerID][:-1]
print("overall stats instead of clutch")
delayPing()
return answer3Min
#########################################################################################################
######takes gameID and both team IDs, and returns result(homeWin?) and answer (a dict of teamID:[score,home?])
###### CAN CAUSE SERIOUS ISSUES AND NEEDS TO BE REWORKED IN NEXT VERSION
#########################################################################################################
def teamData(gameID, team1ID, team2ID):
answer = {}
result = 1
rawBoxScore = boxscoresummaryv2.BoxScoreSummaryV2(game_id=gameID)
delayPing()
boxScore = rawBoxScore.data_sets[5].get_dict()
teamVID = boxScore['data'][0][3]
teamHID = boxScore['data'][1][3]
# print(boxScore['data'])
teamVScore = boxScore['data'][0][-1]
# print(ID2name[str(teamVID)] + " are V and their score was " + str(teamVScore))
teamHScore = boxScore['data'][1][-1]
# print(ID2name[str(teamHID)] + " are H and their score was " + str(teamHScore))
if teamVScore > teamHScore:
result = 0
if teamVID == team1ID:
answer[team1ID] = [teamVScore,0]
answer[team2ID] = [teamHScore,1]
elif teamVID == team2ID:
answer[team1ID] = [teamHScore,1]
answer[team2ID] = [teamVScore,0]
else:
print("teamID issue")
# delay = np.random.choice(delays)
# time.sleep(delay)
return result, answer
#########################################################################################################
#####################take all players and return the top PLAYERS_PER_TEAM per team by minutes #####################
#########################################################################################################
def sortPlayersByMin(players, players_per_team= PLAYERS_PER_TEAM):
answer = {}
maxMin = 100000
    for _ in range(len(players)):  # one selection pass per player
        currentMax = 0
        currentMaxID = 0
        for player in players:
            if players[player][0] > currentMax and players[player][0] < maxMin:
                currentMax = players[player][0]
                currentMaxID = player
        answer[currentMaxID] = players[currentMaxID]
        maxMin = currentMax
        if len(answer) >= players_per_team:
            break
return answer
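# A compact alternative to sortPlayersByMin (an illustrative sketch, not used
# below). It assumes minutes sit at index 0 of each stats list; unlike the
# strict `< maxMin` comparison above, it also keeps players tied on minutes.
def sortPlayersByMinSorted(players, players_per_team= PLAYERS_PER_TEAM):
    topPlayers = sorted(players.items(), key=lambda kv: kv[1][0], reverse=True)
    return dict(topPlayers[:players_per_team])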
#########################################################################################################
#####################take all players and return array of relevant player IDs ([playerID1-playerID6]),
#####################return sortedOverallStats (playerID: [stats]) for both teams
#########################################################################################################
def filterBenchPlayers(allOverallPlayerData, team1ID, team2ID, players_per_team= PLAYERS_PER_TEAM):
allTeam1Players = {}
allTeam2Players = {}
sortedPlayerStats = {}
for playerID in allOverallPlayerData:
if allOverallPlayerData[playerID][-1] == team1ID:
allTeam1Players[playerID] = allOverallPlayerData[playerID]
elif allOverallPlayerData[playerID][-1] == team2ID:
allTeam2Players[playerID] = allOverallPlayerData[playerID]
else:
print("got a bad teamID")
    sortedTeam1Players = sortPlayersByMin(allTeam1Players, players_per_team)
    sortedTeam2Players = sortPlayersByMin(allTeam2Players, players_per_team)
playerIDs = []
for playerID in sortedTeam1Players:
playerIDs.append(playerID)
sortedPlayerStats[playerID] = sortedTeam1Players[playerID]
for playerID in sortedTeam2Players:
playerIDs.append(playerID)
sortedPlayerStats[playerID] = sortedTeam2Players[playerID]
return playerIDs, sortedPlayerStats
#########################################################################################################
#####################takes NBA's date format and converts it for google trends
#####################
#########################################################################################################
def gameDate2Timeframe(gameDate, Mon2MM= Mon2MM):
year = int(gameDate[-4:])
day = int(gameDate[-8:-6])
mon = int(Mon2MM[gameDate[0:3]])
# answer = year + '-' + mon + '-'+ day
# return answer
return year, mon, day
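# Example (assuming the NBA gamelog date format "MON DD, YYYY"):
#   gameDate2Timeframe("DEC 25, 2019") -> (2019, 12, 25)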
#########################################################################################################
##################### looks up Google Trends interest for one team over the ~10 days before the game,###########
##################### returns dict of teamID: [hourly interest values]
##################### NOT CURRENTLY IN USE. NEED TO RUN SEPARATE PROGRAM AND APPEND TO OVERALL DATASET IN NEXT VERSION
#########################################################################################################
def getTrends(gameDate, teamID, ID2name= ID2name):
answer = {}
yearE, monE, dayE = gameDate2Timeframe(gameDate)
endDate = date(yearE, monE, dayE)
startDate = endDate - timedelta(days= 11)
yearS = startDate.year
monS = startDate.month
dayS = startDate.day
teamName = ID2name[str(teamID)]
pytrend = TrendReq()
pytrend.build_payload(kw_list=[teamName])
historicalInterest = pytrend.get_historical_interest(keywords=[teamName], year_start= yearS, month_start = monS, day_start = dayS, year_end= yearE, month_end= monE, day_end= dayE, sleep= 60)
delayPing()
historicalData = historicalInterest[teamName].values.tolist()
historicalData = historicalData[-240:]
answer[teamID] = historicalData
del pytrend
# interest_over_time_df = pytrend.interest_over_time(year_start= year - 1, month_start = mon, day_start = day, year_end= year, month_end= mon, day_end= day)
# print(interest_over_time_df.head())
# related_queries_dict = pytrend.related_queries()
# print(related_queries_dict)
# suggestions_dict = pytrend.suggestions(keyword= teamName)
# print(suggestions_dict)
return answer
#########################################################################################################
##################### METHOD THAT PRODUCES THE DATA FOR EACH GAME
##################### IT DOES NOT ADD THE MARKET'S PROBS, WHICH IS HANDLED BY combineDataAndOdds.py
#########################################################################################################
def makeGameData(homeTeamID,visitTeamID,hImpliedProb, vImpliedProb, teamPlayerDict):#(gameID, season):
answer = []
### find box score to filter out non-participants ###
# curBoxScore = boxscoreadvancedv2.BoxScoreAdvancedV2(game_id=gameID)#, headers= headers)#, proxy= randomProxy)
# delayPing()
# boxscorePlayers = curBoxScore.player_stats.get_dict()
# boxscoreTeams = curBoxScore.team_stats.get_dict()
### save team IDs for future use ###
team1ID = homeTeamID
    team2ID = visitTeamID
team1PlayerList, team2PlayerList = findRelevantTeams(team1ID, team2ID, teamPlayerDict)
### get game date for future use ###
# curGamelog = teamgamelog.TeamGameLog(team_id= team1ID, season= season[0:4])#, headers= headers)#, proxy= randomProxy)
# delayPing()
# curGamelog = curGamelog.nba_response.get_dict()['resultSets'][0]['rowSet']
# GameDate = getGameDate(curGamelog, gameID)
### get game result in form of home team W and dict with each team's pts ###
#### returns 1 for result if h wins, 0 if v wins. returns dict of team 1 with array of [score, boolean was home]
# gameResult, teamHV = teamData(gameID, team1ID, team2ID)
teamHV = {team1ID:[100,1],team2ID:[100,0]}
### continuing to filter eligible players ###
    eligiblePlayers = idEligiblePlayers(team1PlayerList, team2PlayerList)
### get basic data on eligible players ###
curBasePlayerData = overallBasePlayerData(eligiblePlayers)
### get google trends data per hour for past 10 days. 100 is max value. putting this here to break up NBA Calls
# team1Trends = getTrends(GameDate, team1ID)
### filter out bench players ###
playerIDs, BasePlayerData = filterBenchPlayers(curBasePlayerData, team1ID, team2ID)
### create arrays with each team's players for future use
team1PlayerIDs = playerIDs[:PLAYERS_PER_TEAM]
team2PlayerIDs = playerIDs[PLAYERS_PER_TEAM:]
### get player performance in current set of 10 games and past set. this will be weird due to
### current 10 being based on a variable amount of games, but is set to per game stats
current10GameData, past10GameData = past10gamesPlayerData(playerIDs, BasePlayerData)
### get player clutch data
player3MinData = clutchPlayerData(playerIDs, BasePlayerData)
### get overall advanced statistics on each player
AdvPlayerData = overallAdvPlayerData(playerIDs)
playerData = []
    for teamID, teamPlayerIDList in ((team1ID, team1PlayerIDs), (team2ID, team2PlayerIDs)):
        for playerID in teamPlayerIDList:
            curPlayer = [teamID]
            curPlayer.extend(curBasePlayerData[playerID][:-1])  # drop trailing teamID
            curPlayer.extend(current10GameData[playerID])
            curPlayer.extend(past10GameData[playerID])
            curPlayer.extend(player3MinData[playerID])
            curPlayer.extend(AdvPlayerData[playerID])
            playerData.append(curPlayer)
return playerData
runDate = datetime.today() - timedelta(hours=14, minutes=0)  # kept distinct from datetime.date, which getTrends uses
outTrainFile = "tomorrowPlayerData.csv"
oTrainFile = open(outTrainFile, "w")
writerTrain = csv.writer(oTrainFile, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
# rawBoxScore = boxscoresummaryv2.BoxScoreSummaryV2(game_id='0021900165')
# createInjuredList()
playerDict = createPlayerDict()
teamPlayerDict = createTeamPlayerDict(playerDict)
# print(teamPlayerDict)
count = 0
todayGames = createTodayGames()
# print(todayGames)
todayMoneyLines = findCurrentMoneyLine()
fixedMoneyLines = []
for moneyLine in todayMoneyLines:
curRow = []
curRow.append(moneyLine[0]) #teamID
curRow.append(moneylineToImpliedProbability(moneyLine[1]))
fixedMoneyLines.append(curRow)
# print(fixedMoneyLines)
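# For reference, the standard American-moneyline conversion that a helper like
# moneylineToImpliedProbability typically performs (an illustrative sketch; the
# real implementation is defined elsewhere in this project):
def _impliedProbabilityFromMoneyline(ml):
    ml = float(ml)
    if ml < 0:
        return -ml / (-ml + 100.0)  # e.g. -150 -> 0.60
    return 100.0 / (ml + 100.0)     # e.g. +120 -> ~0.4545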
for row in todayGames:
if row[0] == 'Home Team':
count +=1
continue
homeTeamID = row[0]
visitTeamID = row[1]
# homeMoneyLine = float(row[2])
# visitMoneyLine = float(row[3])
# hImpliedProb = moneylineToImpliedProbability(homeMoneyLine)
# vImpliedProb = moneylineToImpliedProbability(visitMoneyLine)
hImpliedProb = 0.5
vImpliedProb = 0.5
for ml in fixedMoneyLines:
if homeTeamID == ml[0]:
hImpliedProb = ml[1]
elif visitTeamID == ml[0]:
vImpliedProb = ml[1]
curGameData = []
skippedGame = ''
playerData = makeGameData(homeTeamID, visitTeamID, hImpliedProb, vImpliedProb, teamPlayerDict)
# curGameData.append(homeTeamID) ### only to keep consistent with training data which had gameID first
for d in playerData:
writerTrain.writerow(d)
print(" team - "+ ID2name[d[0]])
oTrainFile.close()
|
# coding: utf-8
"""
SendinBlue API
SendinBlue provide a RESTFul API that can be used with any languages. With this API, you will be able to : - Manage your campaigns and get the statistics - Manage your contacts - Send transactional Emails and SMS - and much more... You can download our wrappers at https://github.com/orgs/sendinblue **Possible responses** | Code | Message | | :-------------: | ------------- | | 200 | OK. Successful Request | | 201 | OK. Successful Creation | | 202 | OK. Request accepted | | 204 | OK. Successful Update/Deletion | | 400 | Error. Bad Request | | 401 | Error. Authentication Needed | | 402 | Error. Not enough credit, plan upgrade needed | | 403 | Error. Permission denied | | 404 | Error. Object does not exist | | 405 | Error. Method not allowed | # noqa: E501
OpenAPI spec version: 3.0.0
Contact: contact@sendinblue.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DeleteHardbounces(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'start_date': 'date',
'end_date': 'date',
'contact_email': 'str'
}
attribute_map = {
'start_date': 'startDate',
'end_date': 'endDate',
'contact_email': 'contactEmail'
}
def __init__(self, start_date=None, end_date=None, contact_email=None): # noqa: E501
"""DeleteHardbounces - a model defined in Swagger""" # noqa: E501
self._start_date = None
self._end_date = None
self._contact_email = None
self.discriminator = None
if start_date is not None:
self.start_date = start_date
if end_date is not None:
self.end_date = end_date
if contact_email is not None:
self.contact_email = contact_email
@property
def start_date(self):
"""Gets the start_date of this DeleteHardbounces. # noqa: E501
        Starting date (YYYY-MM-DD) of the time period for deletion. The hardbounces that occurred after this date will be deleted. Must be less than or equal to the endDate # noqa: E501
:return: The start_date of this DeleteHardbounces. # noqa: E501
:rtype: date
"""
return self._start_date
@start_date.setter
def start_date(self, start_date):
"""Sets the start_date of this DeleteHardbounces.
        Starting date (YYYY-MM-DD) of the time period for deletion. The hardbounces that occurred after this date will be deleted. Must be less than or equal to the endDate # noqa: E501
:param start_date: The start_date of this DeleteHardbounces. # noqa: E501
:type: date
"""
self._start_date = start_date
@property
def end_date(self):
"""Gets the end_date of this DeleteHardbounces. # noqa: E501
        Ending date (YYYY-MM-DD) of the time period for deletion. The hardbounces that occurred up to this date will be deleted. Must be greater than or equal to the startDate # noqa: E501
:return: The end_date of this DeleteHardbounces. # noqa: E501
:rtype: date
"""
return self._end_date
@end_date.setter
def end_date(self, end_date):
"""Sets the end_date of this DeleteHardbounces.
        Ending date (YYYY-MM-DD) of the time period for deletion. The hardbounces that occurred up to this date will be deleted. Must be greater than or equal to the startDate # noqa: E501
:param end_date: The end_date of this DeleteHardbounces. # noqa: E501
:type: date
"""
self._end_date = end_date
@property
def contact_email(self):
"""Gets the contact_email of this DeleteHardbounces. # noqa: E501
Target a specific email address # noqa: E501
:return: The contact_email of this DeleteHardbounces. # noqa: E501
:rtype: str
"""
return self._contact_email
@contact_email.setter
def contact_email(self, contact_email):
"""Sets the contact_email of this DeleteHardbounces.
Target a specific email address # noqa: E501
:param contact_email: The contact_email of this DeleteHardbounces. # noqa: E501
:type: str
"""
self._contact_email = contact_email
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeleteHardbounces):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
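# Example usage (a minimal sketch; the swagger-generated ApiClient is expected
# to serialize datetime.date values to the YYYY-MM-DD strings the API expects):
#
#   import datetime
#   body = DeleteHardbounces(start_date=datetime.date(2020, 1, 1),
#                            end_date=datetime.date(2020, 1, 31),
#                            contact_email="user@example.com")
#   body.to_dict()
#   # -> {'start_date': datetime.date(2020, 1, 1),
#   #     'end_date': datetime.date(2020, 1, 31),
#   #     'contact_email': 'user@example.com'}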
|
productions = {
(52, 1): [1, 25, 47, 53, 49],
(53, 2): [54, 57, 59, 62, 64],
(53, 3): [54, 57, 59, 62, 64],
(53, 4): [54, 57, 59, 62, 64],
(53, 5): [54, 57, 59, 62, 64],
(53, 6): [54, 57, 59, 62, 64],
(54, 2): [2, 55, 47],
(54, 3): [0],
(54, 4): [0],
(54, 5): [0],
(54, 6): [0],
(55, 25): [25, 56],
(56, 39): [0],
(56, 46): [46, 25, 56],
(56, 47): [0],
(57, 3): [3, 25, 40, 26, 47, 58],
(57, 4): [0],
(57, 5): [0],
(57, 6): [0],
(58, 4): [0],
(58, 5): [0],
(58, 6): [0],
(58, 25): [25, 40, 26, 47, 58],
(59, 4): [4, 55, 39, 61, 47, 60],
(59, 5): [0],
(59, 6): [0],
(60, 5): [0],
(60, 6): [0],
(60, 25): [55, 39, 61, 47, 60],
(61, 8): [8],
(61, 9): [9, 34, 26, 50, 26, 35, 10, 8],
(62, 5): [5, 25, 63, 47, 53, 47, 62],
(62, 6): [0],
(63, 36): [36, 55, 39, 8, 37],
(63, 39): [0],
(64, 6): [6, 66, 65, 7],
(65, 7): [0],
(65, 47): [47, 66, 65],
(66, 6): [64],
(66, 7): [0],
(66, 11): [11, 25, 69],
(66, 12): [12, 25],
(66, 13): [13, 77, 14, 66, 71],
(66, 15): [0],
(66, 16): [16, 77, 16, 66],
(66, 18): [18, 66, 19, 77],
(66, 19): [0],
(66, 20): [20, 36, 72, 74, 37],
(66, 21): [21, 36, 75, 76, 37],
(66, 25): [25, 67],
(66, 27): [27, 25, 38, 77, 28, 77, 16, 66],
(66, 29): [29, 77, 10, 84, 7],
(66, 47): [0],
(67, 34): [68, 38, 77],
(67, 38): [68, 38, 77],
(67, 39): [39, 66],
(68, 34): [34, 77, 35],
(68, 38): [0],
(69, 7): [0],
(69, 15): [0],
(69, 19): [0],
(69, 36): [36, 77, 70, 37],
(69, 47): [0],
(70, 37): [0],
(70, 46): [46, 77, 70],
(71, 7): [0],
(71, 15): [15, 66],
(71, 19): [0],
(71, 47): [0],
(72, 25): [25, 73],
(73, 7): [0],
(73, 10): [0],
(73, 14): [0],
(73, 15): [0],
(73, 17): [0],
(73, 19): [0],
(73, 22): [0],
(73, 23): [0],
(73, 28): [0],
(73, 30): [0],
(73, 31): [0],
(73, 32): [0],
(73, 33): [0],
(73, 34): [34, 77, 35],
(73, 35): [0],
(73, 37): [0],
(73, 40): [0],
(73, 41): [0],
(73, 42): [0],
(73, 43): [0],
(73, 44): [0],
(73, 45): [0],
(73, 46): [0],
(73, 47): [0],
(74, 37): [0],
(74, 46): [46, 72, 74],
(75, 24): [77],
(75, 25): [77],
(75, 26): [77],
(75, 30): [77],
(75, 31): [77],
(75, 36): [77],
(75, 48): [48],
(76, 37): [0],
(76, 46): [46, 75, 76],
(77, 24): [79, 78],
(77, 25): [79, 78],
(77, 26): [79, 78],
(77, 30): [79, 78],
(77, 31): [79, 78],
(77, 36): [79, 78],
(78, 7): [0],
(78, 10): [0],
(78, 14): [0],
(78, 15): [0],
(78, 17): [0],
(78, 19): [0],
(78, 28): [0],
(78, 35): [0],
(78, 37): [0],
(78, 40): [40, 79],
(78, 41): [41, 79],
(78, 42): [42, 79],
(78, 43): [43, 79],
(78, 44): [44, 79],
(78, 45): [45, 79],
(78, 46): [0],
(78, 47): [0],
(79, 24): [81, 80],
(79, 25): [81, 80],
(79, 26): [81, 80],
(79, 30): [30, 81, 80],
(79, 31): [31, 81, 80],
(79, 36): [81, 80],
(80, 7): [0],
(80, 10): [0],
(80, 14): [0],
(80, 15): [0],
(80, 17): [0],
(80, 19): [0],
(80, 22): [22, 81, 80],
(80, 28): [0],
(80, 30): [30, 81, 80],
(80, 31): [31, 81, 80],
(80, 35): [0],
(80, 37): [0],
(80, 40): [0],
(80, 41): [0],
(80, 42): [0],
(80, 43): [0],
(80, 44): [0],
(80, 45): [0],
(80, 46): [0],
(80, 47): [0],
(81, 24): [83, 82],
(81, 25): [83, 82],
(81, 26): [83, 82],
(81, 36): [83, 82],
(82, 7): [0],
(82, 10): [0],
(82, 14): [0],
(82, 15): [0],
(82, 17): [0],
(82, 19): [0],
(82, 22): [0],
(82, 23): [23, 83, 82],
(82, 28): [0],
(82, 30): [0],
(82, 31): [0],
(82, 32): [32, 83, 82],
(82, 33): [33, 83, 82],
(82, 35): [0],
(82, 37): [0],
(82, 40): [0],
(82, 41): [0],
(82, 42): [0],
(82, 43): [0],
(82, 44): [0],
(82, 45): [0],
(82, 46): [0],
(82, 47): [0],
(83, 24): [24, 83],
(83, 25): [72],
(83, 26): [26],
(83, 36): [36, 77, 37],
(84, 26): [26, 86, 39, 66, 85],
(85, 7): [0],
(85, 47): [47, 84],
(86, 39): [0],
(86, 46): [46, 26, 86]
}
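# The mapping above has the shape of an LL(1) parse table: each key looks like
# a (nonterminal, lookahead-token) pair and each value like the production's
# right-hand side, with [0] presumably encoding an epsilon production (this is
# inferred from the data's shape, not from the original grammar). Under those
# assumptions, a table-driven parser is a simple stack loop; the start symbol
# 52 is taken from the first key and is otherwise a guess.
def ll1_parse(tokens, table=productions, start_symbol=52):
    """Return True if `tokens` (a list of terminal codes, including any end
    marker the grammar expects) is derivable from `start_symbol`."""
    stack = [start_symbol]
    pos = 0
    while stack and pos < len(tokens):
        top = stack.pop()
        look = tokens[pos]
        if (top, look) in table:          # nonterminal: expand via the table
            rhs = table[(top, look)]
            if rhs != [0]:                # [0] assumed to mean epsilon
                stack.extend(reversed(rhs))
        elif top == look:                 # terminal: consume the token
            pos += 1
        else:
            return False                  # no rule and no match: reject
    return not stack and pos == len(tokens)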
|
from ThesisAnalysis import get_data, ThesisHDF5Writer
import numpy as np
import pandas as pd
from CHECLabPy.core.io import DL1Reader
def main():
input_file = "/Volumes/gct-jason/thesis_data/checs/mc/dynrange/2_no_noise/Run43489_dl1.h5"
reader = DL1Reader(input_file)
mapping = reader.mapping
pixel, true = reader.select_columns(['pixel', 'mc_true'])
xpix = mapping['xpix'].values
ypix = mapping['ypix'].values
dist = np.sqrt(xpix ** 2 + ypix ** 2)
n_pixels = mapping.metadata['n_pixels']
n_events = reader.n_events
    true_p = true.values.reshape((n_events, n_pixels)).mean(0)
df = pd.DataFrame(dict(
pixel=np.arange(n_pixels),
distance=dist,
true=true_p,
))
with ThesisHDF5Writer(get_data("mc_illumination_profile.h5")) as writer:
writer.write(data=df)
writer.write_mapping(mapping)
writer.write_metadata(
n_events=n_events,
n_pixels=n_pixels,
)
if __name__ == '__main__':
main()
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
from modeling.deformable_conv.deform_conv_v3 import *
from torch.nn import BatchNorm2d as bn
class _DenseAsppBlock(nn.Module):
""" ConvNet block for building DenseASPP. """
def __init__(self, input_num, num1, num2, dilation_rate, drop_out, bn_start=True,modulation=True,adaptive_d=True):
super(_DenseAsppBlock, self).__init__()
self.modulation = modulation
self.adaptive_d = adaptive_d
self.bn_start = bn_start
self.bn1 = bn(input_num, momentum=0.0003)
self.relu1 = nn.ReLU(inplace = True)
self.conv_1 = nn.Conv2d(in_channels=input_num, out_channels=num1, kernel_size=1)
self.bn2 = bn(num1, momentum=0.0003)
self.relu2 = nn.ReLU(inplace = True)
self.deform_conv = DeformConv2d(num1,num2,3,padding=1,dilation=dilation_rate,modulation=self.modulation,adaptive_d=self.adaptive_d)
#self.offset = ConvOffset2D(num1)
#self.conv_2 = nn.Conv2d(in_channels=num1, out_channels=num2, kernel_size=3,padding=1)
#self.conv_3 =nn.Conv2d(in_channels=num2, out_channels=num2, kernel_size=3,dilation=dilation_rate,
# padding=dilation_rate)
def forward(self,input):
        if self.bn_start:
input = self.bn1(input)
feature = self.relu1(input)
feature = self.conv_1(feature)
feature = self.bn2(feature)
#feature1 =self.offset(feature)
#feature1 = self.conv_2(feature1)
feature1 = self.deform_conv(feature)
#feature1 = self.conv_3(feature1)
#feature2 = self.conv_3(feature)
#feature3 = feature1 + feature2
return feature1
class DenseASPP(nn.Module):
def __init__(self,num_features,d_feature0,d_feature1,dropout0,modulation=True,adaptive_d=True):
super(DenseASPP,self).__init__()
self.num_features = num_features
self.d_feature0 = d_feature0
self.d_feature1 = d_feature1
self.init_feature = 2048 - 5*d_feature1
self.dropout0 = dropout0
self.adaptive_all = adaptive_d
self.modulation_all = modulation
self.init_conv_aspp = nn.Conv2d(self.num_features,self.init_feature,kernel_size=(3,3),padding=1)
self.num_features = self.init_feature
self.ASPP_3 = _DenseAsppBlock(input_num=self.num_features, num1=self.d_feature0, num2=self.d_feature1,
dilation_rate=3, drop_out=self.dropout0, bn_start=False,modulation= self.modulation_all,adaptive_d=self.adaptive_all)
self.ASPP_6 = _DenseAsppBlock(input_num=self.num_features + self.d_feature1 * 1, num1=self.d_feature0, num2=self.d_feature1,
dilation_rate=6, drop_out=self.dropout0, bn_start=True,modulation= self.modulation_all,adaptive_d=self.adaptive_all)
self.ASPP_12 = _DenseAsppBlock(input_num=self.num_features + self.d_feature1 * 2, num1=self.d_feature0, num2=self.d_feature1,
dilation_rate=12, drop_out=self.dropout0, bn_start=True,modulation= self.modulation_all,adaptive_d=self.adaptive_all)
self.ASPP_18 = _DenseAsppBlock(input_num=self.num_features + self.d_feature1 * 3, num1=self.d_feature0, num2=self.d_feature1,
dilation_rate=18, drop_out=self.dropout0, bn_start=True,modulation= self.modulation_all,adaptive_d=self.adaptive_all)
self.ASPP_24 = _DenseAsppBlock(input_num=self.num_features + self.d_feature1 * 4, num1=self.d_feature0, num2=self.d_feature1,
dilation_rate=24, drop_out=self.dropout0, bn_start=True,modulation= self.modulation_all,adaptive_d=self.adaptive_all)
def forward(self,feature):
feature = self.init_conv_aspp(feature)
aspp3 = self.ASPP_3(feature)
feature = torch.cat((aspp3, feature), dim=1)
aspp6 = self.ASPP_6(feature)
feature = torch.cat((aspp6, feature), dim=1)
aspp12 = self.ASPP_12(feature)
feature = torch.cat((aspp12, feature), dim=1)
aspp18 = self.ASPP_18(feature)
feature = torch.cat((aspp18, feature), dim=1)
aspp24 = self.ASPP_24(feature)
feature = torch.cat((aspp24, feature), dim=1)
return feature
def build_densedspp_v3(modulation=True,adaptive_d=True):
return DenseASPP(2048,512,128,.1,modulation = modulation,adaptive_d=adaptive_d)
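# A minimal smoke-test sketch (assumes the custom DeformConv2d preserves
# spatial size for kernel 3 / padding 1, as a standard conv would):
#
#   model = build_densedspp_v3()
#   x = torch.randn(1, 2048, 32, 32)
#   y = model(x)
#   # init_conv_aspp maps 2048 -> 2048 - 5*128 = 1408 channels, and each of
#   # the five ASPP blocks concatenates 128 more, so y has 2048 channels.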
|
"""
ml/misc/grid.py
"""
import copy
class Grid:
'''
Grid is an N x M grid of "alive" or "dead" grid cells.
    Each step transforms the grid (Conway's Game of Life) using the following rules:
- An "alive" cell remains alive if 2 or 3 neighbors are "alive";
otherwise, it becomes "dead."
- A "dead" cell becomes alive if exactly 3 neighbors are "alive";
otherwise, it remains "dead."
- The term "neighbor" refers to the at-most-8 adjacent cells
horizontally, vertically, and diagonally.
@example: Suppose x is alive and o is dead
* initial state:
oooooooooo
oooxxooooo
ooooxooooo
oooooooooo
oooooooooo
* next transforming state:
oooooooooo
oooxxooooo
oooxxooooo
oooooooooo
oooooooooo
'''
def __init__(self, grid):
self.grid = copy.deepcopy(grid) # grid matrix is a list of list
self.size_col = len(grid[0])
self.size_row = len(grid)
# print('grid [%d, %d]: %s' % (self.size_col, self.size_row, self.grid))
    def _get_column_sum(self, x, y):
        # sum of the (at most 3) cells in column y, rows x-1..x+1
        total = 0
        if y >= 0 and y < self.size_col:
            for i in [-1, 0, 1]:
                dx = x + i
                if dx >= 0 and dx < self.size_row:
                    total += self.grid[dx][y]
        return total
def _get_next_state(self, current_state, count):
new_state = current_state
if current_state == 1:
if not (count >= 2 and count <= 3):
new_state = 0
elif count == 3:
new_state = 1
return new_state
# solution 2
def get_next_grid(self):
"""
Return the next grid (with less computation).
"""
next = copy.deepcopy(self.grid)
for x in range(self.size_row):
            lv, mv, rv = 0, 0, self._get_column_sum(x, 0)
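            # rolling 3x1 column sums: after the shift below, lv/mv/rv cover
            # columns y-1, y and y+1, so lv+mv+rv-cv counts live neighbors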
for y in range(self.size_col):
cv = self.grid[x][y]
                lv, mv, rv = mv, rv, self._get_column_sum(x, y+1)
count = lv + mv + rv - cv
next[x][y] = self._get_next_state(cv, count)
return next
def _get_next_cell_state(self, row, col):
"""
Return next state (0 or 1) for a cell.
"""
current_state = self.grid[row][col]
count = 0
for i in [-1, 0, 1]:
dx = col + i
for j in [-1, 0, 1]:
dy = row + j
neighbor = not (dx == col and dy == row)
ranged = 0 <= dx and dx < self.size_col and 0 <= dy and dy < self.size_row
if ranged and neighbor:
# print("grid[%d,%d], neighbor[%d,%d] = %d" % (row, col, dy, dx, self.grid[dy][dx]))
count += self.grid[dy][dx]
new_state = self._get_next_state(current_state, count)
# print('> current: %d => new: %d, count [%d,%d] = %d' % (current_state, new_state, row, col, count))
return new_state
# solution 1
def get_next_grid_states(self):
"""
Return the next grid by calculating next states of each cell.
"""
next = copy.deepcopy(self.grid)
for i in range(self.size_row):
for j in range(self.size_col):
next[i][j] = self._get_next_cell_state(i, j)
return next
    def is_stable(self):
        nextgrid = self.get_next_grid_states()
        for i in range(self.size_row):
            for j in range(self.size_col):
                if nextgrid[i][j] != self.grid[i][j]:
                    return False
        return True
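# Example usage (the pattern from the class docstring, condensed to 5 columns):
#
#   g = Grid([[0, 0, 0, 0, 0],
#             [0, 1, 1, 0, 0],
#             [0, 0, 1, 0, 0],
#             [0, 0, 0, 0, 0]])
#   g.get_next_grid()        # both solutions agree: rows 1 and 2
#   g.get_next_grid_states() # both become [0, 1, 1, 0, 0]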
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 9 08:51:55 2015
@author: ksansom
"""
#!/usr/bin/env python
# A simple script to demonstrate the vtkCutter function
import vtk
#Create a cube
cube=vtk.vtkCubeSource()
cube.SetXLength(40)
cube.SetYLength(30)
cube.SetZLength(20)
cubeMapper=vtk.vtkPolyDataMapper()
cubeMapper.SetInputConnection(cube.GetOutputPort())
#create a plane to cut; normal (1,0,0) cuts parallel to the YZ plane (use (0,0,1) for an XY cut, (0,1,0) for XZ)
plane=vtk.vtkPlane()
plane.SetOrigin(10,0,0)
plane.SetNormal(1,0,0)
#create cutter
cutter=vtk.vtkCutter()
cutter.SetCutFunction(plane)
cutter.SetInputConnection(cube.GetOutputPort())
cutter.Update()
cutterMapper=vtk.vtkPolyDataMapper()
cutterMapper.SetInputConnection( cutter.GetOutputPort())
#create plane actor
planeActor=vtk.vtkActor()
planeActor.GetProperty().SetColor(1.0,1,0)
planeActor.GetProperty().SetLineWidth(2)
planeActor.SetMapper(cutterMapper)
#create cube actor
cubeActor=vtk.vtkActor()
cubeActor.GetProperty().SetColor(0.5,1,0.5)
cubeActor.GetProperty().SetOpacity(0.5)
cubeActor.SetMapper(cubeMapper)
#create renderers and add actors of plane and cube
ren = vtk.vtkRenderer()
ren.AddActor(planeActor)
ren.AddActor(cubeActor)
#Add renderer to renderwindow and render
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetSize(600, 600)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren.SetBackground(0,0,0)
renWin.Render()
iren.Start()
|
'''
CLUBB standard variables
zhunguo : guozhun@lasg.iap.ac.cn ; guozhun@uwm.edu
'''
import Ngl
from netCDF4 import Dataset
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import pylab
import os
import Common_functions
from subprocess import call
def clubb_std_prf (ptype,cseason, ncases, cases, casenames, nsite, lats, lons, filepath, filepathobs, casedir,varis,cscale,chscale,pname):
# ncases, the number of models
# cases, the name of models
# casename, the name of cases
# filepath, model output filepath
# filepathobs, filepath for observational data
# inptrs = [ncases]
if not os.path.exists(casedir):
os.mkdir(casedir)
_Font = 25
interp = 2
extrap = False
mkres = Ngl.Resources()
mkres.gsMarkerIndex = 2
mkres.gsMarkerColor = "Red"
mkres.gsMarkerSizeF = 15.
infiles = ["" for x in range(ncases)]
ncdfs = ["" for x in range(ncases)]
nregions = nsite
# varisobs = ["CC_ISBL", "OMEGA","SHUM","CLWC_ISBL", "THETA","RELHUM","U","CIWC_ISBL","T" ]
nvaris = len(varis)
# cunits = ["%","mba/day","g/kg","g/kg","K", "%", "m/s", "g/kg", "m/s", "m/s","K","m" ]
# cscaleobs = [100, 1, 1, 1000 , 1., 1, 1, 1000, 1,1,1,1,1,1,1]
# obsdataset =["ERAI", "ERAI", "ERAI", "ERAI", "ERAI", "ERAI", "ERAI", "ERAI","ERAI","ERAI", "ERAI", "ERAI", "ERAI", "ERAI", "ERAI","ERAI","ERAI"]
plotstd=["" for x in range(nsite)]
for ire in range (0, nsite):
if not os.path.exists(casedir+'/'+str(lons[ire])+'E_'+str(lats[ire])+'N'):
os.mkdir(casedir+'/'+str(lons[ire])+'E_'+str(lats[ire])+'N')
plotname = casedir+'/'+str(lons[ire])+'E_'+str(lats[ire])+'N/'+pname+'_'+str(lons[ire])+"E_"+str(lats[ire])+"N_"+cseason
plotstd[ire] = pname+'_'+str(lons[ire])+"E_"+str(lats[ire])+"N_"+cseason
wks= Ngl.open_wks(ptype,plotname)
Ngl.define_colormap(wks,"GMT_paired")
plot = []
res = Ngl.Resources()
res.nglDraw = False
res.nglFrame = False
res.lgLabelFontHeightF = .02 # change font height
res.lgPerimOn = False # no box around
res.vpWidthF = 0.30 # set width and height
res.vpHeightF = 0.30
#res.vpXF = 0.04
# res.vpYF = 0.30
res.tmYLLabelFont = _Font
res.tmXBLabelFont = _Font
res.tmXBLabelFontHeightF = 0.01
res.tmXBLabelFontThicknessF = 2.0
res.xyMarkLineMode = "Lines"
res.xyLineThicknesses = [3.0, 3.0, 3.0, 3.0, 3.0, 3.0,3.,3.,3.,3.,3,3,3,3,3,3,3]
res.xyDashPatterns = np.arange(0,24,1)
# res.xyMarkers = np.arange(16,40,1)
# res.xyMarkerSizeF = 0.005
pres = Ngl.Resources()
pres.nglMaximize = True
pres.nglFrame = False
pres.txFont = _Font
pres.nglPanelYWhiteSpacePercent = 5
pres.nglPanelXWhiteSpacePercent = 5
pres.nglPanelTop = 0.88
pres.wkWidth = 5000
pres.wkHeight = 5000
for iv in range (0, nvaris):
if(iv == nvaris-1):
res.pmLegendDisplayMode = "NEVER"
res.xyExplicitLegendLabels = casenames[:]
res.pmLegendSide = "top"
res.pmLegendParallelPosF = 0.6
res.pmLegendOrthogonalPosF = -0.5
res.pmLegendWidthF = 0.10
res.pmLegendHeightF = 0.10
res.lgLabelFontHeightF = .02
res.lgLabelFontThicknessF = 1.5
res.lgPerimOn = True
else:
res.pmLegendDisplayMode = "NEVER"
# if(obsdataset[iv] =="CCCM"):
# if(cseason == "ANN"):
# fileobs = "/Users/guoz/databank/CLD/CCCm/cccm_cloudfraction_2007-"+cseason+".nc"
# else:
# fileobs = "/Users/guoz/databank/CLD/CCCm/cccm_cloudfraction_2007-2010-"+cseason+".nc"
# inptrobs = Dataset(fileobs,'r')
# B=inptrobs.variables[varisobs[iv]][:,(lats[ire]),(lons[ire])]
# else:
# if (varisobs[iv] =="PRECT"):
# fileobs = filepathobs+'/GPCP_'+cseason+'_climo.nc'
# else:
# fileobs = filepathobs + obsdataset[iv]+'_'+cseason+'_climo.nc'
# inptrobs = Dataset(fileobs,'r')
# if (varisobs[iv] =="THETA"):
# B = inptrobs.variables['T'][0,:,(lats[ire]),(lons[ire])]
# pre1 = inptrobs.variables['lev'][:]
# for il1 in range (0, len(pre1)):
# B[il1] = B[il1]*(1000/pre1[il1])**0.286
# else:
# pre1 = inptrobs.variables['lev'][:]
# B = inptrobs.variables[varisobs[iv]][0,:,(lats[ire]),(lons[ire])]
#
# B[:]=B[:] * cscaleobs[iv]
for im in range (0,ncases):
ncdfs[im] = './data/'+cases[im]+'_site_location.nc'
infiles[im]= filepath[im]+cases[im]+'/'+cases[im]+'_'+cseason+'_climo.nc'
inptrs = Dataset(infiles[im],'r') # pointer to file1
lat=inptrs.variables['lat'][:]
nlat=len(lat)
lon=inptrs.variables['lon'][:]
nlon=len(lon)
lev=inptrs.variables['ilev'][:]
nlev=len(lev)
ncdf= Dataset(ncdfs[im],'r')
n =ncdf.variables['n'][:]
idx_cols=ncdf.variables['idx_cols'][:,:]
ncdf.close()
if (im ==0):
A_field = np.zeros((ncases,nlev),np.float32)
for subc in range( 0, n[ire]):
npoint=idx_cols[ire,n[subc]-1]-1
if (varis[iv] == 'THETA'):
tmp = inptrs.variables['T'][0,:,npoint]
hyam =inptrs.variables['hyam'][:]
hybm =inptrs.variables['hybm'][:]
ps=inptrs.variables['PS'][0,npoint]
p0=inptrs.variables['P0']
pre = np.zeros((nlev),np.float32)
for il in range (0, nlev):
pre[il] = hyam[il]*p0 + hybm[il] * ps
tmp[il] = tmp[il] * (100000/pre[il])**0.286
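                     # theta = T * (p0/p)**(R/cp): potential temperature, with
                     # p0 = 100000 Pa and R/cp ~= 0.286 as used in the loop above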
theunits=str(chscale[iv])+"x"+inptrs.variables['T'].units
else:
tmp=inptrs.variables[varis[iv]][0,:,npoint]
# tmp2=inptrs.variables['C6rt_Skw_fnc'][0,:,npoint]
# tmp3=inptrs.variables['tau_zm'][0,:,npoint]
# tmp4=inptrs.variables['tau_wpxp_zm'][0,:,npoint]
theunits=str(chscale[iv])+'x'+inptrs.variables[varis[iv]].units
if (varis[iv] == 'tau_zm' or varis[iv] == 'tau_wp2_zm' \
or varis[iv] == 'tau_wp3_zm' or varis[iv] == 'tau_xp2_zm' \
or varis[iv] == 'tau_no_N2_zm' or varis[iv] == 'tau_wpxp_zm'):
tmp=1/tmp
tmp [0:10] = 0.0
theunits=str(chscale[iv])+'x'+inptrs.variables[varis[iv]].units+'^-1'
A_field[im,:] = (A_field[im,:]+tmp[:]/n[ire]).astype(np.float32 )
A_field[im,:] = A_field[im,:] *cscale[iv]
inptrs.close()
if (varis[iv] == 'tau_zm' or varis[iv] == 'tau_wp2_zm' \
or varis[iv] == 'tau_wp3_zm' or varis[iv] == 'tau_xp2_zm' \
or varis[iv] == 'tau_no_N2_zm' or varis[iv] == 'tau_wpxp_zm'):
res.tiMainString = "invrs_"+varis[iv]+" "+theunits
else:
res.tiMainString = varis[iv]+" "+theunits
res.trYReverse = True
res.xyLineColors = np.arange(3,20,2)
res.xyMarkerColors = np.arange(2,20,2)
p = Ngl.xy(wks,A_field,lev,res)
# res.trYReverse = False
# res.xyLineColors = ["black"]
# pt = Ngl.xy(wks,B,pre1,res)
# Ngl.overlay(p,pt)
plot.append(p)
pres.txString = "CLUBB VAR at"+ str(lons[ire])+"E,"+str(lats[ire])+"N"
txres = Ngl.Resources()
txres.txFontHeightF = 0.02
txres.txFont = _Font
Ngl.text_ndc(wks,"CLUBB VAR at"+ str(lons[ire])+"E,"+str(lats[ire])+"N",0.5,0.92+ncases*0.01,txres)
Common_functions.create_legend(wks,casenames,np.arange(3,20,2),0.1,0.89+ncases*0.01)
        Ngl.panel(wks,plot[:],[nvaris//3,3],pres)
Ngl.frame(wks)
Ngl.destroy(wks)
return plotstd
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
import argparse
import csv
import logging
import os
import random
import sys
from io import open
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers.file_utils import (CONFIG_NAME,
PYTORCH_PRETRAINED_BERT_CACHE,
WEIGHTS_NAME)
from transformers.modeling_bert import BertConfig, BertForMultipleChoice
from transformers.optimization import AdamW
from transformers.tokenization_bert import BertTokenizer
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
class SwagExample(object):
"""A single training/test example for the SWAG dataset."""
def __init__(self,
swag_id,
context_sentence,
start_ending,
ending_0,
ending_1,
ending_2,
ending_3,
label=None):
self.swag_id = swag_id
self.context_sentence = context_sentence
self.start_ending = start_ending
self.endings = [
ending_0,
ending_1,
ending_2,
ending_3,
]
self.label = label
def __str__(self):
return self.__repr__()
def __repr__(self):
l = [
"swag_id: {}".format(self.swag_id),
"context_sentence: {}".format(self.context_sentence),
"start_ending: {}".format(self.start_ending),
"ending_0: {}".format(self.endings[0]),
"ending_1: {}".format(self.endings[1]),
"ending_2: {}".format(self.endings[2]),
"ending_3: {}".format(self.endings[3]),
]
if self.label is not None:
l.append("label: {}".format(self.label))
return ", ".join(l)
class InputFeatures(object):
def __init__(self,
example_id,
choices_features,
label
):
self.example_id = example_id
self.choices_features = [
{
'input_ids': input_ids,
'input_mask': input_mask,
'segment_ids': segment_ids
}
for _, input_ids, input_mask, segment_ids in choices_features
]
self.label = label
def read_swag_examples(input_file, is_training):
with open(input_file, 'r', encoding='utf-8') as f:
reader = csv.reader(f)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
if is_training and lines[0][-1] != 'label':
raise ValueError(
"For training, the input file must contain a label column."
)
examples = [
SwagExample(
swag_id=line[2],
context_sentence=line[4],
start_ending=line[5], # in the swag dataset, the
# common beginning of each
# choice is stored in "sent2".
ending_0=line[7],
ending_1=line[8],
ending_2=line[9],
ending_3=line[10],
label=int(line[11]) if is_training else None
) for line in lines[1:] # we skip the line with the column names
]
return examples
def convert_examples_to_features(examples, tokenizer, max_seq_length,
is_training):
"""Loads a data file into a list of `InputBatch`s."""
# Swag is a multiple choice task. To perform this task using Bert,
# we will use the formatting proposed in "Improving Language
# Understanding by Generative Pre-Training" and suggested by
# @jacobdevlin-google in this issue
# https://github.com/google-research/bert/issues/38.
#
# Each choice will correspond to a sample on which we run the
# inference. For a given Swag example, we will create the 4
# following inputs:
# - [CLS] context [SEP] choice_1 [SEP]
# - [CLS] context [SEP] choice_2 [SEP]
# - [CLS] context [SEP] choice_3 [SEP]
# - [CLS] context [SEP] choice_4 [SEP]
# The model will output a single value for each input. To get the
# final decision of the model, we will run a softmax over these 4
# outputs.
features = []
for example_index, example in enumerate(examples):
context_tokens = tokenizer.tokenize(example.context_sentence)
start_ending_tokens = tokenizer.tokenize(example.start_ending)
choices_features = []
for ending_index, ending in enumerate(example.endings):
# We create a copy of the context tokens in order to be
# able to shrink it according to ending_tokens
context_tokens_choice = context_tokens[:]
ending_tokens = start_ending_tokens + tokenizer.tokenize(ending)
# Modifies `context_tokens_choice` and `ending_tokens` in
# place so that the total length is less than the
# specified length. Account for [CLS], [SEP], [SEP] with
# "- 3"
_truncate_seq_pair(context_tokens_choice,
ending_tokens, max_seq_length - 3)
tokens = ["[CLS]"] + context_tokens_choice + \
["[SEP]"] + ending_tokens + ["[SEP]"]
segment_ids = [0] * (len(context_tokens_choice) + 2) + \
[1] * (len(ending_tokens) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
choices_features.append(
(tokens, input_ids, input_mask, segment_ids))
label = example.label
if example_index < 5:
logger.info("*** Example ***")
logger.info("swag_id: {}".format(example.swag_id))
for choice_idx, (tokens, input_ids, input_mask, segment_ids) in enumerate(choices_features):
logger.info("choice: {}".format(choice_idx))
logger.info("tokens: {}".format(' '.join(tokens)))
logger.info("input_ids: {}".format(
' '.join(map(str, input_ids))))
logger.info("input_mask: {}".format(
' '.join(map(str, input_mask))))
logger.info("segment_ids: {}".format(
' '.join(map(str, segment_ids))))
if is_training:
logger.info("label: {}".format(label))
features.append(
InputFeatures(
example_id=example.swag_id,
choices_features=choices_features,
label=label
)
)
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
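# Worked example (illustrative): with len(tokens_a) == 7, len(tokens_b) == 5
# and max_length == 10, tokens_a is popped twice, leaving 5 tokens in each.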
def accuracy(out, labels):
outputs = np.argmax(out, axis=1)
return np.sum(outputs == labels)
def select_field(features, field):
return [
[
choice[field]
for choice in feature.choices_features
]
for feature in features
]
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .csv files (or other data files) for the task.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
"bert-base-multilingual-cased, bert-base-chinese.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints will be written.")
# Other parameters
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_train",
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size",
default=8,
type=int,
help="Total batch size for eval.")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
args = parser.parse_args()
if args.local_rank == -1 or args.no_cuda:
device = torch.device(
"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train and not args.do_eval:
raise ValueError(
"At least one of `do_train` or `do_eval` must be True.")
if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError(
"Output directory ({}) already exists and is not empty.".format(args.output_dir))
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
tokenizer = BertTokenizer.from_pretrained(
args.bert_model, do_lower_case=args.do_lower_case)
# Prepare model
model = BertForMultipleChoice.from_pretrained(args.bert_model,
cache_dir=os.path.join(
str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank)),
num_choices=4)
if args.fp16:
model.half()
model.to(device)
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
if args.do_train:
# Prepare data loader
train_examples = read_swag_examples(os.path.join(
args.data_dir, 'train.csv'), is_training=True)
train_features = convert_examples_to_features(
train_examples, tokenizer, args.max_seq_length, True)
all_input_ids = torch.tensor(select_field(
train_features, 'input_ids'), dtype=torch.long)
all_input_mask = torch.tensor(select_field(
train_features, 'input_mask'), dtype=torch.long)
all_segment_ids = torch.tensor(select_field(
train_features, 'segment_ids'), dtype=torch.long)
all_label = torch.tensor(
[f.label for f in train_features], dtype=torch.long)
train_data = TensorDataset(
all_input_ids, all_input_mask, all_segment_ids, all_label)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(
train_data, sampler=train_sampler, batch_size=args.train_batch_size)
num_train_optimization_steps = len(
train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
if args.local_rank != -1:
num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
# Prepare optimizer
param_optimizer = list(model.named_parameters())
# hack to remove pooler, which is not used
        # thus it produces None grads that break apex
param_optimizer = [n for n in param_optimizer]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(
nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(
nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(
optimizer, static_loss_scale=args.loss_scale)
        else:
            # BertAdam does not exist in `transformers`; use AdamW and apply
            # the warmup/decay schedule manually in the training loop below
            optimizer = AdamW(optimizer_grouped_parameters,
                              lr=args.learning_rate)

        def warmup_linear(progress, warmup):
            # linear warmup to the peak learning rate, then linear decay to
            # zero -- the schedule the original BertAdam applied internally
            if progress < warmup:
                return progress / warmup
            return max(0.0, (1.0 - progress) / (1.0 - warmup))
global_step = 0
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps)
model.train()
for _ in trange(int(args.num_train_epochs), desc="Epoch"):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
loss = model(input_ids, segment_ids, input_mask, label_ids)
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.fp16 and args.loss_scale != 1.0:
# rescale loss for fp16 training
# see https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html
loss = loss * args.loss_scale
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    # apply the linear warmup/decay schedule by hand; neither
                    # FusedAdam nor AdamW adjusts the learning rate internally
                    lr_this_step = args.learning_rate * \
                        warmup_linear(global_step / num_train_optimization_steps,
                                      args.warmup_proportion)
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1
if args.do_train:
# Save a trained model, configuration and tokenizer
model_to_save = model.module if hasattr(
model, 'module') else model  # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
tokenizer.save_vocabulary(args.output_dir)
# Load a trained model and vocabulary that you have fine-tuned
model = BertForMultipleChoice.from_pretrained(
args.output_dir, num_choices=4)
tokenizer = BertTokenizer.from_pretrained(
args.output_dir, do_lower_case=args.do_lower_case)
else:
model = BertForMultipleChoice.from_pretrained(
args.bert_model, num_choices=4)
model.to(device)
if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
eval_examples = read_swag_examples(os.path.join(
args.data_dir, 'val.csv'), is_training=True)
eval_features = convert_examples_to_features(
eval_examples, tokenizer, args.max_seq_length, True)
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
all_input_ids = torch.tensor(select_field(
eval_features, 'input_ids'), dtype=torch.long)
all_input_mask = torch.tensor(select_field(
eval_features, 'input_mask'), dtype=torch.long)
all_segment_ids = torch.tensor(select_field(
eval_features, 'segment_ids'), dtype=torch.long)
all_label = torch.tensor(
[f.label for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(
all_input_ids, all_input_mask, all_segment_ids, all_label)
# Run prediction for full data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(
eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
model.eval()
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
with torch.no_grad():
tmp_eval_loss = model(
input_ids, segment_ids, input_mask, label_ids)
logits = model(input_ids, segment_ids, input_mask)
logits = logits.detach().cpu().numpy()
label_ids = label_ids.to('cpu').numpy()
tmp_eval_accuracy = accuracy(logits, label_ids)
eval_loss += tmp_eval_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
eval_accuracy = eval_accuracy / nb_eval_examples
result = {'eval_loss': eval_loss,
'eval_accuracy': eval_accuracy,
'global_step': global_step,
'loss': tr_loss/global_step}
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
#
# (C) Copyright 2018, Xilinx, Inc.
#
"""MIT License from https://github.com/ysh329/darknet-to-caffe-model-convertor/
Copyright (c) 2015 Preferred Infrastructure, Inc.
Copyright (c) 2015 Preferred Networks, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""
from collections import OrderedDict
import numpy
import torch  # required by the load_*/save_* helpers below
# assumed helper (mirrors the companion utils of this converter): copy a
# possibly-CUDA tensor to CPU so that .numpy()/.tofile() can be used
def convert2cpu(gpu_matrix):
    return torch.FloatTensor(gpu_matrix.size()).copy_(gpu_matrix)
def parse_cfg(cfgfile):
def erase_comment(line):
line = line.split('#')[0]
return line
blocks = []
fp = open(cfgfile, 'r')
block = None
line = fp.readline()
while line != '':
line = line.rstrip()
if line == '' or line[0] == '#':
line = fp.readline()
continue
elif line[0] == '[':
if block:
blocks.append(block)
block = OrderedDict()
block['type'] = line.lstrip('[').rstrip(']')
# set default value
if block['type'] == 'convolutional':
block['batch_normalize'] = 0
else:
line = erase_comment(line)
key,value = line.split('=')
key = key.strip()
if key == 'type':
key = '_type'
value = value.strip()
block[key] = value
line = fp.readline()
if block:
blocks.append(block)
fp.close()
return blocks
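# Minimal usage sketch (illustrative): parse_cfg returns one OrderedDict per
# [section]; the section name is stored under 'type' and every key=value pair
# is kept as a string. For a cfg containing:
#   [net]
#   width=416
#   [convolutional]
#   filters=32
# it yields:
#   [OrderedDict([('type', 'net'), ('width', '416')]),
#    OrderedDict([('type', 'convolutional'), ('batch_normalize', 0), ('filters', '32')])]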
def print_cfg(blocks):
for block in blocks:
print('[%s]' % (block['type']))
for key,value in block.items():
if key != 'type':
print('%s=%s' % (key, value))
print('')
def save_cfg(blocks, cfgfile):
with open(cfgfile, 'w') as fp:
for block in blocks:
fp.write('[%s]\n' % (block['type']))
for key,value in block.items():
if key != 'type':
fp.write('%s=%s\n' % (key, value))
fp.write('\n')
def print_cfg_nicely(blocks):
print('layer filters size input output')
prev_width = 416
prev_height = 416
prev_filters = 3
out_filters =[]
out_widths =[]
out_heights =[]
ind = -2
for block in blocks:
ind = ind + 1
if block['type'] == 'net':
prev_width = int(block['width'])
prev_height = int(block['height'])
continue
elif block['type'] == 'convolutional':
filters = int(block['filters'])
kernel_size = int(block['size'])
stride = int(block['stride'])
is_pad = int(block['pad'])
pad = (kernel_size-1)//2 if is_pad else 0
width = (prev_width + 2*pad - kernel_size)//stride + 1
height = (prev_height + 2*pad - kernel_size)//stride + 1
print('%5d %-6s %4d %d x %d / %d %3d x %3d x%4d -> %3d x %3d x%4d' % (ind, 'conv', filters, kernel_size, kernel_size, stride, prev_width, prev_height, prev_filters, width, height, filters))
prev_width = width
prev_height = height
prev_filters = filters
out_widths.append(prev_width)
out_heights.append(prev_height)
out_filters.append(prev_filters)
elif block['type'] == 'maxpool':
pool_size = int(block['size'])
stride = int(block['stride'])
width = prev_width//stride
height = prev_height//stride
print('%5d %-6s %d x %d / %d %3d x %3d x%4d -> %3d x %3d x%4d' % (ind, 'max', pool_size, pool_size, stride, prev_width, prev_height, prev_filters, width, height, prev_filters))
prev_width = width
prev_height = height
out_widths.append(prev_width)
out_heights.append(prev_height)
out_filters.append(prev_filters)
elif block['type'] == 'avgpool':
width = 1
height = 1
print('%5d %-6s %3d x %3d x%4d -> %3d' % (ind, 'avg', prev_width, prev_height, prev_filters, prev_filters))
prev_width = 1
prev_height = 1
out_widths.append(prev_width)
out_heights.append(prev_height)
out_filters.append(prev_filters)
elif block['type'] == 'softmax':
print('%5d %-6s -> %3d' % (ind, 'softmax', prev_filters))
out_widths.append(prev_width)
out_heights.append(prev_height)
out_filters.append(prev_filters)
elif block['type'] == 'cost':
print('%5d %-6s -> %3d' % (ind, 'cost', prev_filters))
out_widths.append(prev_width)
out_heights.append(prev_height)
out_filters.append(prev_filters)
elif block['type'] == 'reorg':
stride = int(block['stride'])
filters = stride * stride * prev_filters
width = prev_width//stride
height = prev_height//stride
print('%5d %-6s / %d %3d x %3d x%4d -> %3d x %3d x%4d' % (ind, 'reorg', stride, prev_width, prev_height, prev_filters, width, height, filters))
prev_width = width
prev_height = height
prev_filters = filters
out_widths.append(prev_width)
out_heights.append(prev_height)
out_filters.append(prev_filters)
elif block['type'] == 'route':
layers = block['layers'].split(',')
layers = [int(i) if int(i) > 0 else int(i)+ind for i in layers]
if len(layers) == 1:
print('%5d %-6s %d' % (ind, 'route', layers[0]))
prev_width = out_widths[layers[0]]
prev_height = out_heights[layers[0]]
prev_filters = out_filters[layers[0]]
elif len(layers) == 2:
print('%5d %-6s %d %d' % (ind, 'route', layers[0], layers[1]))
prev_width = out_widths[layers[0]]
prev_height = out_heights[layers[0]]
assert(prev_width == out_widths[layers[1]])
assert(prev_height == out_heights[layers[1]])
prev_filters = out_filters[layers[0]] + out_filters[layers[1]]
out_widths.append(prev_width)
out_heights.append(prev_height)
out_filters.append(prev_filters)
elif block['type'] == 'region':
print('%5d %-6s' % (ind, 'detection'))
out_widths.append(prev_width)
out_heights.append(prev_height)
out_filters.append(prev_filters)
elif block['type'] == 'shortcut':
from_id = int(block['from'])
from_id = from_id if from_id > 0 else from_id+ind
print('%5d %-6s %d' % (ind, 'shortcut', from_id))
prev_width = out_widths[from_id]
prev_height = out_heights[from_id]
prev_filters = out_filters[from_id]
out_widths.append(prev_width)
out_heights.append(prev_height)
out_filters.append(prev_filters)
elif block['type'] == 'connected':
filters = int(block['output'])
print('%5d %-6s %d -> %3d' % (ind, 'connected', prev_filters, filters))
prev_filters = filters
out_widths.append(1)
out_heights.append(1)
out_filters.append(prev_filters)
else:
print('unknown type %s' % (block['type']))
def load_conv(buf, start, conv_model):
num_w = conv_model.weight.numel()
num_b = conv_model.bias.numel()
conv_model.bias.data.copy_(torch.from_numpy(buf[start:start+num_b])); start = start + num_b
conv_model.weight.data.copy_(torch.from_numpy(buf[start:start+num_w]).view_as(conv_model.weight.data)); start = start + num_w
return start
def save_conv(fp, conv_model):
if conv_model.bias.is_cuda:
convert2cpu(conv_model.bias.data).numpy().tofile(fp)
convert2cpu(conv_model.weight.data).numpy().tofile(fp)
else:
conv_model.bias.data.numpy().tofile(fp)
conv_model.weight.data.numpy().tofile(fp)
def load_conv_bn(buf, start, conv_model, bn_model):
num_w = conv_model.weight.numel()
num_b = bn_model.bias.numel()
bn_model.bias.data.copy_(torch.from_numpy(buf[start:start+num_b])); start = start + num_b
bn_model.weight.data.copy_(torch.from_numpy(buf[start:start+num_b])); start = start + num_b
bn_model.running_mean.copy_(torch.from_numpy(buf[start:start+num_b])); start = start + num_b
bn_model.running_var.copy_(torch.from_numpy(buf[start:start+num_b])); start = start + num_b
conv_model.weight.data.copy_(torch.from_numpy(buf[start:start+num_w]).view_as(conv_model.weight.data)); start = start + num_w
return start
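# Illustrative driver for the loaders above (assumptions: a darknet .weights
# file starts with a few int32 header words, followed by raw float32 parameters
# in the same layer order as the cfg; the module shapes are hypothetical):
#   import torch.nn as nn
#   weights = numpy.fromfile('model.weights', dtype=numpy.float32)
#   buf = weights[4:]  # skip the header (4 or 5 words depending on darknet version)
#   conv = nn.Conv2d(3, 32, 3, padding=1, bias=False)
#   bn = nn.BatchNorm2d(32)
#   start = load_conv_bn(buf, 0, conv, bn)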
def save_conv_bn(fp, conv_model, bn_model):
if bn_model.bias.is_cuda:
convert2cpu(bn_model.bias.data).numpy().tofile(fp)
convert2cpu(bn_model.weight.data).numpy().tofile(fp)
convert2cpu(bn_model.running_mean).numpy().tofile(fp)
convert2cpu(bn_model.running_var).numpy().tofile(fp)
convert2cpu(conv_model.weight.data).numpy().tofile(fp)
else:
bn_model.bias.data.numpy().tofile(fp)
bn_model.weight.data.numpy().tofile(fp)
bn_model.running_mean.numpy().tofile(fp)
bn_model.running_var.numpy().tofile(fp)
conv_model.weight.data.numpy().tofile(fp)
def save_conv_shrink_bn(fp, conv_model, bn_model, eps=1e-5):
if bn_model.bias.is_cuda:
bias = bn_model.bias.data - bn_model.running_mean * bn_model.weight.data / torch.sqrt(bn_model.running_var + eps)
convert2cpu(bias).numpy().tofile(fp)
s = conv_model.weight.data.size()
weight = conv_model.weight.data * (bn_model.weight.data / torch.sqrt(bn_model.running_var + eps)).view(-1,1,1,1).repeat(1, s[1], s[2], s[3])
convert2cpu(weight).numpy().tofile(fp)
else:
bias = bn_model.bias.data - bn_model.running_mean * bn_model.weight.data / torch.sqrt(bn_model.running_var + eps)
bias.numpy().tofile(fp)
s = conv_model.weight.data.size()
weight = conv_model.weight.data * (bn_model.weight.data / torch.sqrt(bn_model.running_var + eps)).view(-1,1,1,1).repeat(1, s[1], s[2], s[3])
weight.numpy().tofile(fp)
def load_fc(buf, start, fc_model):
num_w = fc_model.weight.numel()
num_b = fc_model.bias.numel()
fc_model.bias.data.copy_(torch.from_numpy(buf[start:start+num_b])); start = start + num_b
fc_model.weight.data.copy_(torch.from_numpy(buf[start:start+num_w]).view_as(fc_model.weight.data)); start = start + num_w
return start
def save_fc(fp, fc_model):
fc_model.bias.data.numpy().tofile(fp)
fc_model.weight.data.numpy().tofile(fp)
if __name__ == '__main__':
import sys
if len(sys.argv) != 2:
print('Usage: python cfg.py model.cfg')
exit()
blocks = parse_cfg(sys.argv[1])
print_cfg_nicely(blocks)
|
# Copyright 2009-2017 Wander Lairson Costa
# Copyright 2009-2021 PyUSB contributors
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Integration tests
import utils
import unittest
import usb.core
import devinfo
import usb._interop
from usb._debug import methodtrace
import usb.util
import usb.backend.libusb0 as libusb0
import usb.backend.libusb1 as libusb1
import usb.backend.openusb as openusb
import time
import sys
def make_data_list(length = 8):
return (utils.get_array_data1(length),
utils.get_array_data2(length),
utils.get_list_data1(length),
utils.get_list_data2(length),
utils.get_str_data1(length),
utils.get_str_data2(length))
class DeviceTest(unittest.TestCase):
__test__ = False
@methodtrace(utils.logger)
def __init__(self, dev):
unittest.TestCase.__init__(self)
self.dev = dev
@methodtrace(utils.logger)
def runTest(self):
try:
self.test_attributes()
self.test_timeout()
self.test_set_configuration()
self.test_set_interface_altsetting()
self.test_write_read()
self.test_write_array()
self.test_ctrl_transfer()
self.test_clear_halt()
#self.test_reset()
finally:
usb.util.dispose_resources(self.dev)
@methodtrace(utils.logger)
def test_attributes(self):
self.assertEqual(self.dev.bLength, 18)
self.assertEqual(self.dev.bDescriptorType, usb.util.DESC_TYPE_DEVICE)
self.assertEqual(self.dev.bcdUSB, 0x0200)
self.assertEqual(self.dev.idVendor, devinfo.ID_VENDOR)
self.assertEqual(self.dev.idProduct, devinfo.ID_PRODUCT)
self.assertEqual(self.dev.bcdDevice, 0x0001)
self.assertEqual(self.dev.iManufacturer, 0x01)
self.assertEqual(self.dev.iProduct, 0x02)
self.assertEqual(self.dev.iSerialNumber, 0x03)
self.assertEqual(self.dev.bNumConfigurations, 0x01)
self.assertEqual(self.dev.bMaxPacketSize0, 8)
self.assertEqual(self.dev.bDeviceClass, 0x00)
self.assertEqual(self.dev.bDeviceSubClass, 0x00)
self.assertEqual(self.dev.bDeviceProtocol, 0x00)
@methodtrace(utils.logger)
def test_timeout(self):
def set_invalid_timeout():
self.dev.default_timeout = -1
tmo = self.dev.default_timeout
self.dev.default_timeout = 1
self.assertEqual(self.dev.default_timeout, 1)
self.dev.default_timeout = tmo
self.assertEqual(self.dev.default_timeout, tmo)
self.assertRaises(ValueError, set_invalid_timeout)
self.assertEqual(self.dev.default_timeout, tmo)
@methodtrace(utils.logger)
def test_set_configuration(self):
cfg = self.dev[0].bConfigurationValue
self.dev.set_configuration(cfg)
self.dev.set_configuration()
self.assertEqual(cfg, self.dev.get_active_configuration().bConfigurationValue)
@methodtrace(utils.logger)
def test_set_interface_altsetting(self):
intf = self.dev.get_active_configuration()[(0,0)]
self.dev.set_interface_altsetting(intf.bInterfaceNumber, intf.bAlternateSetting)
self.dev.set_interface_altsetting()
@methodtrace(utils.logger)
def test_reset(self):
self.dev.reset()
utils.delay_after_reset()
@methodtrace(utils.logger)
def test_write_read(self):
altsettings = [devinfo.INTF_BULK, devinfo.INTF_INTR]
eps = [devinfo.EP_BULK, devinfo.EP_INTR]
data_len = [8, 8]
if utils.is_iso_test_allowed():
altsettings.append(devinfo.INTF_ISO)
eps.append(devinfo.EP_ISO)
data_len.append(64)
def delay(alt):
# Hack to avoid failures when two isochronous transfers run back to back
if alt == devinfo.INTF_ISO and utils.is_windows():
time.sleep(0.5)
for alt, length in zip(altsettings, data_len):
self.dev.set_interface_altsetting(0, alt)
for data in make_data_list(length):
adata = utils.to_array(data)
length = utils.data_len(data)
buff = usb.util.create_buffer(length)
try:
ret = self.dev.write(eps[alt], data)
except NotImplementedError:
continue
self.assertEqual(
ret,
length,
'Failed to write data: ' + \
str(data) + ', in interface = ' + \
str(alt))
try:
ret = self.dev.read(eps[alt] | usb.util.ENDPOINT_IN, length)
except NotImplementedError:
continue
self.assertTrue(
utils.array_equals(ret, adata),
str(ret) + ' != ' + \
str(adata) + ', in interface = ' + \
str(alt))
delay(alt)
try:
ret = self.dev.write(eps[alt], data)
except NotImplementedError:
continue
self.assertEqual(
ret,
length,
'Failed to write data: ' + \
str(data) + ', in interface = ' + \
str(alt))
try:
ret = self.dev.read(eps[alt] | usb.util.ENDPOINT_IN, buff)
except NotImplementedError:
continue
self.assertEqual(ret, length)
self.assertTrue(
utils.array_equals(buff, adata),
str(buff) + ' != ' + \
str(adata) + ', in interface = ' + \
str(alt))
delay(alt)
@methodtrace(utils.logger)
def test_write_array(self):
a = usb._interop.as_array('test')
self.dev.set_interface_altsetting(0, devinfo.INTF_BULK)
self.assertEqual(self.dev.write(devinfo.EP_BULK, a), len(a))
self.assertTrue(utils.array_equals(
self.dev.read(devinfo.EP_BULK | usb.util.ENDPOINT_IN, len(a)),
a))
@methodtrace(utils.logger)
def test_ctrl_transfer(self):
for data in make_data_list():
length = utils.data_len(data)
adata = utils.to_array(data)
ret = self.dev.ctrl_transfer(
0x40,
devinfo.PICFW_SET_VENDOR_BUFFER,
0,
0,
data)
self.assertEqual(ret,
length,
'Failed to write data: ' + str(data))
ret = utils.to_array(self.dev.ctrl_transfer(
0xC0,
devinfo.PICFW_GET_VENDOR_BUFFER,
0,
0,
length))
self.assertTrue(utils.array_equals(ret, adata),
str(ret) + ' != ' + str(adata))
buff = usb.util.create_buffer(length)
ret = self.dev.ctrl_transfer(
0x40,
devinfo.PICFW_SET_VENDOR_BUFFER,
0,
0,
data)
self.assertEqual(ret,
length,
'Failed to write data: ' + str(data))
ret = self.dev.ctrl_transfer(
0xC0,
devinfo.PICFW_GET_VENDOR_BUFFER,
0,
0,
buff)
self.assertEqual(ret, length)
self.assertTrue(utils.array_equals(buff, adata),
str(buff) + ' != ' + str(adata))
@methodtrace(utils.logger)
def test_clear_halt(self):
self.dev.set_interface_altsetting(0, 0)
self.dev.clear_halt(0x01)
self.dev.clear_halt(0x81)
class ConfigurationTest(unittest.TestCase):
__test__ = False
@methodtrace(utils.logger)
def __init__(self, dev):
unittest.TestCase.__init__(self)
self.cfg = dev[0]
@methodtrace(utils.logger)
def runTest(self):
try:
self.test_attributes()
self.test_set()
finally:
usb.util.dispose_resources(self.cfg.device)
@methodtrace(utils.logger)
def test_attributes(self):
self.assertEqual(self.cfg.bLength, 9)
self.assertEqual(self.cfg.bDescriptorType, usb.util.DESC_TYPE_CONFIG)
self.assertEqual(self.cfg.wTotalLength, 78)
self.assertEqual(self.cfg.bNumInterfaces, 0x01)
self.assertEqual(self.cfg.bConfigurationValue, 0x01)
self.assertEqual(self.cfg.iConfiguration, 0x00)
self.assertEqual(self.cfg.bmAttributes, 0xC0)
self.assertEqual(self.cfg.bMaxPower, 50)
@methodtrace(utils.logger)
def test_set(self):
self.cfg.set()
class InterfaceTest(unittest.TestCase):
__test__ = False
@methodtrace(utils.logger)
def __init__(self, dev):
unittest.TestCase.__init__(self)
self.dev = dev
self.intf = dev[0][(0,0)]
@methodtrace(utils.logger)
def runTest(self):
try:
self.dev.set_configuration()
self.test_attributes()
self.test_set_altsetting()
finally:
usb.util.dispose_resources(self.intf.device)
@methodtrace(utils.logger)
def test_attributes(self):
self.assertEqual(self.intf.bLength, 9)
self.assertEqual(self.intf.bDescriptorType, usb.util.DESC_TYPE_INTERFACE)
self.assertEqual(self.intf.bInterfaceNumber, 0)
self.assertEqual(self.intf.bAlternateSetting, 0)
self.assertEqual(self.intf.bNumEndpoints, 2)
self.assertEqual(self.intf.bInterfaceClass, 0x00)
self.assertEqual(self.intf.bInterfaceSubClass, 0x00)
self.assertEqual(self.intf.bInterfaceProtocol, 0x00)
self.assertEqual(self.intf.iInterface, 0x00)
@methodtrace(utils.logger)
def test_set_altsetting(self):
self.intf.set_altsetting()
class EndpointTest(unittest.TestCase):
__test__ = False
@methodtrace(utils.logger)
def __init__(self, dev):
unittest.TestCase.__init__(self)
self.dev = dev
intf = dev[0][(0,0)]
self.ep_out = usb.util.find_descriptor(intf, bEndpointAddress=0x01)
self.ep_in = usb.util.find_descriptor(intf, bEndpointAddress=0x81)
@methodtrace(utils.logger)
def runTest(self):
try:
self.dev.set_configuration()
self.test_attributes()
self.test_write_read()
finally:
usb.util.dispose_resources(self.dev)
@methodtrace(utils.logger)
def test_attributes(self):
self.assertEqual(self.ep_out.bLength, 7)
self.assertEqual(self.ep_out.bDescriptorType, usb.util.DESC_TYPE_ENDPOINT)
self.assertEqual(self.ep_out.bEndpointAddress, 0x01)
self.assertEqual(self.ep_out.bmAttributes, 0x02)
self.assertEqual(self.ep_out.wMaxPacketSize, 16)
self.assertEqual(self.ep_out.bInterval, 0)
@methodtrace(utils.logger)
def test_write_read(self):
self.dev.set_interface_altsetting(0, 0)
for data in make_data_list():
adata = utils.to_array(data)
length = utils.data_len(data)
buff = usb.util.create_buffer(length)
ret = self.ep_out.write(data)
self.assertEqual(ret, length, 'Failed to write data: ' + str(data))
ret = self.ep_in.read(length)
self.assertTrue(utils.array_equals(ret, adata), str(ret) + ' != ' + str(adata))
ret = self.ep_out.write(data)
self.assertEqual(ret, length, 'Failed to write data: ' + str(data))
ret = self.ep_in.read(buff)
self.assertEqual(ret, length)
self.assertTrue(utils.array_equals(buff, adata), str(buff) + ' != ' + str(adata))
def get_suite():
suite = unittest.TestSuite()
test_cases = (DeviceTest, ConfigurationTest, InterfaceTest, EndpointTest)
for m in (libusb1, libusb0, openusb):
b = m.get_backend()
if b is None:
continue
dev = utils.find_my_device(b)
if dev is None:
utils.logger.warning('Test hardware not found for backend %s', m.__name__)
continue
for ObjectTestCase in test_cases:
utils.logger.info('Adding %s(%s) to test suite...', ObjectTestCase.__name__, m.__name__)
suite.addTest(ObjectTestCase(dev))
return suite
if __name__ == '__main__':
utils.run_tests(get_suite())
|
import os
import shutil
import subprocess
from dataclasses import dataclass
from pathlib import Path
import pytest
from tests.utils import CI, Compose
def _add_version_var(name: str, env_path: Path):
value = os.getenv(name)
if not value:
return
if value == "develop":
os.environ[name] = "latest"
with open(env_path, "a") as f:
f.write(f"\n{name}={os.environ[name]}")
@pytest.fixture(scope="session")
def env_file(tmp_path_factory: pytest.TempPathFactory):
tmp_path = tmp_path_factory.mktemp("frappe-docker")
file_path = tmp_path / ".env"
shutil.copy("example.env", file_path)
for var in ("FRAPPE_VERSION", "ERPNEXT_VERSION"):
_add_version_var(name=var, env_path=file_path)
yield str(file_path)
os.remove(file_path)
@pytest.fixture(scope="session")
def compose(env_file: str):
return Compose(project_name="test", env_file=env_file)
@pytest.fixture(autouse=True, scope="session")
def frappe_setup(compose: Compose):
compose.stop()
compose("up", "-d", "--quiet-pull")
yield
compose.stop()
@pytest.fixture(scope="session")
def frappe_site(compose: Compose):
site_name = "tests"
compose.bench(
"new-site",
site_name,
"--mariadb-root-password",
"123",
"--admin-password",
"admin",
)
compose("restart", "backend")
yield site_name
@pytest.fixture(scope="class")
def erpnext_setup(compose: Compose):
compose.stop()
args = ["-f", "overrides/compose.erpnext.yaml"]
if CI:
args += ("-f", "tests/compose.ci-erpnext.yaml")
compose(*args, "up", "-d", "--quiet-pull")
yield
compose.stop()
@pytest.fixture(scope="class")
def erpnext_site(compose: Compose):
site_name = "test_erpnext_site"
compose.bench(
"new-site",
site_name,
"--mariadb-root-password",
"123",
"--admin-password",
"admin",
"--install-app",
"erpnext",
)
compose("restart", "backend")
yield site_name
@pytest.fixture
def postgres_setup(compose: Compose):
compose.stop()
compose("-f", "overrides/compose.postgres.yaml", "up", "-d", "--quiet-pull")
compose.bench("set-config", "-g", "root_login", "postgres")
compose.bench("set-config", "-g", "root_password", "123")
yield
compose.stop()
@pytest.fixture
def python_path():
return "/home/frappe/frappe-bench/env/bin/python"
@dataclass
class S3ServiceResult:
access_key: str
secret_key: str
@pytest.fixture
def s3_service(python_path: str, compose: Compose):
access_key = "AKIAIOSFODNN7EXAMPLE"
secret_key = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
cmd = (
"docker",
"run",
"--name",
"minio",
"-d",
"-e",
f"MINIO_ACCESS_KEY={access_key}",
"-e",
f"MINIO_SECRET_KEY={secret_key}",
"--network",
f"{compose.project_name}_default",
"minio/minio",
"server",
"/data",
)
subprocess.check_call(cmd)
compose("cp", "tests/_create_bucket.py", "backend:/tmp")
compose.exec(
"-e",
f"S3_ACCESS_KEY={access_key}",
"-e",
f"S3_SECRET_KEY={secret_key}",
"backend",
python_path,
"/tmp/_create_bucket.py",
)
yield S3ServiceResult(access_key=access_key, secret_key=secret_key)
subprocess.call(("docker", "rm", "minio", "-f"))
|
def items_equal(xs, ys):
'''Compare two structures but ignore item order
Arguments:
xs {[type]} -- First structure
ys {[type]} -- Second structure
Returns:
bool -- True if the two structures are equal when ignoring item order
'''
if isinstance(xs, dict) and isinstance(ys, dict):
if len(xs) != len(ys):
return False
for key in xs.keys():
try:
if not items_equal(xs[key], ys[key]):
return False
except KeyError:
return False
return True
elif isinstance(xs, list) and isinstance(ys, list):
if len(xs) != len(ys):
return False
sxs = xs
sys = ys
try:
sxs = sorted(xs)
sys = sorted(ys)
for x, y in zip(sxs, sys):
if not items_equal(x, y):
return False
except TypeError:
ys_copy = ys.copy()
for x in xs:
matches = [i for i, y in enumerate(ys_copy) if items_equal(x, y)]
if len(matches):
del ys_copy[matches[0]]
continue
else:
return False
return True
else:
return xs == ys
def assert_items_equal(xs, ys):
import pprint
pp = pprint.PrettyPrinter()
assert items_equal(xs, ys), \
f'Difference between\n{pp.pformat(xs)}\nand\n{pp.pformat(ys)}'
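# Example usage (illustrative):
#   items_equal([1, 2, 3], [3, 2, 1])                        # True
#   items_equal({'a': [1, 2]}, {'a': [2, 1]})                # True
#   items_equal([{'x': 1}, {'y': 2}], [{'y': 2}, {'x': 1}])  # True, via the
#     # TypeError fallback, since dicts cannot be sorted in Python 3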
|
#! /usr/bin/python
"""
Zero Matrix: Write an algorithm such that if an element in an MxN matrix is 0, its entire row and column are set to 0.
"""
from typing import List
# 1 2 0
# 3 4 5
# 0 0 0
# 3 4 0
def zero_matrix(matrix: List[List[int]]):
if not matrix or not matrix[0]:
return matrix
row_has_zero, col_has_zero = False, False
row_len, col_len = len(matrix), len(matrix[0])
for i in range(col_len):
if matrix[0][i] == 0:
row_has_zero = True
break
for i in range(row_len):
if matrix[i][0] == 0:
col_has_zero = True
break
for i in range(1, row_len):
for j in range(1, col_len):
if matrix[i][j] == 0:
matrix[0][j] = 0
matrix[i][0] = 0
for i in range(col_len):
if matrix[0][i] == 0:
for j in range(1, row_len):
matrix[j][i] = 0
for i in range(row_len):
if matrix[i][0] == 0:
for j in range(1, col_len):
matrix[i][j] = 0
if row_has_zero:
for j in range(col_len):
matrix[0][j] = 0
if col_has_zero:
for i in range(row_len):
matrix[i][0] = 0
return matrix
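# Quick check against the example sketched above (illustrative):
#   zero_matrix([[1, 2, 0],
#                [3, 4, 5]]) -> [[0, 0, 0],
#                                [3, 4, 0]]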
if __name__ == "__main__":
import sys
matrix = []
for line in sys.stdin:
words = line.replace("\n", "").split(", ")
ints = [int(c) for c in words]
matrix.append(ints)
for row in matrix:
print("".join((str(c) for c in row)))
print("")
zeroed = zero_matrix(matrix)
for row in zeroed:
print("".join((str(c) for c in row)))
|
# Copyright 2013, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import webob
from jacket.api.storage import extensions
from jacket.api.storage.openstack import wsgi
from jacket.storage.i18n import _, _LI
from jacket.objects import storage
LOG = logging.getLogger(__name__)
def authorize(context, action_name):
action = 'snapshot_actions:%s' % action_name
extensions.extension_authorizer('snapshot', action)(context)
class SnapshotActionsController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SnapshotActionsController, self).__init__(*args, **kwargs)
LOG.debug("SnapshotActionsController initialized")
@wsgi.action('os-update_snapshot_status')
def _update_snapshot_status(self, req, id, body):
"""Update database fields related to status of a snapshot.
Intended for creation of snapshots, so snapshot state
must start as 'creating' and be changed to 'available',
'creating', or 'error'.
"""
context = req.environ['storage.context']
authorize(context, 'update_snapshot_status')
LOG.debug("body: %s", body)
try:
status = body['os-update_snapshot_status']['status']
except KeyError:
msg = _("'status' must be specified.")
raise webob.exc.HTTPBadRequest(explanation=msg)
# Allowed state transitions
status_map = {'creating': ['creating', 'available', 'error'],
'deleting': ['deleting', 'error_deleting']}
current_snapshot = storage.Snapshot.get_by_id(context, id)
if current_snapshot.status not in status_map:
msg = _("Snapshot status %(cur)s not allowed for "
"update_snapshot_status") % {
'cur': current_snapshot.status}
raise webob.exc.HTTPBadRequest(explanation=msg)
if status not in status_map[current_snapshot.status]:
msg = _("Provided snapshot status %(provided)s not allowed for "
"snapshot with status %(current)s.") % \
{'provided': status,
'current': current_snapshot.status}
raise webob.exc.HTTPBadRequest(explanation=msg)
update_dict = {'id': id,
'status': status}
progress = body['os-update_snapshot_status'].get('progress', None)
if progress:
# This is expected to be a string like '73%'
msg = _('progress must be an integer percentage')
try:
integer = int(progress[:-1])
except ValueError:
raise webob.exc.HTTPBadRequest(explanation=msg)
if integer < 0 or integer > 100 or progress[-1] != '%':
raise webob.exc.HTTPBadRequest(explanation=msg)
update_dict.update({'progress': progress})
LOG.info(_LI("Updating snapshot %(id)s with info %(dict)s"),
{'id': id, 'dict': update_dict})
current_snapshot.update(update_dict)
current_snapshot.save()
return webob.Response(status_int=202)
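# Example request handled by this action (illustrative, mirroring the Cinder
# snapshot-actions API shape):
#   POST /v2/{project_id}/snapshots/{snapshot_id}/action
#   {"os-update_snapshot_status": {"status": "available", "progress": "90%"}}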
class Snapshot_actions(extensions.ExtensionDescriptor):
"""Enable snapshot manager actions."""
name = "SnapshotActions"
alias = "os-snapshot-actions"
namespace = \
"http://docs.openstack.org/volume/ext/snapshot-actions/api/v1.1"
updated = "2013-07-16T00:00:00+00:00"
def get_controller_extensions(self):
controller = SnapshotActionsController()
extension = extensions.ControllerExtension(self,
'snapshots',
controller)
return [extension]
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libssh2(CMakePackage):
"""libssh2 is a client-side C library implementing the SSH2 protocol"""
homepage = "https://www.libssh2.org/"
url = "https://www.libssh2.org/download/libssh2-1.7.0.tar.gz"
version('1.7.0', 'b01662a210e94cccf2f76094db7dac5c')
version('1.4.3', '071004c60c5d6f90354ad1b701013a0b') # CentOS7
variant('shared', default=True,
description="Build shared libraries")
depends_on('cmake@2.8.11:', type='build')
depends_on('openssl')
depends_on('zlib')
depends_on('xz')
def cmake_args(self):
spec = self.spec
return [
'-DBUILD_SHARED_LIBS=%s' % ('YES' if '+shared' in spec else 'NO')]
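# Example invocations (illustrative):
#   spack install libssh2            # default variant: +shared
#   spack install libssh2 ~shared    # sets -DBUILD_SHARED_LIBS=NO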
|
from datasets import load_dataset, load_metric
import pandas as pd
import numpy as np
import hazm
from num2fawords import words, ordinal_words
from tqdm import tqdm
from sklearn.model_selection import train_test_split
import os
import string
import six
import re
import glob
from dataPrepration import prepareData, trainTestSplit, prepareDataset
import argparse
from utils import *
from audioPrepratation import playSample
import json
from tokenize import *  # assumed local module providing createTokenizer/createFeatureExtractor/createProcessor (shadows the stdlib tokenize)
from dataCollatorCTC import DataCollatorCTCWithPadding
from model import loadModel
from train import train
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("data", help="Path to your data")
parser.add_argument("common", help="Path to Common Voice dataset")
parser.add_argument("save_path", help="Path for saving models and outputs")
parser.add_argument("num_proc", help="Number of processor for calculation", type=int)
parser.add_argument("batch_size", help="Batch Size", type=int)
parser.add_argument("--cer", help="Use CER metric", action="store_true")
args = parser.parse_args()
metric_name = 'wer'
if args.cer:
metric_name = 'cer'
normalizer = hazm.Normalizer()
df = prepareData(args.data)
df["status"] = df["path"].apply(lambda path: True if os.path.exists(path) else None)
df = df.dropna(subset=["path"])
df = df.drop("status", 1)
print(f"Step 1: {len(df)}")
df["sentence"] = df["sentence"].apply(lambda t: normalizer(t))
df = df.dropna(subset=["sentence"])
print(f"Step 2: {len(df)}")
df = df.sample(frac=1)
df = df.reset_index(drop=True)
text = " ".join(df["sentence"].values.tolist())
vocab = list(sorted(set(text)))
for v in main_vocab:
if v not in vocab:
print("v", v)
print("length of main characters:", len(main_vocab))
print("length of dataset characters:", len(vocab))
print("datasets characters:",vocab)
playSample(df)
train_df, test_df = trainTestSplit(df)
save_path = "/".join(args.data.split('/')[:-1])  # directory containing the input data
print("path for saving splitted data: ",save_path)
train_df.to_csv(f"{save_path}/train.csv", sep="\t", encoding="utf-8", index=False)
test_df.to_csv(f"{save_path}/test.csv", sep="\t", encoding="utf-8", index=False)
print("shape of training data: ",train_df.shape)
print("shape of testing data: ",test_df.shape)
common_voice_train = load_dataset("csv", data_files={"train": f"{args.common}/train.csv"}, delimiter="\t")["train"]
common_voice_test = load_dataset("csv", data_files={"test": f"{args.common}/test.csv"}, delimiter="\t")["test"]
print("csv file contents for training:",common_voice_train)
print("csv file contents for testing:",common_voice_test)
common_voice_train = common_voice_train.map(normalize, fn_kwargs={"normalizer": normalizer, "chars_to_ignore": chars_to_ignore, "chars_to_mapping": chars_to_mapping})
common_voice_test = common_voice_test.map(normalize, fn_kwargs={"normalizer": normalizer, "chars_to_ignore": chars_to_ignore, "chars_to_mapping": chars_to_mapping})
print("Common Voice Test:")
print(common_voice_test[0:10])
vocab_train = common_voice_train.map(extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=common_voice_train.column_names)
vocab_test = common_voice_test.map(extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=common_voice_test.column_names)
print("Vocab Train:")
print(vocab_train[0])
vocab_list = list(sorted(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0])))
vocab_list = [vocab for vocab in vocab_list if vocab not in [" ", "\u0307"]]
print("number of letters after delete space: ",len(vocab_list))
print("include letters: ",vocab_list)
vocab_dict = {v: k for k, v in enumerate(special_vocab + vocab_list)}
print("number of tokens after adding some special letters:",len(vocab_dict))
print("final included tokens:",vocab_dict)
with open('vocab.json', 'w') as vocab_file:
json.dump(vocab_dict, vocab_file)
tokenizer = createTokenizer()
feature_extractor = createFeatureExtractor()
processor = createProcessor()
processor.save_pretrained(f"{args.save_path}/Wav2VecProcessor")
common_voice_train = common_voice_train.map(speech_file_to_array_fn, remove_columns=common_voice_train.column_names, num_proc=args.num_proc)
common_voice_test = common_voice_test.map(speech_file_to_array_fn, remove_columns=common_voice_test.column_names, num_proc=args.num_proc)
_common_voice_train = common_voice_train.map(prepareDataset, remove_columns=common_voice_train.column_names, batch_size=args.batch_size, num_proc=args.num_proc, batched=True)
_common_voice_test = common_voice_test.map(prepareDataset, remove_columns=common_voice_test.column_names, batch_size=args.batch_size, num_proc=args.num_proc, batched=True)
data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)
metric = load_metric(metric_name)
model = loadModel(processor)
model.freeze_feature_extractor()
train(model, args.save_path, data_collator, compute_metrics, common_voice_train, common_voice_test, processor)
|
"""
WSGI config for trek_30009 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'trek_30009.settings')
application = get_wsgi_application()
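# Typical deployment (illustrative): point any WSGI server at this module, e.g.
#   gunicorn trek_30009.wsgi:application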
|
# Generated by Django 2.2.16 on 2021-12-09 17:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('monitor', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.AddField(
model_name='sysfiles',
name='creator',
field=models.ForeignKey(db_constraint=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_query_name='creator_query', to=settings.AUTH_USER_MODEL, verbose_name='创建者'),
),
migrations.AddField(
model_name='sysfiles',
name='monitor',
field=models.ForeignKey(db_constraint=False, on_delete=django.db.models.deletion.CASCADE, to='monitor.Monitor', verbose_name='关联服务器监控信息'),
),
migrations.AddField(
model_name='monitor',
name='creator',
field=models.ForeignKey(db_constraint=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_query_name='creator_query', to=settings.AUTH_USER_MODEL, verbose_name='创建者'),
),
migrations.AddField(
model_name='monitor',
name='server',
field=models.ForeignKey(db_constraint=False, on_delete=django.db.models.deletion.CASCADE, to='monitor.Server', verbose_name='关联服务器信息'),
),
]
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
__all__ = [
'ClusterBootstrapAction',
'ClusterCoreInstanceFleet',
'ClusterCoreInstanceFleetInstanceTypeConfig',
'ClusterCoreInstanceFleetInstanceTypeConfigConfiguration',
'ClusterCoreInstanceFleetInstanceTypeConfigEbsConfig',
'ClusterCoreInstanceFleetLaunchSpecifications',
'ClusterCoreInstanceFleetLaunchSpecificationsOnDemandSpecification',
'ClusterCoreInstanceFleetLaunchSpecificationsSpotSpecification',
'ClusterCoreInstanceGroup',
'ClusterCoreInstanceGroupEbsConfig',
'ClusterEc2Attributes',
'ClusterKerberosAttributes',
'ClusterMasterInstanceFleet',
'ClusterMasterInstanceFleetInstanceTypeConfig',
'ClusterMasterInstanceFleetInstanceTypeConfigConfiguration',
'ClusterMasterInstanceFleetInstanceTypeConfigEbsConfig',
'ClusterMasterInstanceFleetLaunchSpecifications',
'ClusterMasterInstanceFleetLaunchSpecificationsOnDemandSpecification',
'ClusterMasterInstanceFleetLaunchSpecificationsSpotSpecification',
'ClusterMasterInstanceGroup',
'ClusterMasterInstanceGroupEbsConfig',
'ClusterStep',
'ClusterStepHadoopJarStep',
'InstanceFleetInstanceTypeConfig',
'InstanceFleetInstanceTypeConfigConfiguration',
'InstanceFleetInstanceTypeConfigEbsConfig',
'InstanceFleetLaunchSpecifications',
'InstanceFleetLaunchSpecificationsOnDemandSpecification',
'InstanceFleetLaunchSpecificationsSpotSpecification',
'InstanceGroupEbsConfig',
'ManagedScalingPolicyComputeLimit',
]
@pulumi.output_type
class ClusterBootstrapAction(dict):
def __init__(__self__, *,
name: str,
path: str,
args: Optional[Sequence[str]] = None):
"""
:param str name: Friendly name given to the instance fleet.
:param str path: Location of the script to run during a bootstrap action. Can be either a location in Amazon S3 or on a local file system
:param Sequence[str] args: List of command line arguments passed to the JAR file's main function when executed.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "path", path)
if args is not None:
pulumi.set(__self__, "args", args)
@property
@pulumi.getter
def name(self) -> str:
"""
Friendly name given to the instance fleet.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def path(self) -> str:
"""
Location of the script to run during a bootstrap action. Can be either a location in Amazon S3 or on a local file system
"""
return pulumi.get(self, "path")
@property
@pulumi.getter
def args(self) -> Optional[Sequence[str]]:
"""
List of command line arguments passed to the JAR file's main function when executed.
"""
return pulumi.get(self, "args")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
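# Illustrative use from a Pulumi program (the input-side counterpart of this
# output type is the corresponding *Args class; names below are hypothetical):
#   import pulumi_aws as aws
#   cluster = aws.emr.Cluster("example",
#       # ... other required cluster arguments ...
#       bootstrap_actions=[aws.emr.ClusterBootstrapActionArgs(
#           name="setup",
#           path="s3://my-bucket/bootstrap.sh",
#           args=["--flag"])])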
@pulumi.output_type
class ClusterCoreInstanceFleet(dict):
def __init__(__self__, *,
id: Optional[str] = None,
instance_type_configs: Optional[Sequence['outputs.ClusterCoreInstanceFleetInstanceTypeConfig']] = None,
launch_specifications: Optional['outputs.ClusterCoreInstanceFleetLaunchSpecifications'] = None,
name: Optional[str] = None,
provisioned_on_demand_capacity: Optional[int] = None,
provisioned_spot_capacity: Optional[int] = None,
target_on_demand_capacity: Optional[int] = None,
target_spot_capacity: Optional[int] = None):
"""
:param str id: The ID of the EMR Cluster
:param Sequence['ClusterCoreInstanceFleetInstanceTypeConfigArgs'] instance_type_configs: Configuration block for instance fleet
:param 'ClusterCoreInstanceFleetLaunchSpecificationsArgs' launch_specifications: Configuration block for launch specification
:param str name: Friendly name given to the instance fleet.
:param int target_on_demand_capacity: The target capacity of On-Demand units for the instance fleet, which determines how many On-Demand instances to provision.
:param int target_spot_capacity: The target capacity of Spot units for the instance fleet, which determines how many Spot instances to provision.
"""
if id is not None:
pulumi.set(__self__, "id", id)
if instance_type_configs is not None:
pulumi.set(__self__, "instance_type_configs", instance_type_configs)
if launch_specifications is not None:
pulumi.set(__self__, "launch_specifications", launch_specifications)
if name is not None:
pulumi.set(__self__, "name", name)
if provisioned_on_demand_capacity is not None:
pulumi.set(__self__, "provisioned_on_demand_capacity", provisioned_on_demand_capacity)
if provisioned_spot_capacity is not None:
pulumi.set(__self__, "provisioned_spot_capacity", provisioned_spot_capacity)
if target_on_demand_capacity is not None:
pulumi.set(__self__, "target_on_demand_capacity", target_on_demand_capacity)
if target_spot_capacity is not None:
pulumi.set(__self__, "target_spot_capacity", target_spot_capacity)
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
The ID of the EMR Cluster
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="instanceTypeConfigs")
def instance_type_configs(self) -> Optional[Sequence['outputs.ClusterCoreInstanceFleetInstanceTypeConfig']]:
"""
Configuration block for instance fleet
"""
return pulumi.get(self, "instance_type_configs")
@property
@pulumi.getter(name="launchSpecifications")
def launch_specifications(self) -> Optional['outputs.ClusterCoreInstanceFleetLaunchSpecifications']:
"""
Configuration block for launch specification
"""
return pulumi.get(self, "launch_specifications")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Friendly name given to the instance fleet.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisionedOnDemandCapacity")
def provisioned_on_demand_capacity(self) -> Optional[int]:
return pulumi.get(self, "provisioned_on_demand_capacity")
@property
@pulumi.getter(name="provisionedSpotCapacity")
def provisioned_spot_capacity(self) -> Optional[int]:
return pulumi.get(self, "provisioned_spot_capacity")
@property
@pulumi.getter(name="targetOnDemandCapacity")
def target_on_demand_capacity(self) -> Optional[int]:
"""
The target capacity of On-Demand units for the instance fleet, which determines how many On-Demand instances to provision.
"""
return pulumi.get(self, "target_on_demand_capacity")
@property
@pulumi.getter(name="targetSpotCapacity")
def target_spot_capacity(self) -> Optional[int]:
"""
The target capacity of Spot units for the instance fleet, which determines how many Spot instances to provision.
"""
return pulumi.get(self, "target_spot_capacity")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ClusterCoreInstanceFleetInstanceTypeConfig(dict):
def __init__(__self__, *,
instance_type: str,
bid_price: Optional[str] = None,
bid_price_as_percentage_of_on_demand_price: Optional[float] = None,
configurations: Optional[Sequence['outputs.ClusterCoreInstanceFleetInstanceTypeConfigConfiguration']] = None,
ebs_configs: Optional[Sequence['outputs.ClusterCoreInstanceFleetInstanceTypeConfigEbsConfig']] = None,
weighted_capacity: Optional[int] = None):
"""
:param str instance_type: An EC2 instance type, such as m4.xlarge.
:param str bid_price: The bid price for each EC2 Spot instance type as defined by `instance_type`. Expressed in USD. If neither `bid_price` nor `bid_price_as_percentage_of_on_demand_price` is provided, `bid_price_as_percentage_of_on_demand_price` defaults to 100%.
:param float bid_price_as_percentage_of_on_demand_price: The bid price, as a percentage of On-Demand price, for each EC2 Spot instance as defined by `instance_type`. Expressed as a number (for example, 20 specifies 20%). If neither `bid_price` nor `bid_price_as_percentage_of_on_demand_price` is provided, `bid_price_as_percentage_of_on_demand_price` defaults to 100%.
:param Sequence['ClusterCoreInstanceFleetInstanceTypeConfigConfigurationArgs'] configurations: A configuration classification that applies when provisioning cluster instances, which can include configurations for applications and software that run on the cluster. List of `configuration` blocks.
:param Sequence['ClusterCoreInstanceFleetInstanceTypeConfigEbsConfigArgs'] ebs_configs: Configuration block(s) for EBS volumes attached to each instance in the instance group. Detailed below.
:param int weighted_capacity: The number of units that a provisioned instance of this type provides toward fulfilling the target capacities defined in `emr.InstanceFleet`.
"""
pulumi.set(__self__, "instance_type", instance_type)
if bid_price is not None:
pulumi.set(__self__, "bid_price", bid_price)
if bid_price_as_percentage_of_on_demand_price is not None:
pulumi.set(__self__, "bid_price_as_percentage_of_on_demand_price", bid_price_as_percentage_of_on_demand_price)
if configurations is not None:
pulumi.set(__self__, "configurations", configurations)
if ebs_configs is not None:
pulumi.set(__self__, "ebs_configs", ebs_configs)
if weighted_capacity is not None:
pulumi.set(__self__, "weighted_capacity", weighted_capacity)
@property
@pulumi.getter(name="instanceType")
def instance_type(self) -> str:
"""
An EC2 instance type, such as m4.xlarge.
"""
return pulumi.get(self, "instance_type")
@property
@pulumi.getter(name="bidPrice")
def bid_price(self) -> Optional[str]:
"""
The bid price for each EC2 Spot instance type as defined by `instance_type`. Expressed in USD. If neither `bid_price` nor `bid_price_as_percentage_of_on_demand_price` is provided, `bid_price_as_percentage_of_on_demand_price` defaults to 100%.
"""
return pulumi.get(self, "bid_price")
@property
@pulumi.getter(name="bidPriceAsPercentageOfOnDemandPrice")
def bid_price_as_percentage_of_on_demand_price(self) -> Optional[float]:
"""
The bid price, as a percentage of On-Demand price, for each EC2 Spot instance as defined by `instance_type`. Expressed as a number (for example, 20 specifies 20%). If neither `bid_price` nor `bid_price_as_percentage_of_on_demand_price` is provided, `bid_price_as_percentage_of_on_demand_price` defaults to 100%.
"""
return pulumi.get(self, "bid_price_as_percentage_of_on_demand_price")
@property
@pulumi.getter
def configurations(self) -> Optional[Sequence['outputs.ClusterCoreInstanceFleetInstanceTypeConfigConfiguration']]:
"""
A configuration classification that applies when provisioning cluster instances, which can include configurations for applications and software that run on the cluster. List of `configuration` blocks.
"""
return pulumi.get(self, "configurations")
@property
@pulumi.getter(name="ebsConfigs")
def ebs_configs(self) -> Optional[Sequence['outputs.ClusterCoreInstanceFleetInstanceTypeConfigEbsConfig']]:
"""
Configuration block(s) for EBS volumes attached to each instance in the instance group. Detailed below.
"""
return pulumi.get(self, "ebs_configs")
@property
@pulumi.getter(name="weightedCapacity")
def weighted_capacity(self) -> Optional[int]:
"""
The number of units that a provisioned instance of this type provides toward fulfilling the target capacities defined in `emr.InstanceFleet`.
"""
return pulumi.get(self, "weighted_capacity")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ClusterCoreInstanceFleetInstanceTypeConfigConfiguration(dict):
def __init__(__self__, *,
classification: Optional[str] = None,
properties: Optional[Mapping[str, Any]] = None):
"""
:param str classification: The classification within a configuration.
:param Mapping[str, Any] properties: A map of properties specified within a configuration classification
"""
if classification is not None:
pulumi.set(__self__, "classification", classification)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter
def classification(self) -> Optional[str]:
"""
The classification within a configuration.
"""
return pulumi.get(self, "classification")
@property
@pulumi.getter
def properties(self) -> Optional[Mapping[str, Any]]:
"""
A map of properties specified within a configuration classification
"""
return pulumi.get(self, "properties")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ClusterCoreInstanceFleetInstanceTypeConfigEbsConfig(dict):
def __init__(__self__, *,
size: int,
type: str,
iops: Optional[int] = None,
volumes_per_instance: Optional[int] = None):
"""
:param int size: The volume size, in gibibytes (GiB).
:param str type: The volume type. Valid options are `gp2`, `io1`, `standard` and `st1`. See [EBS Volume Types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html).
:param int iops: The number of I/O operations per second (IOPS) that the volume supports
:param int volumes_per_instance: The number of EBS volumes with this configuration to attach to each EC2 instance in the instance group (default is 1)
"""
pulumi.set(__self__, "size", size)
pulumi.set(__self__, "type", type)
if iops is not None:
pulumi.set(__self__, "iops", iops)
if volumes_per_instance is not None:
pulumi.set(__self__, "volumes_per_instance", volumes_per_instance)
@property
@pulumi.getter
def size(self) -> int:
"""
The volume size, in gibibytes (GiB).
"""
return pulumi.get(self, "size")
@property
@pulumi.getter
def type(self) -> str:
"""
The volume type. Valid options are `gp2`, `io1`, `standard` and `st1`. See [EBS Volume Types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html).
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def iops(self) -> Optional[int]:
"""
The number of I/O operations per second (IOPS) that the volume supports
"""
return pulumi.get(self, "iops")
@property
@pulumi.getter(name="volumesPerInstance")
def volumes_per_instance(self) -> Optional[int]:
"""
The number of EBS volumes with this configuration to attach to each EC2 instance in the instance group (default is 1)
"""
return pulumi.get(self, "volumes_per_instance")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ClusterCoreInstanceFleetLaunchSpecifications(dict):
def __init__(__self__, *,
on_demand_specifications: Optional[Sequence['outputs.ClusterCoreInstanceFleetLaunchSpecificationsOnDemandSpecification']] = None,
spot_specifications: Optional[Sequence['outputs.ClusterCoreInstanceFleetLaunchSpecificationsSpotSpecification']] = None):
"""
:param Sequence['ClusterCoreInstanceFleetLaunchSpecificationsOnDemandSpecificationArgs'] on_demand_specifications: Configuration block for on demand instances launch specifications
:param Sequence['ClusterCoreInstanceFleetLaunchSpecificationsSpotSpecificationArgs'] spot_specifications: Configuration block for spot instances launch specifications
"""
if on_demand_specifications is not None:
pulumi.set(__self__, "on_demand_specifications", on_demand_specifications)
if spot_specifications is not None:
pulumi.set(__self__, "spot_specifications", spot_specifications)
@property
@pulumi.getter(name="onDemandSpecifications")
def on_demand_specifications(self) -> Optional[Sequence['outputs.ClusterCoreInstanceFleetLaunchSpecificationsOnDemandSpecification']]:
"""
Configuration block for on demand instances launch specifications
"""
return pulumi.get(self, "on_demand_specifications")
@property
@pulumi.getter(name="spotSpecifications")
def spot_specifications(self) -> Optional[Sequence['outputs.ClusterCoreInstanceFleetLaunchSpecificationsSpotSpecification']]:
"""
Configuration block for spot instances launch specifications
"""
return pulumi.get(self, "spot_specifications")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ClusterCoreInstanceFleetLaunchSpecificationsOnDemandSpecification(dict):
def __init__(__self__, *,
allocation_strategy: str):
"""
:param str allocation_strategy: Specifies the strategy to use in launching On-Demand instance fleets. Currently, the only option is `lowest-price` (the default), which launches the lowest price first.
"""
pulumi.set(__self__, "allocation_strategy", allocation_strategy)
@property
@pulumi.getter(name="allocationStrategy")
def allocation_strategy(self) -> str:
"""
Specifies the strategy to use in launching On-Demand instance fleets. Currently, the only option is `lowest-price` (the default), which launches the lowest price first.
"""
return pulumi.get(self, "allocation_strategy")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ClusterCoreInstanceFleetLaunchSpecificationsSpotSpecification(dict):
def __init__(__self__, *,
allocation_strategy: str,
timeout_action: str,
timeout_duration_minutes: int,
block_duration_minutes: Optional[int] = None):
"""
:param str allocation_strategy: Specifies the strategy to use in launching Spot instance fleets. Currently, the only option is `capacity-optimized` (the default), which launches instances from Spot instance pools with optimal capacity for the number of instances that are launching.
:param str timeout_action: The action to take when TargetSpotCapacity has not been fulfilled when the TimeoutDurationMinutes has expired; that is, when all Spot instances could not be provisioned within the Spot provisioning timeout. Valid values are `TERMINATE_CLUSTER` and `SWITCH_TO_ON_DEMAND`. SWITCH_TO_ON_DEMAND specifies that if no Spot instances are available, On-Demand Instances should be provisioned to fulfill any remaining Spot capacity.
:param int timeout_duration_minutes: The spot provisioning timeout period in minutes. If Spot instances are not provisioned within this time period, the TimeOutAction is taken. Minimum value is 5 and maximum value is 1440. The timeout applies only during initial provisioning, when the cluster is first created.
:param int block_duration_minutes: The defined duration for Spot instances (also known as Spot blocks) in minutes. When specified, the Spot instance does not terminate before the defined duration expires, and defined duration pricing for Spot instances applies. Valid values are 60, 120, 180, 240, 300, or 360. The duration period starts as soon as a Spot instance receives its instance ID. At the end of the duration, Amazon EC2 marks the Spot instance for termination and provides a Spot instance termination notice, which gives the instance a two-minute warning before it terminates.
"""
pulumi.set(__self__, "allocation_strategy", allocation_strategy)
pulumi.set(__self__, "timeout_action", timeout_action)
pulumi.set(__self__, "timeout_duration_minutes", timeout_duration_minutes)
if block_duration_minutes is not None:
pulumi.set(__self__, "block_duration_minutes", block_duration_minutes)
@property
@pulumi.getter(name="allocationStrategy")
def allocation_strategy(self) -> str:
"""
Specifies the strategy to use in launching Spot instance fleets. Currently, the only option is `capacity-optimized` (the default), which launches instances from Spot instance pools with optimal capacity for the number of instances that are launching.
"""
return pulumi.get(self, "allocation_strategy")
@property
@pulumi.getter(name="timeoutAction")
def timeout_action(self) -> str:
"""
The action to take when TargetSpotCapacity has not been fulfilled when the TimeoutDurationMinutes has expired; that is, when all Spot instances could not be provisioned within the Spot provisioning timeout. Valid values are `TERMINATE_CLUSTER` and `SWITCH_TO_ON_DEMAND`. SWITCH_TO_ON_DEMAND specifies that if no Spot instances are available, On-Demand Instances should be provisioned to fulfill any remaining Spot capacity.
"""
return pulumi.get(self, "timeout_action")
@property
@pulumi.getter(name="timeoutDurationMinutes")
def timeout_duration_minutes(self) -> int:
"""
The spot provisioning timeout period in minutes. If Spot instances are not provisioned within this time period, the TimeOutAction is taken. Minimum value is 5 and maximum value is 1440. The timeout applies only during initial provisioning, when the cluster is first created.
"""
return pulumi.get(self, "timeout_duration_minutes")
@property
@pulumi.getter(name="blockDurationMinutes")
def block_duration_minutes(self) -> Optional[int]:
"""
The defined duration for Spot instances (also known as Spot blocks) in minutes. When specified, the Spot instance does not terminate before the defined duration expires, and defined duration pricing for Spot instances applies. Valid values are 60, 120, 180, 240, 300, or 360. The duration period starts as soon as a Spot instance receives its instance ID. At the end of the duration, Amazon EC2 marks the Spot instance for termination and provides a Spot instance termination notice, which gives the instance a two-minute warning before it terminates.
"""
return pulumi.get(self, "block_duration_minutes")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
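# Illustrative sketch (hypothetical helper): a Spot specification with all
# required fields, showing how the timeout settings interact. Values are
# examples chosen from the documented valid ranges.
def _example_spot_specification() -> 'ClusterCoreInstanceFleetLaunchSpecificationsSpotSpecification':
    # If Spot capacity is not fulfilled within 10 minutes of initial
    # provisioning, fall back to On-Demand instances instead of terminating
    # the cluster.
    return ClusterCoreInstanceFleetLaunchSpecificationsSpotSpecification(
        allocation_strategy='capacity-optimized',
        timeout_action='SWITCH_TO_ON_DEMAND',
        timeout_duration_minutes=10,
    )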
@pulumi.output_type
class ClusterCoreInstanceGroup(dict):
def __init__(__self__, *,
instance_type: str,
autoscaling_policy: Optional[str] = None,
bid_price: Optional[str] = None,
ebs_configs: Optional[Sequence['outputs.ClusterCoreInstanceGroupEbsConfig']] = None,
id: Optional[str] = None,
instance_count: Optional[int] = None,
name: Optional[str] = None):
"""
:param str instance_type: An EC2 instance type, such as m4.xlarge.
:param str autoscaling_policy: String containing the [EMR Auto Scaling Policy](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-automatic-scaling.html) JSON.
:param str bid_price: The bid price for each EC2 Spot instance type as defined by `instance_type`. Expressed in USD. If neither `bid_price` nor `bid_price_as_percentage_of_on_demand_price` is provided, `bid_price_as_percentage_of_on_demand_price` defaults to 100%.
:param Sequence['ClusterCoreInstanceGroupEbsConfigArgs'] ebs_configs: Configuration block(s) for EBS volumes attached to each instance in the instance group. Detailed below.
:param str id: The ID of the instance group.
:param int instance_count: Target number of instances for the instance group. Must be at least 1. Defaults to 1.
:param str name: Friendly name given to the instance group.
"""
pulumi.set(__self__, "instance_type", instance_type)
if autoscaling_policy is not None:
pulumi.set(__self__, "autoscaling_policy", autoscaling_policy)
if bid_price is not None:
pulumi.set(__self__, "bid_price", bid_price)
if ebs_configs is not None:
pulumi.set(__self__, "ebs_configs", ebs_configs)
if id is not None:
pulumi.set(__self__, "id", id)
if instance_count is not None:
pulumi.set(__self__, "instance_count", instance_count)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="instanceType")
def instance_type(self) -> str:
"""
An EC2 instance type, such as m4.xlarge.
"""
return pulumi.get(self, "instance_type")
@property
@pulumi.getter(name="autoscalingPolicy")
def autoscaling_policy(self) -> Optional[str]:
"""
String containing the [EMR Auto Scaling Policy](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-automatic-scaling.html) JSON.
"""
return pulumi.get(self, "autoscaling_policy")
@property
@pulumi.getter(name="bidPrice")
def bid_price(self) -> Optional[str]:
"""
The bid price for each EC2 Spot instance type as defined by `instance_type`. Expressed in USD. If neither `bid_price` nor `bid_price_as_percentage_of_on_demand_price` is provided, `bid_price_as_percentage_of_on_demand_price` defaults to 100%.
"""
return pulumi.get(self, "bid_price")
@property
@pulumi.getter(name="ebsConfigs")
def ebs_configs(self) -> Optional[Sequence['outputs.ClusterCoreInstanceGroupEbsConfig']]:
"""
Configuration block(s) for EBS volumes attached to each instance in the instance group. Detailed below.
"""
return pulumi.get(self, "ebs_configs")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
The ID of the instance group.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="instanceCount")
def instance_count(self) -> Optional[int]:
"""
Target number of instances for the instance group. Must be at least 1. Defaults to 1.
"""
return pulumi.get(self, "instance_count")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Friendly name given to the instance group.
"""
return pulumi.get(self, "name")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
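# Illustrative sketch (hypothetical helper): a core instance group with a
# nested EBS config, mirroring the shape a cluster's `core_instance_group`
# attribute resolves to. `ClusterCoreInstanceGroupEbsConfig` is defined just
# below and is resolved at call time; all values are examples.
def _example_core_instance_group() -> 'ClusterCoreInstanceGroup':
    return ClusterCoreInstanceGroup(
        instance_type='m4.xlarge',
        instance_count=2,
        ebs_configs=[ClusterCoreInstanceGroupEbsConfig(size=64, type='gp2')],
    )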
@pulumi.output_type
class ClusterCoreInstanceGroupEbsConfig(dict):
def __init__(__self__, *,
size: int,
type: str,
iops: Optional[int] = None,
volumes_per_instance: Optional[int] = None):
"""
:param int size: The volume size, in gibibytes (GiB).
:param str type: The volume type. Valid options are `gp2`, `io1`, `standard` and `st1`. See [EBS Volume Types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html).
:param int iops: The number of I/O operations per second (IOPS) that the volume supports
:param int volumes_per_instance: The number of EBS volumes with this configuration to attach to each EC2 instance in the instance group (default is 1)
"""
pulumi.set(__self__, "size", size)
pulumi.set(__self__, "type", type)
if iops is not None:
pulumi.set(__self__, "iops", iops)
if volumes_per_instance is not None:
pulumi.set(__self__, "volumes_per_instance", volumes_per_instance)
@property
@pulumi.getter
def size(self) -> int:
"""
The volume size, in gibibytes (GiB).
"""
return pulumi.get(self, "size")
@property
@pulumi.getter
def type(self) -> str:
"""
The volume type. Valid options are `gp2`, `io1`, `standard` and `st1`. See [EBS Volume Types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html).
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def iops(self) -> Optional[int]:
"""
The number of I/O operations per second (IOPS) that the volume supports
"""
return pulumi.get(self, "iops")
@property
@pulumi.getter(name="volumesPerInstance")
def volumes_per_instance(self) -> Optional[int]:
"""
The number of EBS volumes with this configuration to attach to each EC2 instance in the instance group (default is 1)
"""
return pulumi.get(self, "volumes_per_instance")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ClusterEc2Attributes(dict):
def __init__(__self__, *,
instance_profile: str,
additional_master_security_groups: Optional[str] = None,
additional_slave_security_groups: Optional[str] = None,
emr_managed_master_security_group: Optional[str] = None,
emr_managed_slave_security_group: Optional[str] = None,
key_name: Optional[str] = None,
service_access_security_group: Optional[str] = None,
subnet_id: Optional[str] = None):
"""
:param str instance_profile: Instance Profile for EC2 instances of the cluster to assume this role
:param str additional_master_security_groups: String containing a comma separated list of additional Amazon EC2 security group IDs for the master node
:param str additional_slave_security_groups: String containing a comma separated list of additional Amazon EC2 security group IDs for the slave nodes
:param str emr_managed_master_security_group: Identifier of the Amazon EC2 EMR-Managed security group for the master node
:param str emr_managed_slave_security_group: Identifier of the Amazon EC2 EMR-Managed security group for the slave nodes
:param str key_name: Amazon EC2 key pair that can be used to ssh to the master node as the user called `hadoop`
:param str service_access_security_group: Identifier of the Amazon EC2 service-access security group - required when the cluster runs on a private subnet
:param str subnet_id: VPC subnet id where you want the job flow to launch. Cannot specify the `cc1.4xlarge` instance type for nodes of a job flow launched in an Amazon VPC
"""
pulumi.set(__self__, "instance_profile", instance_profile)
if additional_master_security_groups is not None:
pulumi.set(__self__, "additional_master_security_groups", additional_master_security_groups)
if additional_slave_security_groups is not None:
pulumi.set(__self__, "additional_slave_security_groups", additional_slave_security_groups)
if emr_managed_master_security_group is not None:
pulumi.set(__self__, "emr_managed_master_security_group", emr_managed_master_security_group)
if emr_managed_slave_security_group is not None:
pulumi.set(__self__, "emr_managed_slave_security_group", emr_managed_slave_security_group)
if key_name is not None:
pulumi.set(__self__, "key_name", key_name)
if service_access_security_group is not None:
pulumi.set(__self__, "service_access_security_group", service_access_security_group)
if subnet_id is not None:
pulumi.set(__self__, "subnet_id", subnet_id)
@property
@pulumi.getter(name="instanceProfile")
def instance_profile(self) -> str:
"""
Instance Profile for EC2 instances of the cluster to assume this role
"""
return pulumi.get(self, "instance_profile")
@property
@pulumi.getter(name="additionalMasterSecurityGroups")
def additional_master_security_groups(self) -> Optional[str]:
"""
String containing a comma separated list of additional Amazon EC2 security group IDs for the master node
"""
return pulumi.get(self, "additional_master_security_groups")
@property
@pulumi.getter(name="additionalSlaveSecurityGroups")
def additional_slave_security_groups(self) -> Optional[str]:
"""
String containing a comma separated list of additional Amazon EC2 security group IDs for the slave nodes
"""
return pulumi.get(self, "additional_slave_security_groups")
@property
@pulumi.getter(name="emrManagedMasterSecurityGroup")
def emr_managed_master_security_group(self) -> Optional[str]:
"""
Identifier of the Amazon EC2 EMR-Managed security group for the master node
"""
return pulumi.get(self, "emr_managed_master_security_group")
@property
@pulumi.getter(name="emrManagedSlaveSecurityGroup")
def emr_managed_slave_security_group(self) -> Optional[str]:
"""
Identifier of the Amazon EC2 EMR-Managed security group for the slave nodes
"""
return pulumi.get(self, "emr_managed_slave_security_group")
@property
@pulumi.getter(name="keyName")
def key_name(self) -> Optional[str]:
"""
Amazon EC2 key pair that can be used to ssh to the master node as the user called `hadoop`
"""
return pulumi.get(self, "key_name")
@property
@pulumi.getter(name="serviceAccessSecurityGroup")
def service_access_security_group(self) -> Optional[str]:
"""
Identifier of the Amazon EC2 service-access security group - required when the cluster runs on a private subnet
"""
return pulumi.get(self, "service_access_security_group")
@property
@pulumi.getter(name="subnetId")
def subnet_id(self) -> Optional[str]:
"""
VPC subnet id where you want the job flow to launch. Cannot specify the `cc1.4xlarge` instance type for nodes of a job flow launched in an Amazon VPC
"""
return pulumi.get(self, "subnet_id")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
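# Illustrative sketch (hypothetical helper): minimal EC2 attributes for a
# cluster in a private subnet, where the service-access security group is
# required. Every identifier below is a placeholder, not a real resource.
def _example_ec2_attributes() -> 'ClusterEc2Attributes':
    return ClusterEc2Attributes(
        instance_profile='EMR_EC2_DefaultRole',
        subnet_id='subnet-0123456789abcdef0',
        key_name='my-ssh-key',
        service_access_security_group='sg-0123456789abcdef0',
    )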
@pulumi.output_type
class ClusterKerberosAttributes(dict):
def __init__(__self__, *,
kdc_admin_password: str,
realm: str,
ad_domain_join_password: Optional[str] = None,
ad_domain_join_user: Optional[str] = None,
cross_realm_trust_principal_password: Optional[str] = None):
"""
:param str kdc_admin_password: The password used within the cluster for the kadmin service on the cluster-dedicated KDC, which maintains Kerberos principals, password policies, and keytabs for the cluster. This provider cannot perform drift detection of this configuration.
:param str realm: The name of the Kerberos realm to which all nodes in a cluster belong. For example, `EC2.INTERNAL`
:param str ad_domain_join_password: The Active Directory password for `ad_domain_join_user`. This provider cannot perform drift detection of this configuration.
:param str ad_domain_join_user: Required only when establishing a cross-realm trust with an Active Directory domain. A user with sufficient privileges to join resources to the domain. This provider cannot perform drift detection of this configuration.
:param str cross_realm_trust_principal_password: Required only when establishing a cross-realm trust with a KDC in a different realm. The cross-realm principal password, which must be identical across realms. This provider cannot perform drift detection of this configuration.
"""
pulumi.set(__self__, "kdc_admin_password", kdc_admin_password)
pulumi.set(__self__, "realm", realm)
if ad_domain_join_password is not None:
pulumi.set(__self__, "ad_domain_join_password", ad_domain_join_password)
if ad_domain_join_user is not None:
pulumi.set(__self__, "ad_domain_join_user", ad_domain_join_user)
if cross_realm_trust_principal_password is not None:
pulumi.set(__self__, "cross_realm_trust_principal_password", cross_realm_trust_principal_password)
@property
@pulumi.getter(name="kdcAdminPassword")
def kdc_admin_password(self) -> str:
"""
The password used within the cluster for the kadmin service on the cluster-dedicated KDC, which maintains Kerberos principals, password policies, and keytabs for the cluster. This provider cannot perform drift detection of this configuration.
"""
return pulumi.get(self, "kdc_admin_password")
@property
@pulumi.getter
def realm(self) -> str:
"""
The name of the Kerberos realm to which all nodes in a cluster belong. For example, `EC2.INTERNAL`
"""
return pulumi.get(self, "realm")
@property
@pulumi.getter(name="adDomainJoinPassword")
def ad_domain_join_password(self) -> Optional[str]:
"""
The Active Directory password for `ad_domain_join_user`. This provider cannot perform drift detection of this configuration.
"""
return pulumi.get(self, "ad_domain_join_password")
@property
@pulumi.getter(name="adDomainJoinUser")
def ad_domain_join_user(self) -> Optional[str]:
"""
Required only when establishing a cross-realm trust with an Active Directory domain. A user with sufficient privileges to join resources to the domain. This provider cannot perform drift detection of this configuration.
"""
return pulumi.get(self, "ad_domain_join_user")
@property
@pulumi.getter(name="crossRealmTrustPrincipalPassword")
def cross_realm_trust_principal_password(self) -> Optional[str]:
"""
Required only when establishing a cross-realm trust with a KDC in a different realm. The cross-realm principal password, which must be identical across realms. This provider cannot perform drift detection of this configuration.
"""
return pulumi.get(self, "cross_realm_trust_principal_password")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
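# Illustrative sketch (hypothetical helper): attributes for a cluster-dedicated
# KDC. A real configuration should source the password from a secret store;
# the literal here is a placeholder only.
def _example_kerberos_attributes() -> 'ClusterKerberosAttributes':
    return ClusterKerberosAttributes(
        kdc_admin_password='REPLACE_ME',
        realm='EC2.INTERNAL',
    )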
@pulumi.output_type
class ClusterMasterInstanceFleet(dict):
def __init__(__self__, *,
id: Optional[str] = None,
instance_type_configs: Optional[Sequence['outputs.ClusterMasterInstanceFleetInstanceTypeConfig']] = None,
launch_specifications: Optional['outputs.ClusterMasterInstanceFleetLaunchSpecifications'] = None,
name: Optional[str] = None,
provisioned_on_demand_capacity: Optional[int] = None,
provisioned_spot_capacity: Optional[int] = None,
target_on_demand_capacity: Optional[int] = None,
target_spot_capacity: Optional[int] = None):
"""
:param str id: The ID of the instance fleet.
:param Sequence['ClusterMasterInstanceFleetInstanceTypeConfigArgs'] instance_type_configs: Configuration block for instance fleet
:param 'ClusterMasterInstanceFleetLaunchSpecificationsArgs' launch_specifications: Configuration block for launch specification
:param str name: Friendly name given to the instance fleet.
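:param int provisioned_on_demand_capacity: The number of On-Demand units that have been provisioned for the instance fleet to fulfill `target_on_demand_capacity`.
:param int provisioned_spot_capacity: The number of Spot units that have been provisioned for the instance fleet to fulfill `target_spot_capacity`.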
:param int target_on_demand_capacity: The target capacity of On-Demand units for the instance fleet, which determines how many On-Demand instances to provision.
:param int target_spot_capacity: The target capacity of Spot units for the instance fleet, which determines how many Spot instances to provision.
"""
if id is not None:
pulumi.set(__self__, "id", id)
if instance_type_configs is not None:
pulumi.set(__self__, "instance_type_configs", instance_type_configs)
if launch_specifications is not None:
pulumi.set(__self__, "launch_specifications", launch_specifications)
if name is not None:
pulumi.set(__self__, "name", name)
if provisioned_on_demand_capacity is not None:
pulumi.set(__self__, "provisioned_on_demand_capacity", provisioned_on_demand_capacity)
if provisioned_spot_capacity is not None:
pulumi.set(__self__, "provisioned_spot_capacity", provisioned_spot_capacity)
if target_on_demand_capacity is not None:
pulumi.set(__self__, "target_on_demand_capacity", target_on_demand_capacity)
if target_spot_capacity is not None:
pulumi.set(__self__, "target_spot_capacity", target_spot_capacity)
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
The ID of the instance fleet.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="instanceTypeConfigs")
def instance_type_configs(self) -> Optional[Sequence['outputs.ClusterMasterInstanceFleetInstanceTypeConfig']]:
"""
Configuration block for instance fleet
"""
return pulumi.get(self, "instance_type_configs")
@property
@pulumi.getter(name="launchSpecifications")
def launch_specifications(self) -> Optional['outputs.ClusterMasterInstanceFleetLaunchSpecifications']:
"""
Configuration block for launch specification
"""
return pulumi.get(self, "launch_specifications")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Friendly name given to the instance fleet.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisionedOnDemandCapacity")
def provisioned_on_demand_capacity(self) -> Optional[int]:
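"""
The number of On-Demand units that have been provisioned for the instance fleet to fulfill `target_on_demand_capacity`.
"""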
return pulumi.get(self, "provisioned_on_demand_capacity")
@property
@pulumi.getter(name="provisionedSpotCapacity")
def provisioned_spot_capacity(self) -> Optional[int]:
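"""
The number of Spot units that have been provisioned for the instance fleet to fulfill `target_spot_capacity`.
"""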
return pulumi.get(self, "provisioned_spot_capacity")
@property
@pulumi.getter(name="targetOnDemandCapacity")
def target_on_demand_capacity(self) -> Optional[int]:
"""
The target capacity of On-Demand units for the instance fleet, which determines how many On-Demand instances to provision.
"""
return pulumi.get(self, "target_on_demand_capacity")
@property
@pulumi.getter(name="targetSpotCapacity")
def target_spot_capacity(self) -> Optional[int]:
"""
The target capacity of Spot units for the instance fleet, which determines how many Spot instances to provision.
"""
return pulumi.get(self, "target_spot_capacity")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
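# Illustrative sketch (hypothetical helper): a master instance fleet weighting
# two instance types against a single On-Demand capacity unit. The type-config
# class is defined just below and is resolved at call time; all values are
# examples.
def _example_master_instance_fleet() -> 'ClusterMasterInstanceFleet':
    return ClusterMasterInstanceFleet(
        name='master-fleet',
        target_on_demand_capacity=1,
        instance_type_configs=[
            ClusterMasterInstanceFleetInstanceTypeConfig(instance_type='m4.xlarge'),
            ClusterMasterInstanceFleetInstanceTypeConfig(instance_type='m5.xlarge'),
        ],
    )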
@pulumi.output_type
class ClusterMasterInstanceFleetInstanceTypeConfig(dict):
def __init__(__self__, *,
instance_type: str,
bid_price: Optional[str] = None,
bid_price_as_percentage_of_on_demand_price: Optional[float] = None,
configurations: Optional[Sequence['outputs.ClusterMasterInstanceFleetInstanceTypeConfigConfiguration']] = None,
ebs_configs: Optional[Sequence['outputs.ClusterMasterInstanceFleetInstanceTypeConfigEbsConfig']] = None,
weighted_capacity: Optional[int] = None):
"""
:param str instance_type: An EC2 instance type, such as m4.xlarge.
:param str bid_price: The bid price for each EC2 Spot instance type as defined by `instance_type`. Expressed in USD. If neither `bid_price` nor `bid_price_as_percentage_of_on_demand_price` is provided, `bid_price_as_percentage_of_on_demand_price` defaults to 100%.
:param float bid_price_as_percentage_of_on_demand_price: The bid price, as a percentage of On-Demand price, for each EC2 Spot instance as defined by `instance_type`. Expressed as a number (for example, 20 specifies 20%). If neither `bid_price` nor `bid_price_as_percentage_of_on_demand_price` is provided, `bid_price_as_percentage_of_on_demand_price` defaults to 100%.
:param Sequence['ClusterMasterInstanceFleetInstanceTypeConfigConfigurationArgs'] configurations: A configuration classification that applies when provisioning cluster instances, which can include configurations for applications and software that run on the cluster. List of `configuration` blocks.
:param Sequence['ClusterMasterInstanceFleetInstanceTypeConfigEbsConfigArgs'] ebs_configs: Configuration block(s) for EBS volumes attached to each instance in the instance group. Detailed below.
:param int weighted_capacity: The number of units that a provisioned instance of this type provides toward fulfilling the target capacities defined in `emr.InstanceFleet`.
"""
pulumi.set(__self__, "instance_type", instance_type)
if bid_price is not None:
pulumi.set(__self__, "bid_price", bid_price)
if bid_price_as_percentage_of_on_demand_price is not None:
pulumi.set(__self__, "bid_price_as_percentage_of_on_demand_price", bid_price_as_percentage_of_on_demand_price)
if configurations is not None:
pulumi.set(__self__, "configurations", configurations)
if ebs_configs is not None:
pulumi.set(__self__, "ebs_configs", ebs_configs)
if weighted_capacity is not None:
pulumi.set(__self__, "weighted_capacity", weighted_capacity)
@property
@pulumi.getter(name="instanceType")
def instance_type(self) -> str:
"""
An EC2 instance type, such as m4.xlarge.
"""
return pulumi.get(self, "instance_type")
@property
@pulumi.getter(name="bidPrice")
def bid_price(self) -> Optional[str]:
"""
The bid price for each EC2 Spot instance type as defined by `instance_type`. Expressed in USD. If neither `bid_price` nor `bid_price_as_percentage_of_on_demand_price` is provided, `bid_price_as_percentage_of_on_demand_price` defaults to 100%.
"""
return pulumi.get(self, "bid_price")
@property
@pulumi.getter(name="bidPriceAsPercentageOfOnDemandPrice")
def bid_price_as_percentage_of_on_demand_price(self) -> Optional[float]:
"""
The bid price, as a percentage of On-Demand price, for each EC2 Spot instance as defined by `instance_type`. Expressed as a number (for example, 20 specifies 20%). If neither `bid_price` nor `bid_price_as_percentage_of_on_demand_price` is provided, `bid_price_as_percentage_of_on_demand_price` defaults to 100%.
"""
return pulumi.get(self, "bid_price_as_percentage_of_on_demand_price")
@property
@pulumi.getter
def configurations(self) -> Optional[Sequence['outputs.ClusterMasterInstanceFleetInstanceTypeConfigConfiguration']]:
"""
A configuration classification that applies when provisioning cluster instances, which can include configurations for applications and software that run on the cluster. List of `configuration` blocks.
"""
return pulumi.get(self, "configurations")
@property
@pulumi.getter(name="ebsConfigs")
def ebs_configs(self) -> Optional[Sequence['outputs.ClusterMasterInstanceFleetInstanceTypeConfigEbsConfig']]:
"""
Configuration block(s) for EBS volumes attached to each instance in the instance group. Detailed below.
"""
return pulumi.get(self, "ebs_configs")
@property
@pulumi.getter(name="weightedCapacity")
def weighted_capacity(self) -> Optional[int]:
"""
The number of units that a provisioned instance of this type provides toward fulfilling the target capacities defined in `emr.InstanceFleet`.
"""
return pulumi.get(self, "weighted_capacity")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ClusterMasterInstanceFleetInstanceTypeConfigConfiguration(dict):
def __init__(__self__, *,
classification: Optional[str] = None,
properties: Optional[Mapping[str, Any]] = None):
"""
:param str classification: The classification within a configuration.
:param Mapping[str, Any] properties: A map of properties specified within a configuration classification
"""
if classification is not None:
pulumi.set(__self__, "classification", classification)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter
def classification(self) -> Optional[str]:
"""
The classification within a configuration.
"""
return pulumi.get(self, "classification")
@property
@pulumi.getter
def properties(self) -> Optional[Mapping[str, Any]]:
"""
A map of properties specified within a configuration classification
"""
return pulumi.get(self, "properties")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
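# Illustrative sketch (hypothetical helper): a configuration classification as
# EMR consumes it, here tuning a Spark default. The classification name and
# property are examples of the documented shape, not requirements.
def _example_configuration() -> 'ClusterMasterInstanceFleetInstanceTypeConfigConfiguration':
    return ClusterMasterInstanceFleetInstanceTypeConfigConfiguration(
        classification='spark-defaults',
        properties={'spark.dynamicAllocation.enabled': 'true'},
    )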
@pulumi.output_type
class ClusterMasterInstanceFleetInstanceTypeConfigEbsConfig(dict):
def __init__(__self__, *,
size: int,
type: str,
iops: Optional[int] = None,
volumes_per_instance: Optional[int] = None):
"""
:param int size: The volume size, in gibibytes (GiB).
:param str type: The volume type. Valid options are `gp2`, `io1`, `standard` and `st1`. See [EBS Volume Types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html).
:param int iops: The number of I/O operations per second (IOPS) that the volume supports
:param int volumes_per_instance: The number of EBS volumes with this configuration to attach to each EC2 instance in the instance group (default is 1)
"""
pulumi.set(__self__, "size", size)
pulumi.set(__self__, "type", type)
if iops is not None:
pulumi.set(__self__, "iops", iops)
if volumes_per_instance is not None:
pulumi.set(__self__, "volumes_per_instance", volumes_per_instance)
@property
@pulumi.getter
def size(self) -> int:
"""
The volume size, in gibibytes (GiB).
"""
return pulumi.get(self, "size")
@property
@pulumi.getter
def type(self) -> str:
"""
The volume type. Valid options are `gp2`, `io1`, `standard` and `st1`. See [EBS Volume Types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html).
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def iops(self) -> Optional[int]:
"""
The number of I/O operations per second (IOPS) that the volume supports
"""
return pulumi.get(self, "iops")
@property
@pulumi.getter(name="volumesPerInstance")
def volumes_per_instance(self) -> Optional[int]:
"""
The number of EBS volumes with this configuration to attach to each EC2 instance in the instance group (default is 1)
"""
return pulumi.get(self, "volumes_per_instance")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ClusterMasterInstanceFleetLaunchSpecifications(dict):
def __init__(__self__, *,
on_demand_specifications: Optional[Sequence['outputs.ClusterMasterInstanceFleetLaunchSpecificationsOnDemandSpecification']] = None,
spot_specifications: Optional[Sequence['outputs.ClusterMasterInstanceFleetLaunchSpecificationsSpotSpecification']] = None):
"""
:param Sequence['ClusterMasterInstanceFleetLaunchSpecificationsOnDemandSpecificationArgs'] on_demand_specifications: Configuration block for on demand instances launch specifications
:param Sequence['ClusterMasterInstanceFleetLaunchSpecificationsSpotSpecificationArgs'] spot_specifications: Configuration block for spot instances launch specifications
"""
if on_demand_specifications is not None:
pulumi.set(__self__, "on_demand_specifications", on_demand_specifications)
if spot_specifications is not None:
pulumi.set(__self__, "spot_specifications", spot_specifications)
@property
@pulumi.getter(name="onDemandSpecifications")
def on_demand_specifications(self) -> Optional[Sequence['outputs.ClusterMasterInstanceFleetLaunchSpecificationsOnDemandSpecification']]:
"""
Configuration block for on demand instances launch specifications
"""
return pulumi.get(self, "on_demand_specifications")
@property
@pulumi.getter(name="spotSpecifications")
def spot_specifications(self) -> Optional[Sequence['outputs.ClusterMasterInstanceFleetLaunchSpecificationsSpotSpecification']]:
"""
Configuration block for spot instances launch specifications
"""
return pulumi.get(self, "spot_specifications")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ClusterMasterInstanceFleetLaunchSpecificationsOnDemandSpecification(dict):
def __init__(__self__, *,
allocation_strategy: str):
"""
:param str allocation_strategy: Specifies the strategy to use in launching On-Demand instance fleets. Currently, the only option is `lowest-price` (the default), which launches the lowest price first.
"""
pulumi.set(__self__, "allocation_strategy", allocation_strategy)
@property
@pulumi.getter(name="allocationStrategy")
def allocation_strategy(self) -> str:
"""
Specifies the strategy to use in launching On-Demand instance fleets. Currently, the only option is `lowest-price` (the default), which launches the lowest price first.
"""
return pulumi.get(self, "allocation_strategy")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ClusterMasterInstanceFleetLaunchSpecificationsSpotSpecification(dict):
def __init__(__self__, *,
allocation_strategy: str,
timeout_action: str,
timeout_duration_minutes: int,
block_duration_minutes: Optional[int] = None):
"""
:param str allocation_strategy: Specifies the strategy to use in launching Spot instance fleets. Currently, the only option is `capacity-optimized` (the default), which launches instances from Spot instance pools with optimal capacity for the number of instances that are launching.
:param str timeout_action: The action to take when TargetSpotCapacity has not been fulfilled when the TimeoutDurationMinutes has expired; that is, when all Spot instances could not be provisioned within the Spot provisioning timeout. Valid values are `TERMINATE_CLUSTER` and `SWITCH_TO_ON_DEMAND`. SWITCH_TO_ON_DEMAND specifies that if no Spot instances are available, On-Demand Instances should be provisioned to fulfill any remaining Spot capacity.
:param int timeout_duration_minutes: The spot provisioning timeout period in minutes. If Spot instances are not provisioned within this time period, the TimeOutAction is taken. Minimum value is 5 and maximum value is 1440. The timeout applies only during initial provisioning, when the cluster is first created.
:param int block_duration_minutes: The defined duration for Spot instances (also known as Spot blocks) in minutes. When specified, the Spot instance does not terminate before the defined duration expires, and defined duration pricing for Spot instances applies. Valid values are 60, 120, 180, 240, 300, or 360. The duration period starts as soon as a Spot instance receives its instance ID. At the end of the duration, Amazon EC2 marks the Spot instance for termination and provides a Spot instance termination notice, which gives the instance a two-minute warning before it terminates.
"""
pulumi.set(__self__, "allocation_strategy", allocation_strategy)
pulumi.set(__self__, "timeout_action", timeout_action)
pulumi.set(__self__, "timeout_duration_minutes", timeout_duration_minutes)
if block_duration_minutes is not None:
pulumi.set(__self__, "block_duration_minutes", block_duration_minutes)
@property
@pulumi.getter(name="allocationStrategy")
def allocation_strategy(self) -> str:
"""
Specifies the strategy to use in launching Spot instance fleets. Currently, the only option is `capacity-optimized` (the default), which launches instances from Spot instance pools with optimal capacity for the number of instances that are launching.
"""
return pulumi.get(self, "allocation_strategy")
@property
@pulumi.getter(name="timeoutAction")
def timeout_action(self) -> str:
"""
The action to take when TargetSpotCapacity has not been fulfilled when the TimeoutDurationMinutes has expired; that is, when all Spot instances could not be provisioned within the Spot provisioning timeout. Valid values are `TERMINATE_CLUSTER` and `SWITCH_TO_ON_DEMAND`. SWITCH_TO_ON_DEMAND specifies that if no Spot instances are available, On-Demand Instances should be provisioned to fulfill any remaining Spot capacity.
"""
return pulumi.get(self, "timeout_action")
@property
@pulumi.getter(name="timeoutDurationMinutes")
def timeout_duration_minutes(self) -> int:
"""
The spot provisioning timeout period in minutes. If Spot instances are not provisioned within this time period, the TimeOutAction is taken. Minimum value is 5 and maximum value is 1440. The timeout applies only during initial provisioning, when the cluster is first created.
"""
return pulumi.get(self, "timeout_duration_minutes")
@property
@pulumi.getter(name="blockDurationMinutes")
def block_duration_minutes(self) -> Optional[int]:
"""
The defined duration for Spot instances (also known as Spot blocks) in minutes. When specified, the Spot instance does not terminate before the defined duration expires, and defined duration pricing for Spot instances applies. Valid values are 60, 120, 180, 240, 300, or 360. The duration period starts as soon as a Spot instance receives its instance ID. At the end of the duration, Amazon EC2 marks the Spot instance for termination and provides a Spot instance termination notice, which gives the instance a two-minute warning before it terminates.
"""
return pulumi.get(self, "block_duration_minutes")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ClusterMasterInstanceGroup(dict):
def __init__(__self__, *,
instance_type: str,
bid_price: Optional[str] = None,
ebs_configs: Optional[Sequence['outputs.ClusterMasterInstanceGroupEbsConfig']] = None,
id: Optional[str] = None,
instance_count: Optional[int] = None,
name: Optional[str] = None):
"""
:param str instance_type: An EC2 instance type, such as m4.xlarge.
:param str bid_price: The bid price for each EC2 Spot instance type as defined by `instance_type`. Expressed in USD. If neither `bid_price` nor `bid_price_as_percentage_of_on_demand_price` is provided, `bid_price_as_percentage_of_on_demand_price` defaults to 100%.
:param Sequence['ClusterMasterInstanceGroupEbsConfigArgs'] ebs_configs: Configuration block(s) for EBS volumes attached to each instance in the instance group. Detailed below.
:param str id: The ID of the instance group.
:param int instance_count: Target number of instances for the instance group. Must be 1 or 3. Defaults to 1. Launching with multiple master nodes is only supported in EMR version 5.23.0+, and requires this resource's `core_instance_group` to be configured. Public (Internet accessible) instances must be created in VPC subnets that have `map public IP on launch` enabled. Termination protection is automatically enabled when launched with multiple master nodes and this provider must have the `termination_protection = false` configuration applied before destroying this resource.
:param str name: Friendly name given to the instance group.
"""
pulumi.set(__self__, "instance_type", instance_type)
if bid_price is not None:
pulumi.set(__self__, "bid_price", bid_price)
if ebs_configs is not None:
pulumi.set(__self__, "ebs_configs", ebs_configs)
if id is not None:
pulumi.set(__self__, "id", id)
if instance_count is not None:
pulumi.set(__self__, "instance_count", instance_count)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="instanceType")
def instance_type(self) -> str:
"""
An EC2 instance type, such as m4.xlarge.
"""
return pulumi.get(self, "instance_type")
@property
@pulumi.getter(name="bidPrice")
def bid_price(self) -> Optional[str]:
"""
The bid price for each EC2 Spot instance type as defined by `instance_type`. Expressed in USD. If neither `bid_price` nor `bid_price_as_percentage_of_on_demand_price` is provided, `bid_price_as_percentage_of_on_demand_price` defaults to 100%.
"""
return pulumi.get(self, "bid_price")
@property
@pulumi.getter(name="ebsConfigs")
def ebs_configs(self) -> Optional[Sequence['outputs.ClusterMasterInstanceGroupEbsConfig']]:
"""
Configuration block(s) for EBS volumes attached to each instance in the instance group. Detailed below.
"""
return pulumi.get(self, "ebs_configs")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
The ID of the instance group.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="instanceCount")
def instance_count(self) -> Optional[int]:
"""
Target number of instances for the instance group. Must be 1 or 3. Defaults to 1. Launching with multiple master nodes is only supported in EMR version 5.23.0+, and requires this resource's `core_instance_group` to be configured. Public (Internet accessible) instances must be created in VPC subnets that have `map public IP on launch` enabled. Termination protection is automatically enabled when launched with multiple master nodes and this provider must have the `termination_protection = false` configuration applied before destroying this resource.
"""
return pulumi.get(self, "instance_count")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Friendly name given to the instance group.
"""
return pulumi.get(self, "name")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ClusterMasterInstanceGroupEbsConfig(dict):
def __init__(__self__, *,
size: int,
type: str,
iops: Optional[int] = None,
volumes_per_instance: Optional[int] = None):
"""
:param int size: The volume size, in gibibytes (GiB).
:param str type: The volume type. Valid options are `gp2`, `io1`, `standard` and `st1`. See [EBS Volume Types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html).
:param int iops: The number of I/O operations per second (IOPS) that the volume supports
:param int volumes_per_instance: The number of EBS volumes with this configuration to attach to each EC2 instance in the instance group (default is 1)
"""
pulumi.set(__self__, "size", size)
pulumi.set(__self__, "type", type)
if iops is not None:
pulumi.set(__self__, "iops", iops)
if volumes_per_instance is not None:
pulumi.set(__self__, "volumes_per_instance", volumes_per_instance)
@property
@pulumi.getter
def size(self) -> int:
"""
The volume size, in gibibytes (GiB).
"""
return pulumi.get(self, "size")
@property
@pulumi.getter
def type(self) -> str:
"""
The volume type. Valid options are `gp2`, `io1`, `standard` and `st1`. See [EBS Volume Types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html).
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def iops(self) -> Optional[int]:
"""
The number of I/O operations per second (IOPS) that the volume supports
"""
return pulumi.get(self, "iops")
@property
@pulumi.getter(name="volumesPerInstance")
def volumes_per_instance(self) -> Optional[int]:
"""
The number of EBS volumes with this configuration to attach to each EC2 instance in the instance group (default is 1)
"""
return pulumi.get(self, "volumes_per_instance")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ClusterStep(dict):
def __init__(__self__, *,
action_on_failure: str,
hadoop_jar_step: 'outputs.ClusterStepHadoopJarStep',
name: str):
"""
:param str action_on_failure: The action to take if the step fails. Valid values: `TERMINATE_JOB_FLOW`, `TERMINATE_CLUSTER`, `CANCEL_AND_WAIT`, and `CONTINUE`
:param 'ClusterStepHadoopJarStepArgs' hadoop_jar_step: The JAR file used for the step. Defined below.
:param str name: Friendly name given to the step.
"""
pulumi.set(__self__, "action_on_failure", action_on_failure)
pulumi.set(__self__, "hadoop_jar_step", hadoop_jar_step)
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="actionOnFailure")
def action_on_failure(self) -> str:
"""
The action to take if the step fails. Valid values: `TERMINATE_JOB_FLOW`, `TERMINATE_CLUSTER`, `CANCEL_AND_WAIT`, and `CONTINUE`
"""
return pulumi.get(self, "action_on_failure")
@property
@pulumi.getter(name="hadoopJarStep")
def hadoop_jar_step(self) -> 'outputs.ClusterStepHadoopJarStep':
"""
The JAR file used for the step. Defined below.
"""
return pulumi.get(self, "hadoop_jar_step")
@property
@pulumi.getter
def name(self) -> str:
"""
Friendly name given to the step.
"""
return pulumi.get(self, "name")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ClusterStepHadoopJarStep(dict):
def __init__(__self__, *,
jar: str,
args: Optional[Sequence[str]] = None,
main_class: Optional[str] = None,
properties: Optional[Mapping[str, str]] = None):
"""
:param str jar: Path to a JAR file run during the step.
:param Sequence[str] args: List of command line arguments passed to the JAR file's main function when executed.
:param str main_class: Name of the main class in the specified Java file. If not specified, the JAR file should specify a Main-Class in its manifest file.
:param Mapping[str, str] properties: A map of properties specified within a configuration classification
"""
pulumi.set(__self__, "jar", jar)
if args is not None:
pulumi.set(__self__, "args", args)
if main_class is not None:
pulumi.set(__self__, "main_class", main_class)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter
def jar(self) -> str:
"""
Path to a JAR file run during the step.
"""
return pulumi.get(self, "jar")
@property
@pulumi.getter
def args(self) -> Optional[Sequence[str]]:
"""
List of command line arguments passed to the JAR file's main function when executed.
"""
return pulumi.get(self, "args")
@property
@pulumi.getter(name="mainClass")
def main_class(self) -> Optional[str]:
"""
Name of the main class in the specified Java file. If not specified, the JAR file should specify a Main-Class in its manifest file.
"""
return pulumi.get(self, "main_class")
@property
@pulumi.getter
def properties(self) -> Optional[Mapping[str, str]]:
"""
A map of properties specified within a configuration classification
"""
return pulumi.get(self, "properties")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
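# Illustrative sketch (hypothetical helper): a step running the built-in
# `command-runner.jar` to enable Hadoop debugging, paired with an action on
# failure. This is one common pattern, not a requirement of these types.
def _example_debug_step() -> 'ClusterStep':
    return ClusterStep(
        name='Setup Hadoop Debugging',
        action_on_failure='TERMINATE_CLUSTER',
        hadoop_jar_step=ClusterStepHadoopJarStep(
            jar='command-runner.jar',
            args=['state-pusher-script'],
        ),
    )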
@pulumi.output_type
class InstanceFleetInstanceTypeConfig(dict):
def __init__(__self__, *,
instance_type: str,
bid_price: Optional[str] = None,
bid_price_as_percentage_of_on_demand_price: Optional[float] = None,
configurations: Optional[Sequence['outputs.InstanceFleetInstanceTypeConfigConfiguration']] = None,
ebs_configs: Optional[Sequence['outputs.InstanceFleetInstanceTypeConfigEbsConfig']] = None,
weighted_capacity: Optional[int] = None):
"""
:param str instance_type: An EC2 instance type, such as m4.xlarge.
:param str bid_price: The bid price for each EC2 Spot instance type as defined by `instance_type`. Expressed in USD. If neither `bid_price` nor `bid_price_as_percentage_of_on_demand_price` is provided, `bid_price_as_percentage_of_on_demand_price` defaults to 100%.
:param float bid_price_as_percentage_of_on_demand_price: The bid price, as a percentage of On-Demand price, for each EC2 Spot instance as defined by `instance_type`. Expressed as a number (for example, 20 specifies 20%). If neither `bid_price` nor `bid_price_as_percentage_of_on_demand_price` is provided, `bid_price_as_percentage_of_on_demand_price` defaults to 100%.
:param Sequence['InstanceFleetInstanceTypeConfigConfigurationArgs'] configurations: A configuration classification that applies when provisioning cluster instances, which can include configurations for applications and software that run on the cluster. List of `configuration` blocks.
:param Sequence['InstanceFleetInstanceTypeConfigEbsConfigArgs'] ebs_configs: Configuration block(s) for EBS volumes attached to each instance in the instance group. Detailed below.
:param int weighted_capacity: The number of units that a provisioned instance of this type provides toward fulfilling the target capacities defined in `emr.InstanceFleet`.
"""
pulumi.set(__self__, "instance_type", instance_type)
if bid_price is not None:
pulumi.set(__self__, "bid_price", bid_price)
if bid_price_as_percentage_of_on_demand_price is not None:
pulumi.set(__self__, "bid_price_as_percentage_of_on_demand_price", bid_price_as_percentage_of_on_demand_price)
if configurations is not None:
pulumi.set(__self__, "configurations", configurations)
if ebs_configs is not None:
pulumi.set(__self__, "ebs_configs", ebs_configs)
if weighted_capacity is not None:
pulumi.set(__self__, "weighted_capacity", weighted_capacity)
@property
@pulumi.getter(name="instanceType")
def instance_type(self) -> str:
"""
An EC2 instance type, such as m4.xlarge.
"""
return pulumi.get(self, "instance_type")
@property
@pulumi.getter(name="bidPrice")
def bid_price(self) -> Optional[str]:
"""
The bid price for each EC2 Spot instance type as defined by `instance_type`. Expressed in USD. If neither `bid_price` nor `bid_price_as_percentage_of_on_demand_price` is provided, `bid_price_as_percentage_of_on_demand_price` defaults to 100%.
"""
return pulumi.get(self, "bid_price")
@property
@pulumi.getter(name="bidPriceAsPercentageOfOnDemandPrice")
def bid_price_as_percentage_of_on_demand_price(self) -> Optional[float]:
"""
The bid price, as a percentage of On-Demand price, for each EC2 Spot instance as defined by `instance_type`. Expressed as a number (for example, 20 specifies 20%). If neither `bid_price` nor `bid_price_as_percentage_of_on_demand_price` is provided, `bid_price_as_percentage_of_on_demand_price` defaults to 100%.
"""
return pulumi.get(self, "bid_price_as_percentage_of_on_demand_price")
@property
@pulumi.getter
def configurations(self) -> Optional[Sequence['outputs.InstanceFleetInstanceTypeConfigConfiguration']]:
"""
A configuration classification that applies when provisioning cluster instances, which can include configurations for applications and software that run on the cluster. List of `configuration` blocks.
"""
return pulumi.get(self, "configurations")
@property
@pulumi.getter(name="ebsConfigs")
def ebs_configs(self) -> Optional[Sequence['outputs.InstanceFleetInstanceTypeConfigEbsConfig']]:
"""
Configuration block(s) for EBS volumes attached to each instance in the instance group. Detailed below.
"""
return pulumi.get(self, "ebs_configs")
@property
@pulumi.getter(name="weightedCapacity")
def weighted_capacity(self) -> Optional[int]:
"""
The number of units that a provisioned instance of this type provides toward fulfilling the target capacities defined in `emr.InstanceFleet`.
"""
return pulumi.get(self, "weighted_capacity")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
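# Illustrative sketch (hypothetical helper): two instance type configs whose
# `weighted_capacity` values let the larger type count double toward the
# fleet's target capacity. All values are examples.
def _example_weighted_type_configs() -> Sequence['InstanceFleetInstanceTypeConfig']:
    return [
        InstanceFleetInstanceTypeConfig(instance_type='m4.xlarge', weighted_capacity=1),
        InstanceFleetInstanceTypeConfig(instance_type='m4.2xlarge', weighted_capacity=2),
    ]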
@pulumi.output_type
class InstanceFleetInstanceTypeConfigConfiguration(dict):
def __init__(__self__, *,
classification: Optional[str] = None,
properties: Optional[Mapping[str, Any]] = None):
"""
:param str classification: The classification within a configuration.
:param Mapping[str, Any] properties: A map of properties specified within a configuration classification
"""
if classification is not None:
pulumi.set(__self__, "classification", classification)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter
def classification(self) -> Optional[str]:
"""
The classification within a configuration.
"""
return pulumi.get(self, "classification")
@property
@pulumi.getter
def properties(self) -> Optional[Mapping[str, Any]]:
"""
A map of properties specified within a configuration classification
"""
return pulumi.get(self, "properties")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class InstanceFleetInstanceTypeConfigEbsConfig(dict):
def __init__(__self__, *,
size: int,
type: str,
iops: Optional[int] = None,
volumes_per_instance: Optional[int] = None):
"""
:param int size: The volume size, in gibibytes (GiB).
:param str type: The volume type. Valid options are `gp2`, `io1`, `standard` and `st1`. See [EBS Volume Types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html).
:param int iops: The number of I/O operations per second (IOPS) that the volume supports
:param int volumes_per_instance: The number of EBS volumes with this configuration to attach to each EC2 instance in the instance group (default is 1)
"""
pulumi.set(__self__, "size", size)
pulumi.set(__self__, "type", type)
if iops is not None:
pulumi.set(__self__, "iops", iops)
if volumes_per_instance is not None:
pulumi.set(__self__, "volumes_per_instance", volumes_per_instance)
@property
@pulumi.getter
def size(self) -> int:
"""
The volume size, in gibibytes (GiB).
"""
return pulumi.get(self, "size")
@property
@pulumi.getter
def type(self) -> str:
"""
The volume type. Valid options are `gp2`, `io1`, `standard` and `st1`. See [EBS Volume Types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html).
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def iops(self) -> Optional[int]:
"""
The number of I/O operations per second (IOPS) that the volume supports
"""
return pulumi.get(self, "iops")
@property
@pulumi.getter(name="volumesPerInstance")
def volumes_per_instance(self) -> Optional[int]:
"""
The number of EBS volumes with this configuration to attach to each EC2 instance in the instance group (default is 1)
"""
return pulumi.get(self, "volumes_per_instance")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class InstanceFleetLaunchSpecifications(dict):
def __init__(__self__, *,
on_demand_specifications: Optional[Sequence['outputs.InstanceFleetLaunchSpecificationsOnDemandSpecification']] = None,
spot_specifications: Optional[Sequence['outputs.InstanceFleetLaunchSpecificationsSpotSpecification']] = None):
"""
:param Sequence['InstanceFleetLaunchSpecificationsOnDemandSpecificationArgs'] on_demand_specifications: Configuration block for on demand instances launch specifications
:param Sequence['InstanceFleetLaunchSpecificationsSpotSpecificationArgs'] spot_specifications: Configuration block for spot instances launch specifications
"""
if on_demand_specifications is not None:
pulumi.set(__self__, "on_demand_specifications", on_demand_specifications)
if spot_specifications is not None:
pulumi.set(__self__, "spot_specifications", spot_specifications)
@property
@pulumi.getter(name="onDemandSpecifications")
def on_demand_specifications(self) -> Optional[Sequence['outputs.InstanceFleetLaunchSpecificationsOnDemandSpecification']]:
"""
Configuration block for on demand instances launch specifications
"""
return pulumi.get(self, "on_demand_specifications")
@property
@pulumi.getter(name="spotSpecifications")
def spot_specifications(self) -> Optional[Sequence['outputs.InstanceFleetLaunchSpecificationsSpotSpecification']]:
"""
Configuration block for spot instances launch specifications
"""
return pulumi.get(self, "spot_specifications")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class InstanceFleetLaunchSpecificationsOnDemandSpecification(dict):
def __init__(__self__, *,
allocation_strategy: str):
"""
:param str allocation_strategy: Specifies the strategy to use in launching On-Demand instance fleets. Currently, the only option is `lowest-price` (the default), which launches the lowest price first.
"""
pulumi.set(__self__, "allocation_strategy", allocation_strategy)
@property
@pulumi.getter(name="allocationStrategy")
def allocation_strategy(self) -> str:
"""
Specifies the strategy to use in launching On-Demand instance fleets. Currently, the only option is `lowest-price` (the default), which launches the lowest price first.
"""
return pulumi.get(self, "allocation_strategy")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class InstanceFleetLaunchSpecificationsSpotSpecification(dict):
def __init__(__self__, *,
allocation_strategy: str,
timeout_action: str,
timeout_duration_minutes: int,
block_duration_minutes: Optional[int] = None):
"""
:param str allocation_strategy: Specifies the strategy to use in launching Spot instance fleets. Currently, the only option is `capacity-optimized` (the default), which launches instances from Spot instance pools with optimal capacity for the number of instances that are launching.
:param str timeout_action: The action to take when TargetSpotCapacity has not been fulfilled when the TimeoutDurationMinutes has expired; that is, when all Spot instances could not be provisioned within the Spot provisioning timeout. Valid values are `TERMINATE_CLUSTER` and `SWITCH_TO_ON_DEMAND`. SWITCH_TO_ON_DEMAND specifies that if no Spot instances are available, On-Demand Instances should be provisioned to fulfill any remaining Spot capacity.
:param int timeout_duration_minutes: The spot provisioning timeout period in minutes. If Spot instances are not provisioned within this time period, the TimeOutAction is taken. Minimum value is 5 and maximum value is 1440. The timeout applies only during initial provisioning, when the cluster is first created.
:param int block_duration_minutes: The defined duration for Spot instances (also known as Spot blocks) in minutes. When specified, the Spot instance does not terminate before the defined duration expires, and defined duration pricing for Spot instances applies. Valid values are 60, 120, 180, 240, 300, or 360. The duration period starts as soon as a Spot instance receives its instance ID. At the end of the duration, Amazon EC2 marks the Spot instance for termination and provides a Spot instance termination notice, which gives the instance a two-minute warning before it terminates.
"""
pulumi.set(__self__, "allocation_strategy", allocation_strategy)
pulumi.set(__self__, "timeout_action", timeout_action)
pulumi.set(__self__, "timeout_duration_minutes", timeout_duration_minutes)
if block_duration_minutes is not None:
pulumi.set(__self__, "block_duration_minutes", block_duration_minutes)
@property
@pulumi.getter(name="allocationStrategy")
def allocation_strategy(self) -> str:
"""
Specifies the strategy to use in launching Spot instance fleets. Currently, the only option is `capacity-optimized` (the default), which launches instances from Spot instance pools with optimal capacity for the number of instances that are launching.
"""
return pulumi.get(self, "allocation_strategy")
@property
@pulumi.getter(name="timeoutAction")
def timeout_action(self) -> str:
"""
The action to take when TargetSpotCapacity has not been fulfilled when the TimeoutDurationMinutes has expired; that is, when all Spot instances could not be provisioned within the Spot provisioning timeout. Valid values are `TERMINATE_CLUSTER` and `SWITCH_TO_ON_DEMAND`. SWITCH_TO_ON_DEMAND specifies that if no Spot instances are available, On-Demand Instances should be provisioned to fulfill any remaining Spot capacity.
"""
return pulumi.get(self, "timeout_action")
@property
@pulumi.getter(name="timeoutDurationMinutes")
def timeout_duration_minutes(self) -> int:
"""
The spot provisioning timeout period in minutes. If Spot instances are not provisioned within this time period, the TimeOutAction is taken. Minimum value is 5 and maximum value is 1440. The timeout applies only during initial provisioning, when the cluster is first created.
"""
return pulumi.get(self, "timeout_duration_minutes")
@property
@pulumi.getter(name="blockDurationMinutes")
def block_duration_minutes(self) -> Optional[int]:
"""
The defined duration for Spot instances (also known as Spot blocks) in minutes. When specified, the Spot instance does not terminate before the defined duration expires, and defined duration pricing for Spot instances applies. Valid values are 60, 120, 180, 240, 300, or 360. The duration period starts as soon as a Spot instance receives its instance ID. At the end of the duration, Amazon EC2 marks the Spot instance for termination and provides a Spot instance termination notice, which gives the instance a two-minute warning before it terminates.
"""
return pulumi.get(self, "block_duration_minutes")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
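# A minimal sketch of the wire-format dict this output type is deserialized
# from (hypothetical values; the camelCase keys mirror the @pulumi.getter
# names above):
#
#   spot_specification = {
#       "allocationStrategy": "capacity-optimized",
#       "timeoutAction": "SWITCH_TO_ON_DEMAND",
#       "timeoutDurationMinutes": 10,
#       "blockDurationMinutes": 60,
#   }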
@pulumi.output_type
class InstanceGroupEbsConfig(dict):
def __init__(__self__, *,
size: int,
type: str,
iops: Optional[int] = None,
volumes_per_instance: Optional[int] = None):
"""
:param int size: The volume size, in gibibytes (GiB). This can be a number from 1 - 1024. If the volume type is EBS-optimized, the minimum value is 10.
        :param str type: The volume type. Valid options are `gp2`, `io1` and `standard`.
:param int iops: The number of I/O operations per second (IOPS) that the volume supports.
:param int volumes_per_instance: The number of EBS Volumes to attach per instance.
"""
pulumi.set(__self__, "size", size)
pulumi.set(__self__, "type", type)
if iops is not None:
pulumi.set(__self__, "iops", iops)
if volumes_per_instance is not None:
pulumi.set(__self__, "volumes_per_instance", volumes_per_instance)
@property
@pulumi.getter
def size(self) -> int:
"""
The volume size, in gibibytes (GiB). This can be a number from 1 - 1024. If the volume type is EBS-optimized, the minimum value is 10.
"""
return pulumi.get(self, "size")
@property
@pulumi.getter
def type(self) -> str:
"""
        The volume type. Valid options are `gp2`, `io1` and `standard`.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def iops(self) -> Optional[int]:
"""
The number of I/O operations per second (IOPS) that the volume supports.
"""
return pulumi.get(self, "iops")
@property
@pulumi.getter(name="volumesPerInstance")
def volumes_per_instance(self) -> Optional[int]:
"""
The number of EBS Volumes to attach per instance.
"""
return pulumi.get(self, "volumes_per_instance")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ManagedScalingPolicyComputeLimit(dict):
def __init__(__self__, *,
maximum_capacity_units: int,
minimum_capacity_units: int,
unit_type: str,
maximum_core_capacity_units: Optional[int] = None,
maximum_ondemand_capacity_units: Optional[int] = None):
"""
:param int maximum_capacity_units: The upper boundary of EC2 units. It is measured through VCPU cores or instances for instance groups and measured through units for instance fleets. Managed scaling activities are not allowed beyond this boundary. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration.
:param int minimum_capacity_units: The lower boundary of EC2 units. It is measured through VCPU cores or instances for instance groups and measured through units for instance fleets. Managed scaling activities are not allowed beyond this boundary. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration.
:param str unit_type: The unit type used for specifying a managed scaling policy. Valid Values: `InstanceFleetUnits` | `Instances` | `VCPU`
:param int maximum_core_capacity_units: The upper boundary of EC2 units for core node type in a cluster. It is measured through VCPU cores or instances for instance groups and measured through units for instance fleets. The core units are not allowed to scale beyond this boundary. The parameter is used to split capacity allocation between core and task nodes.
:param int maximum_ondemand_capacity_units: The upper boundary of On-Demand EC2 units. It is measured through VCPU cores or instances for instance groups and measured through units for instance fleets. The On-Demand units are not allowed to scale beyond this boundary. The parameter is used to split capacity allocation between On-Demand and Spot instances.
"""
pulumi.set(__self__, "maximum_capacity_units", maximum_capacity_units)
pulumi.set(__self__, "minimum_capacity_units", minimum_capacity_units)
pulumi.set(__self__, "unit_type", unit_type)
if maximum_core_capacity_units is not None:
pulumi.set(__self__, "maximum_core_capacity_units", maximum_core_capacity_units)
if maximum_ondemand_capacity_units is not None:
pulumi.set(__self__, "maximum_ondemand_capacity_units", maximum_ondemand_capacity_units)
@property
@pulumi.getter(name="maximumCapacityUnits")
def maximum_capacity_units(self) -> int:
"""
The upper boundary of EC2 units. It is measured through VCPU cores or instances for instance groups and measured through units for instance fleets. Managed scaling activities are not allowed beyond this boundary. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration.
"""
return pulumi.get(self, "maximum_capacity_units")
@property
@pulumi.getter(name="minimumCapacityUnits")
def minimum_capacity_units(self) -> int:
"""
The lower boundary of EC2 units. It is measured through VCPU cores or instances for instance groups and measured through units for instance fleets. Managed scaling activities are not allowed beyond this boundary. The limit only applies to the core and task nodes. The master node cannot be scaled after initial configuration.
"""
return pulumi.get(self, "minimum_capacity_units")
@property
@pulumi.getter(name="unitType")
def unit_type(self) -> str:
"""
The unit type used for specifying a managed scaling policy. Valid Values: `InstanceFleetUnits` | `Instances` | `VCPU`
"""
return pulumi.get(self, "unit_type")
@property
@pulumi.getter(name="maximumCoreCapacityUnits")
def maximum_core_capacity_units(self) -> Optional[int]:
"""
The upper boundary of EC2 units for core node type in a cluster. It is measured through VCPU cores or instances for instance groups and measured through units for instance fleets. The core units are not allowed to scale beyond this boundary. The parameter is used to split capacity allocation between core and task nodes.
"""
return pulumi.get(self, "maximum_core_capacity_units")
@property
@pulumi.getter(name="maximumOndemandCapacityUnits")
def maximum_ondemand_capacity_units(self) -> Optional[int]:
"""
The upper boundary of On-Demand EC2 units. It is measured through VCPU cores or instances for instance groups and measured through units for instance fleets. The On-Demand units are not allowed to scale beyond this boundary. The parameter is used to split capacity allocation between On-Demand and Spot instances.
"""
return pulumi.get(self, "maximum_ondemand_capacity_units")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
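# A minimal sketch of the corresponding wire-format dict (hypothetical values;
# the camelCase keys mirror the @pulumi.getter names above):
#
#   compute_limits = {
#       "unitType": "Instances",
#       "minimumCapacityUnits": 1,
#       "maximumCapacityUnits": 10,
#       "maximumCoreCapacityUnits": 8,
#       "maximumOndemandCapacityUnits": 5,
#   }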
|
class Queue(list):
def __init__(self, *args, **kwargs):
super(Queue, self).__init__(*args, **kwargs)
def reposition(self, original_position, new_position):
temp = self[original_position]
del self[original_position]
try:
self.insert(new_position, temp)
except IndexError:
self.insert(original_position, temp)
raise
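# Usage sketch:
#
#   q = Queue([1, 2, 3])
#   q.reposition(0, 2)  # move the first item to index 2 -> [2, 3, 1]
#
# Note that list.insert() clamps out-of-range indices instead of raising
# IndexError, so the except branch above is purely defensive.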
|
import asyncio
import socket
from urllib.parse import urlparse
from .exceptions import * # pylint: disable=wildcard-import
from .protocol import AmqpProtocol
from .version import __version__
from .version import __packagename__
async def connect(host='localhost', port=None, login='guest', password='guest',
virtualhost='/', ssl=None, login_method='PLAIN', insist=False,
protocol_factory=AmqpProtocol, *, loop=None, **kwargs):
"""Convenient method to connect to an AMQP broker
@host: the host to connect to
@port: broker port
@login: login
@password: password
@virtualhost: AMQP virtualhost to use for this connection
@ssl: SSL context used for secure connections, omit for no SSL
- see https://docs.python.org/3/library/ssl.html
@login_method: AMQP auth method
@insist: Insist on connecting to a server
@protocol_factory:
Factory to use, if you need to subclass AmqpProtocol
@loop: Set the event loop to use
@kwargs: Arguments to be given to the protocol_factory instance
        Returns: a (transport, protocol) tuple, where protocol is an AmqpProtocol instance
"""
if loop is None:
loop = asyncio.get_event_loop()
factory = lambda: protocol_factory(loop=loop, **kwargs)
create_connection_kwargs = {}
if ssl is not None:
create_connection_kwargs['ssl'] = ssl
if port is None:
if ssl:
port = 5671
else:
port = 5672
transport, protocol = await loop.create_connection(
factory, host, port, **create_connection_kwargs
)
# these 2 flags *may* show up in sock.type. They are only available on linux
# see https://bugs.python.org/issue21327
nonblock = getattr(socket, 'SOCK_NONBLOCK', 0)
cloexec = getattr(socket, 'SOCK_CLOEXEC', 0)
sock = transport.get_extra_info('socket')
if sock is not None and (sock.type & ~nonblock & ~cloexec) == socket.SOCK_STREAM:
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
try:
await protocol.start_connection(host, port, login, password, virtualhost, ssl=ssl, login_method=login_method,
insist=insist)
except Exception:
await protocol.wait_closed()
raise
return transport, protocol
async def from_url(
url, login_method='PLAIN', insist=False, protocol_factory=AmqpProtocol, **kwargs):
""" Connect to the AMQP using a single url parameter and return the client.
For instance:
amqp://user:password@hostname:port/vhost
@insist: Insist on connecting to a server
@protocol_factory:
Factory to use, if you need to subclass AmqpProtocol
@loop: optionally set the event loop to use.
@kwargs: Arguments to be given to the protocol_factory instance
        Returns: a (transport, protocol) tuple, where protocol is an AmqpProtocol instance
"""
url = urlparse(url)
if url.scheme not in ('amqp', 'amqps'):
raise ValueError('Invalid protocol %s, valid protocols are amqp or amqps' % url.scheme)
transport, protocol = await connect(
host=url.hostname or 'localhost',
port=url.port,
login=url.username or 'guest',
password=url.password or 'guest',
virtualhost=(url.path[1:] if len(url.path) > 1 else '/'),
login_method=login_method,
insist=insist,
protocol_factory=protocol_factory,
**kwargs)
return transport, protocol
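# Usage sketch (assumes a reachable broker; the URL below is illustrative):
#
#   import asyncio
#
#   async def main():
#       transport, protocol = await from_url('amqp://guest:guest@localhost:5672/')
#       channel = await protocol.channel()
#       # ... publish / consume on the channel ...
#       await protocol.close()
#       transport.close()
#
#   asyncio.get_event_loop().run_until_complete(main())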
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .ioc_finder import find_iocs
__author__ = """Floyd Hightower"""
__version__ = '1.2.17'
|
from util.table import table
from util.region import *
from util.Daegu import Daegu
from util.Seoul import Seoul
#from util.Gangwon import Gangwon
from util.KST import kst_time
from util.collector import collector
regions = [Seoul().collect,
Daegu().collect,
busan,
daejeon,
gwangju,
ulsan,
incheon,
sejong,
gyeongbuk,
gyeongnam,
gyeonggi,
chungbuk,
chungnam,
gangwon,
jeonbuk,
jeonnam,
jeju,
foreign,
]
if __name__ == "__main__":
    tbl = table()
    data = collector(regions)
    try:
        tbl.generate(data)
    except Exception:
        # retry once with freshly collected data
        data = collector(regions)
        tbl.generate(data)
    readme = open('README.md', mode='wt', encoding='utf-8')
    readme.write('''
## CCP Virus (SARS-CoV-2) confirmed-case statistics for South Korea
#### Confirmed cases of CCP-Virus in Korea
{0} KST
These statistics are compiled from the **official data** published on each **city/province/district/county office** website.
They may differ from the official figures announced by the Korea Centers for Disease Control and Prevention (KCDC).
{1}
** The figures shown on the Busan and Gyeongsangnam-do websites are lower than the KCDC announcements. **<br>
Confirmed case #1 (a Chinese national) is excluded.
'''.format(kst_time(), tbl.Chart))
    readme.close()
|
v = int(input('Enter a value: '))
validador = 0
contador = 1
while contador < v:
    if v % contador == 0:
        validador += 1
    contador += 1
# A prime has exactly two divisors. The loop counts divisors below v
# (including 1); v itself adds one more, hence validador + 1 in total.
if validador > 1 or v < 2:
    print(f'This number is NOT prime: it is divisible by {validador + 1} different numbers')
else:
    print('This number is prime')
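# Sample run:
#   Enter a value: 7
#   This number is prime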
|
# pylint: disable=C0301,C0103,R0913,R0914,R0904,C0111,R0201,R0902
import warnings
from itertools import count
from struct import pack
from typing import Tuple, List, Any
import numpy as np
from numpy import zeros, where, searchsorted
from numpy.linalg import eigh # type: ignore
from pyNastran.utils.numpy_utils import float_types
from pyNastran.f06.f06_formatting import write_floats_13e, _eigenvalue_header
from pyNastran.op2.result_objects.op2_objects import get_times_dtype
from pyNastran.op2.tables.oes_stressStrain.real.oes_objects import StressObject, StrainObject, OES_Object
from pyNastran.op2.op2_interface.write_utils import to_column_bytes
class RealSolidArray(OES_Object):
def __init__(self, data_code, is_sort1, isubcase, dt):
OES_Object.__init__(self, data_code, isubcase, apply_data_code=False)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.nelements = 0 # result specific
#if is_sort1:
##sort1
#self.add_node = self.add_node_sort1
#self.add_eid = self.add_eid_sort1
#else:
#raise NotImplementedError('SORT2')
@property
def is_real(self) -> bool:
return True
@property
def is_complex(self) -> bool:
return False
def get_headers(self):
raise NotImplementedError()
def _reset_indices(self) -> None:
self.itotal = 0
self.ielement = 0
def update_data_components(self):
ntimes, nelements_nnodes = self.data.shape[:2]
# vm
oxx = self.data[:, :, 0].reshape(ntimes * nelements_nnodes)
oyy = self.data[:, :, 1].reshape(ntimes * nelements_nnodes)
ozz = self.data[:, :, 2].reshape(ntimes * nelements_nnodes)
txy = self.data[:, :, 3].reshape(ntimes * nelements_nnodes)
tyz = self.data[:, :, 4].reshape(ntimes * nelements_nnodes)
txz = self.data[:, :, 5].reshape(ntimes * nelements_nnodes)
#I1 = oxx + oyy + ozz
#txyz = txy**2 + tyz**2 + txz ** 2
#I2 = oxx * oyy + oyy * ozz + ozz * oxx - txyz
#I3 = oxx * oyy * ozz + 2 * txy * tyz * txz + oxx * tyz**2 - oyy * txz**2 - ozz * txy
# (n_subarrays, nrows, ncols)
o1, o2, o3 = calculate_principal_components(
ntimes, nelements_nnodes,
oxx, oyy, ozz, txy, tyz, txz,
self.is_stress)
ovm_sheari = calculate_ovm_shear(oxx, oyy, ozz, txy, tyz, txz, o1, o3,
self.is_von_mises, self.is_stress)
ovm_sheari2 = ovm_sheari.reshape(ntimes, nelements_nnodes)
self.data[:, :, 6] = o1.reshape(ntimes, nelements_nnodes)
self.data[:, :, 7] = o2.reshape(ntimes, nelements_nnodes)
self.data[:, :, 8] = o3.reshape(ntimes, nelements_nnodes)
self.data[:, :, 9] = ovm_sheari2
#A = [[doxx, dtxy, dtxz],
#[dtxy, doyy, dtyz],
#[dtxz, dtyz, dozz]]
#(_lambda, v) = eigh(A) # a hermitian matrix is a symmetric-real matrix
def __iadd__(self, factor):
"""[A] += b"""
#[oxx, oyy, ozz, txy, tyz, txz, o1, o2, o3, ovmShear]
if isinstance(factor, float_types):
self.data[:, :, :6] += factor
else:
# TODO: should support arrays
raise TypeError('factor=%s and must be a float' % (factor))
self.update_data_components()
def __isub__(self, factor):
"""[A] -= b"""
if isinstance(factor, float_types):
self.data[:, :, :6] -= factor
else:
# TODO: should support arrays
raise TypeError('factor=%s and must be a float' % (factor))
self.update_data_components()
def __imul__(self, factor):
"""[A] *= b"""
assert isinstance(factor, float_types), 'factor=%s and must be a float' % (factor)
self.data[:, :, :6] *= factor
self.update_data_components()
def __idiv__(self, factor):
"""[A] *= b"""
assert isinstance(factor, float_types), 'factor=%s and must be a float' % (factor)
self.data[:, :, :6] *= 1. / factor
self.update_data_components()
def build(self):
"""sizes the vectorized attributes of the RealSolidArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size, self.analysis_fmt)
if self.is_sort1:
ntimes = self.ntimes
ntotal = self.ntotal
nelements = self.nelements
else:
#print(f'ntimes={self.ntimes} nelements={self.nelements} ntotal={self.ntotal}')
ntimes = self.nelements
ntotal = self.ntotal
nelements = self.ntimes
#print(f'ntimes={ntimes} nelements={nelements} ntotal={ntotal}')
#self.ntimes = ntimes
#self.ntotal = ntotal
#self.nelements = nelements
_times = zeros(ntimes, dtype=dtype)
# TODO: could be more efficient by using nelements for cid
element_node = zeros((ntotal, 2), dtype=idtype)
element_cid = zeros((nelements, 2), dtype=idtype)
#if nelements > 5000:
#raise RuntimeError(nelements)
#if self.element_name == 'CTETRA':
#nnodes = 4
#elif self.element_name == 'CPENTA':
#nnodes = 6
#elif self.element_name == 'CHEXA':
#nnodes = 8
#self.element_node = zeros((self.ntotal, nnodes, 2), 'int32')
#[oxx, oyy, ozz, txy, tyz, txz, o1, o2, o3, ovmShear]
data = zeros((ntimes, ntotal, 10), fdtype)
self.nnodes = element_node.shape[0] // self.nelements
#self.data = zeros((self.ntimes, self.nelements, nnodes+1, 10), 'float32')
if self.load_as_h5:
#for key, value in sorted(self.data_code.items()):
#print(key, value)
group = self._get_result_group()
self._times = group.create_dataset('_times', data=_times)
self.element_node = group.create_dataset('element_node', data=element_node)
self.element_cid = group.create_dataset('element_cid', data=element_cid)
self.data = group.create_dataset('data', data=data)
else:
self._times = _times
self.element_node = element_node
self.element_cid = element_cid
self.data = data
def build_dataframe(self):
"""creates a pandas dataframe"""
import pandas as pd
headers = self.get_headers()
# TODO: cid?
#element_node = [self.element_node[:, 0], self.element_node[:, 1]]
if self.nonlinear_factor not in (None, np.nan):
column_names, column_values = self._build_dataframe_transient_header()
data_frame = self._build_pandas_transient_element_node(
column_values, column_names,
headers, self.element_node, self.data)
#self.data_frame = pd.Panel(self.data, items=column_values, major_axis=element_node, minor_axis=headers).to_frame()
#self.data_frame.columns.names = column_names
#self.data_frame.index.names = ['ElementID', 'NodeID', 'Item']
else:
# Static sxc sxd sxe sxf smax smin MS_tension MS_compression
# ElementID NodeID
# 12 22 0.0 0.0 0.0 0.0 0.0 0.0 1.401298e-45 1.401298e-45
# 26 0.0 0.0 0.0 0.0 0.0 0.0 1.401298e-45 1.401298e-45
index = pd.MultiIndex.from_arrays(self.element_node.T, names=['ElementID', 'NodeID'])
data_frame = pd.DataFrame(self.data[0], columns=headers, index=index)
data_frame.columns.names = ['Static']
self.data_frame = data_frame
def add_eid_sort1(self, unused_etype, cid, dt, eid, unused_node_id,
oxx, oyy, ozz, txy, tyz, txz, o1, o2, o3,
unused_acos, unused_bcos, unused_ccos, unused_pressure, ovm):
# See the CHEXA, CPENTA, or CTETRA entry for the definition of the element coordinate systems.
# The material coordinate system (CORDM) may be the basic system (0 or blank), any defined system
# (Integer > 0), or the standard internal coordinate system of the element designated as:
# -1: element coordinate system (-1)
# -2: element system based on eigenvalue techniques to insure non bias in the element formulation(-2).
# C:\MSC.Software\msc_nastran_runs\ecs-2-rg.op2
assert cid >= -2, cid
assert eid >= 0, eid
#print(f'dt={dt} eid={eid}')
self._times[self.itime] = dt
self.element_node[self.itotal, :] = [eid, 0] # 0 is center
omax_mid_min = [o1, o2, o3]
omin = min(omax_mid_min)
omax_mid_min.remove(omin)
omax = max(omax_mid_min)
omax_mid_min.remove(omax)
omid = omax_mid_min[0]
self.data[self.itime, self.itotal, :] = [oxx, oyy, ozz, txy, tyz, txz, omax, omid, omin, ovm]
#self.data[self.itime, self.ielement, 0, :] = [oxx, oyy, ozz, txy, tyz, txz, o1, o2, o3, ovm]
#print('element_cid[%i, :] = [%s, %s]' % (self.ielement, eid, cid))
if self.ielement == self.nelements:
self.ielement = 0
self.element_cid[self.ielement, :] = [eid, cid]
self.itotal += 1
self.ielement += 1
def add_node_sort1(self, dt, eid, unused_inode, node_id,
oxx, oyy, ozz, txy, tyz, txz, o1, o2, o3,
unused_acos, unused_bcos, unused_ccos, unused_pressure, ovm):
# skipping aCos, bCos, cCos, pressure
omax_mid_min = [o1, o2, o3]
omin = min(omax_mid_min)
omax_mid_min.remove(omin)
omax = max(omax_mid_min)
omax_mid_min.remove(omax)
omid = omax_mid_min[0]
self.data[self.itime, self.itotal, :] = [oxx, oyy, ozz, txy, tyz, txz, omax, omid, omin, ovm]
#print('data[%s, %s, :] = %s' % (self.itime, self.itotal, str(self.data[self.itime, self.itotal, :])))
#self.data[self.itime, self.ielement-1, self.inode, :] = [oxx, oyy, ozz, txy, tyz, txz, o1, o2, o3, ovm]
#print('eid=%i node_id=%i exx=%s' % (eid, node_id, str(oxx)))
self.element_node[self.itotal, :] = [eid, node_id]
#self.element_node[self.ielement-1, self.inode-1, :] = [eid, node_id]
self.itotal += 1
def add_eid_sort2(self, unused_etype, cid, dt, eid, unused_node_id,
oxx, oyy, ozz, txy, tyz, txz, o1, o2, o3,
unused_acos, unused_bcos, unused_ccos, unused_pressure, ovm):
#itime = self.ielement
#ielement = self.itotal
#itotal = self.itime
#print(self.ntimes, self.nelements, self.ntotal, self.nnodes)
itime = self.itotal // self.nnodes
ielement = self.itime
itotal = self.itotal
assert cid >= -2, cid
assert eid >= 0, eid
#try:
self._times[itime] = dt
#print(f'dt={dt} eid={eid} ielement={ielement} -> itime={itime} itotal={itotal}')
#except IndexError:
#print(f'*dt={dt} eid={eid} ielement={ielement} -> itime={itime} itotal={itotal}')
#self.itime += 1
#self.ielement += 1
#return
self.element_node[itotal, :] = [eid, 0] # 0 is center
omax_mid_min = [o1, o2, o3]
omin = min(omax_mid_min)
omax_mid_min.remove(omin)
omax = max(omax_mid_min)
omax_mid_min.remove(omax)
omid = omax_mid_min[0]
self.data[itime, itotal, :] = [oxx, oyy, ozz, txy, tyz, txz, omax, omid, omin, ovm]
#print('element_cid[%i, :] = [%s, %s]' % (self.ielement, eid, cid))
#if self.ielement == self.nelements:
#self.ielement = 0
self.element_cid[ielement, :] = [eid, cid]
#self.itime += 1
self.itotal += 1
self.ielement += 1
#print('self._times', self._times)
def add_node_sort2(self, dt, eid, unused_inode, node_id,
oxx, oyy, ozz, txy, tyz, txz, o1, o2, o3,
unused_acos, unused_bcos, unused_ccos, unused_pressure, ovm):
#ielement = self.ielement
#itotal = self.itotal
#itime = self.itime
#itime=0 ielement=1 itotal=1
#itime=0 ielement=1 itotal=2
#itime=0 ielement=1 itotal=3
#itime=0 ielement=1 itotal=4
#ielement = self.ielement
#itime = (self.itime - 1) % self.nelements
#itime = self.itime - 1
nnodes = self.nnodes
itime = self.itotal // nnodes
itotal = self.itotal
#ielement = self.ielement - 1
#ielement = self.itime
#inode = self.itotal % nnodes
#itotal2 = (self.ielement - 1) * nnodes + inode
#print(f' itime={itime} itotal={itotal}; nid={node_id}; '
#f'ielement={ielement} inode={inode} -> itotal2={itotal2}')
# skipping aCos, bCos, cCos, pressure
omax_mid_min = [o1, o2, o3]
omin = min(omax_mid_min)
omax_mid_min.remove(omin)
omax = max(omax_mid_min)
omax_mid_min.remove(omax)
omid = omax_mid_min[0]
self.data[itime, itotal, :] = [oxx, oyy, ozz, txy, tyz, txz, omax, omid, omin, ovm]
#print('data[%s, %s, :] = %s' % (self.itime, self.itotal, str(self.data[self.itime, self.itotal, :])))
#print('eid=%i node_id=%i exx=%s' % (eid, node_id, str(oxx)))
self.element_node[itotal, :] = [eid, node_id]
#self.element_node[ielement-1, inode-1, :] = [eid, node_id]
self.itotal += 1
def __eq__(self, table): # pragma: no cover
assert self.is_sort1 == table.is_sort1
self._eq_header(table)
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ieid, eid_nid in enumerate(self.element_node):
(eid, nid) = eid_nid
t1 = self.data[itime, ieid, :]
t2 = table.data[itime, ieid, :]
(oxx1, oyy1, ozz1, txy1, tyz1, txz1, o11, o21, o31, ovm1) = t1
(oxx2, oyy2, ozz2, txy2, tyz2, txz2, o12, o22, o32, ovm2) = t2
if not np.array_equal(t1, t2):
msg += (
'(%s, %s) (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n'
'%s (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n' % (
eid, nid,
oxx1, oyy1, ozz1, txy1, tyz1, txz1, o11, o21, o31, ovm1,
' ' * (len(str(eid)) + len(str(nid)) + 2),
oxx2, oyy2, ozz2, txy2, tyz2, txz2, o12, o22, o32, ovm2))
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
@property
def nnodes_per_element(self) -> int:
return self.nnodes_per_element_no_centroid + 1
@property
def nnodes_per_element_no_centroid(self) -> int:
if self.element_type == 39: # CTETRA
nnodes = 4
elif self.element_type == 67: # CHEXA
nnodes = 8
elif self.element_type == 68: # CPENTA
nnodes = 6
elif self.element_type == 255: # CPYRAM
nnodes = 5
else:
raise NotImplementedError(f'element_name={self.element_name} self.element_type={self.element_type}')
return nnodes
def get_stats(self, short: bool=False) -> List[str]:
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
f' ntimes: {self.ntimes:d}\n',
f' ntotal: {self.ntotal:d}\n',
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
try:
nnodes_per_element = self.element_node.shape[0] // nelements
except ZeroDivisionError:
nnodes_per_element = '???'
nnodes = self.element_node.shape[0]
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i nnodes=%i\n nnodes_per_element=%s (including centroid)\n'
% (self.__class__.__name__, ntimes, nelements, nnodes, nnodes_per_element))
ntimes_word = 'ntimes'
else:
            msg.append('  type=%s nelements=%i nnodes=%i\n  nnodes_per_element=%s (including centroid)\n'
% (self.__class__.__name__, nelements, nnodes, nnodes_per_element))
ntimes_word = '1'
msg.append(' eType, cid\n')
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nnodes, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(f' element_node.shape = {self.element_node.shape}\n')
msg.append(f' element_cid.shape = {self.element_cid.shape}\n')
msg.append(f' data.shape = {self.data.shape}\n')
msg.append(' element name: %s\n' % self.element_name)
msg += self.get_data_code()
#print(''.join(msg))
return msg
def get_element_index(self, eids):
# elements are always sorted; nodes are not
itot = searchsorted(eids, self.element_node[:, 0]) #[0]
return itot
def eid_to_element_node_index(self, eids):
#ind = ravel([searchsorted(self.element_node[:, 0] == eid) for eid in eids])
ind = searchsorted(eids, self.element_node[:, 0])
#ind = ind.reshape(ind.size)
#ind.sort()
return ind
def write_f06(self, f06_file, header=None, page_stamp: str='PAGE %s',
page_num: int=1, is_mag_phase: bool=False, is_sort1: bool=True):
calculate_directional_vectors = True
if header is None:
header = []
nnodes, msg_temp = _get_f06_header_nnodes(self, is_mag_phase)
# write the f06
ntimes = self.data.shape[0]
eids2 = self.element_node[:, 0]
nodes = self.element_node[:, 1]
eids3 = self.element_cid[:, 0]
cids3 = self.element_cid[:, 1]
fdtype = self.data.dtype
oxx = self.data[:, :, 0]
oyy = self.data[:, :, 1]
ozz = self.data[:, :, 2]
txy = self.data[:, :, 3]
tyz = self.data[:, :, 4]
txz = self.data[:, :, 5]
o1 = self.data[:, :, 6]
o2 = self.data[:, :, 7]
o3 = self.data[:, :, 8]
ovm = self.data[:, :, 9]
p = (o1 + o2 + o3) / -3.
nnodes_total = self.data.shape[1]
if calculate_directional_vectors:
v = calculate_principal_eigenvectors4(
ntimes, nnodes_total,
oxx, oyy, ozz, txy, txz, tyz,
fdtype)[1]
else:
            v = np.zeros((ntimes, nnodes_total, 3, 3), dtype=fdtype)
for itime in range(ntimes):
dt = self._times[itime]
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
oxx = self.data[itime, :, 0]
oyy = self.data[itime, :, 1]
ozz = self.data[itime, :, 2]
txy = self.data[itime, :, 3]
tyz = self.data[itime, :, 4]
txz = self.data[itime, :, 5]
o1 = self.data[itime, :, 6]
o2 = self.data[itime, :, 7]
o3 = self.data[itime, :, 8]
ovm = self.data[itime, :, 9]
vi = v[itime, :, :, :]
pi = p[itime, :]
cnnodes = nnodes + 1
for i, deid, node_id, doxx, doyy, dozz, dtxy, dtyz, dtxz, do1, do2, do3, dp, dv, dovm in zip(
count(), eids2, nodes, oxx, oyy, ozz, txy, tyz, txz, o1, o2, o3, pi, vi, ovm):
# o1-max
# o2-mid
# o3-min
assert do1 >= do2 >= do3, 'o1 >= o2 >= o3; eid=%s o1=%e o2=%e o3=%e' % (deid, do1, do2, do3)
[oxxi, oyyi, ozzi, txyi, tyzi, txzi, o1i, o2i, o3i, pii, ovmi] = write_floats_13e(
[doxx, doyy, dozz, dtxy, dtyz, dtxz, do1, do2, do3, dp, dovm])
if i % cnnodes == 0:
j = where(eids3 == deid)[0][0]
cid = cids3[j]
f06_file.write('0 %8s %8iGRID CS %i GP\n' % (deid, cid, nnodes))
f06_file.write(
'0 %8s X %-13s XY %-13s A %-13s LX%5.2f%5.2f%5.2f %-13s %s\n'
' %8s Y %-13s YZ %-13s B %-13s LY%5.2f%5.2f%5.2f\n'
' %8s Z %-13s ZX %-13s C %-13s LZ%5.2f%5.2f%5.2f\n'
% ('CENTER', oxxi, txyi, o1i, dv[0, 1], dv[0, 2], dv[0, 0], pii, ovmi,
'', oyyi, tyzi, o2i, dv[1, 1], dv[1, 2], dv[1, 0],
'', ozzi, txzi, o3i, dv[2, 1], dv[2, 2], dv[2, 0]))
else:
f06_file.write(
'0 %8s X %-13s XY %-13s A %-13s LX%5.2f%5.2f%5.2f %-13s %s\n'
' %8s Y %-13s YZ %-13s B %-13s LY%5.2f%5.2f%5.2f\n'
' %8s Z %-13s ZX %-13s C %-13s LZ%5.2f%5.2f%5.2f\n'
% (node_id, oxxi, txyi, o1i, dv[0, 1], dv[0, 2], dv[0, 0], pii, ovmi,
'', oyyi, tyzi, o2i, dv[1, 1], dv[1, 2], dv[1, 0],
'', ozzi, txzi, o3i, dv[2, 1], dv[2, 2], dv[2, 0]))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
def write_op2(self, op2_file, op2_ascii, itable, new_result,
date, is_mag_phase=False, endian='>'):
"""writes an OP2"""
import inspect
calculate_directional_vectors = True
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write(f'{self.__class__.__name__}.write_op2: {call_frame[1][3]}\n')
if itable == -1:
#print('***************', itable)
self._write_table_header(op2_file, op2_ascii, date)
itable = -3
#if isinstance(self.nonlinear_factor, float):
#op2_format = '%sif' % (7 * self.ntimes)
#raise NotImplementedError()
#else:
#op2_format = 'i21f'
#s = Struct(op2_format)
nnodes_expected = self.nnodes
eids2 = self.element_node[:, 0]
nodes = self.element_node[:, 1]
nelements_nodes = len(nodes)
eids3 = self.element_cid[:, 0]
cids3 = self.element_cid[:, 1]
element_device = eids3 * 10 + self.device_code
# table 4 info
#ntimes = self.data.shape[0]
nnodes = self.data.shape[1]
nelements = len(np.unique(eids2))
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
nnodes_centroid = self.nnodes_per_element
nnodes_no_centroid = self.nnodes_per_element_no_centroid
        # ntotali = 4 + 21 * nnodes_no_centroid  (superseded by num_wide below)
        ntotali = self.num_wide
ntotal = ntotali * nelements
#print('shape = %s' % str(self.data.shape))
assert nnodes > 1, nnodes
#assert self.ntimes == 1, self.ntimes
op2_ascii.write(f' ntimes = {self.ntimes}\n')
ntimes = self.ntimes
#print('ntotal=%s' % (ntotal))
if not self.is_sort1:
raise NotImplementedError('SORT2')
#op2_format = endian + b'2i6f'
idtype = self.element_cid.dtype
fdtype = self.data.dtype
if self.size == 4:
grid_bytes = b'GRID'
else:
warnings.warn(f'downcasting {self.class_name}...')
idtype = np.int32(1)
fdtype = np.float32(1.0)
grid_bytes = b'GRID'
cen_array = np.full(nelements, grid_bytes, dtype='|S4')
nnodes_no_centroid_array = np.full(nelements, nnodes_no_centroid, dtype=idtype)
element_wise_data = to_column_bytes([
element_device, # ints
cids3, # ints
cen_array, # bytes
nnodes_no_centroid_array, # ints
], fdtype, debug=False)
oxx = self.data[:, :, 0]
oyy = self.data[:, :, 1]
ozz = self.data[:, :, 2]
txy = self.data[:, :, 3]
tyz = self.data[:, :, 4]
txz = self.data[:, :, 5]
o1 = self.data[:, :, 6]
o2 = self.data[:, :, 7]
o3 = self.data[:, :, 8]
ovm = self.data[:, :, 9]
p = (o1 + o2 + o3) / -3.
# speed up transient cases, but slightly slows down static cases
data_out = np.empty((nelements, 4+21*nnodes_centroid), dtype=fdtype)
# setting:
# - CTETRA: [element_device, cid, 'CEN/', 4]
# - CPYRAM: [element_device, cid, 'CEN/', 5]
# - CPENTA: [element_device, cid, 'CEN/', 6]
# - CHEXA: [element_device, cid, 'CEN/', 8]
data_out[:, :4] = element_wise_data
# we could tack the nodes on, so we don't have to keep stacking it
# but we run into issues with datai
#
# total=nelements_nodes
#nodes_view = nodes.view(fdtype).reshape(nelements, nnodes_centroid)
#inode = np.arange(nnodes_centroid)
#data_out[:, 4+inode*21] = nodes_view[:, inode]
# v is the (3, 3) eigenvector for every time and every element
if calculate_directional_vectors:
v = calculate_principal_eigenvectors4(
ntimes, nnodes,
oxx, oyy, ozz, txy, txz, tyz,
fdtype)[1]
else:
v = np.zeros((ntimes, nnodes, 3, 3), dtype=fdtype)
op2_ascii.write(f'nelements={nelements:d}\n')
for itime in range(self.ntimes):
vi = v[itime, :, :, :]
self._write_table_3(op2_file, op2_ascii, new_result, itable, itime)
# record 4
#print('stress itable = %s' % itable)
itable -= 1
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2_file.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write(f'r4 [4, {itable:d}, 4]\n')
op2_ascii.write(f'r4 [4, {4 * ntotal:d}, 4]\n')
col_inputs = [
nodes,
oxx[itime, :], txy[itime, :], o1[itime, :], vi[:, 0, 1], vi[:, 0, 2], vi[:, 0, 0], p[itime, :], ovm[itime, :],
oyy[itime, :], tyz[itime, :], o2[itime, :], vi[:, 1, 1], vi[:, 1, 2], vi[:, 1, 0],
ozz[itime, :], txz[itime, :], o3[itime, :], vi[:, 2, 1], vi[:, 2, 2], vi[:, 2, 0],
]
# stack each output by columns and fix any dtypes
datai = to_column_bytes(col_inputs, fdtype)
#datai2 = datai.reshape(nelements, 21*nnodes_centroid)
#data_out = np.hstack([element_wise_data, datai2])
#data_out[:, 4:] = datai2
# switch datai to element format and put it in the output buffer
data_out[:, 4:] = datai.reshape(nelements, 21*nnodes_centroid)
op2_file.write(data_out)
itable -= 1
header = [4 * ntotal,]
op2_file.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
class RealSolidStressArray(RealSolidArray, StressObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
RealSolidArray.__init__(self, data_code, is_sort1, isubcase, dt)
StressObject.__init__(self, data_code, isubcase)
def get_headers(self) -> List[str]:
if self.is_von_mises:
von_mises = 'von_mises'
else:
von_mises = 'max_shear'
headers = ['oxx', 'oyy', 'ozz', 'txy', 'tyz', 'txz', 'omax', 'omid', 'omin', von_mises]
return headers
class RealSolidStrainArray(RealSolidArray, StrainObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
RealSolidArray.__init__(self, data_code, is_sort1, isubcase, dt)
StrainObject.__init__(self, data_code, isubcase)
def get_headers(self) -> List[str]:
if self.is_von_mises:
von_mises = 'von_mises'
else:
von_mises = 'max_shear'
headers = ['exx', 'eyy', 'ezz', 'exy', 'eyz', 'exz', 'emax', 'emid', 'emin', von_mises]
return headers
def _get_solid_msgs(self: RealSolidArray):
if self.is_von_mises:
von_mises = 'VON MISES'
else:
von_mises = 'MAX SHEAR'
if self.is_stress:
base_msg = [
'0 CORNER ------CENTER AND CORNER POINT STRESSES--------- DIR. COSINES MEAN \n',
' ELEMENT-ID GRID-ID NORMAL SHEAR PRINCIPAL -A- -B- -C- PRESSURE %s \n' % von_mises]
tetra_msg = [' S T R E S S E S I N T E T R A H E D R O N S O L I D E L E M E N T S ( C T E T R A )\n', ]
penta_msg = [' S T R E S S E S I N P E N T A H E D R O N S O L I D E L E M E N T S ( P E N T A )\n', ]
hexa_msg = [' S T R E S S E S I N H E X A H E D R O N S O L I D E L E M E N T S ( H E X A )\n', ]
pyram_msg = [' S T R E S S E S I N P Y R A M I D S O L I D E L E M E N T S ( P Y R A M )\n', ]
else:
base_msg = [
'0 CORNER ------CENTER AND CORNER POINT STRAINS--------- DIR. COSINES MEAN \n',
' ELEMENT-ID GRID-ID NORMAL SHEAR PRINCIPAL -A- -B- -C- PRESSURE %s \n' % von_mises]
tetra_msg = [' S T R A I N S I N T E T R A H E D R O N S O L I D E L E M E N T S ( C T E T R A )\n', ]
penta_msg = [' S T R A I N S I N P E N T A H E D R O N S O L I D E L E M E N T S ( P E N T A )\n', ]
hexa_msg = [' S T R A I N S I N H E X A H E D R O N S O L I D E L E M E N T S ( H E X A )\n', ]
pyram_msg = [' S T R A I N S I N P Y R A M I D S O L I D E L E M E N T S ( P Y R A M )\n', ]
    tetra_msg += base_msg
    penta_msg += base_msg
    hexa_msg += base_msg
    pyram_msg += base_msg
    return tetra_msg, penta_msg, hexa_msg, pyram_msg
def _get_f06_header_nnodes(self: RealSolidArray, is_mag_phase=True):
tetra_msg, penta_msg, hexa_msg, pyram_msg = _get_solid_msgs(self)
if self.element_type == 39: # CTETRA
msg = tetra_msg
nnodes = 4
elif self.element_type == 67: # CHEXA
msg = hexa_msg
nnodes = 8
elif self.element_type == 68: # CPENTA
msg = penta_msg
nnodes = 6
elif self.element_type == 255: # CPYRAM
msg = pyram_msg
nnodes = 5
else: # pragma: no cover
msg = f'element_name={self.element_name} self.element_type={self.element_type}'
raise NotImplementedError(msg)
return nnodes, msg
def calculate_principal_components(ntimes: int, nelements_nnodes: int,
oxx, oyy, ozz,
txy, tyz, txz,
is_stress: bool) -> Tuple[Any, Any, Any]:
"""
TODO: scale by 2 for strain
"""
a_matrix = np.full((ntimes * nelements_nnodes, 3, 3), np.nan)
#print(a_matrix.shape, oxx.shape)
a_matrix[:, 0, 0] = oxx
a_matrix[:, 1, 1] = oyy
a_matrix[:, 2, 2] = ozz
    # we're only filling the lower part of the A matrix;
    # np.linalg.eigvalsh reads only the lower triangle by default (UPLO='L')
if is_stress:
a_matrix[:, 1, 0] = txy
a_matrix[:, 2, 0] = txz
a_matrix[:, 2, 1] = tyz
else:
a_matrix[:, 1, 0] = txy / 2.
a_matrix[:, 2, 0] = txz / 2.
a_matrix[:, 2, 1] = tyz / 2.
eigs = np.linalg.eigvalsh(a_matrix) # array = (..., M, M) array
o1 = eigs[:, 2]
o2 = eigs[:, 1]
o3 = eigs[:, 0]
return o1, o2, o3
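# Quick sanity check (hypothetical values): a uniaxial stress state
# (oxx=100, all other components 0) has principal stresses (100, 0, 0).
#
#   o1, o2, o3 = calculate_principal_components(
#       1, 1,
#       np.array([100.]), np.array([0.]), np.array([0.]),
#       np.array([0.]), np.array([0.]), np.array([0.]),
#       is_stress=True)
#   # o1 -> [100.], o2 -> [0.], o3 -> [0.]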
def calculate_principal_eigenvectors5(ntimes: int, nelements: int, nnodes: int,
oxx: np.ndarray, oyy: np.ndarray, ozz: np.ndarray,
txy: np.ndarray, txz: np.ndarray, tyz: np.ndarray,
dtype):
"""
For 10 CTETRA elements (5 nodes) with 2 times, the shape would be:
>>> (ntimes, nelements, nnodes, 3, 3)
>>> (2, 10, 5, 3, 3)
TODO: scale by 2 for strain
"""
a_matrix = np.empty((ntimes, nelements, nnodes, 3, 3), dtype=dtype)
# we're only filling the lower part of the A matrix
a_matrix[:, :, :, 0, 0] = oxx
a_matrix[:, :, :, 1, 1] = oyy
a_matrix[:, :, :, 2, 2] = ozz
a_matrix[:, :, :, 1, 0] = txy
a_matrix[:, :, :, 2, 0] = txz
a_matrix[:, :, :, 2, 1] = tyz
# _lambda: ntimes, nelements, nnodes, (3)
# v: ntimes, nelements, nnodes, (3, 3)
(_lambda, v) = eigh(a_matrix) # a hermitian matrix is a symmetric-real matrix
return _lambda, v
def calculate_principal_eigenvectors4(ntimes: int, nnodes: int,
oxx: np.ndarray, oyy: np.ndarray, ozz: np.ndarray,
txy: np.ndarray, txz: np.ndarray, tyz: np.ndarray,
dtype):
"""
For 10 CTETRA elements (5 nodes) with 2 times, the shape would be:
>>> (ntimes, nelements*nnodes, 3, 3)
>>> (2, 10*5, 3, 3)
TODO: scale by 2 for strain
Parameters
----------
oxx : (ntimes, nnodes) np.ndarray
Returns
-------
eigenvalues : (ntimes, nnodes, 3)
the eigenvalues
eigenvectors : (ntimes, nnodes, 3, 3)
the eigenvectors
"""
a_matrix = np.empty((ntimes, nnodes, 3, 3), dtype=dtype)
# we're only filling the lower part of the A matrix
try:
a_matrix[:, :, 0, 0] = oxx
a_matrix[:, :, 1, 1] = oyy
a_matrix[:, :, 2, 2] = ozz
a_matrix[:, :, 1, 0] = txy
a_matrix[:, :, 2, 0] = txz
a_matrix[:, :, 2, 1] = tyz
except Exception:
raise RuntimeError(f'a_matrix.shape={a_matrix.shape} oxx.shape={oxx.shape}')
# _lambda: ntimes, nnodes, (3)
# v: ntimes, nnodes, (3, 3)
(_lambda, v) = eigh(a_matrix) # a hermitian matrix is a symmetric-real matrix
return _lambda, v
def calculate_ovm_shear(oxx, oyy, ozz,
txy, tyz, txz, o1, o3,
is_von_mises: bool,
is_stress: bool):
if is_von_mises:
        # von Mises: sqrt(0.5*[(oxx-oyy)^2 + (oyy-ozz)^2 + (oxx-ozz)^2] + 3*[txy^2 + tyz^2 + txz^2])
        ovm_shear = np.sqrt(0.5 * ((oxx - oyy)**2 + (oyy - ozz)**2 + (oxx - ozz)**2) +
                            3. * (txy**2 + tyz**2 + txz**2))
else:
# max shear
ovm_shear = (o1 - o3) / 2.
return ovm_shear
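# Quick sanity check: for a uniaxial stress oxx = s (all other components 0),
# the von Mises branch above reduces to sqrt(0.5 * (s**2 + s**2)) = s, and the
# max-shear branch gives (s - 0) / 2, as expected.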
|
import json
from datetime import timedelta as td
from django.utils.timezone import now
from hc.api.models import Check
from hc.test import BaseTestCase
class ListChecksTestCase(BaseTestCase):
def setUp(self):
super(ListChecksTestCase, self).setUp()
self.now = now().replace(microsecond=0)
self.a1 = Check(user=self.alice, name="Alice 1")
self.a1.timeout = td(seconds=3600)
self.a1.grace = td(seconds=900)
self.a1.last_ping = self.now
self.a1.n_pings = 1
self.a1.status = "new"
self.a1.save()
self.a2 = Check(user=self.alice, name="Alice 2")
self.a2.timeout = td(seconds=86400)
self.a2.grace = td(seconds=3600)
self.a2.last_ping = self.now
self.a2.status = "up"
self.a2.save()
def get(self):
return self.client.get("/api/v1/checks/", HTTP_X_API_KEY="abc")
def test_it_works(self):
r = self.get()
# Assert the response status code
self.assertEqual(r.status_code, 200)
doc = r.json()
self.assertTrue("checks" in doc)
checks = {check["name"]: check for check in doc["checks"]}
# Assert the expected length of checks
self.assertEqual(len(checks), 2)
        # Assert Alice 1's and Alice 2's timeout, grace, ping_url,
        # and status
self.assertEqual(checks['Alice 1']['timeout'], 3600)
self.assertEqual(checks['Alice 1']['grace'], 900)
self.assertEqual(checks["Alice 1"]["ping_url"], self.a1.url())
self.assertEqual(checks['Alice 1']['status'], 'new')
self.assertEqual(checks['Alice 2']['timeout'], 86400)
self.assertEqual(checks['Alice 2']['grace'], 3600)
self.assertEqual(checks["Alice 2"]["ping_url"], self.a2.url())
self.assertEqual(checks['Alice 2']['status'], 'up')
# last_ping, n_pings and pause_url
self.assertEqual(checks['Alice 1']['n_pings'], self.a1.n_pings)
self.assertEqual(checks['Alice 1']['pause_url'],
self.a1.to_dict()['pause_url'])
self.assertEqual(checks['Alice 1']['last_ping'],
self.a1.to_dict()['last_ping'])
self.assertEqual(checks['Alice 2']['n_pings'], self.a2.n_pings)
self.assertEqual(checks['Alice 2']['pause_url'],
self.a2.to_dict()['pause_url'])
self.assertEqual(checks['Alice 2']['last_ping'],
self.a2.to_dict()['last_ping'])
def test_it_shows_only_users_checks(self):
bobs_check = Check(user=self.bob, name="Bob 1")
bobs_check.save()
r = self.get()
data = r.json()
self.assertEqual(len(data["checks"]), 2)
for check in data["checks"]:
self.assertNotEqual(check["name"], "Bob 1")
# Test that it accepts an api_key in the request
def test_that_it_accepts_api_key_with_the_request(self):
payload = json.dumps({"api_key": "abc"})
r = self.client.generic(
"GET", "/api/v1/checks/", payload, content_type="application/json")
self.assertEqual(r.status_code, 200)
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import time
import torch
from torch.utils.data import DataLoader
from habitat import logger
from habitat_baselines.common.base_il_trainer import BaseILTrainer
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.common.tensorboard_utils import TensorboardWriter
from habitat_baselines.il.data.eqa_cnn_pretrain_data import (
EQACNNPretrainDataset,
)
from habitat_baselines.il.models.models import MultitaskCNN
from habitat_baselines.utils.visualizations.utils import (
save_depth_results,
save_rgb_results,
save_seg_results,
)
@baseline_registry.register_trainer(name="eqa-cnn-pretrain")
class EQACNNPretrainTrainer(BaseILTrainer):
r"""Trainer class for Encoder-Decoder for Feature Extraction
    used in EmbodiedQA (Das et al., CVPR 2018)
Paper: https://embodiedqa.org/paper.pdf.
"""
supported_tasks = ["EQA-v0"]
def __init__(self, config=None):
super().__init__(config)
self.device = (
torch.device("cuda", self.config.TORCH_GPU_ID)
if torch.cuda.is_available()
else torch.device("cpu")
)
if config is not None:
logger.info(f"config: {config}")
def _make_results_dir(self):
r"""Makes directory for saving eqa-cnn-pretrain eval results."""
        for result_type in ["rgb", "seg", "depth"]:
            dir_name = self.config.RESULTS_DIR.format(split="val", type=result_type)
if not os.path.isdir(dir_name):
os.makedirs(dir_name)
def _save_results(
self,
gt_rgb: torch.Tensor,
pred_rgb: torch.Tensor,
gt_seg: torch.Tensor,
pred_seg: torch.Tensor,
gt_depth: torch.Tensor,
pred_depth: torch.Tensor,
path: str,
) -> None:
r"""For saving EQA-CNN-Pretrain reconstruction results.
Args:
gt_rgb: rgb ground truth
            pred_rgb: autoencoder output rgb reconstruction
gt_seg: segmentation ground truth
pred_seg: segmentation output
gt_depth: depth map ground truth
pred_depth: depth map output
path: to write file
"""
save_rgb_results(gt_rgb[0], pred_rgb[0], path)
save_seg_results(gt_seg[0], pred_seg[0], path)
save_depth_results(gt_depth[0], pred_depth[0], path)
def train(self) -> None:
r"""Main method for pre-training Encoder-Decoder Feature Extractor for EQA.
Returns:
None
"""
config = self.config
eqa_cnn_pretrain_dataset = EQACNNPretrainDataset(config)
train_loader = DataLoader(
eqa_cnn_pretrain_dataset,
batch_size=config.IL.EQACNNPretrain.batch_size,
shuffle=True,
)
logger.info(
"[ train_loader has {} samples ]".format(
len(eqa_cnn_pretrain_dataset)
)
)
model = MultitaskCNN()
model.train().to(self.device)
optim = torch.optim.Adam(
filter(lambda p: p.requires_grad, model.parameters()),
lr=float(config.IL.EQACNNPretrain.lr),
)
depth_loss = torch.nn.SmoothL1Loss()
ae_loss = torch.nn.SmoothL1Loss()
seg_loss = torch.nn.CrossEntropyLoss()
epoch, t = 1, 0
with TensorboardWriter(
config.TENSORBOARD_DIR, flush_secs=self.flush_secs
) as writer:
while epoch <= config.IL.EQACNNPretrain.max_epochs:
start_time = time.time()
avg_loss = 0.0
for batch in train_loader:
t += 1
idx, gt_rgb, gt_depth, gt_seg = batch
optim.zero_grad()
gt_rgb = gt_rgb.to(self.device)
gt_depth = gt_depth.to(self.device)
gt_seg = gt_seg.to(self.device)
pred_seg, pred_depth, pred_rgb = model(gt_rgb)
l1 = seg_loss(pred_seg, gt_seg.long())
l2 = ae_loss(pred_rgb, gt_rgb)
l3 = depth_loss(pred_depth, gt_depth)
loss = l1 + (10 * l2) + (10 * l3)
avg_loss += loss.item()
if t % config.LOG_INTERVAL == 0:
logger.info(
"[ Epoch: {}; iter: {}; loss: {:.3f} ]".format(
epoch, t, loss.item()
)
)
writer.add_scalar("total_loss", loss, t)
writer.add_scalars(
"individual_losses",
{"seg_loss": l1, "ae_loss": l2, "depth_loss": l3},
t,
)
loss.backward()
optim.step()
end_time = time.time()
time_taken = "{:.1f}".format((end_time - start_time) / 60)
avg_loss = avg_loss / len(train_loader)
logger.info(
"[ Epoch {} completed. Time taken: {} minutes. ]".format(
epoch, time_taken
)
)
logger.info("[ Average loss: {:.3f} ]".format(avg_loss))
print("-----------------------------------------")
if epoch % config.CHECKPOINT_INTERVAL == 0:
self.save_checkpoint(
model.state_dict(), "epoch_{}.ckpt".format(epoch)
)
epoch += 1
def _eval_checkpoint(
self,
checkpoint_path: str,
writer: TensorboardWriter,
checkpoint_index: int = 0,
) -> None:
r"""Evaluates a single checkpoint.
Args:
checkpoint_path: path of checkpoint
writer: tensorboard writer object for logging to tensorboard
checkpoint_index: index of cur checkpoint for logging
Returns:
None
"""
config = self.config
config.defrost()
config.TASK_CONFIG.DATASET.SPLIT = self.config.EVAL.SPLIT
config.freeze()
eqa_cnn_pretrain_dataset = EQACNNPretrainDataset(config, mode="val")
eval_loader = DataLoader(
eqa_cnn_pretrain_dataset,
batch_size=config.IL.EQACNNPretrain.batch_size,
shuffle=False,
)
logger.info(
"[ eval_loader has {} samples ]".format(
len(eqa_cnn_pretrain_dataset)
)
)
model = MultitaskCNN()
state_dict = torch.load(checkpoint_path)
model.load_state_dict(state_dict)
model.to(self.device).eval()
depth_loss = torch.nn.SmoothL1Loss()
ae_loss = torch.nn.SmoothL1Loss()
seg_loss = torch.nn.CrossEntropyLoss()
t = 0
avg_loss = 0.0
avg_l1 = 0.0
avg_l2 = 0.0
avg_l3 = 0.0
with torch.no_grad():
for batch in eval_loader:
t += 1
idx, gt_rgb, gt_depth, gt_seg = batch
gt_rgb = gt_rgb.to(self.device)
gt_depth = gt_depth.to(self.device)
gt_seg = gt_seg.to(self.device)
pred_seg, pred_depth, pred_rgb = model(gt_rgb)
l1 = seg_loss(pred_seg, gt_seg.long())
l2 = ae_loss(pred_rgb, gt_rgb)
l3 = depth_loss(pred_depth, gt_depth)
loss = l1 + (10 * l2) + (10 * l3)
avg_loss += loss.item()
avg_l1 += l1.item()
avg_l2 += l2.item()
avg_l3 += l3.item()
if t % config.LOG_INTERVAL == 0:
logger.info(
"[ Iter: {}; loss: {:.3f} ]".format(t, loss.item()),
)
if config.EVAL_SAVE_RESULTS:
if t % config.EVAL_SAVE_RESULTS_INTERVAL == 0:
result_id = "ckpt_{}_{}".format(
checkpoint_index, idx[0].item()
)
result_path = os.path.join(
self.config.RESULTS_DIR, result_id
)
self._save_results(
gt_rgb,
pred_rgb,
gt_seg,
pred_seg,
gt_depth,
pred_depth,
result_path,
)
avg_loss /= len(eval_loader)
avg_l1 /= len(eval_loader)
avg_l2 /= len(eval_loader)
avg_l3 /= len(eval_loader)
writer.add_scalar("avg val total loss", avg_loss, checkpoint_index)
writer.add_scalars(
"avg val individual_losses",
{"seg_loss": avg_l1, "ae_loss": avg_l2, "depth_loss": avg_l3},
checkpoint_index,
)
logger.info("[ Average loss: {:.3f} ]".format(avg_loss))
logger.info("[ Average seg loss: {:.3f} ]".format(avg_l1))
logger.info("[ Average autoencoder loss: {:.4f} ]".format(avg_l2))
logger.info("[ Average depthloss: {:.4f} ]".format(avg_l3))
|
#!/usr/bin/env python3
import re
# [^\W\d_] - will match any lower or upper case alpha character. No digits or underscore.
months_de = {
"Januar": 1,
"Februar": 2,
"März": 3,
"April": 4,
"Mai": 5,
"Juni": 6,
"Juli": 7,
"August": 8,
"September": 9,
"Oktober": 10,
"November": 11,
"Dezember": 12,
}
months_fr = {
"janvier": 1,
"fèvrier": 2,
"février": 2,
"mars": 3,
"avril": 4,
"mai": 5,
"juin": 6,
"juillet": 7,
"aout": 8,
"août": 8,
"septembre": 9,
"octobre": 10,
"novembre": 11,
"decembre": 12,
"décembre": 12,
}
months_it = {
"gennaio": 1,
"febbraio": 2,
"marzo": 3,
"aprile": 4,
"maggio": 5,
"giugno": 6,
"luglio": 7,
"agosto": 8,
"settembre": 9,
"ottobre": 10,
"novembre": 11,
"dicembre": 12,
}
months_all = {}
months_all.update(months_de)
months_all.update(months_fr)
months_all.update(months_it)
def parse_date(d):
assert d, "Content is empty"
d = d.replace("ä", "ä")
d = d.replace(" ", " ")
d = d.strip()
# print(d)
    # This could be done more nicely using an assignment expression, but that
    # requires Python 3.8 (released October 14th, 2019), and many distros
    # still default to Python 3.7 or earlier.
mo = re.search(r'^(\d+)\. ([^\W\d_]+) (20\d\d)\s*(?:,?\s+|,\s*)(\d\d?)(?:[:\.](\d\d))? +Uhr$', d)
if mo:
# 20. März 2020 15.00 Uhr
# 21. März 2020, 10 Uhr
# 21. März 2020, 11:00 Uhr
# 21.03.2020, 15h30
# 21. März 2020, 8.00 Uhr
# 21. März 2020, 18.15 Uhr
# 21. März 2020, 18.15 Uhr
# 21. März 2020, 14.00 Uhr
# 23. März 2020, 15 Uhr
# 18. April 2020,16.00 Uhr
return f"{int(mo[3]):4d}-{months_all[mo[2]]:02d}-{int(mo[1]):02d}T{int(mo[4]):02d}:{int(mo[5]) if mo[5] else 0:02d}"
mo = re.search(r'^(\d+)\.\s*([^\W\d_]+)\s*(20\d\d)$', d)
if mo:
# 21. März 2020
# 1.Mai 2020
return f"{int(mo[3]):4d}-{months_all[mo[2]]:02d}-{int(mo[1]):02d}T"
mo = re.search(r'^(\d+)\.(\d+)\.(\d\d)$', d)
if mo:
# 21.3.20
assert 20 <= int(mo[3]) <= 21
assert 1 <= int(mo[2]) <= 12
return f"20{int(mo[3]):02d}-{int(mo[2]):02d}-{int(mo[1]):02d}T"
mo = re.search(r'^(\d+)[\.-](\d+)[\.-](20\d\d)[,:]?\s*(\d\d?)[h:;\.](\d\d)(?:h| Uhr)?', d)
if mo:
# 20.3.2020, 16.30
# 21.03.2020, 15h30
# 23.03.2020, 12:00
# 23.03.2020 12:00
# 08.04.2020: 09.30 Uhr
# 07.04.2020 15.00h
# 30.04.2020,13.30 Uhr
# 05-05-2020 00:00
# 07.05.2020, 00;00 Uhr
assert 2020 <= int(mo[3]) <= 2021
assert 1 <= int(mo[2]) <= 12
return f"{int(mo[3]):4d}-{int(mo[2]):02d}-{int(mo[1]):02d}T{int(mo[4]):02d}:{int(mo[5]):02d}"
mo = re.search(r'^(\d+)\.(\d+)\.(\d\d),?\s*(\d\d?)[h:\.](\d\d) ?h', d)
if mo:
# 31.03.20, 08.00 h
assert 1 <= int(mo[1]) <= 31
assert 1 <= int(mo[2]) <= 12
assert 20 <= int(mo[3]) <= 21
assert 1 <= int(mo[4]) <= 23
assert 0 <= int(mo[5]) <= 59
return f"{2000 + int(mo[3]):4d}-{int(mo[2]):02d}-{int(mo[1]):02d}T{int(mo[4]):02d}:{int(mo[5]):02d}"
mo = re.search(r'^(\d+)\.(\d+)\.(20\d\d)$', d)
if mo:
# 20.03.2020
assert 2020 <= int(mo[3]) <= 2021
assert 1 <= int(mo[2]) <= 12
return f"{int(mo[3]):4d}-{int(mo[2]):02d}-{int(mo[1]):02d}T"
mo = re.search(r'^(\d+)[a-z]* ([^\W\d_]+) (20\d\d) \((\d+)h\)$', d)
if mo:
# 21 mars 2020 (18h)
# 1er avril 2020 (16h)
assert 2020 <= int(mo[3]) <= 2021
assert 1 <= int(mo[4]) <= 23
return f"{int(mo[3]):4d}-{months_all[mo[2]]:02d}-{int(mo[1]):02d}T{int(mo[4]):02d}:00"
mo = re.search(r'^(\d+)\s*([^\W\d_]+)\s*(20\d\d)$', d)
if mo:
# 21 mars 2020
# 6avril2020 # From pdftotext with NE statistics.
assert 2020 <= int(mo[3]) <= 2021
return f"{int(mo[3]):4d}-{months_all[mo[2]]:02d}-{int(mo[1]):02d}T"
mo = re.search(r'^(\d+)\.(\d+) à (\d+)h(\d\d)?$', d)
if mo:
# 20.03 à 8h00
# 23.03 à 12h
assert 1 <= int(mo[2]) <= 12
assert 1 <= int(mo[3]) <= 23
if mo[4]:
assert 0 <= int(mo[4]) <= 59
return f"2020-{int(mo[2]):02d}-{int(mo[1]):02d}T{int(mo[3]):02d}:{int(mo[4]) if mo[4] else 0:02d}"
mo = re.search(r'^(\d+) ([^\W\d_]+) (202\d), ore (\d+)\.(\d\d)$', d)
if mo:
# 21 marzo 2020, ore 8.00
return f"{int(mo[3]):4d}-{months_all[mo[2]]:02d}-{int(mo[1]):02d}T{int(mo[4]):02d}:{int(mo[5]):02d}"
mo = re.search(r'^(\d\d)\.(\d\d)\.(202\d),? ore (\d+):(\d\d)$', d)
if mo:
# 27.03.2020 ore 08:00
assert 1 <= int(mo[1]) <= 31
assert 1 <= int(mo[2]) <= 12
assert 2020 <= int(mo[3]) <= 2021
assert 0 <= int(mo[4]) <= 23
assert 0 <= int(mo[5]) <= 59
return f"{int(mo[3]):4d}-{int(mo[2]):02d}-{int(mo[1]):02d}T{int(mo[4]):02d}:{int(mo[5]):02d}"
mo = re.search(r'^(\d\d\d\d-\d\d-\d\d)$', d)
if mo:
# 2020-03-23
return f"{mo[1]}T"
mo = re.search(r'^(\d+)\.(\d+)\.? / (\d+)h$', d)
if mo:
assert 1 <= int(mo[1]) <= 31
assert 1 <= int(mo[2]) <= 12
assert 1 <= int(mo[3]) <= 23
# 24.3. / 10h
return f"2020-{int(mo[2]):02d}-{int(mo[1]):02d}T{int(mo[3]):02d}:00"
mo = re.search(r'^(\d\d\d\d-\d\d-\d\d)[ T](\d\d:\d\d)(:\d\d)?$', d)
if mo:
# 2020-03-23T15:00:00
# 2020-03-23 15:00:00
# 2020-03-23 15:00
return f"{mo[1]}T{mo[2]}"
assert False, f"Unknown date/time format: {d}"
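# Examples (input formats taken from the comments above):
#
#   parse_date("21. März 2020, 11:00 Uhr")  # -> "2020-03-21T11:00"
#   parse_date("2020-03-23")                # -> "2020-03-23T"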
|
import pandas as pd
import numpy as np
from os import path
from .table_check import table_check
def load_dataCSV(DataSheet, PeakSheet):
"""Loads and validates the DataFile and PeakFile from csv files.
Parameters
----------
DataSheet : string
The name of the csv file (.csv file) that contains the 'Data'. Note, the data sheet must contain an 'Idx' and 'SampleID' column. e.g. 'datasheetxxx1.csv' or '/homedir/datasheetxxx1.csv'
PeakSheet : string
The name of the csv file (.csv file) that contains the 'Peak'. Note, the peak sheet must contain an 'Idx', 'Name', and 'Label' column. e.g. 'peaksheetxxx1.csv' or '/homedir/peaksheetxxx1.csv'
Returns
-------
DataTable: DataFrame
Data sheet from the csv file.
PeakTable: DataFrame
Peak sheet from the csv file.
"""
# Check DataSheet exists and is a csv file
if path.isfile(DataSheet) is False:
raise ValueError("{} does not exist.".format(DataSheet))
if not DataSheet.endswith(".csv"):
raise ValueError("{} should be a .csv file.".format(DataSheet))
# Check PeakSheet exists and is a csv file
if path.isfile(PeakSheet) is False:
raise ValueError("{} does not exist.".format(PeakSheet))
if not PeakSheet.endswith(".csv"):
raise ValueError("{} should be a .csv file.".format(PeakSheet))
# LOAD PEAK DATA
print("Loadings PeakFile: {}".format(PeakSheet))
PeakTable = pd.read_csv(PeakSheet)
# LOAD DATA TABLE
print("Loadings DataFile: {}".format(DataSheet))
DataTable = pd.read_csv(DataSheet)
# Replace with nans
DataTable = DataTable.replace(-99, np.nan)
DataTable = DataTable.replace(".", np.nan)
DataTable = DataTable.replace(" ", np.nan)
# Error checks
table_check(DataTable, PeakTable, print_statement=True)
# Make the Idx column start from 1
DataTable.index = np.arange(1, len(DataTable) + 1)
PeakTable.index = np.arange(1, len(PeakTable) + 1)
print("TOTAL SAMPLES: {} TOTAL PEAKS: {}".format(len(DataTable), len(PeakTable)))
print("Done!")
return DataTable, PeakTable
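# Example usage (file names are illustrative; both CSVs must provide the
# columns documented in the docstring above):
#     DataTable, PeakTable = load_dataCSV("datasheetxxx1.csv", "peaksheetxxx1.csv")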
|
# Import of the relevant tools
import time
import numpy as np
import theano
import theano.tensor as T
from theano import pp, config
from plotly.tools import FigureFactory as FF
import plotly.graph_objs as go
from ..io.read_vtk import ReadVTK
from ..data_attachment.measures import Measures
from ..data_attachment.varifolds import Varifolds
from ..math_utils.kernels import _squared_distances, _gaussian_kernel
# a TheanoShapes manifold will be created from a regular Curve/Surface,
# with information about connectivity and number of points.
# Basically, a TheanoShapes is an efficient implementation of
# a shape orbit.
from .theano_hamiltonianclouds import TheanoHamiltonianClouds
from .shapes_manifold import ShapesManifold
class TheanoShapes(ShapesManifold, TheanoHamiltonianClouds) :
"""
Combines the Hamiltonian dynamics with the data attachment + io methods
of the ShapesManifold class.
This is the class which should be used to handle curves and surfaces
with full momentum.
"""
def __init__(self, Q0,
kernel = ('gaussian', 1),
data_attachment = ('measure-kernel', ('gaussian', 1)),
weights = (0.01, 1), # gamma_V, gamma_W
dt = 0.1,
compatibility_compile = False,
plot_interactive = False,
plot_file = True,
foldername = 'results/'
) :
"""
Creates a TheanoCurves/Surfaces manifold.
Compilation takes place here.
"""
TheanoHamiltonianClouds.__init__(self, kernel = kernel,
weights = weights,
dt = dt,
plot_interactive = plot_interactive,
plot_file = plot_file,
foldername = foldername)
ShapesManifold.__init__(self, Q0,
data_attachment)
#===============================================================
# Before compiling, we assign types to the theano variables
q0 = T.matrix('q0')
p0 = T.matrix('p0')
s0 = T.matrix('s0')
xt_x = T.matrix('xt_x')
xt_mu = T.vector('xt_mu')
xt_n = T.matrix('xt_n')
# Compilation. Depending on settings specified in the ~/.theanorc file or explicitly given
# at execution time, this will produce CPU or GPU code.
if not compatibility_compile : # With theano, it's better to let the compiler handle the whole forward-backward pipeline
print('Compiling the shooting_cost routine...')
time1 = time.time()
if self.embedding_type == 'measure' :
self.opt_shooting_cost = theano.function([q0, p0, xt_x, xt_mu], # input
self._opt_shooting_cost(q0, p0, xt_x, xt_mu), # output
allow_input_downcast=True) # GPU = float32 only, whereas numpy uses
# float64 : we allow silent conversion
elif self.embedding_type == 'varifold' :
self.opt_shooting_cost = theano.function([q0, p0, xt_x, xt_mu, xt_n], # input
self._opt_shooting_cost(q0, p0, xt_x, xt_mu, xt_n), # output
allow_input_downcast=True) # GPU = float32 only, whereas numpy uses
# float64 : we allow silent conversion
time2 = time.time()
print('Compiled in : ', '{0:.2f}'.format(time2 - time1), 's')
# First, the hamiltonian_trajectory routine, that shall be used in the visualization
print('Compiling the hamiltonian_trajectory visualization routine...')
time1 = time.time()
self.opt_hamiltonian_trajectory = theano.function([q0,p0], # input
self._HamiltonianTrajectory(q0, p0), # output
allow_input_downcast=True) # GPU = float32 only, whereas numpy uses
# float64 : we allow silent conversion
time2 = time.time()
print('Compiled in : ', '{0:.2f}'.format(time2 - time1), 's')
# The hamiltonian_trajectory routine, that shall be used in the visualization of the grid
print('Compiling the hamiltonian_trajectory_carrying visualization routine...')
time1 = time.time()
self.opt_hamiltonian_trajectory_carrying = theano.function([q0,p0,s0], # input
self._HamiltonianTrajectoryCarrying(q0, p0, s0), # output
allow_input_downcast=True) # GPU = float32 only, whereas numpy uses
# float64 : we allow silent conversion
time2 = time.time()
print('Compiled in : ', '{0:.2f}'.format(time2 - time1), 's')
def hamiltonian_trajectory(self, q0,p0) :
[qt, pt] = self.opt_hamiltonian_trajectory(q0,p0)
return [np.append([q0], qt, axis=0), np.append([p0], pt, axis=0)]
def hamiltonian_trajectory_carrying(self, q0,p0,s0) :
[qt, pt, st] = self.opt_hamiltonian_trajectory_carrying(q0,p0,s0)
return [np.append([q0], qt, axis=0), np.append([p0], pt, axis=0), np.append([s0], st, axis=0)]
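# Hypothetical usage sketch (Q0 stands for a Curve/Surface object as expected
# by ShapesManifold; all argument values below are illustrative):
#     manifold = TheanoShapes(Q0, kernel=('gaussian', 1), dt=0.1)
#     qt, pt = manifold.hamiltonian_trajectory(q0, p0)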
|
from django import forms
from goggles.warehouse.models import ImportJob, Profile
from goggles.warehouse.tasks import schedule_import_conversation
class ProfileForm(forms.ModelForm):
password = forms.CharField(label='Password', max_length=255)
update_session_info = forms.BooleanField(
label='Update login session information?')
class Meta:
model = Profile
exclude = (
'user',
'status',
'session_name',
'session_value',
'session_expires',
'expires_on',
)
class ImportJobForm(forms.ModelForm):
class Meta:
model = ImportJob
exclude = ('user', 'status')
class ConversationActionForm(forms.Form):
action = forms.ChoiceField(choices=[
('import_job', 'Schedule an Import Job for this conversation')
])
def handle_action(self, conversation):
handler = getattr(self, 'do_%(action)s' % self.cleaned_data)
return handler(conversation)
def do_import_job(self, conversation):
schedule_import_conversation.delay(conversation.pk)
return 'Starting an import job for %s' % (conversation.name,)
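# Hypothetical view-side usage (the request and conversation objects are
# illustrative):
#     form = ConversationActionForm(request.POST)
#     if form.is_valid():
#         message = form.handle_action(conversation)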
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class CustomLocationsConfiguration(Configuration):
"""Configuration for CustomLocations.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(CustomLocationsConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2020-07-15-privatepreview"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-extendedlocation/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
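# Hypothetical instantiation (credential and subscription id are illustrative;
# any AsyncTokenCredential implementation can be passed):
#     from azure.identity.aio import DefaultAzureCredential
#     config = CustomLocationsConfiguration(DefaultAzureCredential(), "<subscription-id>")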
|
# Copyright (C) 2013 by Aivars Kalvans <aivars.kalvans@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import re
import random
import base64
from scrapy import log
class RandomProxy(object):
def __init__(self, settings):
self.proxy_list = settings.get('PROXY_LIST')
fin = open(self.proxy_list)
self.proxies = {}
for line in fin.readlines():
parts = re.match(r'(\w+://)(\w+:\w+@)?(.+)', line)
# Cut trailing @
if parts.group(2):
user_pass = parts.group(2)[:-1]
else:
user_pass = ''
self.proxies[parts.group(1) + parts.group(3)] = user_pass
fin.close()
@classmethod
def from_crawler(cls, crawler):
return cls(crawler.settings)
def process_request(self, request, spider):
# Don't overwrite with a random one (server-side state for IP)
if 'proxy' in request.meta:
return
proxy_address = random.choice(list(self.proxies.keys()))
proxy_user_pass = self.proxies[proxy_address]
request.meta['proxy'] = proxy_address
if proxy_user_pass:
basic_auth = 'Basic ' + base64.b64encode(proxy_user_pass.encode()).decode()
request.headers['Proxy-Authorization'] = basic_auth
def process_exception(self, request, exception, spider):
proxy = request.meta['proxy']
log.msg('Removing failed proxy <%s>, %d proxies left' % (
proxy, len(self.proxies)))
try:
del self.proxies[proxy]
except KeyError:
pass
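# Hypothetical Scrapy settings enabling this middleware (paths and priority
# are illustrative; each PROXY_LIST line must match the regex in __init__,
# e.g. "http://user:pass@127.0.0.1:8080" or "http://127.0.0.1:8080"):
#     PROXY_LIST = '/path/to/proxy/list.txt'
#     DOWNLOADER_MIDDLEWARES = {
#         'myproject.middlewares.RandomProxy': 100,
#     }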
|
r"""
Interface to Mathematica
The Mathematica interface will only work if Mathematica is installed on your
computer with a command line interface that runs when you give the ``math``
command. The interface lets you send certain Sage objects to Mathematica,
run Mathematica functions, import certain Mathematica expressions to Sage,
or any combination of the above.
To send a Sage object ``sobj`` to Mathematica, call ``mathematica(sobj)``.
This exports the Sage object to Mathematica and returns a new Sage object
wrapping the Mathematica expression/variable, so that you can use the
Mathematica variable from within Sage. You can then call Mathematica
functions on the new object; for example::
sage: mobj = mathematica(x^2-1) # optional - mathematica
sage: mobj.Factor() # optional - mathematica
(-1 + x)*(1 + x)
In the above example the factorization is done using Mathematica's
``Factor[]`` function.
To see Mathematica's output you can simply print the Mathematica wrapper
object. However if you want to import Mathematica's output back to Sage,
call the Mathematica wrapper object's ``sage()`` method. This method returns
a native Sage object::
sage: mobj = mathematica(x^2-1) # optional - mathematica
sage: mobj2 = mobj.Factor(); mobj2 # optional - mathematica
(-1 + x)*(1 + x)
sage: mobj2.parent() # optional - mathematica
Mathematica
sage: sobj = mobj2.sage(); sobj # optional - mathematica
(x + 1)*(x - 1)
sage: sobj.parent() # optional - mathematica
Symbolic Ring
If you want to run a Mathematica function and don't already have the input
in the form of a Sage object, then it might be simpler to input a string to
``mathematica(expr)``. This string will be evaluated as if you had typed it
into Mathematica::
sage: mathematica('Factor[x^2-1]') # optional - mathematica
(-1 + x)*(1 + x)
sage: mathematica('Range[3]') # optional - mathematica
{1, 2, 3}
If you don't want Sage to go to the trouble of creating a wrapper for the
Mathematica expression, then you can call ``mathematica.eval(expr)``, which
returns the result as a Mathematica AsciiArtString formatted string. If you
want the result to be a string formatted like Mathematica's InputForm, call
``repr(mobj)`` on the wrapper object ``mobj``. If you want a string
formatted in Sage style, call ``mobj._sage_repr()``::
sage: mathematica.eval('x^2 - 1') # optional - mathematica
2
-1 + x
sage: repr(mathematica('Range[3]')) # optional - mathematica
'{1, 2, 3}'
sage: mathematica('Range[3]')._sage_repr() # optional - mathematica
'[1, 2, 3]'
Finally, if you just want to use a Mathematica command line from within
Sage, the function ``mathematica_console()`` dumps you into an interactive
command-line Mathematica session. This is an enhanced version of the usual
Mathematica command-line, in that it provides readline editing and history
(the usual one doesn't!).
Tutorial
--------
We follow some of the tutorial from
http://library.wolfram.com/conferences/devconf99/withoff/Basic1.html/.
For any of this to work you must buy and install the Mathematica
program, and it must be available as the command
``math`` in your PATH.
Syntax
~~~~~~
Now make 1 and add it to itself. The result is a Mathematica
object.
::
sage: m = mathematica
sage: a = m(1) + m(1); a # optional - mathematica
2
sage: a.parent() # optional - mathematica
Mathematica
sage: m('1+1') # optional - mathematica
2
sage: m(3)**m(50) # optional - mathematica
717897987691852588770249
The following is equivalent to ``Plus[2, 3]`` in
Mathematica::
sage: m = mathematica
sage: m(2).Plus(m(3)) # optional - mathematica
5
We can also compute `7(2+3)`.
::
sage: m(7).Times(m(2).Plus(m(3))) # optional - mathematica
35
sage: m('7(2+3)') # optional - mathematica
35
Some typical input
~~~~~~~~~~~~~~~~~~
We solve an equation and a system of two equations::
sage: eqn = mathematica('3x + 5 == 14') # optional - mathematica
sage: eqn # optional - mathematica
5 + 3*x == 14
sage: eqn.Solve('x') # optional - mathematica
{{x -> 3}}
sage: sys = mathematica('{x^2 - 3y == 3, 2x - y == 1}') # optional - mathematica
sage: print(sys) # optional - mathematica
2
{x - 3 y == 3, 2 x - y == 1}
sage: sys.Solve('{x, y}') # optional - mathematica
{{x -> 0, y -> -1}, {x -> 6, y -> 11}}
Assignments and definitions
~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you assign the mathematica `5` to a variable `c`
in Sage, this does not affect the `c` in Mathematica.
::
sage: c = m(5) # optional - mathematica
sage: print(m('b + c x')) # optional - mathematica
b + c x
sage: print(m('b') + c*m('x')) # optional - mathematica
b + 5 x
The Sage interface changes Sage lists into Mathematica lists::
sage: m = mathematica
sage: eq1 = m('x^2 - 3y == 3') # optional - mathematica
sage: eq2 = m('2x - y == 1') # optional - mathematica
sage: v = m([eq1, eq2]); v # optional - mathematica
{x^2 - 3*y == 3, 2*x - y == 1}
sage: v.Solve(['x', 'y']) # optional - mathematica
{{x -> 0, y -> -1}, {x -> 6, y -> 11}}
Function definitions
~~~~~~~~~~~~~~~~~~~~
Define mathematica functions by simply sending the definition to
the interpreter.
::
sage: m = mathematica
sage: _ = mathematica('f[p_] = p^2'); # optional - mathematica
sage: m('f[9]') # optional - mathematica
81
Numerical Calculations
~~~~~~~~~~~~~~~~~~~~~~
We find the `x` such that `e^x - 3x = 0`.
::
sage: eqn = mathematica('Exp[x] - 3x == 0') # optional - mathematica
sage: eqn.FindRoot(['x', 2]) # optional - mathematica
{x -> 1.512134551657842}
Note that this agrees with what the PARI interpreter gp produces::
sage: gp('solve(x=1,2,exp(x)-3*x)')
1.512134551657842473896739678 # 32-bit
1.5121345516578424738967396780720387046 # 64-bit
Next we find the minimum of a polynomial using the two different
ways of accessing Mathematica::
sage: mathematica('FindMinimum[x^3 - 6x^2 + 11x - 5, {x,3}]') # optional - mathematica
{0.6150998205402516, {x -> 2.5773502699629733}}
sage: f = mathematica('x^3 - 6x^2 + 11x - 5') # optional - mathematica
sage: f.FindMinimum(['x', 3]) # optional - mathematica
{0.6150998205402516, {x -> 2.5773502699629733}}
Polynomial and Integer Factorization
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We factor a polynomial of degree 200 over the integers.
::
sage: R.<x> = PolynomialRing(ZZ)
sage: f = (x**100+17*x+5)*(x**100-5*x+20)
sage: f
x^200 + 12*x^101 + 25*x^100 - 85*x^2 + 315*x + 100
sage: g = mathematica(str(f)) # optional - mathematica
sage: print(g) # optional - mathematica
2 100 101 200
100 + 315 x - 85 x + 25 x + 12 x + x
sage: g # optional - mathematica
100 + 315*x - 85*x^2 + 25*x^100 + 12*x^101 + x^200
sage: print(g.Factor()) # optional - mathematica
100 100
(20 - 5 x + x ) (5 + 17 x + x )
We can also factor a multivariate polynomial::
sage: f = mathematica('x^6 + (-y - 2)*x^5 + (y^3 + 2*y)*x^4 - y^4*x^3') # optional - mathematica
sage: print(f.Factor()) # optional - mathematica
3 2 3
x (x - y) (-2 x + x + y )
We factor an integer::
sage: n = mathematica(2434500) # optional - mathematica
sage: n.FactorInteger() # optional - mathematica
{{2, 2}, {3, 2}, {5, 3}, {541, 1}}
sage: n = mathematica(2434500) # optional - mathematica
sage: F = n.FactorInteger(); F # optional - mathematica
{{2, 2}, {3, 2}, {5, 3}, {541, 1}}
sage: F[1] # optional - mathematica
{2, 2}
sage: F[4] # optional - mathematica
{541, 1}
Mathematica's ECM package is no longer available.
Long Input
----------
The Mathematica interface reads in even very long input (using
files) in a robust manner.
::
sage: t = '"%s"'%10^10000 # ten thousand character string.
sage: a = mathematica(t) # optional - mathematica
sage: a = mathematica.eval(t) # optional - mathematica
Loading and saving
------------------
Mathematica has an excellent ``InputForm`` function,
which makes saving and loading Mathematica objects possible. The
first examples test saving and loading to strings.
::
sage: x = mathematica(pi/2) # optional - mathematica
sage: print(x) # optional - mathematica
Pi
--
2
sage: loads(dumps(x)) == x # optional - mathematica
True
sage: n = x.N(50) # optional - mathematica
sage: print(n) # optional - mathematica
1.5707963267948966192313216916397514420985846996876
sage: loads(dumps(n)) == n # optional - mathematica
True
Complicated translations
------------------------
The ``mobj.sage()`` method tries to convert a Mathematica object to a Sage
object. In many cases, it will just work. In particular, it should be able to
convert expressions entirely consisting of:
- numbers, i.e. integers, floats, complex numbers;
- functions and named constants also present in Sage, where:
- Sage knows how to translate the function or constant's name from
Mathematica's, or
- the Sage name for the function or constant is trivially related to
Mathematica's;
- symbolic variables whose names don't pathologically overlap with
objects already defined in Sage.
This method will not work when Mathematica's output includes:
- strings;
- functions unknown to Sage;
- Mathematica functions with different parameters/parameter order to
the Sage equivalent.
If you want to convert more complicated Mathematica expressions, you can
instead call ``mobj._sage_()`` and supply a translation dictionary::
sage: m = mathematica('NewFn[x]') # optional - mathematica
sage: m._sage_(locals={'NewFn': sin}) # optional - mathematica
sin(x)
For more details, see the documentation for ``._sage_()``.
OTHER Examples::
sage: def math_bessel_K(nu,x):
....: return mathematica(nu).BesselK(x).N(20)
sage: math_bessel_K(2,I) # optional - mathematica
-2.59288617549119697817 + 0.18048997206696202663*I
::
sage: slist = [[1, 2], 3., 4 + I]
sage: mlist = mathematica(slist); mlist # optional - mathematica
{{1, 2}, 3., 4 + I}
sage: slist2 = list(mlist); slist2 # optional - mathematica
[{1, 2}, 3., 4 + I]
sage: slist2[0] # optional - mathematica
{1, 2}
sage: slist2[0].parent() # optional - mathematica
Mathematica
sage: slist3 = mlist.sage(); slist3 # optional - mathematica
[[1, 2], 3.00000000000000, I + 4]
::
sage: mathematica('10.^80') # optional - mathematica
1.*^80
sage: mathematica('10.^80').sage() # optional - mathematica
1.00000000000000e80
AUTHORS:
- William Stein (2005): first version
- Doug Cutrell (2006-03-01): Instructions for use under Cygwin/Windows.
- Felix Lawrence (2009-08-21): Added support for importing Mathematica lists
and floats with exponents.
TESTS:
Check that numerical approximations via Mathematica's `N[]` function work
correctly (:trac:`18888`, :trac:`28907`)::
sage: mathematica('Pi/2').N(10) # optional -- mathematica
1.5707963268
sage: mathematica('Pi').N(10) # optional -- mathematica
3.1415926536
sage: mathematica('Pi').N(50) # optional -- mathematica
3.14159265358979323846264338327950288419716939937511
sage: str(mathematica('Pi*x^2-1/2').N()) # optional -- mathematica
2
-0.5 + 3.14159 x
Check that Mathematica's `E` exponential symbol is correctly backtranslated
as Sage's `e` (:trac:`29833`)::
sage: x = var('x')
sage: (e^x)._mathematica_().sage() # optional -- mathematica
e^x
sage: exp(x)._mathematica_().sage() # optional -- mathematica
e^x
"""
#*****************************************************************************
# Copyright (C) 2005 William Stein <wstein@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# https://www.gnu.org/licenses/
#*****************************************************************************
import os
import re
from sage.misc.cachefunc import cached_method
from sage.interfaces.expect import (Expect, ExpectElement, ExpectFunction,
FunctionElement)
from sage.interfaces.interface import AsciiArtString
from sage.interfaces.tab_completion import ExtraTabCompletion
from sage.docs.instancedoc import instancedoc
from sage.structure.richcmp import rich_to_bool
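# clean_output strips the "Out[n]=" label that Mathematica prints before a
# result, replacing it with spaces so that multi-line ASCII-art output keeps
# its alignment, and removes backslash-newline continuations.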
def clean_output(s):
if s is None:
return ''
i = s.find('Out[')
j = i + s[i:].find('=')
s = s[:i] + ' '*(j+1-i) + s[j+1:]
s = s.replace('\\\n','')
return s.strip('\n')
def _un_camel(name):
"""
Convert `CamelCase` to `camel_case`.
EXAMPLES::
sage: sage.interfaces.mathematica._un_camel('CamelCase')
'camel_case'
sage: sage.interfaces.mathematica._un_camel('EllipticE')
'elliptic_e'
sage: sage.interfaces.mathematica._un_camel('FindRoot')
'find_root'
sage: sage.interfaces.mathematica._un_camel('GCD')
'gcd'
"""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
class Mathematica(ExtraTabCompletion, Expect):
"""
Interface to the Mathematica interpreter.
"""
def __init__(self, maxread=None, script_subdirectory=None, logfile=None, server=None,
server_tmpdir=None, command=None, verbose_start=False):
r"""
TESTS:
Test that :trac:`28075` is fixed::
sage: repr(mathematica.eval("Print[1]; Print[2]; Print[3]")) # optional - mathematica
'1\n2\n3'
"""
# We use -rawterm to get a raw text interface in Mathematica 9 or later.
# This works around the following issues of Mathematica 9 or later
# (tested with Mathematica 11.0.1 for Mac OS X x86 (64-bit))
#
# 1) If TERM is unset and input is a pseudoterminal, Mathematica shows no
# prompts, so pexpect will not work.
#
# 2) If TERM is set (to dumb, lpr, vt100, or xterm), there will be
# prompts; but there is bizarre echoing behavior by Mathematica (not
# the terminal driver). For example, with TERM=dumb, many spaces and
# \r's are echoed. With TERM=vt100 or better, in addition, many escape
# sequences are printed.
#
if command is None:
command = os.getenv('SAGE_MATHEMATICA_COMMAND') or 'math -rawterm'
eval_using_file_cutoff = 1024
# Removing terminal echo using "stty -echo" is not essential but it slightly
# improves performance (system time) and eliminates races of the terminal echo
# as a possible source of error.
if server:
command = 'stty -echo; {}'.format(command)
else:
command = 'sh -c "stty -echo; {}"'.format(command)
Expect.__init__(self,
name='mathematica',
terminal_echo=False,
command=command,
prompt=r'In\[[0-9]+\]:= ',
server=server,
server_tmpdir=server_tmpdir,
script_subdirectory=script_subdirectory,
verbose_start=verbose_start,
logfile=logfile,
eval_using_file_cutoff=eval_using_file_cutoff)
def _read_in_file_command(self, filename):
return '<<"%s"'%filename
def _keyboard_interrupt(self):
print("Interrupting %s..." % self)
e = self._expect
e.sendline(chr(3)) # send ctrl-c
e.expect('Interrupt> ')
e.sendline("a") # a -- abort
e.expect(self._prompt)
return e.before
def _install_hints(self):
"""
Hints for installing mathematica on your computer.
AUTHORS:
- William Stein and Justin Walker (2006-02-12)
"""
return """
In order to use the Mathematica interface you need to have Mathematica
installed and have a script in your PATH called "math" that runs the
command-line version of Mathematica. Alternatively, you could use a
remote connection to a server running Mathematica -- for hints, type
print(mathematica._install_hints_ssh())
(1) You might have to buy Mathematica (http://www.wolfram.com/).
(2) * LINUX: The math script usually comes standard with your Mathematica install.
However, on some systems it may be called wolfram, while math is absent.
In this case, assuming wolfram is in your PATH,
(a) create a file called math (in your PATH):
#!/bin/sh
/usr/bin/env wolfram $@
(b) Make the file executable.
chmod +x math
* APPLE OS X:
(a) create a file called math (in your PATH):
#!/bin/sh
/Applications/Mathematica.app/Contents/MacOS/MathKernel $@
The path in the above script must be modified if you installed
Mathematica elsewhere or installed an old version of
Mathematica that has the version in the .app name.
(b) Make the file executable.
chmod +x math
* WINDOWS:
Install Mathematica for Linux into the VMware virtual machine (sorry,
that's the only way at present).
"""
## The following only works with Sage for Cygwin (not colinux).
## Note that Sage colinux is the preferred way to run Sage in Windows,
## and I do not know how to use Mathematica from colinux Sage (unless
## you install Mathematica-for-linux into the colinux machine, which
## is possible).
## Create a file named "math", which you place in the Sage root
## directory. The file contained a single line, which was the
## path to the mathematica math.exe file. In my case, this might be:
## C:/Program Files/Wolfram Research/Mathematica/4.0/math.exe
## The key points are
## 1) there is a file named "math.exe", and it will generally be
## located in a place analogous to the above (depending on where
## Mathematica has been installed). This file is used only for
## launching the kernel with a text-based interface.
## 2) a cygwin batch file must be created which executes this file,
## which means using forward slashes rather than back slashes,
## and probably surrounding everything in quotes
## 3) this cygwin batch file must be on the path for Sage (placing
## it in <SAGE_LOCAL>/bin/ is an easy way to ensure this).
def eval(self, code, strip=True, **kwds):
s = Expect.eval(self, code, **kwds)
if strip:
return AsciiArtString(clean_output(s))
else:
return AsciiArtString(s)
#def _keyboard_interrupt(self):
# print("Keyboard interrupt pressed; trying to recover.")
# E = self.expect()
# E.sendline(chr(3))
# E.sendline('a')
# E.expect(':= ')
# raise KeyboardInterrupt, "Ctrl-c pressed while running Mathematica command"
def set(self, var, value):
"""
Set the variable var to the given value.
"""
cmd = '%s=%s;'%(var,value)
#out = self.eval(cmd)
out = self._eval_line(cmd, allow_use_file=True)
if len(out) > 8:
raise TypeError("Error executing code in Mathematica\nCODE:\n\t%s\nMathematica ERROR:\n\t%s"%(cmd, out))
def get(self, var, ascii_art=False):
"""
Get the value of the variable var.
AUTHORS:
- William Stein
- Kiran Kedlaya (2006-02-04): suggested using InputForm
"""
if ascii_art:
return self.eval(var, strip=True)
else:
return self.eval('InputForm[%s, NumberMarks->False]'%var, strip=True)
#def clear(self, var):
# """
# Clear the variable named var.
# """
# self.eval('Clear[%s]'%var)
def _eval_line(self, line, allow_use_file=True, wait_for_prompt=True, restart_if_needed=False):
s = Expect._eval_line(self, line,
allow_use_file=allow_use_file, wait_for_prompt=wait_for_prompt)
return str(s).strip('\n')
def _function_call_string(self, function, args, kwds):
"""
Returns the string used to make function calls.
EXAMPLES::
sage: mathematica._function_call_string('Sin', ['x'], [])
'Sin[x]'
"""
return "%s[%s]"%(function, ",".join(args))
def _left_list_delim(self):
return "{"
def _right_list_delim(self):
return "}"
def _left_func_delim(self):
return "["
def _right_func_delim(self):
return "]"
###########################################
# System -- change directory, etc
###########################################
def chdir(self, dir):
"""
Change Mathematica's current working directory.
EXAMPLES::
sage: mathematica.chdir('/') # optional - mathematica
sage: mathematica('Directory[]') # optional - mathematica
"/"
"""
self.eval('SetDirectory["%s"]'%dir)
def _true_symbol(self):
return 'True'
def _false_symbol(self):
return 'False'
def _equality_symbol(self):
return '=='
def _assign_symbol(self):
return ":="
def _exponent_symbol(self):
"""
Returns the symbol used to denote the exponent of a number in
Mathematica.
EXAMPLES::
sage: mathematica._exponent_symbol() # optional - mathematica
'*^'
::
sage: bignum = mathematica('10.^80') # optional - mathematica
sage: repr(bignum) # optional - mathematica
'1.*^80'
sage: repr(bignum).replace(mathematica._exponent_symbol(), 'e').strip() # optional - mathematica
'1.e80'
"""
return "*^"
def _object_class(self):
return MathematicaElement
def console(self, readline=True):
mathematica_console(readline=readline)
def _tab_completion(self):
a = self.eval('Names["*"]')
return a.replace('$','').replace('\n \n>','').replace(',','').replace('}','').replace('{','').split()
def help(self, cmd):
return self.eval('? %s'%cmd)
def __getattr__(self, attrname):
if attrname[:1] == "_":
raise AttributeError
return MathematicaFunction(self, attrname)
@instancedoc
class MathematicaElement(ExpectElement):
def __getitem__(self, n):
return self.parent().new('%s[[%s]]'%(self._name, n))
def __getattr__(self, attrname):
self._check_valid()
if attrname[:1] == "_":
raise AttributeError
return MathematicaFunctionElement(self, attrname)
def __float__(self, precision=16):
P = self.parent()
return float(P.eval('N[%s,%s]'%(self.name(),precision)))
def _reduce(self):
return self.parent().eval('InputForm[%s]' % self.name()).strip()
def __reduce__(self):
return reduce_load, (self._reduce(), )
def _latex_(self):
z = self.parent().eval('TeXForm[%s]'%self.name())
i = z.find('=')
return z[i+1:].strip()
def _repr_(self):
P = self.parent()
return P.get(self._name, ascii_art=False).strip()
def _sage_(self, locals={}):
r"""
Attempt to return a Sage version of this object.
This method works successfully when Mathematica returns a result
or list of results that consist only of:
- numbers, i.e. integers, floats, complex numbers;
- functions and named constants also present in Sage, where:
- Sage knows how to translate the function or constant's name
from Mathematica's naming scheme, or
- you provide a translation dictionary `locals`, or
- the Sage name for the function or constant is simply the
Mathematica name in lower case;
- symbolic variables whose names don't pathologically overlap with
objects already defined in Sage.
This method will not work when Mathematica's output includes:
- strings;
- functions unknown to Sage that are not specified in `locals`;
- Mathematica functions with different parameters/parameter order to
the Sage equivalent. In this case, define a function to do the
parameter conversion, and pass it in via the locals dictionary.
EXAMPLES:
Mathematica lists of numbers/constants become Sage lists of
numbers/constants::
sage: m = mathematica('{{1., 4}, Pi, 3.2e100, I}') # optional - mathematica
sage: s = m.sage(); s # optional - mathematica
[[1.00000000000000, 4], pi, 3.20000000000000*e100, I]
sage: s[1].n() # optional - mathematica
3.14159265358979
sage: s[3]^2 # optional - mathematica
-1
::
sage: m = mathematica('x^2 + 5*y') # optional - mathematica
sage: m.sage() # optional - mathematica
x^2 + 5*y
::
sage: m = mathematica('Sin[Sqrt[1-x^2]] * (1 - Cos[1/x])^2') # optional - mathematica
sage: m.sage() # optional - mathematica
(cos(1/x) - 1)^2*sin(sqrt(-x^2 + 1))
::
sage: m = mathematica('NewFn[x]') # optional - mathematica
sage: m._sage_(locals={'NewFn': sin}) # optional - mathematica
sin(x)
::
sage: var('bla') # optional - mathematica
bla
sage: m = mathematica('bla^2') # optional - mathematica
sage: bla^2 - m.sage() # optional - mathematica
0
::
sage: m = mathematica('bla^2') # optional - mathematica
sage: mb = m.sage() # optional - mathematica
sage: var('bla') # optional - mathematica
bla
sage: bla^2 - mb # optional - mathematica
0
AUTHORS:
- Felix Lawrence (2010-11-03): Major rewrite to use ._sage_repr() and
sage.calculus.calculus.symbolic_expression_from_string() for greater
compatibility, while still supporting conversion of symbolic
expressions.
TESTS:
Check that :trac:`28814` is fixed::
sage: mathematica('Exp[1000.0]').sage() # optional - mathematica
1.97007111401700e434
sage: mathematica('1/Exp[1000.0]').sage() # optional - mathematica
5.07595889754950e-435
sage: mathematica(RealField(100)(1/3)).sage() # optional - mathematica
0.3333333333333333333333333333335
"""
from sage.libs.pynac.pynac import symbol_table
from sage.symbolic.constants import constants_name_table as constants
from sage.calculus.calculus import symbolic_expression_from_string
from sage.calculus.calculus import _find_func as find_func
# Get Mathematica's output and perform preliminary formatting
res = self._sage_repr()
if '"' in res:
raise NotImplementedError("String conversion from Mathematica \
does not work. Mathematica's output was: %s" % res)
# Find all the mathematica functions, constants and symbolic variables
# present in `res`. Convert MMA functions and constants to their
# Sage equivalents (if possible), using `locals` and
# `sage.libs.pynac.pynac.symbol_table['mathematica']` as translation
# dictionaries. If a MMA function or constant is not either
# dictionary, then we use a variety of tactics listed in `autotrans`.
# If a MMA variable is not in any dictionary, then create an
# identically named Sage equivalent.
# Merge the user-specified locals dictionary and the symbol_table
# (locals takes priority)
lsymbols = symbol_table['mathematica'].copy()
lsymbols.update(locals)
# Strategies for translating unknown functions/constants:
autotrans = [ str.lower, # Try it in lower case
_un_camel, # Convert `CamelCase` to `camel_case`
lambda x: x # Try the original name
]
# Find the MMA funcs/vars/constants - they start with a letter.
# Exclude exponents (e.g. 'e8' from 4.e8)
p = re.compile(r'(?<!\.)[a-zA-Z]\w*')
for m in p.finditer(res):
# If the function, variable or constant is already in the
# translation dictionary, then just move on.
if m.group() in lsymbols:
pass
# Now try to translate all other functions -- try each strategy
# in `autotrans` and check if the function exists in Sage
elif m.end() < len(res) and res[m.end()] == '(':
for t in autotrans:
f = find_func(t(m.group()), create_when_missing = False)
if f is not None:
lsymbols[m.group()] = f
break
else:
raise NotImplementedError("Don't know a Sage equivalent \
for Mathematica function '%s'. Please specify one \
manually using the 'locals' dictionary" % m.group())
# Check if Sage has an equivalent constant
else:
for t in autotrans:
if t(m.group()) in constants:
lsymbols[m.group()] = constants[t(m.group())]
break
# If Sage has never heard of the variable, then
# symbolic_expression_from_string will automatically create it
try:
return symbolic_expression_from_string(res, lsymbols,
accept_sequence=True)
except Exception:
raise NotImplementedError("Unable to parse Mathematica \
output: %s" % res)
def __str__(self):
P = self._check_valid()
return P.get(self._name, ascii_art=True)
def __len__(self):
"""
Return the object's length, evaluated by mathematica.
EXAMPLES::
sage: len(mathematica([1,1.,2])) # optional - mathematica
3
AUTHORS:
- Felix Lawrence (2009-08-21)
"""
return int(self.Length())
@cached_method
def _is_graphics(self):
"""
Test whether the mathematica expression is graphics
OUTPUT:
Boolean.
EXAMPLES::
sage: P = mathematica('Plot[Sin[x],{x,-2Pi,4Pi}]') # optional - mathematica
sage: P._is_graphics() # optional - mathematica
True
"""
P = self._check_valid()
return P.eval('InputForm[%s]' % self.name()).strip().startswith('Graphics[')
def save_image(self, filename, ImageSize=600):
r"""
Save a mathematica graphics
INPUT:
- ``filename`` -- string. The filename to save as. The
extension determines the image file format.
- ``ImageSize`` -- integer. The size of the resulting image.
EXAMPLES::
sage: P = mathematica('Plot[Sin[x],{x,-2Pi,4Pi}]') # optional - mathematica
sage: filename = tmp_filename() # optional - mathematica
sage: P.save_image(filename, ImageSize=800) # optional - mathematica
"""
P = self._check_valid()
if not self._is_graphics():
raise ValueError('mathematica expression is not graphics')
filename = os.path.abspath(filename)
s = 'Export["%s", %s, ImageSize->%s]'%(filename, self.name(), ImageSize)
P.eval(s)
def _rich_repr_(self, display_manager, **kwds):
"""
Rich Output Magic Method
See :mod:`sage.repl.rich_output` for details.
EXAMPLES::
sage: from sage.repl.rich_output import get_display_manager
sage: dm = get_display_manager()
sage: P = mathematica('Plot[Sin[x],{x,-2Pi,4Pi}]') # optional - mathematica
The following test requires a working X display on Linux so that the
Mathematica frontend can do the rendering (:trac:`23112`)::
sage: P._rich_repr_(dm) # optional - mathematica mathematicafrontend
OutputImagePng container
"""
if self._is_graphics():
OutputImagePng = display_manager.types.OutputImagePng
if display_manager.preferences.graphics == 'disable':
return
if OutputImagePng in display_manager.supported_output():
return display_manager.graphics_from_save(
self.save_image, kwds, '.png', OutputImagePng)
else:
OutputLatex = display_manager.types.OutputLatex
dmp = display_manager.preferences.text
if dmp is None or dmp == 'plain':
return
if dmp == 'latex' and OutputLatex in display_manager.supported_output():
return OutputLatex(self._latex_())
def show(self, ImageSize=600):
r"""
Show a mathematica expression immediately.
This method attempts to display the graphics immediately,
without waiting for the currently running code (if any) to
return to the command line. Be careful, calling it from within
a loop will potentially launch a large number of external
viewer programs.
INPUT:
- ``ImageSize`` -- integer. The size of the resulting image.
OUTPUT:
This method does not return anything. Use :meth:`save` if you
want to save the figure as an image.
EXAMPLES::
sage: Q = mathematica('Sin[x Cos[y]]/Sqrt[1-x^2]') # optional - mathematica
sage: show(Q) # optional - mathematica
<html>\(\frac{\sin (x \cos (y))}{\sqrt{1-x^2}}\)</html>
The following example starts a Mathematica frontend to do the rendering
(:trac:`28819`)::
sage: P = mathematica('Plot[Sin[x],{x,-2Pi,4Pi}]') # optional - mathematica
sage: show(P) # optional - mathematica mathematicafrontend
sage: P.show(ImageSize=800) # optional - mathematica mathematicafrontend
"""
from sage.repl.rich_output import get_display_manager
dm = get_display_manager()
dm.display_immediately(self, ImageSize=ImageSize)
def str(self):
return str(self)
def _richcmp_(self, other, op):
P = self.parent()
if P.eval("%s < %s"%(self.name(), other.name())).strip() == 'True':
return rich_to_bool(op, -1)
elif P.eval("%s > %s"%(self.name(), other.name())).strip() == 'True':
return rich_to_bool(op, 1)
elif P.eval("%s == %s"%(self.name(), other.name())).strip() == 'True':
return rich_to_bool(op, 0)
return NotImplemented
def __bool__(self):
"""
Return whether this Mathematica element is not identical to ``False``.
EXAMPLES::
sage: bool(mathematica(True)) # optional - mathematica
True
sage: bool(mathematica(False)) # optional - mathematica
False
In Mathematica, `0` cannot be used to express falsity::
sage: bool(mathematica(0)) # optional - mathematica
True
"""
P = self._check_valid()
cmd = '%s===%s' % (self._name, P._false_symbol())
return P.eval(cmd).strip() != P._true_symbol()
__nonzero__ = __bool__
def n(self, *args, **kwargs):
r"""
Numerical approximation by converting to Sage object first
Convert the object into a Sage object and return its numerical
approximation. See documentation of the function
:func:`sage.misc.functional.n` for details.
EXAMPLES::
sage: mathematica('Pi').n(10) # optional -- mathematica
3.1
sage: mathematica('Pi').n() # optional -- mathematica
3.14159265358979
sage: mathematica('Pi').n(digits=10) # optional -- mathematica
3.141592654
"""
return self._sage_().n(*args, **kwargs)
@instancedoc
class MathematicaFunction(ExpectFunction):
def _instancedoc_(self):
M = self._parent
return M.help(self._name)
@instancedoc
class MathematicaFunctionElement(FunctionElement):
def _instancedoc_(self):
M = self._obj.parent()
return M.help(self._name)
# An instance
mathematica = Mathematica()
def reduce_load(X):
return mathematica(X)
def mathematica_console(readline=True):
from sage.repl.rich_output.display_manager import get_display_manager
if not get_display_manager().is_in_terminal():
raise RuntimeError('Can use the console only in the terminal. Try %%mathematica magics instead.')
if not readline:
os.system('math')
return
else:
os.system('math-readline')
return
|
# This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _OnPrem
class _Certificates(_OnPrem):
_type = "certificates"
_icon_dir = "resources/onprem/certificates"
class CertManager(_Certificates):
_icon = "cert-manager.png"
class LetsEncrypt(_Certificates):
_icon = "lets-encrypt.png"
# Aliases
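# Hypothetical usage with the diagrams DSL (diagram title and node labels are
# illustrative):
#     from diagrams import Diagram
#     with Diagram("Certificates", show=False):
#         CertManager("cert-manager") >> LetsEncrypt("lets-encrypt")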
|
from bst import BSTNode, BST
class AVLNode(BSTNode):
"""Implementation of AVL Node"""
def __init__(self, key):
BSTNode.__init__(self, key)
self.height = 0
def update_subtree_info(self):
self.height = self._uncached_height()
def _uncached_height(self):
# A missing child contributes height -1; an explicit None check is needed
# because a leaf child's height of 0 is falsy.
left_height = self.left.height if self.left is not None else -1
right_height = self.right.height if self.right is not None else -1
return 1 + max(left_height, right_height)
class AVL(BST):
"""Implementation of AVL Tree"""
def __init__(self, node_class = AVLNode):
BST.__init__(self, node_class)
def height(self, node):
"""Return height of subtree rooted at this node
"""
if node is None:
return -1
return node.height
def insert(self, key):
inserted_node = BST.insert(self, key)
self._rebalance(inserted_node)
return inserted_node
def delete(self, key):
deleted_node = BST.delete(self, key)
self._rebalance(deleted_node)
return deleted_node
def _left_rotate(self, node):
"""Left rotate on this node"""
temp = node.right
node.right = temp.left
if temp.left is not None:
temp.left.parent = node
temp.parent = node.parent
if node.parent is None:
self.root = temp
elif node == node.parent.left:
node.parent.left = temp
elif node == node.parent.right:
node.parent.right = temp
node.parent = temp
temp.left = node
node.update_subtree_info()
temp.update_subtree_info()
def _right_rotate(self, node):
"""Right rotate on this node"""
temp = node.left
node.left = temp.right
if temp.right is not None:
temp.right.parent = node
temp.parent = node.parent
if node.parent is None:
self.root = temp
elif node == node.parent.right:
node.parent.right = temp
elif node == node.parent.left:
node.parent.left = temp
node.parent = temp
temp.right = node
node.update_subtree_info()
temp.update_subtree_info()
def _rebalance(self, node):
"""Rebalance Tree"""
while node is not None:
node.update_subtree_info()
if self.height(node.left) >= 2 + self.height(node.right):
if self.height(node.left.left) >= self.height(node.left.right):
self._right_rotate(node)
else:
self._left_rotate(node.left)
self._right_rotate(node)
elif self.height(node.right) >= 2 + self.height(node.left):
if self.height(node.right.right) > self.height(node.right.left):
self._left_rotate(node)
else:
self._right_rotate(node.right)
self._left_rotate(node)
node = node.parent
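# Minimal usage sketch (relies on the BST.insert/BST.delete contract imported
# from bst above):
#     tree = AVL()
#     for key in [5, 3, 8, 1, 4]:
#         tree.insert(key)
#     tree.delete(3)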
|