hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8a027dd43afd4eb1921cbb07a07753533cd6fa2b | 3,208 | py | Python | lib/stacks/api/api/main.py | sierrezinal/api-l3x-in | 0c5122a29ecd8f94cb9b99909499c330969d26ee | [
"Apache-2.0"
] | 1 | 2021-02-04T23:42:43.000Z | 2021-02-04T23:42:43.000Z | lib/stacks/api/api/main.py | sierrezinal/api-l3x-in | 0c5122a29ecd8f94cb9b99909499c330969d26ee | [
"Apache-2.0"
] | null | null | null | lib/stacks/api/api/main.py | sierrezinal/api-l3x-in | 0c5122a29ecd8f94cb9b99909499c330969d26ee | [
"Apache-2.0"
] | null | null | null | import json
from os import environ as env
from typing import (Dict, List, Mapping)
import utils
import utils.aws as aws
import utils.handlers as handlers
def social_report(event: utils.LambdaEvent) -> Mapping: # pylint: disable=unused-argument
"""Get all events from CloudWatch REPORT_LOG_GROUP_NAME group."""
log_group_name = env["REPORT_LOG_GROUP_NAME"]
return aws.read_all_log_streams(log_group=log_group_name)
def contact(event: utils.LambdaEvent) -> str:
"""
Send event payload to Notifications lambda for delivery.
Expects these keys in event mapping:
- source
- name
- email
- description
"""
lambda_notifications = env["LAMBDA_NOTIFICATIONS"]
body = event["body"]
utils.Log.debug("Processing body payload: %s", body)
try:
utils.Log.debug("Loading JSON content from body")
utils.Log.info("json.loads should be safe to use: "
"https://stackoverflow.com/a/45483187/2274124")
msg = """Source: {source}
Name: {name}
Mail: {email}
Desc: {description}
""".format(**json.loads(body))
except (TypeError, json.JSONDecodeError) as error:
raise utils.HandledError("JSON body is malformatted: %s" % error)
except KeyError as error:
raise utils.HandledError("Missing JSON key: %s" % error)
utils.Log.debug("### Message content below ###")
utils.Log.debug(msg)
utils.Log.debug("#############################")
return aws.invoke_lambda(
name=lambda_notifications,
payload={
"title": "New /contact submission received",
"payload": msg,
}).text
def pagespeed_report(_: utils.LambdaEvent) -> List[Dict]:
"""Return report from Google Pagespeed data stored in DynamoDB."""
data = aws.scan_dynamodb_table(env["PAGESPEED_TABLE"])
if data["Count"] == 0:
raise utils.HandledError(message="Unexpected DynamoDB response: empty table",
status_code=500)
items = [{"url": item['url']['S'],
"latest_score_value": float(item['latest_score_value']['N']),
"latest_score_timestamp": item['latest_score_timestamp']['S']}
for item in data["Items"]]
utils.Log.debug("Items: %s", items)
errors = False
for item in items:
if not 0.95 < item["latest_score_value"] <= 1:
item["error"] = True
errors = True
if errors:
raise utils.HandledError(message=items, status_code=400)
return items
def handler(event, context) -> utils.Response:
"""Lambda entry point.
Public HTTPS REST API entry point
"""
router_map = {
"GET /pagespeed_report": pagespeed_report,
"GET /robots.txt": lambda _: "User-agent: *\nDisallow: /",
"GET /social_report": social_report,
"POST /contact": contact,
}
return handlers.ApiGatewayEventHandler(name="api",
event=utils.LambdaEvent(event),
context=utils.LambdaContext(context),
router_map=router_map,
).response
| 29.981308 | 90 | 0.602868 |
3003936dd5189d126774b76012a04f52f6140437 | 4,394 | py | Python | manipulators/geometric_product_handler.py | spencerparkin/MathTree | 4aa286248c2dc6a34ad2ef3e56d48b60838f3b72 | [
"MIT"
] | null | null | null | manipulators/geometric_product_handler.py | spencerparkin/MathTree | 4aa286248c2dc6a34ad2ef3e56d48b60838f3b72 | [
"MIT"
] | null | null | null | manipulators/geometric_product_handler.py | spencerparkin/MathTree | 4aa286248c2dc6a34ad2ef3e56d48b60838f3b72 | [
"MIT"
] | null | null | null | # geometric_product_handler.py
from math_tree import MathTreeManipulator, MathTreeNode
class GeometricProductHandler(MathTreeManipulator):
def __init__(self):
super().__init__()
def _manipulate_subtree(self, node):
new_node = self._manipulate_subtree_internal(node, False)
if new_node:
return new_node
return self._manipulate_subtree_internal(node, True)
def _manipulate_subtree_internal(self, node, allow_same_grade):
if node.data == '*':
for i in range(len(node.child_list) - 1):
node_a = node.child_list[i]
node_b = node.child_list[i + 1]
scalar_list_a, vector_list_a = self._parse_blade(node_a)
scalar_list_b, vector_list_b = self._parse_blade(node_b)
if vector_list_a is not None and vector_list_b is not None:
if len(vector_list_a) > 0 and len(vector_list_b) > 0:
if len(vector_list_a) != len(vector_list_b) or allow_same_grade:
if len(vector_list_a) == 1 or len(vector_list_b) == 1:
sum = MathTreeNode('+', [
MathTreeNode('.', [
MathTreeNode('^', [vector.copy() for vector in vector_list_a]),
MathTreeNode('^', [vector.copy() for vector in vector_list_b])
]),
MathTreeNode('^', [
MathTreeNode('^', [vector.copy() for vector in vector_list_a]),
MathTreeNode('^', [vector.copy() for vector in vector_list_b])
]),
])
else:
if len(vector_list_a) <= len(vector_list_b):
sum = MathTreeNode('+', [
MathTreeNode('*', [
vector_list_a[0].copy(),
MathTreeNode('^', [vector.copy() for vector in vector_list_a[1:]]),
MathTreeNode('^', [vector.copy() for vector in vector_list_b])
]),
MathTreeNode('*', [
MathTreeNode(-1.0),
MathTreeNode('.', [
vector_list_a[0].copy(),
MathTreeNode('^', [vector.copy() for vector in vector_list_a[1:]]),
]),
MathTreeNode('^', [vector.copy() for vector in vector_list_b])
])
])
else:
sum = MathTreeNode('+', [
MathTreeNode('*', [
MathTreeNode('^', [vector.copy() for vector in vector_list_a]),
MathTreeNode('^', [vector.copy() for vector in vector_list_b[:-1]]),
vector_list_b[-1].copy()
]),
MathTreeNode('*', [
MathTreeNode(-1.0),
MathTreeNode('^', [vector.copy() for vector in vector_list_a]),
MathTreeNode('.', [
MathTreeNode('^', [vector.copy() for vector in vector_list_b[:-1]]),
vector_list_b[-1].copy()
])
])
])
node.child_list += scalar_list_a + scalar_list_b
del node.child_list[i]
del node.child_list[i]
node.child_list.insert(i, sum)
return node | 60.191781 | 116 | 0.378015 |
0730da80b3610a980fdae021af3d8b5cb1d1cafe | 12,660 | py | Python | kivy/core/video/video_ffpyplayer.py | VICTORVICKIE/kivy | 55abc963fe9099c078a3a2253397de70c2ee17b1 | [
"MIT"
] | null | null | null | kivy/core/video/video_ffpyplayer.py | VICTORVICKIE/kivy | 55abc963fe9099c078a3a2253397de70c2ee17b1 | [
"MIT"
] | null | null | null | kivy/core/video/video_ffpyplayer.py | VICTORVICKIE/kivy | 55abc963fe9099c078a3a2253397de70c2ee17b1 | [
"MIT"
] | null | null | null | '''
FFmpeg based video abstraction
==============================
To use, you need to install ffpyplayer and have a compiled ffmpeg shared
library.
https://github.com/matham/ffpyplayer
The docs there describe how to set this up. But briefly, first you need to
compile ffmpeg using the shared flags while disabling the static flags (you'll
probably have to set the fPIC flag, e.g. CFLAGS=-fPIC). Here are some
instructions: https://trac.ffmpeg.org/wiki/CompilationGuide. For Windows, you
can download compiled GPL binaries from http://ffmpeg.zeranoe.com/builds/.
Similarly, you should download SDL2.
Now, you should have ffmpeg and sdl directories. In each, you should have an
'include', 'bin' and 'lib' directory, where e.g. for Windows, 'lib' contains
the .dll.a files, while 'bin' contains the actual dlls. The 'include' directory
holds the headers. The 'bin' directory is only needed if the shared libraries
are not already in the path. In the environment, define FFMPEG_ROOT and
SDL_ROOT, each pointing to the ffmpeg and SDL directories respectively. (If
you're using SDL2, the 'include' directory will contain an 'SDL2' directory,
which then holds the headers).
Once defined, download the ffpyplayer git repo and run
python setup.py build_ext --inplace
Finally, before running you need to ensure that ffpyplayer is in python's path.
..Note::
When kivy exits by closing the window while the video is playing,
it appears that the __del__method of VideoFFPy
is not called. Because of this, the VideoFFPy object is not
properly deleted when kivy exits. The consequence is that because
MediaPlayer creates internal threads which do not have their daemon
flag set, when the main threads exists, it'll hang and wait for the other
MediaPlayer threads to exit. But since __del__ is not called to delete the
MediaPlayer object, those threads will remain alive, hanging kivy. What
this means is that you have to be sure to delete the MediaPlayer object
before kivy exits by setting it to None.
'''
__all__ = ('VideoFFPy', )
try:
import ffpyplayer
from ffpyplayer.player import MediaPlayer
from ffpyplayer.tools import set_log_callback, get_log_callback
except:
raise
from threading import Thread
from kivy.clock import Clock, mainthread
from kivy.logger import Logger
from kivy.core.video import VideoBase
from kivy.graphics import Rectangle, BindTexture
from kivy.graphics.texture import Texture
from kivy.graphics.fbo import Fbo
from kivy.weakmethod import WeakMethod
import time
Logger.info('VideoFFPy: Using ffpyplayer {}'.format(ffpyplayer.version))
logger_func = {'quiet': Logger.critical, 'panic': Logger.critical,
'fatal': Logger.critical, 'error': Logger.error,
'warning': Logger.warning, 'info': Logger.info,
'verbose': Logger.debug, 'debug': Logger.debug}
def _log_callback(message, level):
message = message.strip()
if message:
logger_func[level]('ffpyplayer: {}'.format(message))
if not get_log_callback():
set_log_callback(_log_callback)
class VideoFFPy(VideoBase):
YUV_RGB_FS = """
$HEADER$
uniform sampler2D tex_y;
uniform sampler2D tex_u;
uniform sampler2D tex_v;
void main(void) {
float y = texture2D(tex_y, tex_coord0).r;
float u = texture2D(tex_u, tex_coord0).r - 0.5;
float v = texture2D(tex_v, tex_coord0).r - 0.5;
float r = y + 1.402 * v;
float g = y - 0.344 * u - 0.714 * v;
float b = y + 1.772 * u;
gl_FragColor = vec4(r, g, b, 1.0);
}
"""
_trigger = None
def __init__(self, **kwargs):
self._ffplayer = None
self._thread = None
self._next_frame = None
self._seek_queue = []
self._ffplayer_need_quit = False
self._trigger = Clock.create_trigger(self._redraw)
super(VideoFFPy, self).__init__(**kwargs)
def __del__(self):
self.unload()
def _player_callback(self, selector, value):
if self._ffplayer is None:
return
if selector == 'quit':
def close(*args):
self.unload()
Clock.schedule_once(close, 0)
def _get_position(self):
if self._ffplayer is not None:
return self._ffplayer.get_pts()
return 0
def _set_position(self, pos):
self.seek(pos)
def _set_volume(self, volume):
self._volume = volume
if self._ffplayer:
self._ffplayer.set_volume(self._volume)
def _get_duration(self):
if self._ffplayer is None:
return 0
return self._ffplayer.get_metadata()['duration']
@mainthread
def _do_eos(self):
if self.eos == 'pause':
self.pause()
elif self.eos == 'stop':
self.stop()
elif self.eos == 'loop':
self.position = 0
self.dispatch('on_eos')
@mainthread
def _change_state(self, state):
self._state = state
def _redraw(self, *args):
if not self._ffplayer:
return
next_frame = self._next_frame
if not next_frame:
return
img, pts = next_frame
if img.get_size() != self._size or self._texture is None:
self._size = w, h = img.get_size()
if self._out_fmt == 'yuv420p':
w2 = int(w / 2)
h2 = int(h / 2)
self._tex_y = Texture.create(
size=(w, h), colorfmt='luminance')
self._tex_u = Texture.create(
size=(w2, h2), colorfmt='luminance')
self._tex_v = Texture.create(
size=(w2, h2), colorfmt='luminance')
self._fbo = fbo = Fbo(size=self._size)
with fbo:
BindTexture(texture=self._tex_u, index=1)
BindTexture(texture=self._tex_v, index=2)
Rectangle(size=fbo.size, texture=self._tex_y)
fbo.shader.fs = VideoFFPy.YUV_RGB_FS
fbo['tex_y'] = 0
fbo['tex_u'] = 1
fbo['tex_v'] = 2
self._texture = fbo.texture
else:
self._texture = Texture.create(size=self._size,
colorfmt='rgba')
# XXX FIXME
# self.texture.add_reload_observer(self.reload_buffer)
self._texture.flip_vertical()
self.dispatch('on_load')
if self._texture:
if self._out_fmt == 'yuv420p':
dy, du, dv, _ = img.to_memoryview()
if dy and du and dv:
self._tex_y.blit_buffer(dy, colorfmt='luminance')
self._tex_u.blit_buffer(du, colorfmt='luminance')
self._tex_v.blit_buffer(dv, colorfmt='luminance')
self._fbo.ask_update()
self._fbo.draw()
else:
self._texture.blit_buffer(
img.to_memoryview()[0], colorfmt='rgba')
self.dispatch('on_frame')
def _next_frame_run(self):
ffplayer = self._ffplayer
sleep = time.sleep
trigger = self._trigger
did_dispatch_eof = False
seek_queue = self._seek_queue
# fast path, if the source video is yuv420p, we'll use a glsl shader
# for buffer conversion to rgba
while not self._ffplayer_need_quit:
src_pix_fmt = ffplayer.get_metadata().get('src_pix_fmt')
if not src_pix_fmt:
sleep(0.005)
continue
if src_pix_fmt == 'yuv420p':
self._out_fmt = 'yuv420p'
ffplayer.set_output_pix_fmt(self._out_fmt)
self._ffplayer.toggle_pause()
break
if self._ffplayer_need_quit:
return
# wait until loaded or failed, shouldn't take long, but just to make
# sure metadata is available.
s = time.perf_counter()
while not self._ffplayer_need_quit:
if ffplayer.get_metadata()['src_vid_size'] != (0, 0):
break
# XXX if will fail later then?
if time.perf_counter() - s > 10.:
break
sleep(0.005)
if self._ffplayer_need_quit:
return
# we got all the information, now, get the frames :)
self._change_state('playing')
while not self._ffplayer_need_quit:
seek_happened = False
if seek_queue:
vals = seek_queue[:]
del seek_queue[:len(vals)]
percent, precise = vals[-1]
ffplayer.seek(
percent * ffplayer.get_metadata()['duration'],
relative=False,
accurate=precise
)
seek_happened = True
self._next_frame = None
# Get next frame if paused:
if seek_happened and ffplayer.get_pause():
ffplayer.set_volume(0.0) # Try to do it silently.
ffplayer.set_pause(False)
try:
# We don't know concrete number of frames to skip,
# this number worked fine on couple of tested videos:
to_skip = 6
while True:
frame, val = ffplayer.get_frame(show=False)
# Exit loop on invalid val:
if val in ('paused', 'eof'):
break
# Exit loop on seek_queue updated:
if seek_queue:
break
# Wait for next frame:
if frame is None:
sleep(0.005)
continue
# Wait until we skipped enough frames:
to_skip -= 1
if to_skip == 0:
break
# Assuming last frame is actual, just get it:
frame, val = ffplayer.get_frame(force_refresh=True)
finally:
ffplayer.set_pause(bool(self._state == 'paused'))
ffplayer.set_volume(self._volume)
# Get next frame regular:
else:
frame, val = ffplayer.get_frame()
if val == 'eof':
sleep(0.2)
if not did_dispatch_eof:
self._do_eos()
did_dispatch_eof = True
elif val == 'paused':
did_dispatch_eof = False
sleep(0.2)
else:
did_dispatch_eof = False
if frame:
self._next_frame = frame
trigger()
else:
val = val if val else (1 / 30.)
sleep(val)
def seek(self, percent, precise=True):
if self._ffplayer is None:
return
self._seek_queue.append((percent, precise,))
def stop(self):
self.unload()
def pause(self):
if self._ffplayer and self._state != 'paused':
self._ffplayer.toggle_pause()
self._state = 'paused'
def play(self):
if self._ffplayer and self._state == 'paused':
self._ffplayer.toggle_pause()
self._state = 'playing'
return
self.load()
self._out_fmt = 'rgba'
ff_opts = {
'paused': True,
'out_fmt': self._out_fmt,
'sn': True,
'volume': self._volume,
}
self._ffplayer = MediaPlayer(
self._filename, callback=self._player_callback,
thread_lib='SDL',
loglevel='info', ff_opts=ff_opts)
# Disabled as an attempt to fix kivy issue #6210
# self._ffplayer.set_volume(self._volume)
self._thread = Thread(target=self._next_frame_run, name='Next frame')
self._thread.daemon = True
self._thread.start()
def load(self):
self.unload()
def unload(self):
if self._trigger is not None:
self._trigger.cancel()
self._ffplayer_need_quit = True
if self._thread:
self._thread.join()
self._thread = None
if self._ffplayer:
self._ffplayer = None
self._next_frame = None
self._size = (0, 0)
self._state = ''
self._ffplayer_need_quit = False
| 33.850267 | 79 | 0.560506 |
164061afa1041d90483242e0c7d9c84a88c915a5 | 145 | py | Python | simple_api_server_test/get_by_douban/urls.py | dollarkillerx/PythonReview | ee896ee702a6c854f599d7e73e2ceef4eecd4c40 | [
"MIT"
] | null | null | null | simple_api_server_test/get_by_douban/urls.py | dollarkillerx/PythonReview | ee896ee702a6c854f599d7e73e2ceef4eecd4c40 | [
"MIT"
] | 3 | 2020-06-06T00:45:41.000Z | 2022-02-10T11:40:18.000Z | simple_api_server_test/get_by_douban/urls.py | dollarkillerx/PythonReview | ee896ee702a6c854f599d7e73e2ceef4eecd4c40 | [
"MIT"
] | null | null | null | from django.urls import path
from .views import Music,Book
urlpatterns = [
path('music/',Music.as_view()),
path('book/',Book.as_view())
] | 24.166667 | 35 | 0.682759 |
f98f735a943d8d3032625531ba3beb919e6818d4 | 2,003 | py | Python | tests/test_group/test_liegroup.py | npapapietro/liesym | 56bce3290e35d111b86413191516c41a09f0a07d | [
"MIT"
] | 2 | 2021-09-09T22:25:25.000Z | 2022-01-22T01:15:47.000Z | tests/test_group/test_liegroup.py | npapapietro/liesym | 56bce3290e35d111b86413191516c41a09f0a07d | [
"MIT"
] | 1 | 2021-12-20T00:15:26.000Z | 2021-12-20T01:54:07.000Z | tests/test_group/test_liegroup.py | npapapietro/liesym | 56bce3290e35d111b86413191516c41a09f0a07d | [
"MIT"
] | 1 | 2021-09-09T22:25:31.000Z | 2021-09-09T22:25:31.000Z | from sympy import Matrix, I, LeviCivita, sympify
from liesym import SU, SO, Sp, A, B, C, D
def test_su():
su2 = SU(2)
assert su2.dimension == 2
assert su2.group == "SU"
assert su2.generators() == [
Matrix([
[0, 1],
[1, 0]]) / 2,
Matrix([
[0, -I],
[I, 0]]) / 2,
Matrix([
[1, 0],
[0, -1]]) / 2]
assert su2.algebra == A(1)
for i in range(3):
for j in range(3):
for k in range(3):
assert su2.structure_constants(i, j, k) == LeviCivita(i, j, k)
for n in range(2, 5):
g = SU(n)
assert g.quadratic_casimir(n) == sympify(n**2 - 1) / sympify(2 * n)
def test_so():
so3 = SO(3)
assert so3.dimension == 3
assert so3.group == "SO"
assert so3.generators() == [
Matrix([
[0, I, 0],
[-I, 0, 0],
[0, 0, 0]]),
Matrix([
[0, 0, I],
[0, 0, 0],
[-I, 0, 0]]),
Matrix([
[0, 0, 0],
[0, 0, I],
[0, -I, 0]])]
assert so3.generators(True) == [
(Matrix([
[0, I, 0],
[-I, 0, 0],
[0, 0, 0]]), (1, 0)),
(Matrix([
[0, 0, I],
[0, 0, 0],
[-I, 0, 0]]), (2, 0)),
(Matrix([
[0, 0, 0],
[0, 0, I],
[0, -I, 0]]), (2, 1))
]
assert so3.algebra == B(1)
assert SO(4).algebra == D(2)
for n in range(5, 7):
g = SO(n)
r = g.algebra.fundamental_weights[0]
assert g.quadratic_casimir(r) == sympify(n - 1) / 2
def test_sp():
sp4 = Sp(4)
assert sp4.dimension == 4
assert sp4.group == "Sp"
assert len(sp4.generators()) == 10
assert sp4.algebra == C(2)
for n in range(2, 5):
g = Sp(2 * n)
r = g.algebra.fundamental_weights[0]
assert g.quadratic_casimir(r) == sympify(2 * n + 1) / 2
| 21.308511 | 78 | 0.403894 |
0ffa157db11076631e2fd03965e12f8fb5c01da2 | 537 | py | Python | manage.py | estagiodois/rooms | 225b4a854db3dc57f928aae0ac5f2946fabd116d | [
"MIT"
] | 4 | 2019-06-20T02:01:15.000Z | 2020-08-17T21:28:31.000Z | manage.py | estagiodois/rooms | 225b4a854db3dc57f928aae0ac5f2946fabd116d | [
"MIT"
] | 32 | 2018-09-16T14:31:13.000Z | 2021-06-10T17:42:31.000Z | manage.py | estagiodois/rooms | 225b4a854db3dc57f928aae0ac5f2946fabd116d | [
"MIT"
] | 4 | 2019-04-27T19:14:17.000Z | 2021-03-08T01:15:10.000Z | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rooms.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 33.5625 | 73 | 0.685289 |
d25446e0a95a4fe18af4165a714442d056cb42c5 | 2,074 | py | Python | src/sqlint/cli.py | berset/sqlint | fb23e800a8b2a642f87d0398cae428ad75571318 | [
"MIT"
] | null | null | null | src/sqlint/cli.py | berset/sqlint | fb23e800a8b2a642f87d0398cae428ad75571318 | [
"MIT"
] | null | null | null | src/sqlint/cli.py | berset/sqlint | fb23e800a8b2a642f87d0398cae428ad75571318 | [
"MIT"
] | null | null | null | import click
import logging
import os
from typing import Dict
from .checker import check as check_tree
from .config import Config
from .formatter import format as format_tree
from .syntax_tree import SyntaxTree
# setting logger
logger = logging.getLogger(__name__)
@click.command(context_settings={'ignore_unknown_options': True})
@click.argument('files', nargs=-1, type=click.Path())
@click.option('--config', '-c', 'config_file',
type=click.Path(),
help='Path to the config file that will be the authoritative config source.')
@click.option('--format', '-f', 'is_format', is_flag=True, help='Prints formatted sql and exist')
def main(files, config_file, is_format):
"""
Args:
files:
config_file: path to the user config file.
is_format: the flage whether outputs formatted sql
Returns:
"""
if len(files) == 0:
# Todo: search *.sql file in current directory recursively.
return
config = Config(config_file)
trees: Dict[str, SyntaxTree] = {}
# constructs syntax tree in each files
for f in files:
if not os.path.exists(f):
logger.warning(f'file is not found: {f}')
continue
if os.path.isdir(f):
logger.warning(f'{f} is a directory')
continue
with open(f, 'r') as fp:
if is_format:
# constructs syntax tree
trees[f] = SyntaxTree.sqlptree(fp.read(), is_abstract=True)
else:
trees[f] = SyntaxTree.sqlptree(fp.read())
errs = False
for file, tree in trees.items():
if is_format:
formatted_tree = format_tree(tree, config)
logger.info(formatted_tree.sqlftree())
else:
tree.sqlftree()
for v in sorted(check_tree(tree, config)):
errs = True
logger.info('{} {}'.format(file, v))
if errs:
exit(1)
if __name__ == '__main__':
main()
| 28.805556 | 98 | 0.581003 |
db56d291bb2d7e1e6f472a6ef26b1c8ba85b51c2 | 808 | py | Python | src/hparams/__init__.py | luciencho/jddc_solo | efddf0885d5e3c640835874f70d57d25123de141 | [
"BSD-3-Clause"
] | null | null | null | src/hparams/__init__.py | luciencho/jddc_solo | efddf0885d5e3c640835874f70d57d25123de141 | [
"BSD-3-Clause"
] | null | null | null | src/hparams/__init__.py | luciencho/jddc_solo | efddf0885d5e3c640835874f70d57d25123de141 | [
"BSD-3-Clause"
] | null | null | null | # coding:utf-8
from __future__ import unicode_literals
from __future__ import division
from __future__ import print_function
import os
from src.hparams import solo_hparam
from src.utils import utils
registry_hparams = dict(
solo_base=solo_hparam.solo_base(),
solo_drop=solo_hparam.solo_rnn(),
solo_cnn=solo_hparam.solo_cnn(),
solo_thu=solo_hparam.solo_thu())
def merge_hparam(args):
if args.hparam_set not in registry_hparams:
raise ValueError('invalid high parameter set {}'.format(args.hparam_set))
else:
hparam = registry_hparams[args.hparam_set]
for k, v in hparam.__dict__.items():
if not k.startswith('_'):
utils.verbose('add attribute {} [{}] to hparams'.format(k, v))
setattr(args, k, v)
return args
| 29.925926 | 81 | 0.695545 |
b780684e918d146608fa053a0cbed503e686068d | 8,731 | py | Python | modest/utils/accessPSC.py | jtrunnels91/ModularEstimator | 1088f91440abd5a82d094311f51d0250ecca52e1 | [
"MIT"
] | null | null | null | modest/utils/accessPSC.py | jtrunnels91/ModularEstimator | 1088f91440abd5a82d094311f51d0250ecca52e1 | [
"MIT"
] | null | null | null | modest/utils/accessPSC.py | jtrunnels91/ModularEstimator | 1088f91440abd5a82d094311f51d0250ecca52e1 | [
"MIT"
] | null | null | null | import requests
import pandas as pd
import numpy as np
from tempfile import NamedTemporaryFile
import os
import subprocess
from astropy.io import fits
import matplotlib.pyplot as plt
from . import spacegeometry
def getChandraObs(
obsID,
fileList
):
pass
def getHeaderInfo(
key,
header
):
catKeys = list(header.keys())
foundKey = False
for index in range(len(header)):
if key == header[index]:
catKey = catKeys[index]
unitKey = catKey.replace('TYPE', 'UNIT')
if unitKey == catKey:
unitKey = catKey.replace('TYP', 'UNIT')
if unitKey in header:
columnUnit = header[unitKey]
else:
columnUnit = None
columnIndexDict = {
'index': index,
'key': catKey
}
if columnUnit:
columnIndexDict['unit'] = columnUnit
foundKey = True
if not foundKey:
raise ValueError('Did not find columns %s in local catalog.' %key)
return columnIndexDict
def plotLocalCatalog(
catalogName='xmmsl2_clean.fits',
dirpath='/home/joel/Documents/pythonDev/research/pulsarJPDAF/pulsarData/xray_catalogs/',
fluxKey='FLUX_B8'
):
hdulist = fits.open(dirpath + catalogName)
catalogHeader = hdulist[1].header
catalogData = hdulist[1].data
hdulist.close()
minFlux = np.min(catalogData[fluxKey])
scaledFlux = np.array(catalogData[fluxKey] - minFlux)
maxFlux = np.max(scaledFlux)
scaledFlux = scaledFlux/maxFlux
plt.figure()
for index in range(len(catalogData)):
plt.scatter(catalogData[index]['RA'], catalogData[index]['DEC'], s=scaledFlux[index])
plt.show(block=False)
return
def localCatalog_coneSearch(
RA,
DEC,
FOV,
catalogName='xmmsl2_clean.fits',
dirpath='/home/joel/Documents/pythonDev/research/pulsarJPDAF/pulsarData/xray_catalogs/',
removeNaNs=True,
fluxKey='FLUX_B8',
extentKey='EXT_B8',
raKey='RA',
decKey='DEC',
srcNameKey='UNIQUE_SRCNAME'
):
hdulist = fits.open(dirpath + catalogName)
catalogHeader = hdulist[1].header
catalogData = hdulist[1].data
hdulist.close()
columns = [srcNameKey, raKey, decKey, fluxKey, extentKey]
savedColumns = []
columnIndexDict = {}
catKeys = list(catalogHeader.keys())
for index in range(len(catalogHeader)):
for column in columns:
if column == catalogHeader[index]:
catKey = catKeys[index]
unitKey = catKey.replace('TYPE', 'UNIT')
if unitKey in catalogHeader:
columnUnit = catalogHeader[unitKey]
else:
columnUnit = None
columnIndexDict[column] = {
'index': index,
'key': catKey
}
if columnUnit:
columnIndexDict[column]['unit'] = columnUnit
columns.remove(column)
savedColumns.append(column)
if columns:
raise ValueError('Did not find columns %s in local catalog.' %columns)
if columnIndexDict[raKey]['unit'] == 'rad':
raConversionFactor = 1
elif columnIndexDict[raKey]['unit'] == 'degrees' or columnIndexDict[raKey]['unit'] == 'degree':
raConversionFactor = np.pi / 180.0
if columnIndexDict[decKey]['unit'] == 'rad':
decConversionFactor = 1
elif columnIndexDict[decKey]['unit'] == 'degrees' or columnIndexDict[decKey]['unit'] == 'degree':
decConversionFactor = np.pi/180.0
if RA['unit'] == 'rad':
referenceRA = RA['value']
elif RA['unit'] == 'degrees':
referenceRA = RA['value'] * np.pi / 180.0
else:
raise ValueError('Unrecougnized RA units %s' % RA['unit'])
if DEC['unit'] == 'rad':
referenceDec = DEC['value']
elif DEC['unit'] == 'degrees':
referenceDec = DEC['value'] * np.pi / 180.0
else:
raise ValueError('Unrecougnized Dec units %s' % DEC['unit'])
if FOV['unit'] == 'rad':
FOVVal = FOV['value']
elif FOV['unit'] == 'degrees':
FOVVal = FOV['value'] * np.pi / 180.0
else:
raise ValueError('Unrecougnized FOV units %s' % FOV['unit'])
referenceUnitVector = spacegeometry.sidUnitVec(
referenceRA,
referenceDec
)
mySourceDF = pd.DataFrame(columns=savedColumns)
for source in catalogData:
sourceUnitVector = spacegeometry.sidUnitVec(
source[raKey] * raConversionFactor,
source[decKey] * decConversionFactor
)
angularDiff = np.arccos(referenceUnitVector.dot(sourceUnitVector))
if angularDiff < (FOVVal/2):
mySrcDict = {}
skipVal = False
for columnName, columnInfo in columnIndexDict.items():
if not skipVal:
if 'unit' in columnInfo:
mySrcDict[columnName] = {
'value': source[columnName],
'unit': columnInfo['unit'].replace('cm2', 'cm^2')
}
else:
mySrcDict[columnName] = source[columnName]
if removeNaNs:
try:
skipVal = np.isnan(source[columnName])
except:
skipVal = False
if not skipVal:
mySourceDF = mySourceDF.append(mySrcDict, ignore_index=True)
return mySourceDF
def xamin_coneSearch(
RA,
DEC,
FOV,
angleUnits='degrees',
catalog='xray',
removeNullFlux=True,
fluxKey='flux'
):
if angleUnits == 'degrees':
FOVArcmin = FOV * 60
elif angleUnits == 'radians':
FOVArcmin = FOV * 3437.75
elif angleUnits == 'arc':
FOVArcmin = FOV
dirpath = '/home/joel/Documents/pythonDev/modules/ModularFilter/modest/utils'
fieldCommand = 'fields=name,ra,dec,%s' % fluxKey
myCommand = ['java',
'-jar',
dirpath + '/users.jar',
'table=%s' %catalog,
'position=\'%s, %s\'' % (RA, DEC),
'radius=%s' % FOVArcmin,
fieldCommand]
print(myCommand)
# myQuery += ('table=%s' % catalog)
# myQuery += ('position=\'%s, %s\'' % (RA, DEC))
# myQuery += ('radius=%s' % FOV)
# subprocess.call(['java', '-jar', 'users.jar'], env=env)
# process = subprocess.Popen(['java', '-jar', 'users.jar'], stdout=subprocess.PIPE)
process = subprocess.Popen(myCommand, stdout=subprocess.PIPE)
output = process.stdout
print(output)
outputDF = pd.read_csv(output, sep='|', comment='#').dropna(how='any')
outputDF.columns = outputDF.columns.str.strip()
outputDF = outputDF.rename(columns={str.lower(fluxKey):'flux'})
print(outputDF)
for row in range(len(outputDF)):
try:
outputDF.set_value(row, 'flux', outputDF.loc[row]['flux'])
except:
if removeNullFlux is True:
outputDF.drop(row, inplace=True)
# print('Dropping row %i' %(row))
outputDF.reset_index()
return(outputDF)
def chandraPSC_coneSearch(
RA,
DEC,
FOV,
FOVUnits='degrees',
minSignificance=0
):
if FOVUnits == 'degrees':
FOVArcmin = FOV * 60
elif FOVUnits == 'radians':
FOVArcmin = FOV * 3437.75
elif FOVUnits == 'arcmins':
FOVArcmin = FOV
else:
raise ValueError('Unrecougnized unit for FOV. Use either degrees, radians, or arcmins.')
baseQuery=(
'http://cda.cfa.harvard.edu/csccli/getProperties?query='
'SELECT m.name, m.ra, m.dec, m.flux_aper_b, m.significance ' +
'FROM master_source m ' +
'WHERE (' +
'dbo.cone_distance(m.ra,m.dec,%s,%s)<=%s'
%(RA, DEC, FOVArcmin)
)
if minSignificance > 0:
baseQuery = (
baseQuery +
'AND m.significance > %s)'
%minSignificance
)
else:
baseQuery = baseQuery + ')'
print(baseQuery)
response=requests.get(baseQuery)
# t = TemporaryFile()
# with open('./tmp', 'wb') as f:
# f.write(response.content)
with NamedTemporaryFile(mode='wb', delete=False) as f:
f.write(response.content)
resultsDF = pd.read_csv(f.name, sep='\t', comment='#')
f.close()
os.remove(f.name)
return(resultsDF)
| 31.634058 | 101 | 0.554919 |
4a45dbbde48f5ccb5a48b4e92a365c48c3470759 | 97 | py | Python | scraper_extractor/components/helpers.py | jakjus/csgomath_engine | c7fd113f6ce63fa070798ea01b39088bc555fc55 | [
"MIT"
] | null | null | null | scraper_extractor/components/helpers.py | jakjus/csgomath_engine | c7fd113f6ce63fa070798ea01b39088bc555fc55 | [
"MIT"
] | null | null | null | scraper_extractor/components/helpers.py | jakjus/csgomath_engine | c7fd113f6ce63fa070798ea01b39088bc555fc55 | [
"MIT"
] | null | null | null | def text_to_price(text):
return int(text.replace('$', '').replace(',', '').replace('.', ''))
| 32.333333 | 71 | 0.556701 |
82a55b9b13a8674d6f326d940e83cfcff3f48307 | 18,808 | py | Python | pysnmp-with-texts/ASCEND-MIBVRTR-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/ASCEND-MIBVRTR-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/ASCEND-MIBVRTR-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module ASCEND-MIBVRTR-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ASCEND-MIBVRTR-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:28:57 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
configuration, = mibBuilder.importSymbols("ASCEND-MIB", "configuration")
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
TimeTicks, ObjectIdentity, Integer32, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, iso, ModuleIdentity, Bits, NotificationType, Unsigned32, Counter64, MibIdentifier, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "ObjectIdentity", "Integer32", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "iso", "ModuleIdentity", "Bits", "NotificationType", "Unsigned32", "Counter64", "MibIdentifier", "Counter32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
class DisplayString(OctetString):
    # pysmi-generated textual convention: intentionally shadows the
    # DisplayString imported from SNMPv2-TC above with a plain
    # OctetString subclass (no display-hint processing).
    pass
mibvRouterProfile = MibIdentifier((1, 3, 6, 1, 4, 1, 529, 23, 139))
mibvRouterProfileTable = MibTable((1, 3, 6, 1, 4, 1, 529, 23, 139, 1), )
if mibBuilder.loadTexts: mibvRouterProfileTable.setStatus('mandatory')
if mibBuilder.loadTexts: mibvRouterProfileTable.setDescription('A list of mibvRouterProfile profile entries.')
mibvRouterProfileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 529, 23, 139, 1, 1), ).setIndexNames((0, "ASCEND-MIBVRTR-MIB", "vRouterProfile-Name"))
if mibBuilder.loadTexts: mibvRouterProfileEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mibvRouterProfileEntry.setDescription('A mibvRouterProfile entry containing objects that maps to the parameters of mibvRouterProfile profile.')
vRouterProfile_Name = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 139, 1, 1, 1), DisplayString()).setLabel("vRouterProfile-Name").setMaxAccess("readonly")
if mibBuilder.loadTexts: vRouterProfile_Name.setStatus('mandatory')
if mibBuilder.loadTexts: vRouterProfile_Name.setDescription('The name of a VRouter.')
vRouterProfile_Active = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 139, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("vRouterProfile-Active").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vRouterProfile_Active.setStatus('mandatory')
if mibBuilder.loadTexts: vRouterProfile_Active.setDescription('Whether the VROUTER is active or not')
vRouterProfile_VrouterIpAddr = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 139, 1, 1, 3), IpAddress()).setLabel("vRouterProfile-VrouterIpAddr").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vRouterProfile_VrouterIpAddr.setStatus('mandatory')
if mibBuilder.loadTexts: vRouterProfile_VrouterIpAddr.setDescription('System ip address for a VRouter.')
vRouterProfile_PoolSummary = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 139, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("vRouterProfile-PoolSummary").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vRouterProfile_PoolSummary.setStatus('mandatory')
if mibBuilder.loadTexts: vRouterProfile_PoolSummary.setDescription('Flag, when set indicates that host addresses assigned from the pool should be marked as PRIVATE in the routing table and summarized to the world at large via a (constant) network advertisement for the whole pool.')
vRouterProfile_ShareGlobalPool = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 139, 1, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("vRouterProfile-ShareGlobalPool").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vRouterProfile_ShareGlobalPool.setStatus('mandatory')
if mibBuilder.loadTexts: vRouterProfile_ShareGlobalPool.setDescription('Flag, when set indicates that vrouter can share the address pools defined for in IP-GLOBAL profile.')
vRouterProfile_RipPolicy = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 139, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("splitHorzn", 1), ("poisonRvrs", 2)))).setLabel("vRouterProfile-RipPolicy").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vRouterProfile_RipPolicy.setStatus('mandatory')
if mibBuilder.loadTexts: vRouterProfile_RipPolicy.setDescription('Describes whether to use Poison reverse or Split Horizon policy. Global for the vrouter.')
vRouterProfile_SummarizeRipRoutes = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 139, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("vRouterProfile-SummarizeRipRoutes").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vRouterProfile_SummarizeRipRoutes.setStatus('mandatory')
if mibBuilder.loadTexts: vRouterProfile_SummarizeRipRoutes.setDescription('Summarize subnets in RIP broadcasts per RFC 1058/RFC 1009.')
vRouterProfile_RipTrigger = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 139, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("vRouterProfile-RipTrigger").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vRouterProfile_RipTrigger.setStatus('mandatory')
if mibBuilder.loadTexts: vRouterProfile_RipTrigger.setDescription('When set to TRUE (its default value) it causes RIP to send triggered (incremental) updates. Otherwise full table updates are sent when a change in the routing table is noticed.')
vRouterProfile_DomainName = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 139, 1, 1, 8), DisplayString()).setLabel("vRouterProfile-DomainName").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vRouterProfile_DomainName.setStatus('mandatory')
if mibBuilder.loadTexts: vRouterProfile_DomainName.setDescription('The DNS domain name assigned to this vrouter.')
vRouterProfile_SecDomainName = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 139, 1, 1, 9), DisplayString()).setLabel("vRouterProfile-SecDomainName").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vRouterProfile_SecDomainName.setStatus('mandatory')
if mibBuilder.loadTexts: vRouterProfile_SecDomainName.setDescription('The secondary DNS domain name assigned to this vrouter.')
vRouterProfile_DnsPrimaryServer = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 139, 1, 1, 10), IpAddress()).setLabel("vRouterProfile-DnsPrimaryServer").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vRouterProfile_DnsPrimaryServer.setStatus('mandatory')
if mibBuilder.loadTexts: vRouterProfile_DnsPrimaryServer.setDescription('The IP address of the primary DNS server for this vRouter.')
vRouterProfile_DnsSecondaryServer = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 139, 1, 1, 11), IpAddress()).setLabel("vRouterProfile-DnsSecondaryServer").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vRouterProfile_DnsSecondaryServer.setStatus('mandatory')
if mibBuilder.loadTexts: vRouterProfile_DnsSecondaryServer.setDescription('The IP address of the secondary DNS server for this vRouter. This server is used when the primary is not available.')
vRouterProfile_ClientPrimaryDnsServer = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 139, 1, 1, 12), IpAddress()).setLabel("vRouterProfile-ClientPrimaryDnsServer").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vRouterProfile_ClientPrimaryDnsServer.setStatus('mandatory')
if mibBuilder.loadTexts: vRouterProfile_ClientPrimaryDnsServer.setDescription('Default user IP address of the primary DNS server.')
vRouterProfile_ClientSecondaryDnsServer = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 139, 1, 1, 13), IpAddress()).setLabel("vRouterProfile-ClientSecondaryDnsServer").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vRouterProfile_ClientSecondaryDnsServer.setStatus('mandatory')
if mibBuilder.loadTexts: vRouterProfile_ClientSecondaryDnsServer.setDescription('Default user IP address of the secondary DNS server. This server is used when the primary is not available.')
vRouterProfile_AllowAsClientDnsInfo = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 139, 1, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("false", 1), ("true", 2)))).setLabel("vRouterProfile-AllowAsClientDnsInfo").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vRouterProfile_AllowAsClientDnsInfo.setStatus('mandatory')
if mibBuilder.loadTexts: vRouterProfile_AllowAsClientDnsInfo.setDescription('This flag controls if main DNS info should be allowed as Client DNS info.')
vRouterProfile_IpxRoutingEnabled = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 139, 1, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("vRouterProfile-IpxRoutingEnabled").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vRouterProfile_IpxRoutingEnabled.setStatus('mandatory')
if mibBuilder.loadTexts: vRouterProfile_IpxRoutingEnabled.setDescription("TRUE if this vRouter is currently routing IPX. We don't do IPX routing protocols or packet forwarding if FALSE.")
vRouterProfile_IpxDialinPool = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 139, 1, 1, 16), DisplayString()).setLabel("vRouterProfile-IpxDialinPool").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vRouterProfile_IpxDialinPool.setStatus('mandatory')
if mibBuilder.loadTexts: vRouterProfile_IpxDialinPool.setDescription('Dialin Pool Numbers to be shared by the ipx wan interfaces')
vRouterProfile_Action_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 139, 1, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noAction", 1), ("createProfile", 2), ("deleteProfile", 3)))).setLabel("vRouterProfile-Action-o").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vRouterProfile_Action_o.setStatus('mandatory')
if mibBuilder.loadTexts: vRouterProfile_Action_o.setDescription('')
mibvRouterProfile_PoolNameTable = MibTable((1, 3, 6, 1, 4, 1, 529, 23, 139, 2), ).setLabel("mibvRouterProfile-PoolNameTable")
if mibBuilder.loadTexts: mibvRouterProfile_PoolNameTable.setStatus('mandatory')
if mibBuilder.loadTexts: mibvRouterProfile_PoolNameTable.setDescription('A list of mibvRouterProfile__pool_name profile entries.')
mibvRouterProfile_PoolNameEntry = MibTableRow((1, 3, 6, 1, 4, 1, 529, 23, 139, 2, 1), ).setLabel("mibvRouterProfile-PoolNameEntry").setIndexNames((0, "ASCEND-MIBVRTR-MIB", "vRouterProfile-PoolName-Name"), (0, "ASCEND-MIBVRTR-MIB", "vRouterProfile-PoolName-Index-o"))
if mibBuilder.loadTexts: mibvRouterProfile_PoolNameEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mibvRouterProfile_PoolNameEntry.setDescription('A mibvRouterProfile__pool_name entry containing objects that maps to the parameters of mibvRouterProfile__pool_name profile.')
vRouterProfile_PoolName_Name = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 139, 2, 1, 1), DisplayString()).setLabel("vRouterProfile-PoolName-Name").setMaxAccess("readonly")
if mibBuilder.loadTexts: vRouterProfile_PoolName_Name.setStatus('mandatory')
if mibBuilder.loadTexts: vRouterProfile_PoolName_Name.setDescription('')
vRouterProfile_PoolName_Index_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 139, 2, 1, 2), Integer32()).setLabel("vRouterProfile-PoolName-Index-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: vRouterProfile_PoolName_Index_o.setStatus('mandatory')
if mibBuilder.loadTexts: vRouterProfile_PoolName_Index_o.setDescription('')
vRouterProfile_PoolName = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 139, 2, 1, 3), DisplayString()).setLabel("vRouterProfile-PoolName").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vRouterProfile_PoolName.setStatus('mandatory')
if mibBuilder.loadTexts: vRouterProfile_PoolName.setDescription('The name of this pool')
mibvRouterProfile_AssignCountTable = MibTable((1, 3, 6, 1, 4, 1, 529, 23, 139, 3), ).setLabel("mibvRouterProfile-AssignCountTable")
if mibBuilder.loadTexts: mibvRouterProfile_AssignCountTable.setStatus('mandatory')
if mibBuilder.loadTexts: mibvRouterProfile_AssignCountTable.setDescription('A list of mibvRouterProfile__assign_count profile entries.')
mibvRouterProfile_AssignCountEntry = MibTableRow((1, 3, 6, 1, 4, 1, 529, 23, 139, 3, 1), ).setLabel("mibvRouterProfile-AssignCountEntry").setIndexNames((0, "ASCEND-MIBVRTR-MIB", "vRouterProfile-AssignCount-Name"), (0, "ASCEND-MIBVRTR-MIB", "vRouterProfile-AssignCount-Index-o"))
if mibBuilder.loadTexts: mibvRouterProfile_AssignCountEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mibvRouterProfile_AssignCountEntry.setDescription('A mibvRouterProfile__assign_count entry containing objects that maps to the parameters of mibvRouterProfile__assign_count profile.')
vRouterProfile_AssignCount_Name = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 139, 3, 1, 1), DisplayString()).setLabel("vRouterProfile-AssignCount-Name").setMaxAccess("readonly")
if mibBuilder.loadTexts: vRouterProfile_AssignCount_Name.setStatus('mandatory')
if mibBuilder.loadTexts: vRouterProfile_AssignCount_Name.setDescription('')
vRouterProfile_AssignCount_Index_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 139, 3, 1, 2), Integer32()).setLabel("vRouterProfile-AssignCount-Index-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: vRouterProfile_AssignCount_Index_o.setStatus('mandatory')
if mibBuilder.loadTexts: vRouterProfile_AssignCount_Index_o.setDescription('')
vRouterProfile_AssignCount = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 139, 3, 1, 3), Integer32()).setLabel("vRouterProfile-AssignCount").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vRouterProfile_AssignCount.setStatus('mandatory')
if mibBuilder.loadTexts: vRouterProfile_AssignCount.setDescription('The number of host addresses in the above pool. The addresses are contiguous.')
mibvRouterProfile_PoolBaseAddressTable = MibTable((1, 3, 6, 1, 4, 1, 529, 23, 139, 4), ).setLabel("mibvRouterProfile-PoolBaseAddressTable")
if mibBuilder.loadTexts: mibvRouterProfile_PoolBaseAddressTable.setStatus('mandatory')
if mibBuilder.loadTexts: mibvRouterProfile_PoolBaseAddressTable.setDescription('A list of mibvRouterProfile__pool_base_address profile entries.')
mibvRouterProfile_PoolBaseAddressEntry = MibTableRow((1, 3, 6, 1, 4, 1, 529, 23, 139, 4, 1), ).setLabel("mibvRouterProfile-PoolBaseAddressEntry").setIndexNames((0, "ASCEND-MIBVRTR-MIB", "vRouterProfile-PoolBaseAddress-Name"), (0, "ASCEND-MIBVRTR-MIB", "vRouterProfile-PoolBaseAddress-Index-o"))
if mibBuilder.loadTexts: mibvRouterProfile_PoolBaseAddressEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mibvRouterProfile_PoolBaseAddressEntry.setDescription('A mibvRouterProfile__pool_base_address entry containing objects that maps to the parameters of mibvRouterProfile__pool_base_address profile.')
vRouterProfile_PoolBaseAddress_Name = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 139, 4, 1, 1), DisplayString()).setLabel("vRouterProfile-PoolBaseAddress-Name").setMaxAccess("readonly")
if mibBuilder.loadTexts: vRouterProfile_PoolBaseAddress_Name.setStatus('mandatory')
if mibBuilder.loadTexts: vRouterProfile_PoolBaseAddress_Name.setDescription('')
vRouterProfile_PoolBaseAddress_Index_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 139, 4, 1, 2), Integer32()).setLabel("vRouterProfile-PoolBaseAddress-Index-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: vRouterProfile_PoolBaseAddress_Index_o.setStatus('mandatory')
if mibBuilder.loadTexts: vRouterProfile_PoolBaseAddress_Index_o.setDescription('')
vRouterProfile_PoolBaseAddress = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 139, 4, 1, 3), IpAddress()).setLabel("vRouterProfile-PoolBaseAddress").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vRouterProfile_PoolBaseAddress.setStatus('mandatory')
if mibBuilder.loadTexts: vRouterProfile_PoolBaseAddress.setDescription('The base address of a pool of addresses we can use to assign to clients.')
mibBuilder.exportSymbols("ASCEND-MIBVRTR-MIB", vRouterProfile_PoolName_Index_o=vRouterProfile_PoolName_Index_o, vRouterProfile_RipTrigger=vRouterProfile_RipTrigger, mibvRouterProfile_AssignCountTable=mibvRouterProfile_AssignCountTable, vRouterProfile_PoolBaseAddress_Name=vRouterProfile_PoolBaseAddress_Name, mibvRouterProfile_PoolNameTable=mibvRouterProfile_PoolNameTable, vRouterProfile_AssignCount=vRouterProfile_AssignCount, vRouterProfile_SummarizeRipRoutes=vRouterProfile_SummarizeRipRoutes, mibvRouterProfileEntry=mibvRouterProfileEntry, vRouterProfile_IpxDialinPool=vRouterProfile_IpxDialinPool, vRouterProfile_Action_o=vRouterProfile_Action_o, vRouterProfile_SecDomainName=vRouterProfile_SecDomainName, mibvRouterProfile_PoolBaseAddressTable=mibvRouterProfile_PoolBaseAddressTable, vRouterProfile_PoolSummary=vRouterProfile_PoolSummary, vRouterProfile_PoolName=vRouterProfile_PoolName, vRouterProfile_AssignCount_Name=vRouterProfile_AssignCount_Name, vRouterProfile_PoolBaseAddress_Index_o=vRouterProfile_PoolBaseAddress_Index_o, mibvRouterProfile_PoolNameEntry=mibvRouterProfile_PoolNameEntry, mibvRouterProfile_AssignCountEntry=mibvRouterProfile_AssignCountEntry, vRouterProfile_Active=vRouterProfile_Active, vRouterProfile_DnsSecondaryServer=vRouterProfile_DnsSecondaryServer, vRouterProfile_PoolName_Name=vRouterProfile_PoolName_Name, mibvRouterProfileTable=mibvRouterProfileTable, vRouterProfile_PoolBaseAddress=vRouterProfile_PoolBaseAddress, vRouterProfile_DnsPrimaryServer=vRouterProfile_DnsPrimaryServer, vRouterProfile_ShareGlobalPool=vRouterProfile_ShareGlobalPool, vRouterProfile_DomainName=vRouterProfile_DomainName, vRouterProfile_IpxRoutingEnabled=vRouterProfile_IpxRoutingEnabled, vRouterProfile_AssignCount_Index_o=vRouterProfile_AssignCount_Index_o, vRouterProfile_ClientSecondaryDnsServer=vRouterProfile_ClientSecondaryDnsServer, mibvRouterProfile=mibvRouterProfile, mibvRouterProfile_PoolBaseAddressEntry=mibvRouterProfile_PoolBaseAddressEntry, 
vRouterProfile_RipPolicy=vRouterProfile_RipPolicy, vRouterProfile_AllowAsClientDnsInfo=vRouterProfile_AllowAsClientDnsInfo, vRouterProfile_Name=vRouterProfile_Name, vRouterProfile_ClientPrimaryDnsServer=vRouterProfile_ClientPrimaryDnsServer, DisplayString=DisplayString, vRouterProfile_VrouterIpAddr=vRouterProfile_VrouterIpAddr)
| 150.464 | 2,303 | 0.816727 |
e39e4048791d919e6f54d580e4fcb393a3b1aa5d | 292 | py | Python | homeassistant/components/rdw/const.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | homeassistant/components/rdw/const.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | homeassistant/components/rdw/const.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Constants for the RDW integration."""
from __future__ import annotations
from datetime import timedelta
import logging
from typing import Final
# Integration domain used for config entries and entity unique IDs.
DOMAIN: Final = "rdw"

LOGGER = logging.getLogger(__package__)

# Vehicle registration data changes rarely; poll the RDW API hourly.
SCAN_INTERVAL = timedelta(hours=1)

# Config-flow key under which the license plate is stored.
CONF_LICENSE_PLATE: Final = "license_plate"
| 20.857143 | 43 | 0.794521 |
38da9285fa8cc3423bc21847e15ac2f068694827 | 826 | py | Python | core/migrations/0005_auto_20170209_1745.py | grapesmoker/prograces | 466c3ec7061574f9147e13b5d505761efe15cd3b | [
"MIT"
] | 2 | 2017-02-09T14:10:18.000Z | 2017-03-13T01:09:47.000Z | core/migrations/0005_auto_20170209_1745.py | grapesmoker/prograces | 466c3ec7061574f9147e13b5d505761efe15cd3b | [
"MIT"
] | null | null | null | core/migrations/0005_auto_20170209_1745.py | grapesmoker/prograces | 466c3ec7061574f9147e13b5d505761efe15cd3b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-09 17:45
from __future__ import unicode_literals
import django.contrib.gis.db.models.fields
from django.db import migrations
class Migration(migrations.Migration):
    """Replace ``State.geometry`` with typed GeoDjango fields.

    Drops the old generic ``geometry`` column and adds two nullable
    WGS84 (SRID 4326) columns: ``mp_geometry`` (MultiPolygon) and
    ``p_geometry`` (Polygon).
    """

    dependencies = [
        ('core', '0004_auto_20170209_1558'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='state',
            name='geometry',
        ),
        migrations.AddField(
            model_name='state',
            name='mp_geometry',
            field=django.contrib.gis.db.models.fields.MultiPolygonField(null=True, srid=4326),
        ),
        migrations.AddField(
            model_name='state',
            name='p_geometry',
            field=django.contrib.gis.db.models.fields.PolygonField(null=True, srid=4326),
        ),
    ]
| 26.645161 | 94 | 0.606538 |
ffd1e143ce8b4e13861310d9ae0db3b365a4a6a9 | 7,108 | py | Python | Session.py | bopopescu/plib | 9786ccff65f1c745639899f2f6f81ec8aa82d828 | [
"Apache-2.0"
] | null | null | null | Session.py | bopopescu/plib | 9786ccff65f1c745639899f2f6f81ec8aa82d828 | [
"Apache-2.0"
] | null | null | null | Session.py | bopopescu/plib | 9786ccff65f1c745639899f2f6f81ec8aa82d828 | [
"Apache-2.0"
] | null | null | null | # coding=utf8
""" Session Module
Handles sessions used by the services to keep track of login users
"""
# Import future
from __future__ import print_function, absolute_import
__author__ = "Chris Nasr"
__copyright__ = "OuroborosCoding"
__maintainer__ = "Chris Nasr"
__email__ = "ouroboroscode@gmail.com"
__created__ = "2017-06-18"
# Import python modules
from hashlib import md5
# Import pip modules
from redis import StrictRedis
# Import project modules
from . import JSON, Strings
# Module variables
_moRedis = None
# Init function
def init(conf):
    """Init

    Initialises the module-level Redis connection used by all sessions

    Args:
        conf (dict): Keyword arguments passed straight through to
            StrictRedis (host, port, db, ...)

    Returns:
        None
    """

    # Pull in the module variable
    global _moRedis

    # Create the Redis connection
    _moRedis = StrictRedis(**conf)
# ApiSession class
class Session(object):
"""Session
Class for handling session data
Extends:
object
"""
# constructor
def __init__(self, *args, **kwargs):
"""Constructor
Instantiates the internal dict instance
Args:
args (list): List arguments
kwargs (dict): Dict arguments
Returns:
ApiSession
"""
self.__dStore = {}
self.__dStore.update(*args, **kwargs)
# __contains__ method
def __contains__(self, key):
"""__contains__
Returns true if the specific key exists in the session
Args:
key (str): The field to check for
Returns:
bool
"""
return key in self.__dStore
# __delitem__ method
def __delitem__(self, key):
"""__delete__
Deletes a specific key from the session
Args:
key (str): The key to delete
Returns:
None
"""
del self.__dStore[key]
# __getitem__ method
def __getitem__(self, key):
"""__getitem__
Returns a specific key from the dict
Args:
key (str): The key to return
Returns:
mixed
"""
return self.__dStore[key]
# __iter__ method
def __iter__(self):
"""__iter__
Return an iterator for the session
Returns:
iterator
"""
return iter(self.__dStore)
# __len__ method
def __len__(self):
"""__len__
Returns the count of keys in the dict
Returns:
uint
"""
return len(self.__dStore)
# __setitem__ method
def __setitem__(self, key, value):
"""__setitem__
Sets a specific key in the dict
Args:
key (str): The key to store the value under
value (mixed): The value to store under the key
Returns:
None
"""
self.__dStore[key] = value
# __str__ method
def __str__(self):
"""__str__
Returns a string representation of the session
Returns:
str
"""
return str(self.__dStore)
# addLock method
def addLock(self, _type, _id):
"""Add Lock
Adds a lock to a particular type given its ID
Args:
_type (str): The type of object to lock
_id (mixed): The unique ID of the object type
Returns:
None
"""
if isinstance(_id, (list,tuple)):
_id = '|'.join([str(i) for i in _id])
_moRedis.sadd('locked_%ss' % str(_type), _id)
# admin classmethod
    # admin classmethod
    @classmethod
    def admin(cls):
        """Admin

        Returns a session with full admin access so that cli scripts can run
        services without the need of logging in

        Returns:
            ApiSession
        """

        # Make a new session instance with full access.
        # NOTE(review): 15 in each [0,0,0,0,15,0] entry looks like an
        # all-rights bitmask — confirm against the permissions module.
        oSession = cls({
            "token": md5(Strings.random(16)).hexdigest(),
            "login": { "id": 0, "email": "admin@dovetail.co" },
            "permissions": {
                "tree": {
                    "acc_invoice": [[0,0,0,0,15,0]],
                    "acc_payment": [[0,0,0,0,15,0]],
                    "appointment": [[0,0,0,0,15,0]],
                    "chat": [[0,0,0,0,15,0]],
                    "clinic": [[0,0,0,0,15,0]],
                    "clinic_admin": [[0,0,0,0,15,0]],
                    "edi": [[0,0,0,0,15,0]],
                    "eprescribe":[[0,0,0,0,15,0]],
                    "email": [[0,0,0,0,15,0]],
                    "exam": [[0,0,0,0,15,0]],
                    "exam_clinical": [[0,0,0,0,15,0]],
                    "fg_clinic": [[0,0,0,0,15,0]],
                    "fg_region": [[0,0,0,0,15,0]],
                    "exam_section": [[0,0,0,0,15,0]],
                    "group": [[0,0,0,0,15,0]],
                    "insurance":[[0,0,0,0,15,0]],
                    "itp": [[0,0,0,0,15,0]],
                    "itp_clinic": [[0,0,0,0,15,0]],
                    "itp_tpl": [[0,0,0,0,15,0]],
                    "login": [[0,0,0,0,15,0]],
                    "lookup": [[0,0,0,0,15,0]],
                    "media": [[0,0,0,0,15,0]],
                    "mh_dental": [[0,0,0,0,15,0]],
                    "notes": [[0,0,0,0,15,0]],
                    "patient": [[0,0,0,0,15,0]],
                    "patientLists": [[0,0,0,0,15,0]],
                    "pc_punch": [[0,0,0,0,15,0]],
                    "pc_rate": [[0,0,0,0,15,0]],
                    "permission": [[0,0,0,0,15,0]],
                    "practitioner": [[0,0,0,0,15,0]],
                    "reporting": [[0,0,0,0,15,0]],
                    "sms": [[0,0,0,0,15,0]],
                    "support":[[0,0,0,0,15,0]],
                    "xray": [[0,0,0,0,15,0]]
                },
                "groups": {
                }
            }
        })

        # Save it in cache
        oSession.save()

        # Return it
        return oSession
# close method
def close(self):
"""Close
Closes the session, deleting it from the cache
Returns:
None
"""
# Delete the record from Redis
_moRedis.delete(self.__dStore['token'])
# create classmethod
    @classmethod
    def create(cls):
        """Create

        Create a new session and returns it

        Returns:
            ApiSession
        """

        # Create a new instance
        oRet = cls()

        # Generate a random string
        sRand = Strings.random(16)

        # Add the token to the session.
        # NOTE(review): under Python 3 md5() requires bytes; this assumes
        # Strings.random() returns a value md5 accepts (Python 2 era code)
        # — verify before upgrading.
        oRet['token'] = md5(sRand).hexdigest()

        # Now return the new session
        return oRet
# isLocked method
def isLocked(self, _type, _id):
"""Is Locked
Returns true if the given ID is locked
Args:
_type (str): The type of object to check
_id (mixed): A unique ID for the given type
Returns:
bool
"""
if isinstance(_id, (list,tuple)):
_id = '|'.join([str(i) for i in _id])
return _moRedis.sismember('locked_%ss' % str(_type), _id)
# removeLock method
def removeLock(self, _type, _id):
"""Remove Lock
Removes a lock from a particular type given the ID
Args:
_type (str): The type of object to remove the lock on
_id (mixed): A unique ID for the given type
Returns:
bool
"""
if isinstance(_id, (list,tuple)):
_id = '|'.join([str(i) for i in _id])
_moRedis.srem('locked_%ss' % str(_type), _id)
# save method
def save(self):
"""Save
Saves the session so it can be fetched by other processes
Returns:
None
"""
# Dump the data to a JSON string
sJSON = JSON.encode(self.__dStore)
# @TODO reduce session time (I need it long for development)
_moRedis.setex(self.__dStore['token'], 86400, sJSON)
# start classmethod
@classmethod
def start(cls, token):
"""Start
Fetches an existing session if it exists and is valid, else it creates a
new one, and returns it
Args:
token (str): The unique token of an existing session
Returns:
ApiSession
"""
# Fetch from Redis
o = _moRedis.get(token)
# If there's no session or it expired
if o == None:
return None
# Else decode the JSON and create a new instance with it
return cls(JSON.decode(o))
# update method
def update(self):
"""Update
Update the session with the latest data from Redis
Returns:
None
"""
# Fetch from Redis
o = _moRedis.get(self.__dStore['token'])
# If there's no session or it expired
if o == None:
self.__dStore = {}
# Else decode the JSON and update the current instance
self.__dStore = JSON.decode(o)
| 18.656168 | 74 | 0.629854 |
c6c27652960a11db25ee327adea9982f314ea790 | 53 | py | Python | maci/replay_buffers/__init__.py | Faiz/mapr2 | 30fb37e1807d47f3678b5cab80ac60c74c4e37f7 | [
"Apache-2.0"
] | 1 | 2021-09-03T16:33:12.000Z | 2021-09-03T16:33:12.000Z | maci/replay_buffers/__init__.py | Faiz/mapr2 | 30fb37e1807d47f3678b5cab80ac60c74c4e37f7 | [
"Apache-2.0"
] | null | null | null | maci/replay_buffers/__init__.py | Faiz/mapr2 | 30fb37e1807d47f3678b5cab80ac60c74c4e37f7 | [
"Apache-2.0"
] | null | null | null | from .simple_replay_buffer import SimpleReplayBuffer
| 26.5 | 52 | 0.90566 |
6e2199f2c294bd95831bb7566aecc36004dc5c6a | 5,550 | py | Python | server/tasks.py | eggmoid/GalleryManage-FastAPI | fa50cef623a03aed2d7b4ac9c76d74cfb9d898eb | [
"MIT"
] | null | null | null | server/tasks.py | eggmoid/GalleryManage-FastAPI | fa50cef623a03aed2d7b4ac9c76d74cfb9d898eb | [
"MIT"
] | 6 | 2021-08-06T16:30:03.000Z | 2021-12-11T05:30:02.000Z | server/tasks.py | eggmoid/GalleryManage-FastAPI | fa50cef623a03aed2d7b4ac9c76d74cfb9d898eb | [
"MIT"
] | null | null | null | import json
import os
import re
import requests
from celery import Celery
from celery.schedules import crontab
# Configure Django settings before Celery reads them.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'server.settings')

app = Celery('task')
app.config_from_object('django.conf:settings', namespace='CELERY')
# BUG FIX: the original read `app.autodiscover_tasks` without calling it,
# so installed apps' tasks modules were never auto-discovered.
app.autodiscover_tasks()
@app.task
def backup_post(_from=None):
    """Mirror rows from POST into the BPOST backup table.

    Args:
        _from: Optional post number. When omitted (or greater than the
            highest backed-up number) only new rows are appended; otherwise
            rows from ``_from`` onward are re-copied.
    """
    from django.db import connection

    with connection.cursor() as cursor:
        cursor.execute("SELECT MAX(NUM) FROM BPOST;")
        max_num = cursor.fetchone()[0]
        if max_num is None:
            # BPOST is empty: seed it with the whole POST table. The
            # original interpolated None here and produced "NUM > None".
            cursor.execute("INSERT INTO BPOST (SELECT * FROM POST);")
        elif not _from or (int(_from) > int(max_num)):
            # Append only the rows newer than the last backup.
            # Values are bound as parameters instead of f-string
            # interpolation to avoid SQL injection.
            cursor.execute(
                "INSERT INTO BPOST (SELECT * FROM POST WHERE NUM > %s);",
                [max_num],
            )
        else:
            # Re-sync from _from onward: drop then re-copy.
            cursor.execute("DELETE FROM BPOST WHERE NUM >= %s;", [_from])
            cursor.execute(
                "INSERT INTO BPOST (SELECT * FROM POST WHERE NUM >= %s);",
                [_from],
            )
def map_post(e: str):
    """Parse one gallery-list ``ub-content`` row fragment into post fields.

    Returns a list: [num, title, name, id+ip, date, comment_count,
    gall_count, gall_recommend]. Missing fields fall back to 0 / "".
    """
    def first(pattern, default=""):
        # First regex capture in the fragment, or the default when absent.
        found = re.findall(pattern, e)
        return found[0] if found else default

    num = int(first(r'no=(\d+)', 0))
    title = first(r'</em>(.*?)</a>')
    name = first(r'data-nick="(.*?)"')
    uid = first(r'data-uid="(.*?)"')
    ip = first(r'data-ip="(.*?)"')
    date = first(r'gall_date" title="(.*?)"')
    comment_count = int(first(r'reply_num">\[(\d*?)\]', 0))
    gall_count = int(first(r'gall_count">(\d*?)<', 0))
    gall_recommend = int(first(r'gall_recommend">(\d*?)<', 0))
    return [
        num, title, name, uid + ip, date,
        comment_count, gall_count, gall_recommend
    ]
@app.task
def save_detail(num, refresh=False, ban=False):
    """Fetch a post's mobile page, sanitise it and cache it in DetailPost.

    Args:
        num: Post number to fetch.
        refresh: When True, overwrite an existing cached detail as long as
            the fetch succeeded and the post was not deleted.
        ban: When True, additionally notify the local block service.

    Returns:
        False when the Post row does not exist; otherwise None.
    """
    from api.models.detail_post.models import DetailPost
    from api.models.post.models import Post

    # URL = f"https://gall.dcinside.com/mgallery/board/view/?id=girlgroup&no={num}"
    URL = f"https://m.dcinside.com/board/girlgroup/{num}"
    try:
        post = Post.objects.get(num=num)
    except Post.DoesNotExist:
        return False

    # Mobile user-agent: the desktop page has a different markup.
    _resp = requests.get(
        URL,
        headers={
            "User-Agent":
            "Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_1 like Mac OS X) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.0 Mobile/14E304 Safari/602.1"
        })
    # Strip scripts, then rewrite lazy-loaded images: promote the
    # data-original URL into src and normalise dcimgN CDN hosts.
    resp = re.sub('<script.*?</script>', '', _resp.text, flags=re.DOTALL)
    resp = re.sub(r'<img src=("?.*?"?).*?data-original="?(.*?)"? ',
                  r'<img src="\2" ', resp)
    resp = re.sub(r'<img src="?https://dcimg\d\.dcinside\.co\.kr(.*?)"? ',
                  r'<img src="https://images.dcinside.com\1" ', resp)
    # resp = re.sub(
    #     r'<img src=("?https://nstatic.dcinside.com/dc/m/img/dccon_loading_nobg200.png"?).*?data-original="?(.*?)"? ',
    #     r'<img src="\2" ',
    #     resp,
    #     flags=re.DOTALL)
    (detail, created) = DetailPost.objects.get_or_create(num=post)
    # Only overwrite on refresh if the fetch succeeded and the post still
    # exists (the deleted-post error page contains /derror/deleted/...).
    if created or (refresh and
                   ("/derror/deleted/girlgroup/minor" not in resp) and
                   resp != "" and _resp.status_code == 200):
        detail.detail = resp
        detail.save()
    if ban:
        requests.post("http://localhost:4567/block", data=json.dumps({'no': num}))
@app.task
def sync_gall(page=1, page_end=0):
    """Crawl gallery list pages and upsert Post rows.

    Args:
        page: First list page to fetch.
        page_end: Optional last page (inclusive). When 0 the crawl stops
            as soon as a page contains no posts newer than the newest one
            already stored.
    """
    from api.models.post.models import Post
    from django.conf import settings
    URL = "https://gall.dcinside.com/mgallery/board/lists/?id=girlgroup&list_num=100&page="
    # MONITOR is a Redis client holding title patterns to watch/ban.
    MONITOR = settings.MONITOR
    # NOTE(review): sdiff with a single key behaves like smembers here —
    # presumably intentional; verify.
    MONITOR_TITLE = [title.decode('utf-8') for title in MONITOR.sdiff('TITLE')]
    MONITOR_BAN = [title.decode('utf-8') for title in MONITOR.sdiff('BAN')]
    try:
        last_num = Post.objects.last().num
    except AttributeError:
        # Empty table: bootstrap last_num from the first fetched page.
        resp = requests.get(f"{URL}{page}",
                            headers={
                                "User-Agent": "Mozilla/5.0"
                            }).text
        source = list(
            map(map_post, re.findall('ub-content.*?</tr>',
                                     resp,
                                     flags=re.DOTALL)))
        last_num = source[0][0]
    while True:
        resp = requests.get(f"{URL}{page}",
                            headers={
                                "User-Agent": "Mozilla/5.0"
                            }).text
        source = list(
            map(map_post, re.findall('ub-content.*?</tr>',
                                     resp,
                                     flags=re.DOTALL)))
        # Stop past the requested page range...
        if page_end and page > page_end:
            return
        # ...or, in open-ended mode, once no post is newer than last_num.
        if not page_end and not len([e for e in source if e[0] > last_num]):
            return
        for e in source:
            # e = [num, title, name, idip, date, comments, views, votes]
            (post, _) = Post.objects.get_or_create(num=e[0])
            post.title = e[1]
            post.name = e[2]
            post.idip = e[3]
            post.date = e[4]
            post.comment_count = e[5]
            post.gall_count = e[6]
            post.gall_recommend = e[7]
            post.save()
            # Queue detail fetch for monitored titles; ban takes priority.
            if [e[1] for title in MONITOR_BAN if re.search(title, e[1])]:
                save_detail.delay(e[0], True, True)
            elif [e[1] for title in MONITOR_TITLE if re.search(title, e[1])]:
                save_detail.delay(e[0], True)
        page += 1
@app.task(bind=True)
def debug_task(self):
    """Print the raw Celery request context, for debugging workers."""
    print('Request: {!r}'.format(self.request))
# Periodic crawl schedule: every 2 minutes during the day (08:00-00:59)
# and every 3 minutes overnight (01:00-07:59).
app.conf.beat_schedule = {
    'daytime': {
        'task': 'server.tasks.sync_gall',
        'schedule': crontab(minute='*/2', hour='8-23,0'),
    },
    # NOTE(review): 'nignttime' is a typo for 'nighttime'; the key is only
    # a beat entry label, but renaming it would change the stored entry
    # name, so it is left as-is.
    'nignttime': {
        'task': 'server.tasks.sync_gall',
        'schedule': crontab(minute='*/3', hour='1-7'),
    },
}
| 36.513158 | 155 | 0.538198 |
5af8e2f3a39a7f7f68c7047cce936866bd38823b | 2,464 | py | Python | supreme/resolve/tests/test_operators.py | KirillDZR/supreme | c296722599363bd0cbcce6877bd9de9b066cb74b | [
"BSD-3-Clause"
] | 95 | 2015-01-17T09:48:20.000Z | 2021-11-07T16:02:38.000Z | supreme/resolve/tests/test_operators.py | KirillDZR/supreme | c296722599363bd0cbcce6877bd9de9b066cb74b | [
"BSD-3-Clause"
] | 4 | 2015-10-23T15:13:34.000Z | 2019-09-23T22:47:10.000Z | supreme/resolve/tests/test_operators.py | KirillDZR/supreme | c296722599363bd0cbcce6877bd9de9b066cb74b | [
"BSD-3-Clause"
] | 34 | 2015-02-22T20:54:40.000Z | 2022-02-27T13:39:32.000Z | import numpy as np
from numpy.testing import *
import scipy.ndimage as ndi
import scipy.linalg
import scipy.sparse as sparse
from supreme.geometry.window import gauss
from supreme.resolve.operators import bilinear, convolve, block_diag
from supreme.io import imread
import os
HR = imread(os.path.join(os.path.dirname(__file__), 'peppers_40.png'),
flatten=True)
def test_bilinear():
    """Downscale-by-2 homography operator roughly matches scipy.ndimage.zoom.

    All shape/index arithmetic uses floor division (``//``): under Python 3
    plain ``/`` yields floats, which are invalid as array indices and as
    reshape dimensions.
    """
    H = np.array([[1/2., 0, 0],
                  [0, 1/2., 0],
                  [0, 0, 1]])
    A = bilinear(HR.shape[0], HR.shape[1],
                 [H, H],
                 HR.shape[0] // 2, HR.shape[1] // 2)
    p = np.prod(HR.shape)
    # Two homographies were supplied, so the operator stacks two low-res outputs.
    assert_equal(A.shape, (2 * p // 4, np.prod(HR.shape)))
    # Second block of rows corresponds to the second (identical) homography.
    HR_small = (A[p // 4:, :] * HR.flat).reshape(np.array(HR.shape) // 2)
    err_norm = np.linalg.norm(ndi.zoom(HR, 0.5) - HR_small)
    err_norm /= np.prod(HR_small.shape)
    # Loose per-pixel tolerance: interpolation schemes differ slightly.
    assert err_norm < 2
def test_convolve():
    """Sparse convolution operator approximates scipy.ndimage.convolve."""
    kernel = np.array([[0, 1, 0],
                       [1, 2, 1],
                       [0, 1, 0]]) / 6.
    A = convolve(40, 40, kernel)
    via_operator = (A * HR.flat).reshape(HR.shape)
    via_scipy = ndi.convolve(HR, kernel)
    # Average per-pixel deviation must stay small.
    assert np.linalg.norm(via_operator - via_scipy) / np.prod(HR.shape) < 0.5
def test_block_diag():
    """Sparse block-diagonal operator reproduces scipy.linalg.block_diag."""
    X = np.array([[1, 2, 3],
                  [4, 5, 6]])
    expected = scipy.linalg.block_diag(X, X, X)
    operator = block_diag(X.shape[0], X.shape[1],
                          X.shape[0] * 3, X.shape[1] * 3)
    assert_array_equal((operator * X.flat).reshape(np.array(X.shape) * 3),
                       expected)
if __name__ == "__main__":
    # Visual demo: build a combined blur + rotate/downscale operator, apply
    # it to the test image, and display the forward and adjoint projections.
    scale = 3
    # 5 degrees expressed in radians.
    theta = 5 / 180. * np.pi
    C = np.cos(theta)
    S = np.sin(theta)
    tx, ty = 0, 0  # no translation component in the homography
    # Bilinear sampling operator for a single homography that rotates by
    # theta and shrinks by `scale`.
    # NOTE(review): `HR.shape[0] / scale` is integer division only under
    # Python 2; under Python 3 this passes floats as output dimensions --
    # confirm `bilinear` tolerates that, or switch to `//`.
    A = bilinear(HR.shape[0], HR.shape[1],
                 [np.array([[C/scale, -S, tx],
                            [S, C/scale, ty],
                            [0, 0, 1.]])],
                 HR.shape[0] / scale, HR.shape[1] / scale)
    # Gaussian blur operator (window size 5, std=1).  Note this rebinds C,
    # which until here held cos(theta).
    C = convolve(HR.shape[0], HR.shape[1], gauss(5, std=1))
    import matplotlib.pyplot as plt
    # Sparsity pattern of the combined blur-then-sample operator.
    plt.spy((A * C).todense())
    plt.figure()
    fwd = (A * C * HR.flat)  # forward projection: blur, then sample
    rev = C.T * A.T * fwd    # adjoint (back-projection) of the result
    plt.subplot(1, 3, 1)
    plt.imshow(HR, cmap=plt.cm.gray, interpolation='nearest')
    plt.subplot(1, 3, 2)
    plt.imshow(fwd.reshape(np.array(HR.shape) / scale),
               interpolation='nearest', cmap=plt.cm.gray)
    plt.subplot(1, 3, 3)
    plt.imshow(rev.reshape(HR.shape),
               interpolation='nearest', cmap=plt.cm.gray)
    plt.show()
| 28 | 71 | 0.525974 |
c8a1fea820496c23232c34c914affd2ceb7ab2c2 | 16,915 | py | Python | resources/lib/services/msl/msl_handler.py | locutus32/plugin.video.netflix | 68ed615e362fd8687f6bc678dd9efb1aa27f65a8 | [
"MIT"
] | null | null | null | resources/lib/services/msl/msl_handler.py | locutus32/plugin.video.netflix | 68ed615e362fd8687f6bc678dd9efb1aa27f65a8 | [
"MIT"
] | null | null | null | resources/lib/services/msl/msl_handler.py | locutus32/plugin.video.netflix | 68ed615e362fd8687f6bc678dd9efb1aa27f65a8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Author: trummerjo
# Module: MSLHttpRequestHandler
# Created on: 26.01.2017
# License: MIT https://goo.gl/5bMj3H
"""Proxy service to convert manifest and provide license data"""
from __future__ import absolute_import, division, unicode_literals
import re
import zlib
import json
import time
import base64
from functools import wraps
import requests
import xbmcaddon
from resources.lib.globals import g
import resources.lib.common as common
import resources.lib.kodi.ui as ui
import resources.lib.cache as cache
from .request_builder import MSLRequestBuilder
from .profiles import enabled_profiles
from .converter import convert_to_dash
from .exceptions import MSLError
try: # Python 2
unicode
except NameError: # Python 3
unicode = str # pylint: disable=redefined-builtin
CHROME_BASE_URL = 'https://www.netflix.com/nq/msl_v1/cadmium/'
ENDPOINTS = {
'manifest': CHROME_BASE_URL + 'pbo_manifests/%5E1.0.0/router', # "pbo_manifests/^1.0.0/router"
'license': CHROME_BASE_URL + 'pbo_licenses/%5E1.0.0/router'
}
def display_error_info(func):
"""Decorator that catches errors raise by the decorated function,
displays an error info dialog in the UI and reraises the error"""
# pylint: disable=missing-docstring
@wraps(func)
def error_catching_wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as exc:
ui.show_error_info(common.get_local_string(30028), unicode(exc),
unknown_error=not(unicode(exc)),
netflix_error=isinstance(exc, MSLError))
raise
return error_catching_wrapper
class MSLHandler(object):
"""Handles session management and crypto for license and manifest
requests"""
last_license_url = ''
last_drm_context = ''
last_playback_context = ''
session = requests.session()
def __init__(self):
# pylint: disable=broad-except
self.request_builder = None
try:
msl_data = json.loads(common.load_file('msl_data.json'))
common.info('Loaded MSL data from disk')
except Exception:
msl_data = None
try:
self.request_builder = MSLRequestBuilder(msl_data)
# Addon just installed, the service starts but there is no esn
if g.get_esn():
self.check_mastertoken_validity()
except Exception:
import traceback
common.error(traceback.format_exc())
common.register_slot(
signal=common.Signals.ESN_CHANGED,
callback=self.perform_key_handshake)
def check_mastertoken_validity(self):
"""Return the mastertoken validity and executes a new key handshake when necessary"""
if self.request_builder.crypto.mastertoken:
time_now = time.time()
renewable = self.request_builder.crypto.renewal_window < time_now
expired = self.request_builder.crypto.expiration <= time_now
else:
renewable = False
expired = True
if expired:
if not self.request_builder.crypto.mastertoken:
common.debug('Stored MSL data not available, a new key handshake will be performed')
self.request_builder = MSLRequestBuilder()
else:
common.debug('Stored MSL data is expired, a new key handshake will be performed')
if self.perform_key_handshake():
self.request_builder = MSLRequestBuilder(json.loads(
common.load_file('msl_data.json')))
return self.check_mastertoken_validity()
return {'renewable': renewable, 'expired': expired}
@display_error_info
@common.time_execution(immediate=True)
def perform_key_handshake(self, data=None):
"""Perform a key handshake and initialize crypto keys"""
# pylint: disable=unused-argument
esn = data or g.get_esn()
if not esn:
common.info('Cannot perform key handshake, missing ESN')
return False
common.debug('Performing key handshake. ESN: {}', esn)
response = _process_json_response(
self._post(ENDPOINTS['manifest'],
self.request_builder.handshake_request(esn)))
headerdata = json.loads(
base64.standard_b64decode(response['headerdata']))
self.request_builder.crypto.parse_key_response(
headerdata, not common.is_edge_esn(esn))
common.debug('Key handshake successful')
return True
@display_error_info
@common.time_execution(immediate=True)
def load_manifest(self, viewable_id):
"""
Loads the manifets for the given viewable_id and
returns a mpd-XML-Manifest
:param viewable_id: The id of of the viewable
:return: MPD XML Manifest or False if no success
"""
manifest = self._load_manifest(viewable_id, g.get_esn())
# Disable 1080p Unlock for now, as it is broken due to Netflix changes
# if (g.ADDON.getSettingBool('enable_1080p_unlock') and
# not g.ADDON.getSettingBool('enable_vp9_profiles') and
# not has_1080p(manifest)):
# common.debug('Manifest has no 1080p viewables, trying unlock')
# manifest = self.get_edge_manifest(viewable_id, manifest)
return self.__tranform_to_dash(manifest)
def get_edge_manifest(self, viewable_id, chrome_manifest):
"""Load a manifest with an EDGE ESN and replace playback_context and
drm_context"""
common.debug('Loading EDGE manifest')
esn = g.get_edge_esn()
common.debug('Switching MSL data to EDGE')
self.perform_key_handshake(esn)
manifest = self._load_manifest(viewable_id, esn)
manifest['playbackContextId'] = chrome_manifest['playbackContextId']
manifest['drmContextId'] = chrome_manifest['drmContextId']
common.debug('Successfully loaded EDGE manifest')
common.debug('Resetting MSL data to Chrome')
self.perform_key_handshake()
return manifest
@common.time_execution(immediate=True)
def _load_manifest(self, viewable_id, esn):
cache_identifier = esn + '_' + unicode(viewable_id)
try:
# The manifest must be requested once and maintained for its entire duration
manifest = g.CACHE.get(cache.CACHE_MANIFESTS, cache_identifier, False)
common.debug('Manifest for {} with ESN {} obtained from the cache', viewable_id, esn)
# Save the manifest to disk as reference
common.save_file('manifest.json', json.dumps(manifest).encode('utf-8'))
return manifest
except cache.CacheMiss:
pass
common.debug('Requesting manifest for {} with ESN {}', viewable_id, esn)
profiles = enabled_profiles()
import pprint
common.info('Requested profiles:\n{}', pprint.pformat(profiles, indent=2))
ia_addon = xbmcaddon.Addon('inputstream.adaptive')
hdcp = ia_addon is not None and ia_addon.getSetting('HDCPOVERRIDE') == 'true'
# TODO: Future implementation when available,
# request the HDCP version from Kodi through a function
# in CryptoSession currently not implemented
# so there will be no more need to use the HDCPOVERRIDE = true
hdcp_version = []
if not g.ADDON.getSettingBool('enable_force_hdcp') and hdcp:
hdcp_version = ['1.4']
if g.ADDON.getSettingBool('enable_force_hdcp') and hdcp:
hdcp_version = ['2.2']
timestamp = int(time.time() * 10000)
manifest_request_data = {
'version': 2,
'url': '/manifest',
'id': timestamp,
'languages': [g.LOCAL_DB.get_value('locale_id')],
'params': {
'type': 'standard',
'viewableId': [viewable_id],
'profiles': profiles,
'flavor': 'PRE_FETCH',
'drmType': 'widevine',
'drmVersion': 25,
'usePsshBox': True,
'isBranching': False,
'useHttpsStreams': False,
'imageSubtitleHeight': 1080,
'uiVersion': 'shakti-v93016808',
'uiPlatform': 'SHAKTI',
'clientVersion': '6.0016.426.011',
'desiredVmaf': 'plus_lts', # phone_plus_exp can be used to mobile, not tested
'supportsPreReleasePin': True,
'supportsWatermark': True,
'supportsUnequalizedDownloadables': True,
'showAllSubDubTracks': False,
'titleSpecificData': {
viewable_id: {
'unletterboxed': True
}
},
'videoOutputInfo': [{
'type': 'DigitalVideoOutputDescriptor',
'outputType': 'unknown',
'supportedHdcpVersions': hdcp_version,
'isHdcpEngaged': hdcp
}],
'preferAssistiveAudio': False,
'isNonMember': False
},
'echo': ''
}
# Get and check mastertoken validity
mt_validity = self.check_mastertoken_validity()
manifest = self._chunked_request(ENDPOINTS['manifest'],
manifest_request_data,
esn,
mt_validity)
# Save the manifest to disk as reference
common.save_file('manifest.json', json.dumps(manifest).encode('utf-8'))
# Save the manifest to the cache to retrieve it during its validity
expiration = int(manifest['expiration'] / 1000)
g.CACHE.add(cache.CACHE_MANIFESTS, cache_identifier, manifest, eol=expiration)
if 'result' in manifest:
return manifest['result']
return manifest
@display_error_info
@common.time_execution(immediate=True)
def get_license(self, challenge, sid):
"""
Requests and returns a license for the given challenge and sid
:param challenge: The base64 encoded challenge
:param sid: The sid paired to the challengew
:return: Base64 representation of the licensekey or False unsuccessfull
"""
common.debug('Requesting license')
timestamp = int(time.time() * 10000)
license_request_data = {
'version': 2,
'url': self.last_license_url,
'id': timestamp,
'languages': [g.LOCAL_DB.get_value('locale_id')],
'params': [{
'sessionId': sid,
'clientTime': int(timestamp / 10000),
'challengeBase64': challenge,
'xid': str(timestamp + 1610)
}],
'echo': 'sessionId'
}
response = self._chunked_request(ENDPOINTS['license'], license_request_data, g.get_esn())
return response[0]['licenseResponseBase64']
@common.time_execution(immediate=True)
def __tranform_to_dash(self, manifest):
self.last_license_url = manifest['links']['license']['href']
self.last_playback_context = manifest['playbackContextId']
self.last_drm_context = manifest['drmContextId']
return convert_to_dash(manifest)
@common.time_execution(immediate=True)
def _chunked_request(self, endpoint, request_data, esn, mt_validity=None):
"""Do a POST request and process the chunked response"""
chunked_response = self._process_chunked_response(
self._post(endpoint, self.request_builder.msl_request(request_data, esn)),
mt_validity['renewable'] if mt_validity else None)
return chunked_response['result']
@common.time_execution(immediate=True)
def _post(self, endpoint, request_data):
"""Execute a post request"""
common.debug('Executing POST request to {}', endpoint)
start = time.clock()
response = self.session.post(endpoint, request_data)
common.debug('Request took {}s', time.clock() - start)
common.debug('Request returned response with status {}', response.status_code)
response.raise_for_status()
return response
# pylint: disable=unused-argument
@common.time_execution(immediate=True)
def _process_chunked_response(self, response, mt_renewable):
"""Parse and decrypt an encrypted chunked response. Raise an error
if the response is plaintext json"""
try:
# if the json() does not fail we have an error because
# the expected response is a chunked json response
return _raise_if_error(response.json())
except ValueError:
# json() failed so parse and decrypt the chunked response
common.debug('Received encrypted chunked response')
response = _parse_chunks(response.text)
# TODO: sending for the renewal request is not yet implemented
# if mt_renewable:
# # Check if mastertoken is renewed
# self.request_builder.crypto.compare_mastertoken(response['header']['mastertoken'])
decrypted_response = _decrypt_chunks(response['payloads'],
self.request_builder.crypto)
return _raise_if_error(decrypted_response)
@common.time_execution(immediate=True)
def _process_json_response(response):
"""Execute a post request and expect a JSON response"""
try:
return _raise_if_error(response.json())
except ValueError:
raise MSLError('Expected JSON response, got {}'.format(response.text))
def _raise_if_error(decoded_response):
raise_error = False
# Catch a manifest/chunk error
if any(key in decoded_response for key in ['error', 'errordata']):
raise_error = True
# Catch a license error
if 'result' in decoded_response and isinstance(decoded_response.get('result'), list):
if 'error' in decoded_response['result'][0]:
raise_error = True
if raise_error:
common.error('Full MSL error information:')
common.error(json.dumps(decoded_response))
raise MSLError(_get_error_details(decoded_response))
return decoded_response
def _get_error_details(decoded_response):
# Catch a chunk error
if 'errordata' in decoded_response:
return json.loads(
base64.standard_b64decode(
decoded_response['errordata']))['errormsg']
# Catch a manifest error
if 'error' in decoded_response:
if decoded_response['error'].get('errorDisplayMessage'):
return decoded_response['error']['errorDisplayMessage']
# Catch a license error
if 'result' in decoded_response and isinstance(decoded_response.get('result'), list):
if 'error' in decoded_response['result'][0]:
if decoded_response['result'][0]['error'].get('errorDisplayMessage'):
return decoded_response['result'][0]['error']['errorDisplayMessage']
return 'Unhandled error check log.'
@common.time_execution(immediate=True)
def _parse_chunks(message):
header = message.split('}}')[0] + '}}'
payloads = re.split(',\"signature\":\"[0-9A-Za-z=/+]+\"}',
message.split('}}')[1])
payloads = [x + '}' for x in payloads][:-1]
return {'header': header, 'payloads': payloads}
@common.time_execution(immediate=True)
def _decrypt_chunks(chunks, crypto):
decrypted_payload = ''
for chunk in chunks:
payloadchunk = json.loads(chunk)
payload = payloadchunk.get('payload')
decoded_payload = base64.standard_b64decode(payload)
encryption_envelope = json.loads(decoded_payload)
# Decrypt the text
plaintext = crypto.decrypt(
base64.standard_b64decode(encryption_envelope['iv']),
base64.standard_b64decode(encryption_envelope.get('ciphertext')))
# unpad the plaintext
plaintext = json.loads(plaintext)
data = plaintext.get('data')
# uncompress data if compressed
if plaintext.get('compressionalgo') == 'GZIP':
decoded_data = base64.standard_b64decode(data)
data = zlib.decompress(decoded_data, 16 + zlib.MAX_WBITS).decode('utf-8')
else:
data = base64.standard_b64decode(data).decode('utf-8')
decrypted_payload += data
return json.loads(decrypted_payload)
def has_1080p(manifest):
"""Return True if any of the video tracks in manifest have a 1080p profile
available, else False"""
return any(video['width'] >= 1920
for video in manifest['videoTracks'][0]['downloadables'])
| 41.055825 | 100 | 0.628259 |
d3cada72860e1b3276633b196a194a70b8658418 | 1,749 | py | Python | problems/EE/auto/problem7_EE.py | sunandita/ICAPS_Summer_School_RAE_2020 | a496b62185bcfdd2c76eb7986ae99cfa85708d28 | [
"BSD-3-Clause"
] | 5 | 2020-10-15T14:40:03.000Z | 2021-08-20T17:45:41.000Z | problems/EE/auto/problem7_EE.py | sunandita/ICAPS_Summer_School_RAE_2020 | a496b62185bcfdd2c76eb7986ae99cfa85708d28 | [
"BSD-3-Clause"
] | null | null | null | problems/EE/auto/problem7_EE.py | sunandita/ICAPS_Summer_School_RAE_2020 | a496b62185bcfdd2c76eb7986ae99cfa85708d28 | [
"BSD-3-Clause"
] | 2 | 2020-10-15T07:06:14.000Z | 2020-10-15T17:33:01.000Z | __author__ = 'patras'
from domain_exploreEnv import *
from timer import DURATION
from state import state, rv
# Nominal duration of each primitive action in simulated time units.
DURATION.TIME = {
    'survey': 5,
    'monitor': 5,
    'screen': 5,
    'sample': 5,
    'process': 5,
    'fly': 3,
    'deposit': 1,
    'transferData': 1,
    'take': 2,
    'put': 2,
    'move': 10,
    'charge': 5,
    'negotiate': 5,
    'handleAlien': 5,
}

# Action durations in counter ticks; here identical to DURATION.TIME.
DURATION.COUNTER = {
    'survey': 5,
    'monitor': 5,
    'screen': 5,
    'sample': 5,
    'process': 5,
    'fly': 3,
    'deposit': 1,
    'transferData': 1,
    'take': 2,
    'put': 2,
    'move': 10,
    'charge': 5,
    'negotiate': 5,
    'handleAlien': 5,
}

# Equipment item -> the activity it supports.
rv.TYPE = {'e1': 'survey', 'e2': 'monitor', 'e3': 'screen', 'e4': 'sample', 'e5':'process'}
# Reverse lookup: activity -> the equipment item required to perform it.
rv.EQUIPMENT = {'survey': 'e1', 'monitor': 'e2', 'screen': 'e3', 'sample': 'e4', 'process': 'e5'}
# Same mapping as rv.TYPE, kept under a second name (presumably used by the
# domain code under a different accessor -- TODO confirm both are needed).
rv.EQUIPMENTTYPE = {'e1': 'survey', 'e2': 'monitor', 'e3': 'screen', 'e4': 'sample', 'e5':'process'}
# Zones of the exploration map.
rv.LOCATIONS = ['base', 'z1', 'z2', 'z3', 'z4', 'z5', 'z6', 'z7']
# Weighted adjacency map between zones; the numbers are travel costs.
rv.EDGES = {'base': {'z1': 15, 'z4': 15, 'z5': 35, 'z6': 35, 'z7': 35}, 'z1': {'base': 15, 'z2': 30}, 'z2': {'z1': 30, 'z3': 30}, 'z3': {'z2': 30, 'z4': 30}, 'z4': {'z3': 30, 'base': 15}, 'z5': {'base': 35}, 'z6': {'base': 35}, 'z7': {'base': 35}}
def ResetState():
    """Reset the shared simulation state to this problem's initial values."""
    # Both rovers and the UAV start at the base.
    state.loc = dict(r1='base', r2='base', UAV='base')
    # Initial battery charge per agent.
    state.charge = dict(UAV=80, r1=80, r2=50)
    # Amount of data currently held by each agent.
    state.data = dict(UAV=3, r1=3, r2=1)
    # Position of each item; an agent name appears to mean "carried by it".
    state.pos = dict(c1='base', e1='r2', e2='base', e3='base', e4='base',
                     e5='base', o1='UAV')
    # Current payload of each agent (NIL = empty-handed).
    state.load = dict(r1=NIL, r2='e1', UAV='o1')
    # A storm is active at the start of the scenario.
    state.storm = dict(active=True)
# Incoming tasks keyed by arrival time: at t=5 the UAV must carry out three
# survey activities over zones z5, z7 and z6.
tasks = {
    5: [['doActivities', 'UAV', [['survey', 'z5'], ['survey', 'z7'], ['survey', 'z6']]]],
}

# No exogenous environment events in this problem instance.
eventsEnv = {
}
e3cf88b2840dd63aeeabf8199345d58ee06116b8 | 64,790 | py | Python | project/view_finder/transformations.py | PYSFE/PySFE | 8fd7be869ed7196882405e98849b5b2b81e97517 | [
"MIT"
] | 2 | 2017-11-08T10:23:34.000Z | 2018-08-01T14:39:25.000Z | project/view_finder/transformations.py | PYSFE/pySFE | 8fd7be869ed7196882405e98849b5b2b81e97517 | [
"MIT"
] | null | null | null | project/view_finder/transformations.py | PYSFE/pySFE | 8fd7be869ed7196882405e98849b5b2b81e97517 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# transformations.py
# Copyright (c) 2006-2015, Christoph Gohlke
# Copyright (c) 2006-2015, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Homogeneous Transformation Matrices and Quaternions.
A library for calculating 4x4 matrices for translating, rotating, reflecting,
scaling, shearing, projecting, orthogonalizing, and superimposing arrays of
3D homogeneous coordinates as well as for converting between rotation matrices,
Euler angles, and quaternions. Also includes an Arcball control object and
functions to decompose transformation matrices.
:Author:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2015.07.18
Requirements
------------
* `CPython 2.7 or 3.4 <http://www.python.org>`_
* `Numpy 1.9 <http://www.numpy.org>`_
* `Transformations.c 2015.07.18 <http://www.lfd.uci.edu/~gohlke/>`_
(recommended for speedup of some functions)
Notes
-----
The API is not stable yet and is expected to change between revisions.
This Python code is not optimized for speed. Refer to the transformations.c
module for a faster implementation of some functions.
Documentation in HTML format can be generated with epydoc.
Matrices (M) can be inverted using numpy.linalg.inv(M), be concatenated using
numpy.dot(M0, M1), or transform homogeneous coordinate arrays (v) using
numpy.dot(M, v) for shape (4, \*) column vectors, respectively
numpy.dot(v, M.T) for shape (\*, 4) row vectors ("array of points").
This module follows the "column vectors on the right" and "row major storage"
(C contiguous) conventions. The translation components are in the right column
of the transformation matrix, i.e. M[:3, 3].
The transpose of the transformation matrices may have to be used to interface
with other graphics systems, e.g. with OpenGL's glMultMatrixd(). See also [16].
Calculations are carried out with numpy.float64 precision.
Vector, point, quaternion, and matrix function arguments are expected to be
"array like", i.e. tuple, list, or numpy arrays.
Return types are numpy arrays unless specified otherwise.
Angles are in radians unless specified otherwise.
Quaternions w+ix+jy+kz are represented as [w, x, y, z].
A triple of Euler angles can be applied/interpreted in 24 ways, which can
be specified using a 4 character string or encoded 4-tuple:
*Axes 4-string*: e.g. 'sxyz' or 'ryxy'
- first character : rotations are applied to 's'tatic or 'r'otating frame
- remaining characters : successive rotation axis 'x', 'y', or 'z'
*Axes 4-tuple*: e.g. (0, 0, 0, 0) or (1, 1, 1, 1)
- inner axis: code of axis ('x':0, 'y':1, 'z':2) of rightmost matrix.
- parity : even (0) if inner axis 'x' is followed by 'y', 'y' is followed
by 'z', or 'z' is followed by 'x'. Otherwise odd (1).
- repetition : first and last axis are same (1) or different (0).
- frame : rotations are applied to static (0) or rotating (1) frame.
Other Python packages and modules for 3D transformations and quaternions:
* `Transforms3d <https://pypi.python.org/pypi/transforms3d>`_
includes most code of this module.
* `Blender.mathutils <http://www.blender.org/api/blender_python_api>`_
* `numpy-dtypes <https://github.com/numpy/numpy-dtypes>`_
References
----------
(1) Matrices and transformations. Ronald Goldman.
In "Graphics Gems I", pp 472-475. Morgan Kaufmann, 1990.
(2) More matrices and transformations: shear and pseudo-perspective.
Ronald Goldman. In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
(3) Decomposing a matrix into simple transformations. Spencer Thomas.
In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
(4) Recovering the data from the transformation matrix. Ronald Goldman.
In "Graphics Gems II", pp 324-331. Morgan Kaufmann, 1991.
(5) Euler angle conversion. Ken Shoemake.
In "Graphics Gems IV", pp 222-229. Morgan Kaufmann, 1994.
(6) Arcball rotation control. Ken Shoemake.
In "Graphics Gems IV", pp 175-192. Morgan Kaufmann, 1994.
(7) Representing attitude: Euler angles, unit quaternions, and rotation
vectors. James Diebel. 2006.
(8) A discussion of the solution for the best rotation to relate two sets
of vectors. W Kabsch. Acta Cryst. 1978. A34, 827-828.
(9) Closed-form solution of absolute orientation using unit quaternions.
BKP Horn. J Opt Soc Am A. 1987. 4(4):629-642.
(10) Quaternions. Ken Shoemake.
http://www.sfu.ca/~jwa3/cmpt461/files/quatut.pdf
(11) From quaternion to matrix and back. JMP van Waveren. 2005.
http://www.intel.com/cd/ids/developer/asmo-na/eng/293748.htm
(12) Uniform random rotations. Ken Shoemake.
In "Graphics Gems III", pp 124-132. Morgan Kaufmann, 1992.
(13) Quaternion in molecular modeling. CFF Karney.
J Mol Graph Mod, 25(5):595-604
(14) New method for extracting the quaternion from a rotation matrix.
Itzhack Y Bar-Itzhack, J Guid Contr Dynam. 2000. 23(6): 1085-1087.
(15) Multiple View Geometry in Computer Vision. Hartley and Zissermann.
Cambridge University Press; 2nd Ed. 2004. Chapter 4, Algorithm 4.7, p 130.
(16) Column Vectors vs. Row Vectors.
http://steve.hollasch.net/cgindex/math/matrix/column-vec.html
Examples
--------
>>> alpha, beta, gamma = 0.123, -1.234, 2.345
>>> origin, xaxis, yaxis, zaxis = [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]
>>> I = identity_matrix()
>>> Rx = rotation_matrix(alpha, xaxis)
>>> Ry = rotation_matrix(beta, yaxis)
>>> Rz = rotation_matrix(gamma, zaxis)
>>> R = concatenate_matrices(Rx, Ry, Rz)
>>> euler = euler_from_matrix(R, 'rxyz')
>>> numpy.allclose([alpha, beta, gamma], euler)
True
>>> Re = euler_matrix(alpha, beta, gamma, 'rxyz')
>>> is_same_transform(R, Re)
True
>>> al, be, ga = euler_from_matrix(Re, 'rxyz')
>>> is_same_transform(Re, euler_matrix(al, be, ga, 'rxyz'))
True
>>> qx = quaternion_about_axis(alpha, xaxis)
>>> qy = quaternion_about_axis(beta, yaxis)
>>> qz = quaternion_about_axis(gamma, zaxis)
>>> q = quaternion_multiply(qx, qy)
>>> q = quaternion_multiply(q, qz)
>>> Rq = quaternion_matrix(q)
>>> is_same_transform(R, Rq)
True
>>> S = scale_matrix(1.23, origin)
>>> T = translation_matrix([1, 2, 3])
>>> Z = shear_matrix(beta, xaxis, origin, zaxis)
>>> R = random_rotation_matrix(numpy.random.rand(3))
>>> M = concatenate_matrices(T, R, Z, S)
>>> scale, shear, angles, trans, persp = decompose_matrix(M)
>>> numpy.allclose(scale, 1.23)
True
>>> numpy.allclose(trans, [1, 2, 3])
True
>>> numpy.allclose(shear, [0, math.tan(beta), 0])
True
>>> is_same_transform(R, euler_matrix(_axes='sxyz', *angles))
True
>>> M1 = compose_matrix(scale, shear, angles, trans, persp)
>>> is_same_transform(M, M1)
True
>>> v0, v1 = random_vector(3), random_vector(3)
>>> M = rotation_matrix(angle_between_vectors(v0, v1), vector_product(v0, v1))
>>> v2 = numpy.dot(v0, M[:3,:3].T)
>>> numpy.allclose(unit_vector(v1), unit_vector(v2))
True
"""
from __future__ import division, print_function
import math
import numpy
__version__ = '2015.07.18'
__docformat__ = 'restructuredtext en'
__all__ = ()
def identity_matrix():
    """Return the 4x4 homogeneous identity (unit) matrix.

    >>> numpy.allclose(identity_matrix(), numpy.identity(4))
    True
    >>> numpy.trace(identity_matrix())
    4.0
    """
    return numpy.eye(4)
def translation_matrix(direction):
    """Return a homogeneous matrix translating points by *direction*.

    :param direction: sequence with at least three components (dx, dy, dz).

    >>> v = numpy.random.random(3) - 0.5
    >>> numpy.allclose(v, translation_matrix(v)[:3, 3])
    True
    """
    T = numpy.identity(4)
    # The translation lives in the last column of a homogeneous matrix.
    T[:3, 3] = direction[:3]
    return T
def translation_from_matrix(matrix):
    """Return the translation component of a homogeneous matrix.

    The result is an independent copy of the first three entries of the
    last column, so mutating it does not alter *matrix*.

    >>> t = translation_from_matrix(translation_matrix([1.0, 2.0, 3.0]))
    >>> numpy.allclose(t, [1.0, 2.0, 3.0])
    True
    """
    M = numpy.asarray(matrix)
    return M[:3, 3].copy()
def reflection_matrix(point, normal):
    """Return a matrix mirroring points across the plane through *point*
    with normal vector *normal*.

    >>> v0 = numpy.random.random(4) - 0.5
    >>> v0[3] = 1.
    >>> v1 = numpy.random.random(3) - 0.5
    >>> R = reflection_matrix(v0, v1)
    >>> numpy.allclose(v0, numpy.dot(R, v0))
    True
    """
    n = unit_vector(normal[:3])
    # Householder reflection across the plane through the origin.
    householder = numpy.identity(3) - 2.0 * numpy.outer(n, n)
    M = numpy.identity(4)
    M[:3, :3] = householder
    # Translation term shifts the mirror plane so it passes through *point*.
    M[:3, 3] = 2.0 * numpy.dot(point[:3], n) * n
    return M
def reflection_from_matrix(matrix):
    """Return mirror plane point and normal vector from reflection matrix.

    Inverse of :func:`reflection_matrix`.

    :param matrix: 4x4 homogeneous reflection matrix (array like).
    :returns: tuple ``(point, normal)`` -- a homogeneous point on the
        mirror plane and the plane's unit normal vector.
    :raises ValueError: if *matrix* lacks the eigenvectors required of a
        reflection.

    >>> v0 = numpy.random.random(3) - 0.5
    >>> v1 = numpy.random.random(3) - 0.5
    >>> M0 = reflection_matrix(v0, v1)
    >>> point, normal = reflection_from_matrix(M0)
    >>> M1 = reflection_matrix(point, normal)
    >>> is_same_transform(M0, M1)
    True

    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)
    # normal: unit eigenvector corresponding to eigenvalue -1 -- a
    # reflection negates exactly the direction perpendicular to the plane.
    w, V = numpy.linalg.eig(M[:3, :3])
    i = numpy.where(abs(numpy.real(w) + 1.0) < 1e-8)[0]
    if not len(i):
        raise ValueError("no unit eigenvector corresponding to eigenvalue -1")
    normal = numpy.real(V[:, i[0]]).squeeze()
    # point: any unit eigenvector corresponding to eigenvalue 1 -- a fixed
    # point of the full transform necessarily lies on the mirror plane.
    w, V = numpy.linalg.eig(M)
    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
    if not len(i):
        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
    point = numpy.real(V[:, i[-1]]).squeeze()
    point /= point[3]  # normalize the homogeneous coordinate to w == 1
    return point, normal
def rotation_matrix(angle, direction, point=None):
    """Return matrix rotating by *angle* radians about the axis through
    *point* along *direction*.

    >>> R = rotation_matrix(math.pi/2, [0, 0, 1], [1, 0, 0])
    >>> numpy.allclose(numpy.dot(R, [0, 0, 0, 1]), [1, -1, 0, 1])
    True
    >>> I = numpy.identity(4, numpy.float64)
    >>> numpy.allclose(I, rotation_matrix(math.pi*2, [1, 0, 0]))
    True
    """
    axis = unit_vector(direction[:3])
    c = math.cos(angle)
    s = math.sin(angle)
    # Rodrigues' formula: R = c*I + (1-c)*a a^T + s*[a]_x
    R = c * numpy.identity(3)
    R += (1.0 - c) * numpy.outer(axis, axis)
    R += s * numpy.array([[0.0, -axis[2], axis[1]],
                          [axis[2], 0.0, -axis[0]],
                          [-axis[1], axis[0], 0.0]])
    M = numpy.identity(4)
    M[:3, :3] = R
    if point is not None:
        # Shift so *point* stays fixed instead of rotating about the origin.
        fixed = numpy.array(point[:3], dtype=numpy.float64, copy=False)
        M[:3, 3] = fixed - numpy.dot(R, fixed)
    return M
def rotation_from_matrix(matrix):
    """Return rotation angle and axis from rotation matrix.

    Inverse of :func:`rotation_matrix`.

    :param matrix: 4x4 homogeneous rotation matrix (array like).
    :returns: tuple ``(angle, direction, point)`` -- angle in radians, the
        unit axis direction, and a homogeneous point on the rotation axis.
    :raises ValueError: if *matrix* has no eigenvector for eigenvalue 1,
        i.e. it does not describe a proper rotation.

    >>> angle = (random.random() - 0.5) * (2*math.pi)
    >>> direc = numpy.random.random(3) - 0.5
    >>> point = numpy.random.random(3) - 0.5
    >>> R0 = rotation_matrix(angle, direc, point)
    >>> angle, direc, point = rotation_from_matrix(R0)
    >>> R1 = rotation_matrix(angle, direc, point)
    >>> is_same_transform(R0, R1)
    True

    """
    R = numpy.array(matrix, dtype=numpy.float64, copy=False)
    R33 = R[:3, :3]
    # direction: the rotation axis is the direction R33 leaves unchanged --
    # the unit eigenvector of R33.T corresponding to eigenvalue 1.
    w, W = numpy.linalg.eig(R33.T)
    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
    if not len(i):
        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
    direction = numpy.real(W[:, i[-1]]).squeeze()
    # point: a fixed point of the full 4x4 transform (eigenvalue-1
    # eigenvector of R, normalized to w == 1) lies on the rotation axis.
    w, Q = numpy.linalg.eig(R)
    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
    if not len(i):
        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
    point = numpy.real(Q[:, i[-1]]).squeeze()
    point /= point[3]
    # rotation angle: cosine from the trace; sine recovered from an
    # off-diagonal term, dividing by an axis component that is not ~0
    # (z preferred, then y, then x).
    cosa = (numpy.trace(R33) - 1.0) / 2.0
    if abs(direction[2]) > 1e-8:
        sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2]
    elif abs(direction[1]) > 1e-8:
        sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1]
    else:
        sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0]
    angle = math.atan2(sina, cosa)
    return angle, direction, point
def scale_matrix(factor, origin=None, direction=None):
    """Return matrix to scale by factor around origin in direction.

    Use factor -1 for point symmetry.

    >>> v = (numpy.random.rand(4, 5) - 0.5) * 20
    >>> v[3] = 1
    >>> S = scale_matrix(-1.234)
    >>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3])
    True
    >>> factor = random.random() * 10 - 5
    >>> origin = numpy.random.random(3) - 0.5
    >>> direct = numpy.random.random(3) - 0.5
    >>> S = scale_matrix(factor, origin)
    >>> S = scale_matrix(factor, origin, direct)
    """
    if direction is not None:
        # nonuniform scaling along a single direction
        axis = unit_vector(direction[:3])
        shrink = 1.0 - factor
        mat = numpy.identity(4)
        mat[:3, :3] -= shrink * numpy.outer(axis, axis)
        if origin is not None:
            mat[:3, 3] = (shrink * numpy.dot(origin[:3], axis)) * axis
        return mat
    # uniform scaling about origin (or the world origin if none given)
    mat = numpy.diag([factor, factor, factor, 1.0])
    if origin is not None:
        mat[:3, 3] = origin[:3]
        mat[:3, 3] *= 1.0 - factor
    return mat
def scale_from_matrix(matrix):
    """Return scaling factor, origin and direction from scaling matrix.

    Inverse of scale_matrix: the returned values reproduce the input when
    passed back to scale_matrix.

    >>> factor = random.random() * 10 - 5
    >>> origin = numpy.random.random(3) - 0.5
    >>> direct = numpy.random.random(3) - 0.5
    >>> S0 = scale_matrix(factor, origin)
    >>> factor, origin, direction = scale_from_matrix(S0)
    >>> S1 = scale_matrix(factor, origin, direction)
    >>> is_same_transform(S0, S1)
    True
    >>> S0 = scale_matrix(factor, origin, direct)
    >>> factor, origin, direction = scale_from_matrix(S0)
    >>> S1 = scale_matrix(factor, origin, direction)
    >>> is_same_transform(S0, S1)
    True
    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)
    M33 = M[:3, :3]
    # nonuniform scaling has eigenvalues (factor, 1, 1), so
    # trace - 2 recovers the candidate factor
    factor = numpy.trace(M33) - 2.0
    try:
        # direction: unit eigenvector corresponding to eigenvalue factor
        w, V = numpy.linalg.eig(M33)
        i = numpy.where(abs(numpy.real(w) - factor) < 1e-8)[0][0]
        direction = numpy.real(V[:, i]).squeeze()
        direction /= vector_norm(direction)
    except IndexError:
        # no such eigenvalue: the matrix is a uniform scaling;
        # recover the factor from the trace instead
        factor = (factor + 2.0) / 3.0
        direction = None
    # origin: any eigenvector corresponding to eigenvalue 1 (a fixed point)
    w, V = numpy.linalg.eig(M)
    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
    if not len(i):
        raise ValueError("no eigenvector corresponding to eigenvalue 1")
    origin = numpy.real(V[:, i[-1]]).squeeze()
    origin /= origin[3]
    return factor, origin, direction
def projection_matrix(point, normal, direction=None,
                      perspective=None, pseudo=False):
    """Return matrix to project onto plane defined by point and normal.

    Using either perspective point, projection direction, or none of both.

    If pseudo is True, perspective projections will preserve relative depth
    such that Perspective = dot(Orthogonal, PseudoPerspective).

    >>> P = projection_matrix([0, 0, 0], [1, 0, 0])
    >>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:])
    True
    >>> point = numpy.random.random(3) - 0.5
    >>> normal = numpy.random.random(3) - 0.5
    >>> direct = numpy.random.random(3) - 0.5
    >>> persp = numpy.random.random(3) - 0.5
    >>> P0 = projection_matrix(point, normal)
    >>> P1 = projection_matrix(point, normal, direction=direct)
    >>> P2 = projection_matrix(point, normal, perspective=persp)
    >>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True)
    >>> is_same_transform(P2, numpy.dot(P0, P3))
    True
    >>> P = projection_matrix([3, 0, 0], [1, 1, 0], [1, 0, 0])
    >>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20
    >>> v0[3] = 1
    >>> v1 = numpy.dot(P, v0)
    >>> numpy.allclose(v1[1], v0[1])
    True
    >>> numpy.allclose(v1[0], 3-v1[1])
    True
    """
    M = numpy.identity(4)
    point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
    normal = unit_vector(normal[:3])
    if perspective is not None:
        # perspective projection from an eye point onto the plane
        perspective = numpy.array(perspective[:3], dtype=numpy.float64,
                                  copy=False)
        M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal)
        M[:3, :3] -= numpy.outer(perspective, normal)
        if pseudo:
            # preserve relative depth
            M[:3, :3] -= numpy.outer(normal, normal)
            M[:3, 3] = numpy.dot(point, normal) * (perspective+normal)
        else:
            M[:3, 3] = numpy.dot(point, normal) * perspective
        M[3, :3] = -normal
        M[3, 3] = numpy.dot(perspective, normal)
    elif direction is not None:
        # parallel projection along an arbitrary direction
        direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False)
        scale = numpy.dot(direction, normal)
        M[:3, :3] -= numpy.outer(direction, normal) / scale
        M[:3, 3] = direction * (numpy.dot(point, normal) / scale)
    else:
        # orthogonal projection along the plane normal
        M[:3, :3] -= numpy.outer(normal, normal)
        M[:3, 3] = numpy.dot(point, normal) * normal
    return M
def projection_from_matrix(matrix, pseudo=False):
    """Return projection plane and perspective point from projection matrix.

    Return values are same as arguments for projection_matrix function:
    point, normal, direction, perspective, and pseudo.

    >>> point = numpy.random.random(3) - 0.5
    >>> normal = numpy.random.random(3) - 0.5
    >>> direct = numpy.random.random(3) - 0.5
    >>> persp = numpy.random.random(3) - 0.5
    >>> P0 = projection_matrix(point, normal)
    >>> result = projection_from_matrix(P0)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True
    >>> P0 = projection_matrix(point, normal, direct)
    >>> result = projection_from_matrix(P0)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True
    >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False)
    >>> result = projection_from_matrix(P0, pseudo=False)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True
    >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True)
    >>> result = projection_from_matrix(P0, pseudo=True)
    >>> P1 = projection_matrix(*result)
    >>> is_same_transform(P0, P1)
    True
    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)
    M33 = M[:3, :3]
    w, V = numpy.linalg.eig(M)
    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
    # an eigenvalue-1 eigenvector of the full matrix exists for
    # orthogonal/parallel projections (points on the plane are fixed)
    if not pseudo and len(i):
        # point: any eigenvector corresponding to eigenvalue 1
        point = numpy.real(V[:, i[-1]]).squeeze()
        point /= point[3]
        # direction: unit eigenvector corresponding to eigenvalue 0
        # (the direction that is collapsed onto the plane)
        w, V = numpy.linalg.eig(M33)
        i = numpy.where(abs(numpy.real(w)) < 1e-8)[0]
        if not len(i):
            raise ValueError("no eigenvector corresponding to eigenvalue 0")
        direction = numpy.real(V[:, i[0]]).squeeze()
        direction /= vector_norm(direction)
        # normal: unit eigenvector of M33.T corresponding to eigenvalue 0
        w, V = numpy.linalg.eig(M33.T)
        i = numpy.where(abs(numpy.real(w)) < 1e-8)[0]
        if len(i):
            # parallel projection
            normal = numpy.real(V[:, i[0]]).squeeze()
            normal /= vector_norm(normal)
            return point, normal, direction, None, False
        else:
            # orthogonal projection, where normal equals direction vector
            return point, direction, None, None, False
    else:
        # perspective projection
        i = numpy.where(abs(numpy.real(w)) > 1e-8)[0]
        if not len(i):
            raise ValueError(
                "no eigenvector not corresponding to eigenvalue 0")
        point = numpy.real(V[:, i[-1]]).squeeze()
        point /= point[3]
        normal = - M[3, :3]
        perspective = M[:3, 3] / numpy.dot(point[:3], normal)
        if pseudo:
            perspective -= normal
        return point, normal, None, perspective, pseudo
def clip_matrix(left, right, bottom, top, near, far, perspective=False):
    """Return matrix to obtain normalized device coordinates from frustum.

    The frustum bounds are axis-aligned along x (left, right),
    y (bottom, top) and z (near, far).

    Normalized device coordinates are in range [-1, 1] if coordinates are
    inside the frustum.

    If perspective is True the frustum is a truncated pyramid with the
    perspective point at origin and direction along z axis, otherwise an
    orthographic canonical view volume (a box).

    Homogeneous coordinates transformed by the perspective clip matrix
    need to be dehomogenized (divided by w coordinate).

    >>> frustum = numpy.random.rand(6)
    >>> frustum[1] += frustum[0]
    >>> frustum[3] += frustum[2]
    >>> frustum[5] += frustum[4]
    >>> M = clip_matrix(perspective=False, *frustum)
    >>> numpy.dot(M, [frustum[0], frustum[2], frustum[4], 1])
    array([-1., -1., -1.,  1.])
    >>> numpy.dot(M, [frustum[1], frustum[3], frustum[5], 1])
    array([ 1.,  1.,  1.,  1.])
    >>> M = clip_matrix(perspective=True, *frustum)
    >>> v = numpy.dot(M, [frustum[0], frustum[2], frustum[4], 1])
    >>> v / v[3]
    array([-1., -1., -1.,  1.])
    >>> v = numpy.dot(M, [frustum[1], frustum[3], frustum[4], 1])
    >>> v / v[3]
    array([ 1.,  1., -1.,  1.])
    """
    # guard clauses: bounds must be strictly ordered
    if left >= right or bottom >= top or near >= far:
        raise ValueError("invalid frustum")
    if not perspective:
        # orthographic: scale and translate each axis into [-1, 1]
        rows = [[2.0/(right-left), 0.0, 0.0, (right+left)/(left-right)],
                [0.0, 2.0/(top-bottom), 0.0, (top+bottom)/(bottom-top)],
                [0.0, 0.0, 2.0/(far-near), (far+near)/(near-far)],
                [0.0, 0.0, 0.0, 1.0]]
        return numpy.array(rows)
    # perspective: near plane must be in front of the eye at the origin
    if near <= _EPS:
        raise ValueError("invalid frustum: near <= 0")
    t = 2.0 * near
    rows = [[t/(left-right), 0.0, (right+left)/(right-left), 0.0],
            [0.0, t/(bottom-top), (top+bottom)/(top-bottom), 0.0],
            [0.0, 0.0, (far+near)/(near-far), t*far/(far-near)],
            [0.0, 0.0, -1.0, 0.0]]
    return numpy.array(rows)
def shear_matrix(angle, direction, point, normal):
    """Return matrix to shear by angle along direction vector on shear plane.

    The shear plane is defined by a point and normal vector. The direction
    vector must be orthogonal to the plane's normal vector.

    A point P is transformed by the shear matrix into P" such that
    the vector P-P" is parallel to the direction vector and its extent is
    given by the angle of P-P'-P", where P' is the orthogonal projection
    of P onto the shear plane.

    >>> angle = (random.random() - 0.5) * 4*math.pi
    >>> direct = numpy.random.random(3) - 0.5
    >>> point = numpy.random.random(3) - 0.5
    >>> normal = numpy.cross(direct, numpy.random.random(3))
    >>> S = shear_matrix(angle, direct, point, normal)
    >>> numpy.allclose(1, numpy.linalg.det(S))
    True
    """
    n = unit_vector(normal[:3])
    d = unit_vector(direction[:3])
    if abs(numpy.dot(n, d)) > 1e-6:
        raise ValueError("direction and normal vectors are not orthogonal")
    t = math.tan(angle)
    mat = numpy.identity(4)
    mat[:3, :3] += t * numpy.outer(d, n)
    mat[:3, 3] = -t * numpy.dot(point[:3], n) * d
    return mat
def shear_from_matrix(matrix):
    """Return shear angle, direction and plane from shear matrix.

    Inverse of shear_matrix.

    >>> angle = (random.random() - 0.5) * 4*math.pi
    >>> direct = numpy.random.random(3) - 0.5
    >>> point = numpy.random.random(3) - 0.5
    >>> normal = numpy.cross(direct, numpy.random.random(3))
    >>> S0 = shear_matrix(angle, direct, point, normal)
    >>> angle, direct, point, normal = shear_from_matrix(S0)
    >>> S1 = shear_matrix(angle, direct, point, normal)
    >>> is_same_transform(S0, S1)
    True
    """
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)
    M33 = M[:3, :3]
    # normal: cross independent eigenvectors corresponding to the eigenvalue 1
    # (vectors in the shear plane are unchanged; their cross product is the
    # plane normal). Loose 1e-4 tolerance because the eigenvalue is repeated.
    w, V = numpy.linalg.eig(M33)
    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-4)[0]
    if len(i) < 2:
        raise ValueError("no two linear independent eigenvectors found %s" % w)
    V = numpy.real(V[:, i]).squeeze().T
    lenorm = -1.0
    # pick the eigenvector pair whose cross product is largest,
    # i.e. the most linearly independent pair
    for i0, i1 in ((0, 1), (0, 2), (1, 2)):
        n = numpy.cross(V[i0], V[i1])
        w = vector_norm(n)
        if w > lenorm:
            lenorm = w
            normal = n
    normal /= lenorm
    # direction and angle: M - I maps everything onto the shear direction;
    # its action on the unit normal has length tan(angle)
    direction = numpy.dot(M33 - numpy.identity(3), normal)
    angle = vector_norm(direction)
    direction /= angle
    angle = math.atan(angle)
    # point: eigenvector of the full matrix corresponding to eigenvalue 1
    # (a fixed point on the shear plane)
    w, V = numpy.linalg.eig(M)
    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
    if not len(i):
        raise ValueError("no eigenvector corresponding to eigenvalue 1")
    point = numpy.real(V[:, i[-1]]).squeeze()
    point /= point[3]
    return angle, direction, point, normal
def decompose_matrix(matrix):
    """Return sequence of transformations from transformation matrix.

    matrix : array_like
        Non-degenerative homogeneous transformation matrix

    Return tuple of:
        scale : vector of 3 scaling factors
        shear : list of shear factors for x-y, x-z, y-z axes
        angles : list of Euler angles about static x, y, z axes
        translate : translation vector along x, y, z axes
        perspective : perspective partition of matrix

    Raise ValueError if matrix is of wrong type or degenerative.

    >>> T0 = translation_matrix([1, 2, 3])
    >>> scale, shear, angles, trans, persp = decompose_matrix(T0)
    >>> T1 = translation_matrix(trans)
    >>> numpy.allclose(T0, T1)
    True
    >>> S = scale_matrix(0.123)
    >>> scale, shear, angles, trans, persp = decompose_matrix(S)
    >>> scale[0]
    0.123
    >>> R0 = euler_matrix(1, 2, 3)
    >>> scale, shear, angles, trans, persp = decompose_matrix(R0)
    >>> R1 = euler_matrix(*angles)
    >>> numpy.allclose(R0, R1)
    True
    """
    # work on a normalized, transposed copy (rows are basis vectors below)
    M = numpy.array(matrix, dtype=numpy.float64, copy=True).T
    if abs(M[3, 3]) < _EPS:
        raise ValueError("M[3, 3] is zero")
    M /= M[3, 3]
    P = M.copy()
    P[:, 3] = 0.0, 0.0, 0.0, 1.0
    if not numpy.linalg.det(P):
        raise ValueError("matrix is singular")
    scale = numpy.zeros((3, ))
    shear = [0.0, 0.0, 0.0]
    angles = [0.0, 0.0, 0.0]
    # perspective partition: solve for it, then clear it from M
    if any(abs(M[:3, 3]) > _EPS):
        perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T))
        M[:, 3] = 0.0, 0.0, 0.0, 1.0
    else:
        perspective = numpy.array([0.0, 0.0, 0.0, 1.0])
    # translation is the last row of the transposed matrix
    translate = M[3, :3].copy()
    M[3, :3] = 0.0
    # Gram-Schmidt-like factorization of the 3x3 part into scale/shear/rotation
    row = M[:3, :3].copy()
    scale[0] = vector_norm(row[0])
    row[0] /= scale[0]
    shear[0] = numpy.dot(row[0], row[1])
    row[1] -= row[0] * shear[0]
    scale[1] = vector_norm(row[1])
    row[1] /= scale[1]
    shear[0] /= scale[1]
    shear[1] = numpy.dot(row[0], row[2])
    row[2] -= row[0] * shear[1]
    shear[2] = numpy.dot(row[1], row[2])
    row[2] -= row[1] * shear[2]
    scale[2] = vector_norm(row[2])
    row[2] /= scale[2]
    # NOTE: shear is a Python list; this augmented slice assignment only works
    # because scale[2] is a numpy scalar whose reflected division converts the
    # slice to an ndarray
    shear[1:] /= scale[2]
    # flip all scales if the remaining basis is left-handed
    if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0:
        numpy.negative(scale, scale)
        numpy.negative(row, row)
    # Euler angles (static x, y, z) from the orthonormalized rotation rows;
    # the else branch handles the gimbal-lock case cos(ay) == 0
    angles[1] = math.asin(-row[0, 2])
    if math.cos(angles[1]):
        angles[0] = math.atan2(row[1, 2], row[2, 2])
        angles[2] = math.atan2(row[0, 1], row[0, 0])
    else:
        #angles[0] = math.atan2(row[1, 0], row[1, 1])
        angles[0] = math.atan2(-row[2, 1], row[1, 1])
        angles[2] = 0.0
    return scale, shear, angles, translate, perspective
def compose_matrix(scale=None, shear=None, angles=None, translate=None,
                   perspective=None):
    """Return transformation matrix from sequence of transformations.

    This is the inverse of the decompose_matrix function.

    Sequence of transformations:
        scale : vector of 3 scaling factors
        shear : list of shear factors for x-y, x-z, y-z axes
        angles : list of Euler angles about static x, y, z axes
        translate : translation vector along x, y, z axes
        perspective : perspective partition of matrix

    >>> scale = numpy.random.random(3) - 0.5
    >>> shear = numpy.random.random(3) - 0.5
    >>> angles = (numpy.random.random(3) - 0.5) * (2*math.pi)
    >>> trans = numpy.random.random(3) - 0.5
    >>> persp = numpy.random.random(4) - 0.5
    >>> M0 = compose_matrix(scale, shear, angles, trans, persp)
    >>> result = decompose_matrix(M0)
    >>> M1 = compose_matrix(*result)
    >>> is_same_transform(M0, M1)
    True
    """
    # collect the partial transforms in application order:
    # perspective, translation, rotation, shear, scale
    factors = []
    if perspective is not None:
        P = numpy.identity(4)
        P[3, :] = perspective[:4]
        factors.append(P)
    if translate is not None:
        T = numpy.identity(4)
        T[:3, 3] = translate[:3]
        factors.append(T)
    if angles is not None:
        factors.append(euler_matrix(angles[0], angles[1], angles[2], 'sxyz'))
    if shear is not None:
        Z = numpy.identity(4)
        Z[0, 1], Z[0, 2], Z[1, 2] = shear[0], shear[1], shear[2]
        factors.append(Z)
    if scale is not None:
        factors.append(numpy.diag([scale[0], scale[1], scale[2], 1.0]))
    M = numpy.identity(4)
    for factor in factors:
        M = numpy.dot(M, factor)
    M /= M[3, 3]
    return M
def orthogonalization_matrix(lengths, angles):
    """Return orthogonalization matrix for crystallographic cell coordinates.

    Angles are expected in degrees.

    The de-orthogonalization matrix is the inverse.

    >>> O = orthogonalization_matrix([10, 10, 10], [90, 90, 90])
    >>> numpy.allclose(O[:3, :3], numpy.identity(3, float) * 10)
    True
    >>> O = orthogonalization_matrix([9.8, 12.0, 15.5], [87.2, 80.7, 69.7])
    >>> numpy.allclose(numpy.sum(O), 43.063229)
    True
    """
    a, b, c = lengths
    # convert cell angles alpha, beta, gamma to radians
    rad = numpy.radians(angles)
    sin_al, sin_be, _ = numpy.sin(rad)
    cos_al, cos_be, cos_ga = numpy.cos(rad)
    co = (cos_al * cos_be - cos_ga) / (sin_al * sin_be)
    row0 = [a*sin_be*math.sqrt(1.0-co*co), 0.0, 0.0, 0.0]
    row1 = [-a*sin_be*co, b*sin_al, 0.0, 0.0]
    row2 = [a*cos_be, b*cos_al, c, 0.0]
    row3 = [0.0, 0.0, 0.0, 1.0]
    return numpy.array([row0, row1, row2, row3])
def affine_matrix_from_points(v0, v1, shear=True, scale=True, usesvd=True):
    """Return affine transform matrix to register two point sets.

    v0 and v1 are shape (ndims, \*) arrays of at least ndims non-homogeneous
    coordinates, where ndims is the dimensionality of the coordinate space.

    If shear is False, a similarity transformation matrix is returned.
    If also scale is False, a rigid/Euclidean transformation matrix
    is returned.

    By default the algorithm by Hartley and Zissermann [15] is used.
    If usesvd is True, similarity and Euclidean transformation matrices
    are calculated by minimizing the weighted sum of squared deviations
    (RMSD) according to the algorithm by Kabsch [8].
    Otherwise, and if ndims is 3, the quaternion based algorithm by Horn [9]
    is used, which is slower when using this Python implementation.

    The returned matrix performs rotation, translation and uniform scaling
    (if specified).

    >>> v0 = [[0, 1031, 1031, 0], [0, 0, 1600, 1600]]
    >>> v1 = [[675, 826, 826, 677], [55, 52, 281, 277]]
    >>> affine_matrix_from_points(v0, v1)
    array([[   0.14549,    0.00062,  675.50008],
           [   0.00048,    0.14094,   53.24971],
           [   0.     ,    0.     ,    1.     ]])
    >>> T = translation_matrix(numpy.random.random(3)-0.5)
    >>> R = random_rotation_matrix(numpy.random.random(3))
    >>> S = scale_matrix(random.random())
    >>> M = concatenate_matrices(T, R, S)
    >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20
    >>> v0[3] = 1
    >>> v1 = numpy.dot(M, v0)
    >>> v0[:3] += numpy.random.normal(0, 1e-8, 300).reshape(3, -1)
    >>> M = affine_matrix_from_points(v0[:3], v1[:3])
    >>> numpy.allclose(v1, numpy.dot(M, v0))
    True

    More examples in superimposition_matrix()
    """
    # input arrays are modified below, so copy
    v0 = numpy.array(v0, dtype=numpy.float64, copy=True)
    v1 = numpy.array(v1, dtype=numpy.float64, copy=True)

    ndims = v0.shape[0]
    if ndims < 2 or v0.shape[1] < ndims or v0.shape != v1.shape:
        raise ValueError("input arrays are of wrong shape or type")

    # move centroids to origin; M0/M1 record the translations
    t0 = -numpy.mean(v0, axis=1)
    M0 = numpy.identity(ndims+1)
    M0[:ndims, ndims] = t0
    v0 += t0.reshape(ndims, 1)
    t1 = -numpy.mean(v1, axis=1)
    M1 = numpy.identity(ndims+1)
    M1[:ndims, ndims] = t1
    v1 += t1.reshape(ndims, 1)

    if shear:
        # Affine transformation (Hartley & Zissermann)
        A = numpy.concatenate((v0, v1), axis=0)
        u, s, vh = numpy.linalg.svd(A.T)
        vh = vh[:ndims].T
        B = vh[:ndims]
        C = vh[ndims:2*ndims]
        t = numpy.dot(C, numpy.linalg.pinv(B))
        t = numpy.concatenate((t, numpy.zeros((ndims, 1))), axis=1)
        M = numpy.vstack((t, ((0.0,)*ndims) + (1.0,)))
    elif usesvd or ndims != 3:
        # Rigid transformation via SVD of covariance matrix (Kabsch)
        u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T))
        # rotation matrix from SVD orthonormal bases
        R = numpy.dot(u, vh)
        if numpy.linalg.det(R) < 0.0:
            # R does not constitute right handed system
            R -= numpy.outer(u[:, ndims-1], vh[ndims-1, :]*2.0)
            s[-1] *= -1.0
        # homogeneous transformation matrix
        M = numpy.identity(ndims+1)
        M[:ndims, :ndims] = R
    else:
        # Rigid transformation matrix via quaternion (Horn)
        # compute symmetric matrix N
        xx, yy, zz = numpy.sum(v0 * v1, axis=1)
        xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1)
        xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1)
        N = [[xx+yy+zz, 0.0, 0.0, 0.0],
             [yz-zy, xx-yy-zz, 0.0, 0.0],
             [zx-xz, xy+yx, yy-xx-zz, 0.0],
             [xy-yx, zx+xz, yz+zy, zz-xx-yy]]
        # quaternion: eigenvector corresponding to most positive eigenvalue
        w, V = numpy.linalg.eigh(N)
        q = V[:, numpy.argmax(w)]
        q /= vector_norm(q)  # unit quaternion
        # homogeneous transformation matrix
        M = quaternion_matrix(q)

    if scale and not shear:
        # Affine transformation; scale is ratio of RMS deviations from centroid
        v0 *= v0
        v1 *= v1
        M[:ndims, :ndims] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0))

    # move centroids back and re-normalize the homogeneous part
    M = numpy.dot(numpy.linalg.inv(M1), numpy.dot(M, M0))
    M /= M[ndims, ndims]
    return M
def superimposition_matrix(v0, v1, scale=False, usesvd=True):
    """Return matrix to transform given 3D point set into second point set.

    v0 and v1 are shape (3, \*) or (4, \*) arrays of at least 3 points.

    The parameters scale and usesvd are explained in the more general
    affine_matrix_from_points function.

    The returned matrix is a similarity or Euclidean transformation matrix.
    This function has a fast C implementation in transformations.c.

    >>> v0 = numpy.random.rand(3, 10)
    >>> M = superimposition_matrix(v0, v0)
    >>> numpy.allclose(M, numpy.identity(4))
    True
    >>> R = random_rotation_matrix(numpy.random.random(3))
    >>> v0 = [[1,0,0], [0,1,0], [0,0,1], [1,1,1]]
    >>> v1 = numpy.dot(R, v0)
    >>> M = superimposition_matrix(v0, v1)
    >>> numpy.allclose(v1, numpy.dot(M, v0))
    True
    >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20
    >>> v0[3] = 1
    >>> v1 = numpy.dot(R, v0)
    >>> M = superimposition_matrix(v0, v1)
    >>> numpy.allclose(v1, numpy.dot(M, v0))
    True
    >>> S = scale_matrix(random.random())
    >>> T = translation_matrix(numpy.random.random(3)-0.5)
    >>> M = concatenate_matrices(T, R, S)
    >>> v1 = numpy.dot(M, v0)
    >>> v0[:3] += numpy.random.normal(0, 1e-9, 300).reshape(3, -1)
    >>> M = superimposition_matrix(v0, v1, scale=True)
    >>> numpy.allclose(v1, numpy.dot(M, v0))
    True
    >>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False)
    >>> numpy.allclose(v1, numpy.dot(M, v0))
    True
    >>> v = numpy.empty((4, 100, 3))
    >>> v[:, :, 0] = v0
    >>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False)
    >>> numpy.allclose(v1, numpy.dot(M, v[:, :, 0]))
    True
    """
    # drop a homogeneous coordinate row, if present, and delegate to the
    # general point-registration routine without shear
    src = numpy.array(v0, dtype=numpy.float64, copy=False)[:3]
    dst = numpy.array(v1, dtype=numpy.float64, copy=False)[:3]
    return affine_matrix_from_points(src, dst, shear=False,
                                     scale=scale, usesvd=usesvd)
def euler_matrix(ai, aj, ak, axes='sxyz'):
    """Return homogeneous rotation matrix from Euler angles and axis sequence.

    ai, aj, ak : Euler's roll, pitch and yaw angles
    axes : One of 24 axis sequences as string or encoded tuple

    >>> R = euler_matrix(1, 2, 3, 'syxz')
    >>> numpy.allclose(numpy.sum(R[0]), -1.34786452)
    True
    >>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1))
    >>> numpy.allclose(numpy.sum(R[0]), -0.383436184)
    True
    >>> ai, aj, ak = (4*math.pi) * (numpy.random.random(3) - 0.5)
    >>> for axes in _AXES2TUPLE.keys():
    ...    R = euler_matrix(ai, aj, ak, axes)
    >>> for axes in _TUPLE2AXES.keys():
    ...    R = euler_matrix(ai, aj, ak, axes)
    """
    try:
        # Normalize case so upper/mixed-case strings work, consistent with
        # euler_from_matrix and quaternion_from_euler.
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
    except (AttributeError, KeyError):
        _TUPLE2AXES[axes]  # validate encoded tuple
        firstaxis, parity, repetition, frame = axes

    # matrix row/column indices for the three rotation axes
    i = firstaxis
    j = _NEXT_AXIS[i+parity]
    k = _NEXT_AXIS[i-parity+1]

    if frame:
        # rotating frame: swap first and third angles
        ai, ak = ak, ai
    if parity:
        ai, aj, ak = -ai, -aj, -ak

    si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak)
    ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak)
    cc, cs = ci*ck, ci*sk
    sc, ss = si*ck, si*sk

    M = numpy.identity(4)
    if repetition:
        # first and third rotations about the same axis (e.g. 'zxz')
        M[i, i] = cj
        M[i, j] = sj*si
        M[i, k] = sj*ci
        M[j, i] = sj*sk
        M[j, j] = -cj*ss+cc
        M[j, k] = -cj*cs-sc
        M[k, i] = -sj*ck
        M[k, j] = cj*sc+cs
        M[k, k] = cj*cc-ss
    else:
        # three distinct axes (e.g. 'xyz')
        M[i, i] = cj*ck
        M[i, j] = sj*sc-cs
        M[i, k] = sj*cc+ss
        M[j, i] = cj*sk
        M[j, j] = sj*ss+cc
        M[j, k] = sj*cs-sc
        M[k, i] = -sj
        M[k, j] = cj*si
        M[k, k] = cj*ci
    return M
def euler_from_matrix(matrix, axes='sxyz'):
    """Return Euler angles from rotation matrix for specified axis sequence.

    axes : One of 24 axis sequences as string or encoded tuple

    Note that many Euler angle triplets can describe one matrix.

    >>> R0 = euler_matrix(1, 2, 3, 'syxz')
    >>> al, be, ga = euler_from_matrix(R0, 'syxz')
    >>> R1 = euler_matrix(al, be, ga, 'syxz')
    >>> numpy.allclose(R0, R1)
    True
    >>> angles = (4*math.pi) * (numpy.random.random(3) - 0.5)
    >>> for axes in _AXES2TUPLE.keys():
    ...    R0 = euler_matrix(*angles, axes=axes)
    ...    R1 = euler_matrix(*euler_from_matrix(R0, axes), axes=axes)
    ...    if not numpy.allclose(R0, R1): print(axes, "failed")
    """
    try:
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
    except (AttributeError, KeyError):
        _TUPLE2AXES[axes]  # validation
        firstaxis, parity, repetition, frame = axes

    # matrix row/column indices for the three rotation axes
    i = firstaxis
    j = _NEXT_AXIS[i+parity]
    k = _NEXT_AXIS[i-parity+1]

    M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3]
    if repetition:
        # axis sequences like 'zxz'; the else branches below handle the
        # gimbal-lock case where the middle angle makes ax/az ambiguous
        sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k])
        if sy > _EPS:
            ax = math.atan2( M[i, j],  M[i, k])
            ay = math.atan2( sy,       M[i, i])
            az = math.atan2( M[j, i], -M[k, i])
        else:
            ax = math.atan2(-M[j, k],  M[j, j])
            ay = math.atan2( sy,       M[i, i])
            az = 0.0
    else:
        cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i])
        if cy > _EPS:
            ax = math.atan2( M[k, j],  M[k, k])
            ay = math.atan2(-M[k, i],  cy)
            az = math.atan2( M[j, i],  M[i, i])
        else:
            ax = math.atan2(-M[j, k],  M[j, j])
            ay = math.atan2(-M[k, i],  cy)
            az = 0.0

    # undo the sign/order adjustments applied by euler_matrix
    if parity:
        ax, ay, az = -ax, -ay, -az
    if frame:
        ax, az = az, ax
    return ax, ay, az
def euler_from_quaternion(quaternion, axes='sxyz'):
    """Return Euler angles from quaternion for specified axis sequence.

    >>> angles = euler_from_quaternion([0.99810947, 0.06146124, 0, 0])
    >>> numpy.allclose(angles, [0.123, 0, 0])
    True
    """
    # convert to a rotation matrix first, then extract the angles
    rotation = quaternion_matrix(quaternion)
    return euler_from_matrix(rotation, axes)
def quaternion_from_euler(ai, aj, ak, axes='sxyz'):
    """Return quaternion from Euler angles and axis sequence.

    ai, aj, ak : Euler's roll, pitch and yaw angles
    axes : One of 24 axis sequences as string or encoded tuple

    >>> q = quaternion_from_euler(1, 2, 3, 'ryxz')
    >>> numpy.allclose(q, [0.435953, 0.310622, -0.718287, 0.444435])
    True
    """
    try:
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
    except (AttributeError, KeyError):
        _TUPLE2AXES[axes]  # validation
        firstaxis, parity, repetition, frame = axes

    # quaternion component indices for the three axes; offset by one
    # because index 0 holds the scalar part
    i = firstaxis + 1
    j = _NEXT_AXIS[i+parity-1] + 1
    k = _NEXT_AXIS[i-parity] + 1

    if frame:
        ai, ak = ak, ai
    if parity:
        aj = -aj

    # work with half angles
    half_i, half_j, half_k = ai / 2.0, aj / 2.0, ak / 2.0
    ci, si = math.cos(half_i), math.sin(half_i)
    cj, sj = math.cos(half_j), math.sin(half_j)
    ck, sk = math.cos(half_k), math.sin(half_k)
    cc, cs = ci*ck, ci*sk
    sc, ss = si*ck, si*sk

    q = numpy.empty((4, ))
    if repetition:
        q[0] = cj*(cc - ss)
        q[i] = cj*(cs + sc)
        q[j] = sj*(cc + ss)
        q[k] = sj*(cs - sc)
    else:
        q[0] = cj*cc + sj*ss
        q[i] = cj*sc - sj*cs
        q[j] = cj*ss + sj*cc
        q[k] = cj*cs - sj*sc
    if parity:
        q[j] *= -1.0
    return q
def quaternion_about_axis(angle, axis):
    """Return quaternion for rotation of *angle* radians about *axis*.

    >>> q = quaternion_about_axis(0.123, [1, 0, 0])
    >>> numpy.allclose(q, [0.99810947, 0.06146124, 0, 0])
    True
    """
    half = angle / 2.0
    q = numpy.array([0.0, axis[0], axis[1], axis[2]])
    qlen = vector_norm(q)
    if qlen > _EPS:
        # scale the vector part to sin(angle/2)
        q *= math.sin(half) / qlen
    q[0] = math.cos(half)
    return q
def quaternion_matrix(quaternion):
    """Return homogeneous rotation matrix from quaternion.

    The quaternion is in (w, x, y, z) order and need not be normalized;
    near-zero quaternions yield the identity.

    >>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0])
    >>> numpy.allclose(M, rotation_matrix(0.123, [1, 0, 0]))
    True
    >>> M = quaternion_matrix([1, 0, 0, 0])
    >>> numpy.allclose(M, numpy.identity(4))
    True
    >>> M = quaternion_matrix([0, 1, 0, 0])
    >>> numpy.allclose(M, numpy.diag([1, -1, -1, 1]))
    True
    """
    q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
    n = numpy.dot(q, q)
    # degenerate (near-zero) quaternion: no rotation
    if n < _EPS:
        return numpy.identity(4)
    # normalize and pre-scale so the outer products below give the
    # standard quaternion-to-matrix terms directly
    q *= math.sqrt(2.0 / n)
    q = numpy.outer(q, q)
    return numpy.array([
        [1.0-q[2, 2]-q[3, 3],     q[1, 2]-q[3, 0],     q[1, 3]+q[2, 0], 0.0],
        [    q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3],     q[2, 3]-q[1, 0], 0.0],
        [    q[1, 3]-q[2, 0],     q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0],
        [                0.0,                 0.0,                 0.0, 1.0]])
def quaternion_from_matrix(matrix, isprecise=False):
    """Return quaternion from rotation matrix.

    If isprecise is True, the input matrix is assumed to be a precise rotation
    matrix and a faster algorithm is used.

    >>> q = quaternion_from_matrix(numpy.identity(4), True)
    >>> numpy.allclose(q, [1, 0, 0, 0])
    True
    >>> q = quaternion_from_matrix(numpy.diag([1, -1, -1, 1]))
    >>> numpy.allclose(q, [0, 1, 0, 0]) or numpy.allclose(q, [0, -1, 0, 0])
    True
    >>> R = rotation_matrix(0.123, (1, 2, 3))
    >>> q = quaternion_from_matrix(R, True)
    >>> numpy.allclose(q, [0.9981095, 0.0164262, 0.0328524, 0.0492786])
    True
    >>> R = random_rotation_matrix()
    >>> q = quaternion_from_matrix(R)
    >>> is_same_transform(R, quaternion_matrix(q))
    True
    >>> R = euler_matrix(0.0, 0.0, numpy.pi/2.0)
    >>> numpy.allclose(quaternion_from_matrix(R, isprecise=False),
    ...                quaternion_from_matrix(R, isprecise=True))
    True
    """
    R = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4]
    if isprecise:
        # Shepperd-style extraction: choose the numerically largest pivot.
        q = numpy.empty((4, ))
        t = numpy.trace(R)
        if t > R[3, 3]:
            # trace-dominant case
            q[0] = t
            q[3] = R[1, 0] - R[0, 1]
            q[2] = R[0, 2] - R[2, 0]
            q[1] = R[2, 1] - R[1, 2]
        else:
            # pivot on the largest diagonal element
            i, j, k = 1, 2, 3
            if R[1, 1] > R[0, 0]:
                i, j, k = 2, 3, 1
            if R[2, 2] > R[i, i]:
                i, j, k = 3, 1, 2
            t = R[i, i] - (R[j, j] + R[k, k]) + R[3, 3]
            q[i] = t
            q[j] = R[i, j] + R[j, i]
            q[k] = R[k, i] + R[i, k]
            q[3] = R[k, j] - R[j, k]
        q *= 0.5 / math.sqrt(t * R[3, 3])
    else:
        # robust path: the quaternion is the eigenvector of the symmetric
        # matrix K belonging to its largest eigenvalue
        K = numpy.array([
            [R[0, 0]-R[1, 1]-R[2, 2], 0.0, 0.0, 0.0],
            [R[0, 1]+R[1, 0], R[1, 1]-R[0, 0]-R[2, 2], 0.0, 0.0],
            [R[0, 2]+R[2, 0], R[1, 2]+R[2, 1], R[2, 2]-R[0, 0]-R[1, 1], 0.0],
            [R[2, 1]-R[1, 2], R[0, 2]-R[2, 0], R[1, 0]-R[0, 1],
             R[0, 0]+R[1, 1]+R[2, 2]]]) / 3.0
        w, V = numpy.linalg.eigh(K)
        # reorder components from (x, y, z, w) to (w, x, y, z)
        q = V[[3, 0, 1, 2], numpy.argmax(w)]
    # canonical form: non-negative scalar part
    if q[0] < 0.0:
        numpy.negative(q, q)
    return q
def quaternion_multiply(quaternion1, quaternion0):
    """Return the Hamilton product quaternion1 * quaternion0.

    Quaternions are in (w, x, y, z) order.

    >>> q = quaternion_multiply([4, 1, -2, 3], [8, -5, 6, 7])
    >>> numpy.allclose(q, [28, -44, -14, 48])
    True
    """
    w1, x1, y1, z1 = quaternion1
    w0, x0, y0, z0 = quaternion0
    return numpy.array([
        w1*w0 - x1*x0 - y1*y0 - z1*z0,
        w1*x0 + x1*w0 + y1*z0 - z1*y0,
        w1*y0 - x1*z0 + y1*w0 + z1*x0,
        w1*z0 + x1*y0 - y1*x0 + z1*w0], dtype=numpy.float64)
def quaternion_conjugate(quaternion):
    """Return conjugate of quaternion (vector part negated).

    >>> q0 = random_quaternion()
    >>> q1 = quaternion_conjugate(q0)
    >>> q1[0] == q0[0] and all(q1[1:] == -q0[1:])
    True
    """
    conj = numpy.array(quaternion, dtype=numpy.float64, copy=True)
    conj[1:] *= -1.0
    return conj
def quaternion_inverse(quaternion):
    """Return multiplicative inverse of quaternion.

    >>> q0 = random_quaternion()
    >>> q1 = quaternion_inverse(q0)
    >>> numpy.allclose(quaternion_multiply(q0, q1), [1, 0, 0, 0])
    True
    """
    # inverse is the conjugate divided by the squared norm
    conj = numpy.array(quaternion, dtype=numpy.float64, copy=True)
    conj[1:] *= -1.0
    return conj / numpy.dot(conj, conj)
def quaternion_real(quaternion):
    """Return the scalar (real) component of a quaternion as a float.

    >>> quaternion_real([3, 0, 1, 2])
    3.0
    """
    w = quaternion[0]
    return float(w)
def quaternion_imag(quaternion):
    """Return the vector (imaginary) part of a quaternion as a new array.

    >>> quaternion_imag([3, 0, 1, 2])
    array([ 0.,  1.,  2.])
    """
    # copy first so the result never aliases the caller's data
    return numpy.array(quaternion, dtype=numpy.float64, copy=True)[1:4]
def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True):
    """Return spherical linear interpolation between two quaternions.

    fraction 0 returns quat0 (normalized), 1 returns quat1; spin adds
    extra full turns; shortestpath flips quat1 when needed so the
    interpolation takes the shorter arc.

    >>> q0 = random_quaternion()
    >>> q1 = random_quaternion()
    >>> q = quaternion_slerp(q0, q1, 0)
    >>> numpy.allclose(q, q0)
    True
    >>> q = quaternion_slerp(q0, q1, 1, 1)
    >>> numpy.allclose(q, q1)
    True
    >>> q = quaternion_slerp(q0, q1, 0.5)
    >>> angle = math.acos(numpy.dot(q0, q))
    >>> numpy.allclose(2, math.acos(numpy.dot(q0, q1)) / angle) or \
        numpy.allclose(2, math.acos(-numpy.dot(q0, q1)) / angle)
    True
    """
    q0 = unit_vector(quat0[:4])
    q1 = unit_vector(quat1[:4])
    if fraction == 0.0:
        return q0
    elif fraction == 1.0:
        return q1
    d = numpy.dot(q0, q1)
    # quaternions (anti)parallel: interpolation is degenerate
    if abs(abs(d) - 1.0) < _EPS:
        return q0
    if shortestpath and d < 0.0:
        # invert rotation (q and -q represent the same rotation)
        d = -d
        numpy.negative(q1, q1)
    angle = math.acos(d) + spin * math.pi
    if abs(angle) < _EPS:
        return q0
    # standard slerp weights; q0 and q1 are local copies, mutated in place
    isin = 1.0 / math.sin(angle)
    q0 *= math.sin((1.0 - fraction) * angle) * isin
    q1 *= math.sin(fraction * angle) * isin
    q0 += q1
    return q0
def random_quaternion(rand=None):
    """Return uniform random unit quaternion.

    rand: array like or None
        Three independent random variables that are uniformly distributed
        between 0 and 1.

    >>> q = random_quaternion()
    >>> numpy.allclose(1, vector_norm(q))
    True
    >>> q = random_quaternion(numpy.random.random(3))
    >>> len(q.shape), q.shape[0]==4
    (1, True)
    """
    if rand is None:
        rand = numpy.random.rand(3)
    else:
        assert len(rand) == 3
    # Shoemake's subgroup algorithm: map three uniform variates onto S^3
    sqrt1mu = numpy.sqrt(1.0 - rand[0])
    sqrtu = numpy.sqrt(rand[0])
    theta1 = 2.0 * math.pi * rand[1]
    theta2 = 2.0 * math.pi * rand[2]
    return numpy.array([numpy.cos(theta2) * sqrtu,
                        numpy.sin(theta1) * sqrt1mu,
                        numpy.cos(theta1) * sqrt1mu,
                        numpy.sin(theta2) * sqrtu])
def random_rotation_matrix(rand=None):
    """Return uniform random rotation matrix (4x4 homogeneous).

    rand : array like
        Three independent random variables that are uniformly distributed
        between 0 and 1 for each returned quaternion; passed straight
        through to random_quaternion.

    >>> R = random_rotation_matrix()
    >>> numpy.allclose(numpy.dot(R.T, R), numpy.identity(4))
    True
    """
    # Convert a uniformly sampled unit quaternion into a matrix; uniformity
    # of the rotation follows from uniformity of the quaternion sample.
    return quaternion_matrix(random_quaternion(rand))
class Arcball(object):
    """Virtual Trackball Control.

    Maps 2-D window drags onto rotations of a virtual sphere, accumulating
    the result as a unit quaternion stored in ``self._qnow``.

    >>> ball = Arcball()
    >>> ball = Arcball(initial=numpy.identity(4))
    >>> ball.place([320, 320], 320)
    >>> ball.down([500, 250])
    >>> ball.drag([475, 275])
    >>> R = ball.matrix()
    >>> numpy.allclose(numpy.sum(R), 3.90583455)
    True
    >>> ball = Arcball(initial=[1, 0, 0, 0])
    >>> ball.place([320, 320], 320)
    >>> ball.setaxes([1, 1, 0], [-1, 1, 0])
    >>> ball.constrain = True
    >>> ball.down([400, 200])
    >>> ball.drag([200, 400])
    >>> R = ball.matrix()
    >>> numpy.allclose(numpy.sum(R), 0.2055924)
    True
    >>> ball.next()
    """
    def __init__(self, initial=None):
        """Initialize virtual trackball control.
        initial : quaternion or rotation matrix
            A 4-vector is normalized and used directly; a 4x4 matrix is
            converted via quaternion_from_matrix; None means identity.
        """
        self._axis = None                         # active constraint axis during a drag
        self._axes = None                         # candidate constraint axes (see setaxes)
        self._radius = 1.0                        # trackball radius in window pixels
        self._center = [0.0, 0.0]                 # trackball center in window coords
        self._vdown = numpy.array([0.0, 0.0, 1.0])  # sphere point where drag started
        self._constrain = False
        if initial is None:
            self._qdown = numpy.array([1.0, 0.0, 0.0, 0.0])
        else:
            initial = numpy.array(initial, dtype=numpy.float64)
            if initial.shape == (4, 4):
                self._qdown = quaternion_from_matrix(initial)
            elif initial.shape == (4, ):
                initial /= vector_norm(initial)
                self._qdown = initial
            else:
                raise ValueError("initial not a quaternion or matrix")
        # _qpre/_qnow track the previous and current orientation; both start
        # at the initial orientation.
        self._qnow = self._qpre = self._qdown
    def place(self, center, radius):
        """Place Arcball, e.g. when window size changes.
        center : sequence[2]
            Window coordinates of trackball center.
        radius : float
            Radius of trackball in window coordinates.
        """
        self._radius = float(radius)
        self._center[0] = center[0]
        self._center[1] = center[1]
    def setaxes(self, *axes):
        """Set _axes to constrain rotations."""
        # NOTE(review): with a *args parameter `axes` is always a tuple, so
        # `axes is None` can never be true; calling setaxes() with no
        # arguments sets _axes to an empty list, not None.
        if axes is None:
            self._axes = None
        else:
            self._axes = [unit_vector(axis) for axis in axes]
    @property
    def constrain(self):
        """Return state of constrain to axis mode."""
        return self._constrain
    @constrain.setter
    def constrain(self, value):
        """Set state of constrain to axis mode."""
        self._constrain = bool(value)
    def down(self, point):
        """Set initial cursor window coordinates and pick constrain-axis."""
        self._vdown = arcball_map_to_sphere(point, self._center, self._radius)
        self._qdown = self._qpre = self._qnow
        if self._constrain and self._axes is not None:
            # Snap the press point onto the nearest allowed rotation axis.
            self._axis = arcball_nearest_axis(self._vdown, self._axes)
            self._vdown = arcball_constrain_to_axis(self._vdown, self._axis)
        else:
            self._axis = None
    def drag(self, point):
        """Update current cursor window coordinates."""
        vnow = arcball_map_to_sphere(point, self._center, self._radius)
        if self._axis is not None:
            vnow = arcball_constrain_to_axis(vnow, self._axis)
        self._qpre = self._qnow
        # Rotation from _vdown to vnow: axis = cross product, scalar = dot.
        t = numpy.cross(self._vdown, vnow)
        if numpy.dot(t, t) < _EPS:
            # Points coincide (within tolerance): no incremental rotation.
            self._qnow = self._qdown
        else:
            q = [numpy.dot(self._vdown, vnow), t[0], t[1], t[2]]
            self._qnow = quaternion_multiply(q, self._qdown)
    def next(self, acceleration=0.0):
        """Continue rotation in direction of last drag."""
        # Extrapolate past _qnow (fraction > 1); False is passed positionally
        # as `spin` (i.e. spin=0) -- TODO confirm this was intentional.
        q = quaternion_slerp(self._qpre, self._qnow, 2.0+acceleration, False)
        self._qpre, self._qnow = self._qnow, q
    def matrix(self):
        """Return homogeneous rotation matrix."""
        return quaternion_matrix(self._qnow)
def arcball_map_to_sphere(point, center, radius):
    """Return unit sphere coordinates for a window-space cursor position.

    Window y grows downward, so the y component is flipped. Points inside
    the ball are lifted onto the upper hemisphere; points outside are
    projected onto the sphere's equator.
    """
    dx = (point[0] - center[0]) / radius
    dy = (center[1] - point[1]) / radius
    norm_sq = dx * dx + dy * dy
    if norm_sq > 1.0:
        # cursor outside the ball: normalize onto the z=0 equator
        scale = math.sqrt(norm_sq)
        return numpy.array([dx / scale, dy / scale, 0.0])
    return numpy.array([dx, dy, math.sqrt(1.0 - norm_sq)])
def arcball_constrain_to_axis(point, axis):
    """Return sphere point perpendicular to axis.

    Projects `point` onto the plane through the origin perpendicular to
    `axis` and renormalizes; degenerate cases fall back to an arbitrary
    perpendicular unit vector.
    """
    v = numpy.array(point, dtype=numpy.float64, copy=True)
    a = numpy.array(axis, dtype=numpy.float64, copy=True)
    v -= a * numpy.dot(a, v)  # on plane
    n = vector_norm(v)
    if n > _EPS:
        if v[2] < 0.0:
            # keep the result on the visible (z >= 0) hemisphere
            numpy.negative(v, v)
        v /= n
        return v
    # point was (anti)parallel to axis: pick any perpendicular unit vector
    if a[2] == 1.0:
        return numpy.array([1.0, 0.0, 0.0])
    return unit_vector([-a[1], a[0], 0.0])
def arcball_nearest_axis(point, axes):
    """Return the axis from `axes` whose constraint arc is nearest to `point`.

    "Nearest" is the axis whose constrained projection of `point` has the
    largest dot product with `point`; ties keep the first candidate, and an
    empty sequence yields None.
    """
    point = numpy.array(point, dtype=numpy.float64, copy=False)
    best_axis = None
    best_score = -1.0
    for candidate in axes:
        projected = arcball_constrain_to_axis(point, candidate)
        score = numpy.dot(projected, point)
        if score > best_score:
            best_score = score
            best_axis = candidate
    return best_axis
# epsilon for testing whether a number is close to zero
# (four machine epsilons of float64; used by quaternion_slerp, Arcball, ...)
_EPS = numpy.finfo(float).eps * 4.0
# axis sequences for Euler angles
_NEXT_AXIS = [1, 2, 0, 1]
# map axes strings to/from tuples of inner axis, parity, repetition, frame
# NOTE(review): the 's'/'r' prefix presumably selects static vs. rotating
# frame conventions -- confirm against the Euler-angle routines that
# consume these tuples.
_AXES2TUPLE = {
    'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),
    'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),
    'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),
    'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),
    'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),
    'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),
    'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),
    'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}
# reverse lookup: tuple encoding -> canonical axes string
_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items())
def vector_norm(data, axis=None, out=None):
    """Return the Euclidean norm of `data` along `axis`.

    data : array-like; always copied, never mutated.
    axis : int or None; axis along which to reduce.
    out : optional pre-allocated output array. When given, the norms are
        written into it and None is returned (matching the original API).

    A 1-D input with no `out` yields a plain float; higher-rank input
    yields an array of norms (at least 1-D).

    >>> vector_norm([3, 4])
    5.0
    >>> vector_norm([])
    0.0
    """
    arr = numpy.array(data, dtype=numpy.float64, copy=True)
    if out is not None:
        # caller-supplied buffer: fill it in place, return nothing
        arr *= arr
        numpy.sum(arr, axis=axis, out=out)
        numpy.sqrt(out, out)
        return None
    if arr.ndim == 1:
        return math.sqrt(numpy.dot(arr, arr))
    arr *= arr
    result = numpy.atleast_1d(numpy.sum(arr, axis=axis))
    numpy.sqrt(result, result)
    return result
def unit_vector(data, axis=None, out=None):
    """Return `data` normalized by its Euclidean norm along `axis`.

    data : array-like; copied unless `out` aliases it.
    axis : int or None; axis along which each slice is normalized.
    out : optional output array. When given, results are written into it
        and None is returned (matching the original API).

    >>> numpy.allclose(unit_vector([3.0, 4.0]), [0.6, 0.8])
    True
    >>> list(unit_vector([]))
    []
    """
    if out is not None:
        # normalize in place inside the caller's buffer
        if out is not data:
            out[:] = numpy.array(data, copy=False)
        vec = out
    else:
        vec = numpy.array(data, dtype=numpy.float64, copy=True)
        if vec.ndim == 1:
            # fast path for a single vector
            vec /= math.sqrt(numpy.dot(vec, vec))
            return vec
    norms = numpy.atleast_1d(numpy.sum(vec * vec, axis))
    numpy.sqrt(norms, norms)
    if axis is not None:
        # broadcast the per-slice norms back over the reduced axis
        norms = numpy.expand_dims(norms, axis)
    vec /= norms
    return vec if out is None else None
def random_vector(size):
    """Return array of random doubles in the half-open interval [0.0, 1.0).

    size : int or tuple of ints giving the output shape.

    >>> v = random_vector(10000)
    >>> numpy.all(v >= 0) and numpy.all(v < 1)
    True
    """
    # random_sample is the canonical name; numpy.random.random is its alias.
    return numpy.random.random_sample(size)
def vector_product(v0, v1, axis=0):
    """Return vector perpendicular to vectors (cross product along axis).

    v0, v1 : array-like vectors or stacks of vectors; broadcasting follows
        numpy.cross semantics.
    axis : int, axis of v0/v1 that holds the 3 vector components.

    >>> v = vector_product([2, 0, 0], [0, 3, 0])
    >>> numpy.allclose(v, [0, 0, 6])
    True
    >>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]]
    >>> v1 = [[3], [0], [0]]
    >>> v = vector_product(v0, v1)
    >>> numpy.allclose(v, [[0, 0, 0, 0], [0, 0, 6, 6], [0, -6, 0, -6]])
    True
    >>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]]
    >>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]]
    >>> v = vector_product(v0, v1, axis=1)
    >>> numpy.allclose(v, [[0, 0, 6], [0, -6, 0], [6, 0, 0], [0, -6, 6]])
    True
    """
    # Thin convenience wrapper: same `axis` is used for both inputs and
    # the output (numpy.cross's axis parameter).
    return numpy.cross(v0, v1, axis=axis)
def angle_between_vectors(v0, v1, directed=True, axis=0):
    """Return angle between vectors.

    If directed is False, the input vectors are interpreted as undirected
    axes, i.e. the maximum angle is pi/2.

    v0, v1 : array-like vectors or stacks of vectors (broadcastable).
    axis : int, axis holding the vector components.

    >>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3])
    >>> numpy.allclose(a, math.pi)
    True
    >>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3], directed=False)
    >>> numpy.allclose(a, 0)
    True
    >>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]]
    >>> v1 = [[3], [0], [0]]
    >>> a = angle_between_vectors(v0, v1)
    >>> numpy.allclose(a, [0, 1.5708, 1.5708, 0.95532])
    True
    """
    v0 = numpy.array(v0, dtype=numpy.float64, copy=False)
    v1 = numpy.array(v1, dtype=numpy.float64, copy=False)
    dot = numpy.sum(v0 * v1, axis=axis)
    dot /= numpy.sqrt(numpy.sum(v0 * v0, axis=axis) *
                      numpy.sum(v1 * v1, axis=axis))
    # Bug fix: rounding can push |dot| infinitesimally past 1.0 for
    # (anti)parallel vectors, making arccos return NaN. Clamp to the
    # valid arccos domain. NaN for zero-length inputs is preserved
    # (clip propagates NaN).
    dot = numpy.clip(dot, -1.0, 1.0)
    return numpy.arccos(dot if directed else numpy.fabs(dot))
def inverse_matrix(matrix):
    """Return inverse of square transformation matrix.

    matrix : array-like, square; raises numpy.linalg.LinAlgError when
        singular (behavior of numpy.linalg.inv).

    >>> M0 = random_rotation_matrix()
    >>> M1 = inverse_matrix(M0.T)
    >>> numpy.allclose(M1, numpy.linalg.inv(M0.T))
    True
    >>> for size in range(1, 7):
    ...     M0 = numpy.random.rand(size, size)
    ...     M1 = inverse_matrix(M0)
    ...     if not numpy.allclose(M1, numpy.linalg.inv(M0)): print(size)
    """
    # Thin wrapper kept for API symmetry with the other matrix helpers.
    return numpy.linalg.inv(matrix)
def concatenate_matrices(*matrices):
    """Return the product of a series of 4x4 transformation matrices.

    Matrices are multiplied left to right; with no arguments the 4x4
    identity is returned.

    >>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5
    >>> numpy.allclose(M, concatenate_matrices(M))
    True
    >>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T))
    True
    """
    result = numpy.identity(4)
    for matrix in matrices:
        result = numpy.dot(result, matrix)
    return result
def is_same_transform(matrix0, matrix1):
    """Return True if two homogeneous matrices perform the same transform.

    Both matrices are normalized by their [3, 3] element before comparison,
    so matrices differing only by a uniform scale factor compare equal.

    >>> is_same_transform(numpy.identity(4), numpy.identity(4))
    True
    >>> is_same_transform(numpy.identity(4), random_rotation_matrix())
    False
    """
    m0 = numpy.array(matrix0, dtype=numpy.float64, copy=True)
    m1 = numpy.array(matrix1, dtype=numpy.float64, copy=True)
    m0 /= m0[3, 3]
    m1 /= m1[3, 3]
    return numpy.allclose(m0, m1)
1e86e73fcfad0804cc41814c17985d11b159642b | 889 | py | Python | Bitchat/urls.py | thanasispe/Bitchat | 5f1162282c69dc087fc93af29bc353b7e01a07f7 | [
"Apache-2.0"
] | 2 | 2022-03-13T15:30:08.000Z | 2022-03-13T15:30:24.000Z | Bitchat/urls.py | thanasispe/Bitchat | 5f1162282c69dc087fc93af29bc353b7e01a07f7 | [
"Apache-2.0"
] | null | null | null | Bitchat/urls.py | thanasispe/Bitchat | 5f1162282c69dc087fc93af29bc353b7e01a07f7 | [
"Apache-2.0"
] | null | null | null | """Bitchat URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from main.views import create_post, home, add_post
# URL routing table for the Bitchat project (see module docstring above
# for Django's routing conventions).
urlpatterns = [
    path('admin/', admin.site.urls),   # Django admin site
    path("", home),                    # landing page / feed
    path("post-form/",create_post),    # render the post-creation form
    path("add-post/", add_post),       # form-submission endpoint
]
| 34.192308 | 77 | 0.700787 |
8fd494b9b0f01cc32af4f6a0dcce097deb3d6f18 | 4,352 | py | Python | tests/test_init.py | ju-sh/tzview | 2ad930bf8a3de37697042b05eba332e282626d62 | [
"MIT"
] | null | null | null | tests/test_init.py | ju-sh/tzview | 2ad930bf8a3de37697042b05eba332e282626d62 | [
"MIT"
] | null | null | null | tests/test_init.py | ju-sh/tzview | 2ad930bf8a3de37697042b05eba332e282626d62 | [
"MIT"
] | null | null | null | """
Test cases for code in src/tzview/__init__.py
"""
import datetime
import pytest
import pytz
import tzlocal
import tzcity
import tzview
class TestParseDT:
    """Tests for tzview.parse_dt (datetime-string parsing)."""
    @pytest.mark.parametrize('dt_str, dt_format, expected', [
        # Without dt_format (free-form parsing)
        ("2019-02-28 11:23:42", None,
         datetime.datetime(2019, 2, 28, 11, 23, 42)),
        ("May 2019 31", None,
         datetime.datetime(2019, 5, 31, 0, 0, 0)),
        # With dt_format (explicit strptime-style format)
        ("31 19 05", "%d %y %m",
         datetime.datetime(2019, 5, 31, 0, 0, 0)),
    ])
    def test_valid(self, dt_str, dt_format, expected):
        """
        Valid inputs, with and without an explicit format string.
        """
        assert tzview.parse_dt(dt_str, dt_format) == expected
    def test_valid_now(self):
        """
        The literal 'now' should resolve to (approximately) the current
        time; allow a 2-second window for test runtime.
        """
        now = datetime.datetime.now()
        rv = tzview.parse_dt("now")
        assert now-rv <= datetime.timedelta(seconds=2)
    @pytest.mark.parametrize('dt_str', [
        "23-30-34", "a3-3g-32", "two"
    ])
    def test_invalid(self, dt_str):
        """
        Unparseable strings must raise ValueError.
        """
        with pytest.raises(ValueError):
            tzview.parse_dt(dt_str)
class TestParseTZ:
    """Tests for tzview.parse_tz (time zone / city name resolution)."""
    @pytest.mark.parametrize('tz_str, expected', [
        ("local", tzlocal.get_localzone()),
        ("Europe/Oslo", pytz.timezone("Europe/Oslo")),
        ("Asia/Kuala_Lumpur", pytz.timezone("Asia/Kuala_Lumpur")),
    ])
    def test_valid(self, tz_str, expected):
        """
        Valid Olson names and the 'local' keyword resolve to tzinfo objects.
        """
        assert tzview.parse_tz(tz_str) == expected
    def test_valid_local(self):
        """
        'local' resolves to the system time zone (compared by zone name).
        """
        local = tzlocal.get_localzone()
        rv = tzview.parse_tz("local")
        assert local.zone == rv.zone
    @pytest.mark.parametrize('tz_str', [
        "now", "Europ/Oslo", "Asia/Kuala Lumpur"
    ])
    def test_invalid(self, tz_str):
        """
        Unknown zone/city names must raise UnknownTZCityException.
        """
        with pytest.raises(tzcity.UnknownTZCityException):
            tzview.parse_tz(tz_str)
class TestTZView:
    """Tests for tzview.tzview (convert a datetime into multiple zones)."""
    @pytest.mark.parametrize('to_tzs, from_tz, dt_str, dt_format, expected', [
        (['asia/dHaKa', 'America/Guayaquil'], 'Europe/Oslo',
         "2020-02-23 21:23:42", None, [(2, 23), (15, 23)]),
        # With dt_format
        (['asia/dHaKa', 'America/Guayaquil'], 'Europe/Oslo',
         "2020-February-23 21:23:42", "%Y-%B-%d %H:%M:%S",
         [(2, 23), (15, 23)]),
        # With city names (via tzcity package)
        (['caracas', 'bratislava'], 'oslo',
         "2020-February-23 21:23:42", None,
         [(16, 23), (21, 23)]),
        # Mixed. Both city and time zone names
        (['moscow', 'asia/Baku'], 'dushanbe',
         "31-Jan-2020", None,
         [(22, 0), (23, 0)]),
    ])
    def test_valid(self, to_tzs, from_tz, dt_str, dt_format, expected):
        """
        Valid usages: compare only (hour, minute) of each converted datetime.
        """
        rv = tzview.tzview(to_tzs, from_tz, dt_str, dt_format)
        value = [(dt.hour, dt.minute) for dt in rv]
        assert value == expected
    @pytest.mark.parametrize('dt_str, to_tzs, from_tz, dt_format', [
        # Invalid hour
        ("2020-02-23 24:23:42", ['America/Guayaquil'], 'Europe/Oslo', None),
        # Unknown time zone name
        ("2020-02-23 21:23:42", ['America/Guayaquil'], 'Australia/Oslo', None),
        # Invalid dt_format
        ("-230-02-23 24:23:42", ['America/Guayaquil'], 'Europe/Oslo', "%d-%B"),
    ])
    def test_invalid(self, dt_str, to_tzs, from_tz, dt_format):
        """
        Inputs with a bad datetime (or a datetime that does not match
        dt_format) must raise ValueError.
        """
        # Bug fix: dt_format was parametrized but never passed to tzview,
        # so the "Invalid dt_format" case never exercised the format path.
        with pytest.raises(ValueError):
            tzview.tzview(to_tzs, from_tz, dt_str, dt_format)
    @pytest.mark.parametrize('dt_str, to_tzs, from_tz, dt_format, wrong', [
        # Unknown city name in to_tzs
        ("2020-02-23 22:23:42", ['Amrica/Guayaquil'], 'Europe/Oslo',
         None, 'amrica/guayaquil'),
    ])
    def test_unknown_tzcity(self, dt_str, to_tzs, from_tz, dt_format, wrong):
        """
        An unknown city/zone name must raise UnknownTZCityException carrying
        the (lower-cased) offending name in its `citytz` attribute.
        """
        # Bug fix: pass dt_format through for consistency with test_valid.
        with pytest.raises(tzcity.UnknownTZCityException) as utzce:
            tzview.tzview(to_tzs, from_tz, dt_str, dt_format)
        assert utzce.value.citytz == wrong
| 30.865248 | 79 | 0.57307 |
37359fb7536ee6baf379d71533f816f67dcb9a35 | 25,897 | py | Python | packages/python/plotly/plotly/graph_objs/layout/yaxis/__init__.py | sgn/plotly.py | 587075c9f5a57a3dd60b03b2d47d925fbbb9b9b6 | [
"MIT"
] | 3 | 2020-02-04T21:39:20.000Z | 2020-11-17T19:07:07.000Z | packages/python/plotly/plotly/graph_objs/layout/yaxis/__init__.py | sgn/plotly.py | 587075c9f5a57a3dd60b03b2d47d925fbbb9b9b6 | [
"MIT"
] | 5 | 2021-03-10T05:39:37.000Z | 2022-02-13T04:56:40.000Z | packages/python/plotly/plotly/graph_objs/layout/yaxis/__init__.py | sgn/plotly.py | 587075c9f5a57a3dd60b03b2d47d925fbbb9b9b6 | [
"MIT"
] | 17 | 2019-11-21T14:11:29.000Z | 2019-11-21T15:26:23.000Z | from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Title(_BaseLayoutHierarchyType):
    # NOTE(review): this class appears to follow plotly.py's code-generation
    # pattern (property/setter pairs backed by validators); keep any manual
    # edits consistent with the sibling generated classes in this module.
    # font
    # ----
    @property
    def font(self):
        """
        Sets this axis' title font. Note that the title's font used to
        be customized by the now deprecated `titlefont` attribute.

        The 'font' property is an instance of Font that may be
        specified as an instance of
        plotly.graph_objs.layout.yaxis.title.Font or as a dict of
        string/value properties (color, family, size) that will be
        passed to the Font constructor.

        Returns
        -------
        plotly.graph_objs.layout.yaxis.title.Font
        """
        return self["font"]
    @font.setter
    def font(self, val):
        self["font"] = val
    # standoff
    # --------
    @property
    def standoff(self):
        """
        Sets the standoff distance (in px) between the axis labels and
        the title text. The default is a function of the axis tick
        labels, the title `font.size` and the axis `linewidth`. The
        title position is always constrained within the margins, so the
        actual standoff is always less than the set or default value.
        Setting `standoff` with `automargin` on makes plotly.js push
        the margins to fit the title at the given standoff distance.

        The 'standoff' property is a number in the interval [0, inf].

        Returns
        -------
        int|float
        """
        return self["standoff"]
    @standoff.setter
    def standoff(self, val):
        self["standoff"] = val
    # text
    # ----
    @property
    def text(self):
        """
        Sets the title of this axis. Note that before the existence of
        `title.text`, the title's contents used to be defined as the
        `title` attribute itself. This behavior has been deprecated.

        The 'text' property is a string (numbers are converted).

        Returns
        -------
        str
        """
        return self["text"]
    @text.setter
    def text(self, val):
        self["text"] = val
    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        return "layout.yaxis"
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # NOTE: runtime string consumed by the help system; not a docstring.
        return """\
        font
            Sets this axis' title font. Note that the title's font
            used to be customized by the now deprecated `titlefont`
            attribute.
        standoff
            Sets the standoff distance (in px) between the axis
            labels and the title text The default value is a
            function of the axis tick labels, the title `font.size`
            and the axis `linewidth`. Note that the axis title
            position is always constrained within the margins, so
            the actual standoff distance is always less than the
            set or default value. By setting `standoff` and turning
            on `automargin`, plotly.js will push the margins to fit
            the axis title at given standoff distance.
        text
            Sets the title of this axis. Note that before the
            existence of `title.text`, the title's contents used to
            be defined as the `title` attribute itself. This
            behavior has been deprecated.
        """
    def __init__(self, arg=None, font=None, standoff=None, text=None, **kwargs):
        """
        Construct a new Title object.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of plotly.graph_objs.layout.yaxis.Title
        font
            Sets this axis' title font (see the `font` property).
        standoff
            Standoff distance in px between axis labels and title
            (see the `standoff` property).
        text
            Title text of this axis (see the `text` property).

        Returns
        -------
        Title
        """
        super(Title, self).__init__("title")
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # shallow copy so popping keys below does not mutate the caller's dict
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.layout.yaxis.Title
constructor must be a dict or
an instance of plotly.graph_objs.layout.yaxis.Title"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        # Import validators
        # -----------------
        from plotly.validators.layout.yaxis import title as v_title
        # Initialize validators
        # ---------------------
        self._validators["font"] = v_title.FontValidator()
        self._validators["standoff"] = v_title.StandoffValidator()
        self._validators["text"] = v_title.TextValidator()
        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments win over the corresponding `arg` entries.
        _v = arg.pop("font", None)
        self["font"] = font if font is not None else _v
        _v = arg.pop("standoff", None)
        self["standoff"] = standoff if standoff is not None else _v
        _v = arg.pop("text", None)
        self["text"] = text if text is not None else _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Tickformatstop(_BaseLayoutHierarchyType):
    # NOTE(review): generated-style class (property/setter pairs backed by
    # validators); keep manual edits consistent with its siblings.
    # dtickrange
    # ----------
    @property
    def dtickrange(self):
        """
        range [*min*, *max*], where "min", "max" - dtick values which
        describe some zoom level; it is possible to omit "min" or "max"
        value by passing "null".

        The 'dtickrange' property is an info array of 2 elements, each
        of which accepts values of any type.

        Returns
        -------
        list
        """
        return self["dtickrange"]
    @dtickrange.setter
    def dtickrange(self, val):
        self["dtickrange"] = val
    # enabled
    # -------
    @property
    def enabled(self):
        """
        Determines whether or not this stop is used. If `false`, this
        stop is ignored even within its `dtickrange`.

        The 'enabled' property must be specified as a bool.

        Returns
        -------
        bool
        """
        return self["enabled"]
    @enabled.setter
    def enabled(self, val):
        self["enabled"] = val
    # name
    # ----
    @property
    def name(self):
        """
        When used in a template, named items are created in the output
        figure in addition to any items the figure already has in this
        array. You can modify these items in the output figure by
        making your own item with `templateitemname` matching this
        `name` alongside your modifications (including `visible: false`
        or `enabled: false` to hide it). Has no effect outside of a
        template.

        The 'name' property is a string (numbers are converted).

        Returns
        -------
        str
        """
        return self["name"]
    @name.setter
    def name(self, val):
        self["name"] = val
    # templateitemname
    # ----------------
    @property
    def templateitemname(self):
        """
        Used to refer to a named item in this array in the template.
        Named items from the template will be created even without a
        matching item in the input figure, but you can modify one by
        making an item with `templateitemname` matching its `name`,
        alongside your modifications (including `visible: false` or
        `enabled: false` to hide it). If there is no template or no
        matching item, this item will be hidden unless you explicitly
        show it with `visible: true`.

        The 'templateitemname' property is a string (numbers are
        converted).

        Returns
        -------
        str
        """
        return self["templateitemname"]
    @templateitemname.setter
    def templateitemname(self, val):
        self["templateitemname"] = val
    # value
    # -----
    @property
    def value(self):
        """
        string - dtickformat for described zoom level, the same as
        "tickformat".

        The 'value' property is a string (numbers are converted).

        Returns
        -------
        str
        """
        return self["value"]
    @value.setter
    def value(self, val):
        self["value"] = val
    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        return "layout.yaxis"
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # NOTE: runtime string consumed by the help system; not a docstring.
        return """\
        dtickrange
            range [*min*, *max*], where "min", "max" - dtick values
            which describe some zoom level, it is possible to omit
            "min" or "max" value by passing "null"
        enabled
            Determines whether or not this stop is used. If
            `false`, this stop is ignored even within its
            `dtickrange`.
        name
            When used in a template, named items are created in the
            output figure in addition to any items the figure
            already has in this array. You can modify these items
            in the output figure by making your own item with
            `templateitemname` matching this `name` alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). Has no effect outside of a
            template.
        templateitemname
            Used to refer to a named item in this array in the
            template. Named items from the template will be created
            even without a matching item in the input figure, but
            you can modify one by making an item with
            `templateitemname` matching its `name`, alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). If there is no template or no
            matching item, this item will be hidden unless you
            explicitly show it with `visible: true`.
        value
            string - dtickformat for described zoom level, the same
            as "tickformat"
        """
    def __init__(
        self,
        arg=None,
        dtickrange=None,
        enabled=None,
        name=None,
        templateitemname=None,
        value=None,
        **kwargs
    ):
        """
        Construct a new Tickformatstop object.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            plotly.graph_objs.layout.yaxis.Tickformatstop
        dtickrange
            [*min*, *max*] dtick zoom range (see `dtickrange`).
        enabled
            Whether this stop is used (see `enabled`).
        name
            Template item name (see `name`).
        templateitemname
            Template item reference (see `templateitemname`).
        value
            dtickformat for the described zoom level (see `value`).

        Returns
        -------
        Tickformatstop
        """
        super(Tickformatstop, self).__init__("tickformatstops")
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # shallow copy so popping keys below does not mutate the caller's dict
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.layout.yaxis.Tickformatstop
constructor must be a dict or
an instance of plotly.graph_objs.layout.yaxis.Tickformatstop"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        # Import validators
        # -----------------
        from plotly.validators.layout.yaxis import tickformatstop as v_tickformatstop
        # Initialize validators
        # ---------------------
        self._validators["dtickrange"] = v_tickformatstop.DtickrangeValidator()
        self._validators["enabled"] = v_tickformatstop.EnabledValidator()
        self._validators["name"] = v_tickformatstop.NameValidator()
        self._validators[
            "templateitemname"
        ] = v_tickformatstop.TemplateitemnameValidator()
        self._validators["value"] = v_tickformatstop.ValueValidator()
        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments win over the corresponding `arg` entries.
        _v = arg.pop("dtickrange", None)
        self["dtickrange"] = dtickrange if dtickrange is not None else _v
        _v = arg.pop("enabled", None)
        self["enabled"] = enabled if enabled is not None else _v
        _v = arg.pop("name", None)
        self["name"] = name if name is not None else _v
        _v = arg.pop("templateitemname", None)
        self["templateitemname"] = (
            templateitemname if templateitemname is not None else _v
        )
        _v = arg.pop("value", None)
        self["value"] = value if value is not None else _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Tickfont(_BaseLayoutHierarchyType):
    """Tick-label font settings for ``layout.yaxis``.

    NOTE(review): this class looks machine-generated by plotly's codegen
    (validator wiring, `_prop_descriptions`) — confirm before hand-editing;
    changes would normally belong in the generator, not here.
    """

    # color
    # -----
    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # family
    # ------
    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser will only be able to apply a font if
        it is available on the system which it operates. Provide
        multiple font families, separated by commas, to indicate the
        preference in which to apply fonts if they aren't available on
        the system. The plotly service (at https://plot.ly or on-
        premise) generates images on a server, where only a select
        number of fonts are installed and supported. These include
        "Arial", "Balto", "Courier New", "Droid Sans", "Droid Serif",
        "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
        Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
        Roman".

        The 'family' property is a string and must be specified as:
          - A non-empty string

        Returns
        -------
        str
        """
        return self["family"]

    @family.setter
    def family(self, val):
        self["family"] = val

    # size
    # ----
    @property
    def size(self):
        """
        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]

        Returns
        -------
        int|float
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        # Dotted path of the parent node in the figure hierarchy.
        return "layout.yaxis"

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # Runtime help text; kept verbatim (it is a string literal, not a docstring).
        return """\
        color

        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The
            plotly service (at https://plot.ly or on-premise)
            generates images on a server, where only a select
            number of fonts are installed and supported. These
            include "Arial", "Balto", "Courier New", "Droid Sans",,
            "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
            Standard TT", "Open Sans", "Overpass", "PT Sans
            Narrow", "Raleway", "Times New Roman".
        size
        """

    def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
        """
        Construct a new Tickfont object

        Sets the tick font.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of plotly.graph_objs.layout.yaxis.Tickfont
        color

        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The
            plotly service (at https://plot.ly or on-premise)
            generates images on a server, where only a select
            number of fonts are installed and supported. These
            include "Arial", "Balto", "Courier New", "Droid Sans",
            "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
            Standard TT", "Open Sans", "Overpass", "PT Sans
            Narrow", "Raleway", "Times New Roman".
        size

        Returns
        -------
        Tickfont
        """
        super(Tickfont, self).__init__("tickfont")

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Shallow copy so popping keys below does not mutate the caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.layout.yaxis.Tickfont
constructor must be a dict or
an instance of plotly.graph_objs.layout.yaxis.Tickfont"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)

        # Import validators
        # -----------------
        from plotly.validators.layout.yaxis import tickfont as v_tickfont

        # Initialize validators
        # ---------------------
        self._validators["color"] = v_tickfont.ColorValidator()
        self._validators["family"] = v_tickfont.FamilyValidator()
        self._validators["size"] = v_tickfont.SizeValidator()

        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over values in `arg`.
        _v = arg.pop("color", None)
        self["color"] = color if color is not None else _v
        _v = arg.pop("family", None)
        self["family"] = family if family is not None else _v
        _v = arg.pop("size", None)
        self["size"] = size if size is not None else _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
# Public names re-exported by this module.
# Fix: the original listed "Tickformatstop" twice; duplicate entries in
# __all__ are redundant (they only cause the name to be star-imported once
# anyway), so the duplicate has been removed.
__all__ = ["Tickfont", "Tickformatstop", "Title", "title"]

from plotly.graph_objs.layout.yaxis import title
| 35.282016 | 85 | 0.57022 |
70208e1fbd1a2457d72c08f072536822f22d0127 | 477 | py | Python | SimpleGAN/resize_image.py | Pixir/Pixir | 63a6fc0728403af92eadf188f532f9f41cd9f912 | [
"MIT"
] | null | null | null | SimpleGAN/resize_image.py | Pixir/Pixir | 63a6fc0728403af92eadf188f532f9f41cd9f912 | [
"MIT"
] | 1 | 2020-02-10T08:11:23.000Z | 2020-02-10T08:11:23.000Z | SimpleGAN/resize_image.py | Pixir/Pixir | 63a6fc0728403af92eadf188f532f9f41cd9f912 | [
"MIT"
] | 3 | 2020-02-09T11:14:33.000Z | 2020-04-11T16:10:17.000Z | from SimpleGAN.Read_data import read_images
import numpy as np
def resize_image(width=128, height=128):
    """Resize every image returned by ``read_images`` to ``width`` x ``height``.

    As a side effect each resized image is also saved to
    ``resized_images/<n>.jpg`` using a 1-based index.

    Args:
        width: Target width in pixels.
        height: Target height in pixels.

    Returns:
        numpy.ndarray: stacked pixel data of the resized images
        (one entry per input image; homogeneous shape assuming all
        source images share the same mode — TODO confirm).
    """
    resized_image_set = []
    # enumerate(start=1) replaces the original's manual `i = 0; i += 1` counter.
    for index, image in enumerate(read_images(), start=1):
        resized_image = image.resize((width, height))
        resized_image.save(f'resized_images/{index}.jpg')
        resized_image_set.append(np.array(resized_image))
    return np.array(resized_image_set)
# Script entry point: resize with the default 128x128 dimensions.
if __name__ == '__main__':
    resize_image()
| 28.058824 | 57 | 0.691824 |
5663ffc02775893c769bc29c28af56f23fdbefea | 3,246 | py | Python | RoboticsCV/app.py | FinneganHunter/RoboticsCV | 443fcf914770ed6c18d6a0c41b287e265ae30763 | [
"MIT"
] | null | null | null | RoboticsCV/app.py | FinneganHunter/RoboticsCV | 443fcf914770ed6c18d6a0c41b287e265ae30763 | [
"MIT"
] | null | null | null | RoboticsCV/app.py | FinneganHunter/RoboticsCV | 443fcf914770ed6c18d6a0c41b287e265ae30763 | [
"MIT"
] | null | null | null | # open cv module
# arduino communication module
# remote control?
# wifi camera stream
from typing import List, Any
from .ArduinoComm.class_test import ArduinoCommClass
from RoboticsCV.ComputerVision import computer_vision, face_recog
import cv2 as cv
def main():
    """
    Top-level control loop tying the camera, computer vision and Arduino
    modules together.

    # 1: Run the camera (module or script) WifiCam
    # 2: Start the computer vision (module) starts openCV & returns the roi
    # 3: Start Arduino communication (module) takes the roi & verifies communication
    # 4: Run computer vision in different modes: image, object, face recognition / tracking (function in a module)
    # 5: Make movements / operations based on assigned/defined task (functions in a module)
    """
    print('this ran')
    # 1: camera setup.
    # some script to start IP camera stream
    # video feed is VideoCapture
    # set_wh stores the video dimensions in face_recog module globals
    video = computer_vision.video_feed()
    face_recog.set_wh(video)
    # 2: Arduino link (pin numbers 5, 6, 13, 19 — presumably GPIO pins,
    # verify against ArduinoCommClass).
    arduino = ArduinoCommClass(5, 6, 13, 19)
    print(*arduino.pin_nums)
    # 3: communication verification (currently disabled).
    # ArduinoCommClass.comm_verify(7)
    while True:
        # 4: per-frame vision processing.
        # computer_vision.computer_vision_test(video)  # test works perfectly well
        # computer_vision.obj_recog(video)  # loop over or run as process/thread
        # TODO: execute video capture in its own thread
        # video capture up to: ret, img = cap.read() -> gray_img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        # TODO: execute in its own thread.
        # Based on runtime, lag, and continuous operation, there is going to
        # have to be a master recog function/module that gets called for all
        # the models and cascades that should be run; it could take a list of
        # cascade-name strings and run each in its own thread or process.
        # TODO:
        # Might be able to lock onto a single view of any object or face: if
        # one is detected, keep using it; if not, try other perspectives
        # (frontal -> 3/4 -> profile).  Should also combine the rois and
        # coordinates being output.
        face_recog.single_face(video)  # necessary for individual frame processing
        # face_recog.multi_face_loop(video)  # won't work until rejecter is implemented
        # face_recog class variables get updated for speed & turning each frame:
        app_face_data = face_recog.face_data
        app_img_data = face_recog.width, face_recog.height
        print(f'face data {app_face_data} in image {app_img_data}')
        arduino.positional_data(app_face_data, app_img_data)
        # computer_vision.haarcascade_test(video)  # necessary to test haar cascade recognition
        # TODO: this would never be hit, define a loop which would take the data
        # capture thread and processing thread.
        # 27 is the ESC key code for cv.waitKey.
        if cv.waitKey(1) & 0xFF == 27:
            break
    # 5: movement commands (currently disabled).
    # arduino.turning()   # loop over turning
    # arduino.movement()  # loop over speed
    video.release()
    cv.destroyAllWindows()
| 39.108434 | 120 | 0.679298 |
5b6b3f7338219ee6a611caa8ffd695e341bc76b9 | 16,693 | py | Python | saleor/graphql/views.py | atellezsazo/saleor | 71c51c4d5076d4774c6f88d329eb8627f7963351 | [
"CC-BY-4.0"
] | null | null | null | saleor/graphql/views.py | atellezsazo/saleor | 71c51c4d5076d4774c6f88d329eb8627f7963351 | [
"CC-BY-4.0"
] | 99 | 2021-07-12T04:28:37.000Z | 2022-03-28T04:51:18.000Z | saleor/graphql/views.py | atellezsazo/saleor | 71c51c4d5076d4774c6f88d329eb8627f7963351 | [
"CC-BY-4.0"
] | null | null | null | import fnmatch
import hashlib
import json
import logging
import traceback
from typing import Any, Dict, List, Optional, Tuple, Union
import opentracing
import opentracing.tags
from django.conf import settings
from django.core.cache import cache
from django.db import connection
from django.db.backends.postgresql.base import DatabaseWrapper
from django.http import HttpRequest, HttpResponseNotAllowed, JsonResponse
from django.shortcuts import render
from django.urls import reverse
from django.utils.functional import SimpleLazyObject
from django.views.generic import View
from graphene_django.settings import graphene_settings
from graphene_django.views import instantiate_middleware
from graphql import GraphQLDocument, get_default_backend
from graphql.error import GraphQLError, GraphQLSyntaxError
from graphql.error import format_error as format_graphql_error
from graphql.execution import ExecutionResult
from jwt.exceptions import PyJWTError
from .. import __version__ as saleor_version
from ..core.exceptions import PermissionDenied, ReadOnlyException
from ..core.utils import is_valid_ipv4, is_valid_ipv6
from .utils import query_fingerprint
API_PATH = SimpleLazyObject(lambda: reverse("api"))
INT_ERROR_MSG = "Int cannot represent non 32-bit signed integer value"
unhandled_errors_logger = logging.getLogger("saleor.graphql.errors.unhandled")
handled_errors_logger = logging.getLogger("saleor.graphql.errors.handled")
def tracing_wrapper(execute, sql, params, many, context):
    """Django ``execute_wrapper`` hook that wraps each DB query in a trace span.

    Tags the span with the statement, database type and peer host/port so the
    query shows up as a ``sql``-typed span in the tracing backend, then runs
    the query unchanged.
    """
    db_conn: DatabaseWrapper = context["connection"]
    span_name = f"{db_conn.alias} {db_conn.display_name}"
    tracer = opentracing.global_tracer()
    with tracer.start_active_span(span_name) as scope:
        active_span = scope.span
        span_tags = {
            opentracing.tags.COMPONENT: "db",
            opentracing.tags.DATABASE_STATEMENT: sql,
            opentracing.tags.DATABASE_TYPE: db_conn.display_name,
            opentracing.tags.PEER_HOSTNAME: db_conn.settings_dict.get("HOST"),
            opentracing.tags.PEER_PORT: db_conn.settings_dict.get("PORT"),
            "service.name": "postgres",
            "span.type": "sql",
        }
        for tag_key, tag_value in span_tags.items():
            active_span.set_tag(tag_key, tag_value)
        return execute(sql, params, many, context)
class GraphQLView(View):
    # This class is our implementation of `graphene_django.views.GraphQLView`,
    # which was extended to support the following features:
    # - Playground as default the API explorer (see
    #   https://github.com/prisma/graphql-playground)
    # - file upload (https://github.com/lmcgartland/graphene-file-upload)
    # - query batching
    # - CORS

    # Class-level defaults; instances may override via __init__.
    schema = None
    executor = None
    backend = None
    middleware = None
    root_value = None

    # Exceptions treated as "expected" — logged at info level in format_error.
    HANDLED_EXCEPTIONS = (GraphQLError, PyJWTError, ReadOnlyException, PermissionDenied)

    def __init__(
        self, schema=None, executor=None, middleware=None, root_value=None, backend=None
    ):
        """Configure the view, falling back to graphene settings for
        schema/backend/middleware when they are not given explicitly."""
        super().__init__()
        if schema is None:
            schema = graphene_settings.SCHEMA
        if backend is None:
            backend = get_default_backend()
        if middleware is None:
            middleware = graphene_settings.MIDDLEWARE
        self.schema = self.schema or schema
        if middleware is not None:
            self.middleware = list(instantiate_middleware(middleware))
        self.executor = executor
        self.root_value = root_value
        self.backend = backend

    def dispatch(self, request, *args, **kwargs):
        """Route GET to the Playground (when enabled), serve POST queries and
        OPTIONS preflights, and attach CORS headers for allowed origins."""
        # Handle options method the GraphQlView restricts it.
        if request.method == "GET":
            if settings.PLAYGROUND_ENABLED:
                return self.render_playground(request)
            return HttpResponseNotAllowed(["OPTIONS", "POST"])

        if request.method == "OPTIONS":
            response = self.options(request, *args, **kwargs)
        elif request.method == "POST":
            response = self.handle_query(request)
        else:
            return HttpResponseNotAllowed(["GET", "OPTIONS", "POST"])

        # Add access control headers
        if "HTTP_ORIGIN" in request.META:
            for origin in settings.ALLOWED_GRAPHQL_ORIGINS:
                # fnmatch allows wildcard patterns in ALLOWED_GRAPHQL_ORIGINS.
                if fnmatch.fnmatchcase(request.META["HTTP_ORIGIN"], origin):
                    response["Access-Control-Allow-Origin"] = request.META[
                        "HTTP_ORIGIN"
                    ]
                    response["Access-Control-Allow-Methods"] = "POST, OPTIONS"
                    response["Access-Control-Allow-Headers"] = (
                        "Origin, Content-Type, Accept, Authorization, "
                        "Authorization-Bearer"
                    )
                    response["Access-Control-Allow-Credentials"] = "true"
                    break

        return response

    def render_playground(self, request):
        """Serve the GraphQL Playground HTML pointed at this API's URL."""
        return render(
            request,
            "graphql/playground.html",
            {"api_url": request.build_absolute_uri(str(API_PATH))},
        )

    def _handle_query(self, request: HttpRequest) -> JsonResponse:
        """Parse the request body and produce a JSON response.

        Supports batched queries: a list body yields a list of results, with
        the overall HTTP status being the maximum of the individual statuses.
        """
        try:
            data = self.parse_body(request)
        except ValueError:
            return JsonResponse(
                data={"errors": [self.format_error("Unable to parse query.")]},
                status=400,
            )

        if isinstance(data, list):
            responses = [self.get_response(request, entry) for entry in data]
            result: Union[list, Optional[dict]] = [
                response for response, code in responses
            ]
            status_code = max((code for response, code in responses), default=200)
        else:
            result, status_code = self.get_response(request, data)

        return JsonResponse(data=result, status=status_code, safe=False)

    def handle_query(self, request: HttpRequest) -> JsonResponse:
        """Wrap query handling in an "http" tracing span with request metadata."""
        tracer = opentracing.global_tracer()

        # Disable extending spans from header due to:
        # https://github.com/DataDog/dd-trace-py/issues/2030
        # span_context = tracer.extract(
        #     format=Format.HTTP_HEADERS, carrier=dict(request.headers)
        # )
        # We should:
        # Add `from opentracing.propagation import Format` to imports
        # Add `child_of=span_ontext` to `start_active_span`
        with tracer.start_active_span("http") as scope:
            span = scope.span
            span.set_tag(opentracing.tags.COMPONENT, "http")
            span.set_tag(opentracing.tags.HTTP_METHOD, request.method)
            span.set_tag(
                opentracing.tags.HTTP_URL,
                request.build_absolute_uri(request.get_full_path()),
            )
            span.set_tag("http.useragent", request.META.get("HTTP_USER_AGENT", ""))
            span.set_tag("span.type", "web")

            # REAL_IP_ENVIRON may hold a comma-separated chain of proxies;
            # tag the first syntactically valid address found.
            request_ips = request.META.get(settings.REAL_IP_ENVIRON, "")
            for ip in request_ips.split(","):
                if is_valid_ipv4(ip):
                    span.set_tag(opentracing.tags.PEER_HOST_IPV4, ip)
                elif is_valid_ipv6(ip):
                    span.set_tag(opentracing.tags.PEER_HOST_IPV6, ip)
                else:
                    continue
                break

            response = self._handle_query(request)
            span.set_tag(opentracing.tags.HTTP_STATUS_CODE, response.status_code)

            # RFC2616: Content-Length is defined in bytes,
            # we can calculate the RAW UTF-8 size using the length of
            # response.content of type 'bytes'
            span.set_tag("http.content_length", len(response.content))

            return response

    def get_response(
        self, request: HttpRequest, data: dict
    ) -> Tuple[Optional[Dict[str, List[Any]]], int]:
        """Execute one GraphQL request dict and return (payload, status)."""
        execution_result = self.execute_graphql_request(request, data)
        status_code = 200
        if execution_result:
            response = {}
            if execution_result.errors:
                response["errors"] = [
                    self.format_error(e) for e in execution_result.errors
                ]
            # `invalid` marks validation failures -> HTTP 400; execution
            # errors with data still return 200 per GraphQL conventions.
            if execution_result.invalid:
                status_code = 400
            else:
                response["data"] = execution_result.data
            result: Optional[Dict[str, List[Any]]] = response
        else:
            result = None
        return result, status_code

    def get_root_value(self):
        """Root value passed to the executor (None unless configured)."""
        return self.root_value

    def parse_query(
        self, query: str
    ) -> Tuple[Optional[GraphQLDocument], Optional[ExecutionResult]]:
        """Attempt to parse a query (mandatory) to a gql document object.

        If no query was given or query is not a string, it returns an error.
        If the query is invalid, it returns an error as well.
        Otherwise, it returns the parsed gql document.
        """
        if not query or not isinstance(query, str):
            return (
                None,
                ExecutionResult(
                    errors=[ValueError("Must provide a query string.")], invalid=True
                ),
            )

        # Attempt to parse the query, if it fails, return the error
        try:
            return (
                self.backend.document_from_string(self.schema, query),  # type: ignore
                None,
            )
        except (ValueError, GraphQLSyntaxError) as e:
            return None, ExecutionResult(errors=[e], invalid=True)

    def check_if_query_contains_only_schema(self, document: GraphQLDocument):
        """Return True when the document queries ``__schema``.

        Raises GraphQLError if ``__schema`` is combined with other selections,
        since schema introspection must be fetched in a separate query (its
        result is cached — see execute_graphql_request).
        """
        query_with_schema = False
        for definition in document.document_ast.definitions:
            selections = definition.selection_set.selections
            selection_count = len(selections)
            for selection in selections:
                selection_name = str(selection.name.value)
                if selection_name == "__schema":
                    query_with_schema = True
                    if selection_count > 1:
                        msg = "`__schema` must be fetched in separate query"
                        raise GraphQLError(msg)
        return query_with_schema

    def execute_graphql_request(self, request: HttpRequest, data: dict):
        """Parse, trace and execute a single GraphQL request.

        Pure ``__schema`` queries are served from cache outside DEBUG mode;
        all other queries run through the document executor with DB calls
        instrumented by ``tracing_wrapper``.
        """
        with opentracing.global_tracer().start_active_span("graphql_query") as scope:
            span = scope.span
            span.set_tag(opentracing.tags.COMPONENT, "graphql")
            span.set_tag(
                opentracing.tags.HTTP_URL,
                request.build_absolute_uri(request.get_full_path()),
            )

            query, variables, operation_name = self.get_graphql_params(request, data)

            document, error = self.parse_query(query)
            if error:
                return error

            if document is not None:
                raw_query_string = document.document_string
                span.set_tag("graphql.query", raw_query_string)
                span.set_tag("graphql.query_fingerprint", query_fingerprint(document))
                try:
                    query_contains_schema = self.check_if_query_contains_only_schema(
                        document
                    )
                except GraphQLError as e:
                    return ExecutionResult(errors=[e], invalid=True)

            extra_options: Dict[str, Optional[Any]] = {}

            if self.executor:
                # We only include it optionally since
                # executor is not a valid argument in all backends
                extra_options["executor"] = self.executor
            try:
                with connection.execute_wrapper(tracing_wrapper):
                    response = None
                    should_use_cache_for_scheme = query_contains_schema & (
                        not settings.DEBUG
                    )
                    if should_use_cache_for_scheme:
                        key = generate_cache_key(raw_query_string)
                        response = cache.get(key)

                    if not response:
                        response = document.execute(  # type: ignore
                            root=self.get_root_value(),
                            variables=variables,
                            operation_name=operation_name,
                            context=request,
                            middleware=self.middleware,
                            **extra_options,
                        )
                        if should_use_cache_for_scheme:
                            cache.set(key, response)
                    return response
            except Exception as e:
                span.set_tag(opentracing.tags.ERROR, True)
                # In the graphql-core version that we are using,
                # the Exception is raised for too big integers value.
                # As it's a validation error we want to raise GraphQLError instead.
                if str(e).startswith(INT_ERROR_MSG) or isinstance(e, ValueError):
                    e = GraphQLError(str(e))
                return ExecutionResult(errors=[e], invalid=True)

    @staticmethod
    def parse_body(request: HttpRequest):
        """Decode the request body according to its content type.

        Raises ValueError (from json.loads) for malformed JSON; unknown
        content types yield an empty dict.
        """
        content_type = request.content_type
        if content_type == "application/graphql":
            return {"query": request.body.decode("utf-8")}
        if content_type == "application/json":
            body = request.body.decode("utf-8")
            return json.loads(body)
        if content_type in ["application/x-www-form-urlencoded", "multipart/form-data"]:
            return request.POST
        return {}

    @staticmethod
    def get_graphql_params(request: HttpRequest, data: dict):
        """Extract (query, variables, operation_name) from the request data.

        For multipart requests this implements the GraphQL multipart request
        spec: uploaded files are spliced into `operations` using the `map`
        field before query/variables are read.
        """
        query = data.get("query")
        variables = data.get("variables")
        operation_name = data.get("operationName")
        if operation_name == "null":
            operation_name = None

        if request.content_type == "multipart/form-data":
            operations = json.loads(data.get("operations", "{}"))
            files_map = json.loads(data.get("map", "{}"))
            for file_key in files_map:
                # file key is which file it is in the form-data
                file_instances = files_map[file_key]
                for file_instance in file_instances:
                    obj_set(operations, file_instance, file_key, False)
            query = operations.get("query")
            variables = operations.get("variables")

        return query, variables, operation_name

    @classmethod
    def format_error(cls, error):
        """Serialize an exception into a GraphQL error dict.

        Unwraps chained GraphQLErrors to find the original cause, logs it at
        a severity depending on whether it is in HANDLED_EXCEPTIONS, and (in
        DEBUG mode only) attaches the stacktrace to the payload.
        """
        if isinstance(error, GraphQLError):
            result = format_graphql_error(error)
        else:
            result = {"message": str(error)}

        exc = error
        while isinstance(exc, GraphQLError) and hasattr(exc, "original_error"):
            exc = exc.original_error
        if isinstance(exc, AssertionError):
            exc = GraphQLError(str(exc))
        if isinstance(exc, cls.HANDLED_EXCEPTIONS):
            handled_errors_logger.info("A query had an error", exc_info=exc)
        else:
            unhandled_errors_logger.error("A query failed unexpectedly", exc_info=exc)

        result["extensions"] = {"exception": {"code": type(exc).__name__}}
        if settings.DEBUG:
            lines = []

            if isinstance(exc, BaseException):
                for line in traceback.format_exception(
                    type(exc), exc, exc.__traceback__
                ):
                    lines.extend(line.rstrip().splitlines())
            result["extensions"]["exception"]["stacktrace"] = lines
        return result
def get_key(key):
    """Coerce *key* to an int when possible, otherwise return it unchanged.

    Used to turn dotted-path segments like "0" into list indices while
    leaving dict keys (or None) as-is.
    """
    try:
        return int(key)
    except (TypeError, ValueError):
        return key
def get_shallow_property(obj, prop):
    """Read one level of *obj* by *prop*.

    Integer props index into sequences (and may raise IndexError, matching
    list semantics); anything else is looked up via ``obj.get``.  Objects
    without a ``get`` method yield None.
    """
    if isinstance(prop, int):
        return obj[prop]

    try:
        value = obj.get(prop)
    except AttributeError:
        value = None
    return value
def obj_set(obj, path, value, do_not_replace):
    """Set *value* at a (possibly dotted) *path* inside nested dicts/lists.

    Dotted string paths are split and each segment coerced with get_key, so
    "variables.files.0" walks dicts by key and lists by index.  Intermediate
    containers are created on demand: a dict normally, a list when the next
    segment is an int.  With do_not_replace=True an existing non-None leaf is
    kept.  The structure is mutated in place.
    """
    if isinstance(path, int):
        path = [path]
    if not path:
        return obj
    if isinstance(path, str):
        new_path = [get_key(part) for part in path.split(".")]
        return obj_set(obj, new_path, value, do_not_replace)

    current_path = path[0]
    current_value = get_shallow_property(obj, current_path)

    if len(path) == 1:
        # Leaf segment: write the value unless it is protected.
        if current_value is None or not do_not_replace:
            obj[current_path] = value

    if current_value is None:
        try:
            # Create the intermediate container whose type matches the NEXT
            # path segment; IndexError means we are already at the leaf.
            if isinstance(path[1], int):
                obj[current_path] = []
            else:
                obj[current_path] = {}
        except IndexError:
            pass

    # Recurse into the (possibly just created) child with the rest of the path.
    return obj_set(obj[current_path], path[1:], value, do_not_replace)
def generate_cache_key(raw_query: str) -> str:
    """Build a cache key for a raw query string.

    The key embeds the Saleor version so cached introspection responses are
    invalidated on upgrade.
    """
    query_digest = hashlib.sha256(str(raw_query).encode("utf-8")).hexdigest()
    return f"{saleor_version}-{query_digest}"
| 39.556872 | 88 | 0.606841 |
c638c9a8a092afe2f062d8c1f531452959acd10d | 2,885 | py | Python | Engine/Communication/sender/serial_command_sender.py | RoboCupULaval/StrategyAI | ccddde144f2c0a67113d2e5ffe7c75ed9d4a3d19 | [
"MIT"
] | 13 | 2018-03-14T10:20:10.000Z | 2021-12-10T05:36:47.000Z | Engine/Communication/sender/serial_command_sender.py | RoboCupULaval/StrategyIA | ccddde144f2c0a67113d2e5ffe7c75ed9d4a3d19 | [
"MIT"
] | 200 | 2016-04-29T23:13:01.000Z | 2018-03-13T14:36:39.000Z | Engine/Communication/sender/serial_command_sender.py | RoboCupULaval/StrategyIA | ccddde144f2c0a67113d2e5ffe7c75ed9d4a3d19 | [
"MIT"
] | 45 | 2015-07-04T18:57:39.000Z | 2018-01-11T16:11:13.000Z | # Under MIT License, see LICENSE.txt
from typing import Union
from pyhermes import McuCommunicator
from Engine.Communication.sender.sender_base_class import Sender
from Engine.Controller.robot import MAX_LINEAR_SPEED, MAX_ANGULAR_SPEED
from Util.constant import KickForce, DribbleState
from Util.geometry import clamp
import numpy as np
class SerialCommandSender(Sender):
    """Sender backend that forwards robot commands to the MCU via pyhermes."""

    def connect(self, connection_info):
        """Open the MCU link.

        ``connection_info`` is accepted for interface compatibility but is
        not needed by the pyhermes backend.
        """
        return McuCommunicator(timeout=0.1)

    def send_packet(self, packets_frame):
        """Transmit every robot command in ``packets_frame``.

        Speeds are clamped to the robot limits and converted from mm/s to
        m/s before transmission; commands containing NaN components are
        skipped entirely.

        Raises:
            RuntimeError: when the installed pyhermes lacks
                ``sendSpeedAdvance`` (i.e. it is too old).
        """
        try:
            for packet in packets_frame.packet:
                # Drop commands with NaN components instead of sending garbage.
                if np.isnan(packet.command.x) or \
                   np.isnan(packet.command.y) or \
                   np.isnan(packet.command.orientation):
                    continue
                cx = clamp(packet.command.x, -MAX_LINEAR_SPEED, MAX_LINEAR_SPEED)
                cy = clamp(packet.command.y, -MAX_LINEAR_SPEED, MAX_LINEAR_SPEED)
                orien = clamp(packet.command.orientation, -MAX_ANGULAR_SPEED, MAX_ANGULAR_SPEED)
                self.connection.sendSpeedAdvance(packet.robot_id,
                                                 cx / 1000,  # mm/s -> m/s
                                                 cy / 1000,  # mm/s -> m/s
                                                 orien,
                                                 packet.charge_kick,
                                                 self.translate_kick_force(packet.kick_force),
                                                 self.translate_dribbler_speed(packet.dribbler_state))
        except AttributeError as err:
            # Chain the original error so the missing attribute stays visible.
            raise RuntimeError("You should update your pyhermes, by reinstalling the requirement:"
                               "'pip install -r requirements.txt --upgrade'") from err

    @staticmethod
    def translate_kick_force(kick_force: Union[KickForce, float]) -> int:
        """Convert a kick force (enum or kick speed in m/s) to an MCU value.

        Linear model: ``command = speed / 0.1536 + 0.61 / 0.1536``.  Usable
        commands span 12..30; above 30 the hardware force saturates and the
        minimum speed is 1 m/s.
        """
        if isinstance(kick_force, float):
            kick_force_translated = int(clamp(kick_force / 0.1536 + 0.61 / 0.1536, 12, 30))
        elif isinstance(kick_force, KickForce):
            kick_force_translated = {
                KickForce.NONE: 0,
                KickForce.LOW: 10,     # ~1 m/s
                KickForce.MEDIUM: 18,  # ~2 m/s
                KickForce.HIGH: 60     # ~5.5 m/s
            }.get(kick_force)
        else:
            # Fixed message: the accepted numeric type is float (see the
            # isinstance check above and the type hint), not int.
            raise RuntimeError(f"Kick force : {kick_force} is not a KickForce or a float")
        return kick_force_translated

    @staticmethod
    def translate_dribbler_speed(dribbler_speed: DribbleState) -> int:
        """Map the dribbler state enum onto the MCU dribbler speed value."""
        dribbler_translation = {DribbleState.AUTOMATIC: 0,
                                DribbleState.FORCE_STOP: 0,
                                DribbleState.FORCE_SPIN: 3}
        return dribbler_translation[dribbler_speed]
| 46.532258 | 110 | 0.577816 |
68775106033d261843f6f84b3776a79b2fb9080a | 1,633 | py | Python | Audio Processing/led_driver.py | wyager/LEDStrip | 552179dbadadf4360a13ce452a922b4935b8e402 | [
"BSD-2-Clause"
] | 42 | 2015-01-01T00:28:28.000Z | 2021-12-01T03:56:08.000Z | Audio Processing/led_driver.py | wyager/LEDStrip | 552179dbadadf4360a13ce452a922b4935b8e402 | [
"BSD-2-Clause"
] | 4 | 2015-01-12T21:48:29.000Z | 2016-05-09T23:55:35.000Z | Audio Processing/led_driver.py | wyager/LEDStrip | 552179dbadadf4360a13ce452a922b4935b8e402 | [
"BSD-2-Clause"
] | 7 | 2015-01-18T19:50:06.000Z | 2017-09-22T03:17:33.000Z | # Will Yager
# This Python script sends color/brightness data based on
# ambient sound frequencies to the LEDs.
import pyaudio as pa
import numpy as np
import sys
import serial
# Output values max at 1.0
import notes_scaled_nosaturation
import lavalamp_colors
# Open a stereo (2-channel), 16-bit, 44.1 kHz capture stream on the default
# input device, reading 1024 frames per buffer.
audio_stream = pa.PyAudio().open(format=pa.paInt16, \
                                 channels=2, \
                                 rate=44100, \
                                 input=True, \
                                 # Uncomment and set this using find_input_devices.py
                                 # if default input device is not correct
                                 #input_device_index=2, \
                                 frames_per_buffer=1024)
# Convert the audio data to numbers, num_samples at a time.
def read_audio(audio_stream, num_samples):
    """Endless generator yielding (left, right) channel sample arrays.

    Reads ``num_samples`` frames per iteration from ``audio_stream`` (any
    object with a bytes-returning ``read(n)``), decodes the interleaved
    16-bit little-endian stereo data and yields the two channels as float
    arrays.
    """
    while True:
        # Read all the input data.
        raw = audio_stream.read(num_samples)
        # np.fromstring is deprecated (and removed for binary input in newer
        # NumPy); np.frombuffer is the supported equivalent.  np.float was
        # removed in NumPy 1.24 — the builtin float is the replacement.
        samples = np.frombuffer(raw, dtype=np.int16).astype(float)
        # Interleaved stereo: even indices are the left channel, odd the right.
        samples_l = samples[::2]
        samples_r = samples[1::2]
        yield (samples_l, samples_r)
# Serial link to the Teensy microcontroller that drives the LED strip.
# NOTE(review): on Python 3, pyserial's write() expects bytes while
# send_to_teensy builds a str — presumably this script targets Python 2;
# confirm before porting.
teensy_file = "/dev/ttyACM0"
teensy = serial.Serial(teensy_file, 115200)
def send_to_teensy(strip):
    """Send one frame of LED colors to the Teensy.

    For LED ``i`` three register/value character pairs are emitted:
    ``(i<<2)+0x80`` carries red, ``+0x81`` green and ``+0x82`` blue.  The
    pairs for all LEDs are concatenated and written in a single serial write.
    """
    frame = []
    for led_index, (red, green, blue) in enumerate(strip):
        base = led_index << 2
        frame.append(chr(base + 0x80) + chr(red))
        frame.append(chr(base + 0x81) + chr(green))
        frame.append(chr(base + 0x82) + chr(blue))
    teensy.write(''.join(frame))
# Script entry point: capture audio, convert it to per-LED brightness/color
# values, and stream each resulting frame to the Teensy.
if __name__ == '__main__':
    audio = read_audio(audio_stream, num_samples=512)
    leds = notes_scaled_nosaturation.process(audio, num_leds=32, num_samples=512, sample_rate=44100)
    colors = lavalamp_colors.colorize(leds, num_leds=32)
    for strip in colors:
        # Debug output (disabled): ASCII bars for each LED's r/g/b values.
        # for r,g,b in strip:
        #     sys.stdout.write("r"*r + "g"*g + "b"*b + "\n")
        # print
        send_to_teensy(strip)
2904ac42894c9acaadc8d3ada5d7227eb182bd61 | 20,889 | py | Python | src/pybit/connection.py | stealth-startup/pybit | 4447303813138dee0b7768c92db7c7781128bed3 | [
"MIT"
] | null | null | null | src/pybit/connection.py | stealth-startup/pybit | 4447303813138dee0b7768c92db7c7781128bed3 | [
"MIT"
] | null | null | null | src/pybit/connection.py | stealth-startup/pybit | 4447303813138dee0b7768c92db7c7781128bed3 | [
"MIT"
] | null | null | null | # Copyright (c) 2013 Rex <fdrex1987@gmail.com>
# Copyright (c) 2010 Witchspace <witchspace81@gmail.com>
"""
Connect to Bitcoin server via JSON-RPC.
"""
from pybit.proxy import AuthServiceProxy
from pybit.exceptions import wrap_exception, BitcoinException, WalletPassphraseIncorrect, WalletAlreadyUnlocked
from pybit.types import ServerInfo, AccountInfo, AddressInfo, TransactionInfo, AddressValidation, WorkItem, MiningInfo
class BitcoinConnection(object):
"""
A BitcoinConnection object defines a connection to a bitcoin server.
It is a thin wrapper around a JSON-RPC API connection.
Up-to-date for SVN revision 198.
Arguments to constructor:
- *user* -- Authenticate as user.
- *password* -- Authentication password.
- *host* -- Bitcoin JSON-RPC host.
- *port* -- Bitcoin JSON-RPC port.
"""
def __init__(self, user, password, host='localhost', port=8332,
use_https=False):
"""
Create a new bitcoin server connection.
"""
url = 'http{s}://{user}:{password}@{host}:{port}/'.format(
s='s' if use_https else '',
user=user, password=password, host=host, port=port)
self.url = url
self.proxy = AuthServiceProxy(url, exception_wrapper=wrap_exception)
def stop(self):
"""
Stop bitcoin server.
"""
self.proxy.stop()
def getblock(self, hash):
"""
Returns information about the given block hash.
"""
return self.proxy.getblock(hash)
def getblockcount(self):
"""
Returns the number of blocks in the longest block chain.
"""
return self.proxy.getblockcount()
def getblockhash(self, index):
"""
Returns hash of block in best-block-chain at index.
:param index: index ob the block
"""
return self.proxy.getblockhash(index)
def getblocknumber(self):
"""
Returns the block number of the latest block in the longest block chain.
Deprecated. Use getblockcount instead.
"""
return self.getblockcount()
def getconnectioncount(self):
"""
Returns the number of connections to other nodes.
"""
return self.proxy.getconnectioncount()
def getdifficulty(self):
"""
Returns the proof-of-work difficulty as a multiple of the minimum difficulty.
"""
return self.proxy.getdifficulty()
def getgenerate(self):
"""
Returns :const:`True` or :const:`False`, depending on whether generation is enabled.
"""
return self.proxy.getgenerate()
def setgenerate(self, generate, genproclimit=None):
"""
Enable or disable generation (mining) of coins.
Arguments:
- *generate* -- is :const:`True` or :const:`False` to turn generation on or off.
- *genproclimit* -- Number of processors that are used for generation, -1 is unlimited.
"""
if genproclimit is None:
return self.proxy.setgenerate(generate)
else:
return self.proxy.setgenerate(generate, genproclimit)
def gethashespersec(self):
"""
Returns a recent hashes per second performance measurement while generating.
"""
return self.proxy.gethashespersec()
def getinfo(self):
"""
Returns an :class:`~bitcoinrpc.data.ServerInfo` object containing various state info.
"""
return ServerInfo(**self.proxy.getinfo())
def getmininginfo(self):
"""
Returns an :class:`~bitcoinrpc.data.MiningInfo` object containing various
mining state info.
"""
return MiningInfo(**self.proxy.getmininginfo())
def getnewaddress(self, account=None):
"""
Returns a new bitcoin address for receiving payments.
Arguments:
- *account* -- If account is specified (recommended), it is added to the address book
so that payments received with the address will be credited to it.
"""
if account is None:
return self.proxy.getnewaddress()
else:
return self.proxy.getnewaddress(account)
def getaccountaddress(self, account):
"""
Returns the current bitcoin address for receiving payments to an account.
Arguments:
- *account* -- Account for which the address should be returned.
"""
return self.proxy.getaccountaddress(account)
def setaccount(self, bitcoinaddress, account):
"""
Sets the account associated with the given address.
Arguments:
- *bitcoinaddress* -- Bitcoin address to associate.
- *account* -- Account to associate the address to.
"""
return self.proxy.setaccount(bitcoinaddress, account)
def getaccount(self, bitcoinaddress):
"""
Returns the account associated with the given address.
Arguments:
- *bitcoinaddress* -- Bitcoin address to get account for.
"""
return self.proxy.getaccount(bitcoinaddress)
def getaddressesbyaccount(self, account):
"""
Returns the list of addresses for the given account.
Arguments:
- *account* -- Account to get list of addresses for.
"""
return self.proxy.getaddressesbyaccount(account)
    def sendtoaddress(self, bitcoinaddress, amount, comment=None, comment_to=None):
        """
        Sends *amount* from the server's available balance to *bitcoinaddress*.

        Arguments:

        - *bitcoinaddress* -- Bitcoin address to send to.
        - *amount* -- Amount to send (float, rounded to the nearest 0.01).
        - *comment* -- Comment for transaction.
        - *comment_to* -- Comment for to-address.  Only forwarded when *comment*
          is also given, because the RPC arguments are positional.
        """
        if comment is None:
            # NOTE: comment_to is silently ignored in this branch (positional RPC args).
            return self.proxy.sendtoaddress(bitcoinaddress, amount)
        elif comment_to is None:
            return self.proxy.sendtoaddress(bitcoinaddress, amount, comment)
        else:
            return self.proxy.sendtoaddress(bitcoinaddress, amount, comment, comment_to)
    def getreceivedbyaddress(self, bitcoinaddress, minconf=1):
        """
        Returns the total amount received by a bitcoin address in transactions with at least a
        certain number of confirmations.

        Arguments:

        - *bitcoinaddress* -- Address to query for total amount.
        - *minconf* -- Number of confirmations to require, defaults to 1.
        """
        return self.proxy.getreceivedbyaddress(bitcoinaddress, minconf)
    def getreceivedbyaccount(self, account, minconf=1):
        """
        Returns the total amount received by addresses with an account in transactions with
        at least a certain number of confirmations.

        Arguments:

        - *account* -- Account to query for total amount.
        - *minconf* -- Number of confirmations to require, defaults to 1.
        """
        return self.proxy.getreceivedbyaccount(account, minconf)
    def gettransaction(self, txid):
        """
        Get detailed information about a transaction.

        Arguments:

        - *txid* -- Transaction id for which the info should be returned.
        """
        # Wrap the raw JSON-RPC dict in the typed TransactionInfo container.
        return TransactionInfo(**self.proxy.gettransaction(txid))
    def getrawtransaction(self, txid, verbose=True):
        """
        Get transaction raw info.

        Arguments:

        - *txid* -- Transaction id for which the info should be returned.
        - *verbose* -- If False, return only the "hex" of the transaction.
        """
        # The RPC expects 0/1, so coerce the boolean to int.
        return self.proxy.getrawtransaction(txid, int(verbose))
    def createrawtransaction(self, inputs, outputs):
        """
        Creates a raw transaction spending given inputs
        (a list of dictionaries, each containing a transaction id and an output number),
        sending to given address(es).

        Returns hex-encoded raw transaction.

        Example usage:
        >>> conn.createrawtransaction(
                [{"txid": "a9d4599e15b53f3eb531608ddb31f48c695c3d0b3538a6bda871e8b34f2f430c",
                  "vout": 0}],
                {"mkZBYBiq6DNoQEKakpMJegyDbw2YiNQnHT":50})

        Arguments:

        - *inputs* -- A list of {"txid": txid, "vout": n} dictionaries.
        - *outputs* -- A dictionary mapping (public) addresses to the amount
          they are to be paid.
        """
        return self.proxy.createrawtransaction(inputs, outputs)
    def signrawtransaction(self, hexstring, previous_transactions=None, private_keys=None):
        """
        Sign inputs for raw transaction (serialized, hex-encoded).

        Returns a dictionary with the keys:
        "hex": raw transaction with signature(s) (hex-encoded string)
        "complete": 1 if transaction has a complete set of signature(s), 0 if not

        Arguments:

        - *hexstring* -- A hex string of the transaction to sign.
        - *previous_transactions* -- A (possibly empty) list of dictionaries of the form:
          {"txid": txid, "vout": n, "scriptPubKey": hex, "redeemScript": hex}, representing
          previous transaction outputs that this transaction depends on but may not yet be
          in the block chain.
        - *private_keys* -- A (possibly empty) list of base58-encoded private
          keys that, if given, will be the only keys used to sign the transaction.
        """
        return self.proxy.signrawtransaction(hexstring, previous_transactions, private_keys)
    def sendrawtransaction(self, hexstring):
        """
        Send a signed raw transaction to the Bitcoin network.

        Returns the transaction hash, or an error if the transaction is invalid
        for any reason.
        """
        return self.proxy.sendrawtransaction(hexstring)
    def decoderawtransaction(self, hexstring):
        """
        Produces a human-readable JSON object for a raw transaction.

        Arguments:

        - *hexstring* -- A hex string of the transaction to be decoded.
        """
        # Copy into a plain dict so callers get a standard mapping type.
        return dict(self.proxy.decoderawtransaction(hexstring))
    def listsinceblock(self, block_hash):
        """
        Get all transactions in blocks since the block with *block_hash*.

        Returns the raw ``listsinceblock`` RPC result with the ``transactions``
        entries wrapped in :class:`~bitcoinrpc.data.TransactionInfo` objects.
        """
        res = self.proxy.listsinceblock(block_hash)
        res['transactions'] = [TransactionInfo(**x) for x in res['transactions']]
        return res
    def listreceivedbyaddress(self, minconf=1, includeempty=False):
        """
        Returns a list of addresses.

        Each address is represented with a :class:`~bitcoinrpc.data.AddressInfo` object.

        Arguments:

        - *minconf* -- Minimum number of confirmations before payments are included.
        - *includeempty* -- Whether to include addresses that haven't received any payments.
        """
        return [AddressInfo(**x) for x in
                self.proxy.listreceivedbyaddress(minconf, includeempty)]
def listaccounts(self, minconf=1, as_dict=True):
"""
Returns a list of account names.
Arguments:
- *minconf* -- Minimum number of confirmations before payments are included.
- *as_dict* -- Returns a dictionary of account names, with their balance as values.
"""
if as_dict:
return dict(self.proxy.listaccounts(minconf))
else:
return self.proxy.listaccounts(minconf).keys()
    def listreceivedbyaccount(self, minconf=1, includeempty=False):
        """
        Returns a list of accounts.

        Each account is represented with a :class:`~bitcoinrpc.data.AccountInfo` object.

        Arguments:

        - *minconf* -- Minimum number of confirmations before payments are included.
        - *includeempty* -- Whether to include addresses that haven't received any payments.
        """
        return [AccountInfo(**x) for x in
                self.proxy.listreceivedbyaccount(minconf, includeempty)]
def listtransactions(self, account=None, count=10, from_=0, address=None):
"""
Returns a list of the last transactions for an account.
Each transaction is represented with a :class:`~bitcoinrpc.data.TransactionInfo` object.
Arguments:
- *account* -- Account to list transactions from. Return transactions from
all accounts if None.
- *count* -- Number of transactions to return.
- *from_* -- Skip the first <from_> transactions.
- *address* -- Receive address to consider
"""
accounts = [account] if account is not None else self.listaccounts(as_dict=True).iterkeys()
return [TransactionInfo(**tx) for acc in accounts for
tx in self.proxy.listtransactions(acc, count, from_) if
address is None or tx["address"] == address]
    def backupwallet(self, destination):
        """
        Safely copies ``wallet.dat`` to *destination*, which can be a directory or a path
        with filename.

        Arguments:

        - *destination* -- directory or path with filename to backup wallet to.
        """
        return self.proxy.backupwallet(destination)
    def validateaddress(self, validateaddress):
        """
        Validate a bitcoin address and return information for it.

        The information is represented by a :class:`~bitcoinrpc.data.AddressValidation` object.

        Arguments:

        - *validateaddress* -- Address to validate.
        """
        return AddressValidation(**self.proxy.validateaddress(validateaddress))
    def getbalance(self, account=None, minconf=None):
        """
        Get the current balance, either for an account or the total server balance.

        Arguments:

        - *account* -- If this parameter is specified, returns the balance in the account.
        - *minconf* -- Minimum number of confirmations required for transferred balance.
        """
        # NOTE(review): arguments are forwarded positionally, so passing minconf
        # without account sends minconf as the *first* positional argument
        # (i.e. where the account is expected) -- confirm this is intended.
        args = []
        if account is not None:
            args.append(account)
        if minconf is not None:
            args.append(minconf)
        return self.proxy.getbalance(*args)
def move(self, fromaccount, toaccount, amount, minconf=1, comment=None):
"""
Move from one account in your wallet to another.
Arguments:
- *fromaccount* -- Source account name.
- *toaccount* -- Destination account name.
- *amount* -- Amount to transfer.
- *minconf* -- Minimum number of confirmations required for transferred balance.
- *comment* -- Comment to add to transaction log.
"""
if comment is None:
return self.proxy.move(fromaccount, toaccount, amount, minconf)
else:
return self.proxy.move(fromaccount, toaccount, amount, minconf, comment)
def sendfrom(self, fromaccount, tobitcoinaddress, amount, minconf=1, comment=None,
comment_to=None):
"""
Sends amount from account's balance to bitcoinaddress. This method will fail
if there is less than amount bitcoins with minconf confirmations in the account's
balance (unless account is the empty-string-named default account; it
behaves like the sendtoaddress method). Returns transaction ID on success.
Arguments:
- *fromaccount* -- Account to send from.
- *tobitcoinaddress* -- Bitcoin address to send to.
- *amount* -- Amount to send (float, rounded to the nearest 0.01).
- *minconf* -- Minimum number of confirmations required for transferred balance.
- *comment* -- Comment for transaction.
- *comment_to* -- Comment for to-address.
"""
if comment is None:
return self.proxy.sendfrom(fromaccount, tobitcoinaddress, amount, minconf)
elif comment_to is None:
return self.proxy.sendfrom(fromaccount, tobitcoinaddress, amount, minconf, comment)
else:
return self.proxy.sendfrom(fromaccount, tobitcoinaddress, amount, minconf,
comment, comment_to)
def sendmany(self, fromaccount, todict, minconf=1, comment=None):
"""
Sends specified amounts from account's balance to bitcoinaddresses. This method will fail
if there is less than total amount bitcoins with minconf confirmations in the account's
balance (unless account is the empty-string-named default account; Returns transaction ID
on success.
Arguments:
- *fromaccount* -- Account to send from.
- *todict* -- Dictionary with Bitcoin addresses as keys and amounts as values.
- *minconf* -- Minimum number of confirmations required for transferred balance.
- *comment* -- Comment for transaction.
"""
if comment is None:
return self.proxy.sendmany(fromaccount, todict, minconf)
else:
return self.proxy.sendmany(fromaccount, todict, minconf, comment)
    def verifymessage(self, bitcoinaddress, signature, message):
        """
        Verifies a signature given the bitcoinaddress used to sign,
        the signature itself, and the message that was signed.

        Returns :const:`True` if the signature is valid, and :const:`False` if it is invalid.

        Arguments:

        - *bitcoinaddress* -- the bitcoinaddress used to sign the message
        - *signature* -- the signature to be verified
        - *message* -- the message that was originally signed
        """
        return self.proxy.verifymessage(bitcoinaddress, signature, message)
    def getwork(self, data=None):
        """
        Get work for remote mining, or submit result.

        If data is specified, the server tries to solve the block
        using the provided data and returns :const:`True` if it was successful.
        If not, the function returns formatted hash data (:class:`~bitcoinrpc.data.WorkItem`)
        to work on.

        Arguments:

        - *data* -- Result from remote mining.
        """
        if data is None:
            # Only if no data provided, it returns a WorkItem
            return WorkItem(**self.proxy.getwork())
        else:
            return self.proxy.getwork(data)
    def listunspent(self, minconf=1, maxconf=999999):
        """
        Returns a list of unspent transaction inputs in the wallet.

        Arguments:

        - *minconf* -- Minimum number of confirmations required to be listed.
        - *maxconf* -- Maximal number of confirmations allowed to be listed.
        """
        return [TransactionInfo(**tx) for tx in
                self.proxy.listunspent(minconf, maxconf)]
    def keypoolrefill(self):
        """
        Fills the keypool; requires the wallet passphrase to be set
        (see :meth:`walletpassphrase`).
        """
        self.proxy.keypoolrefill()
    def walletpassphrase(self, passphrase, timeout, dont_raise=False):
        """
        Stores the wallet decryption key in memory for <timeout> seconds.

        Returns :const:`True` on success (including when the wallet is already
        unlocked and *dont_raise* is set).

        - *passphrase* -- The wallet passphrase.
        - *timeout* -- Time in seconds to keep the wallet unlocked
          (by keeping the passphrase in memory).
        - *dont_raise* -- instead of raising `~bitcoinrpc.exceptions.WalletPassphraseIncorrect`
          return False.
        """
        try:
            self.proxy.walletpassphrase(passphrase, timeout)
            return True
        except BitcoinException as exception:
            if dont_raise:
                # A wrong passphrase becomes False; an already-unlocked wallet
                # still counts as success.
                if isinstance(exception, WalletPassphraseIncorrect):
                    return False
                elif isinstance(exception, WalletAlreadyUnlocked):
                    return True
            # Any other error (or dont_raise=False) propagates to the caller.
            raise exception
    def walletlock(self):
        """
        Removes the wallet encryption key from memory, locking the wallet.

        After calling this method, you will need to call walletpassphrase
        again before being able to call any methods which require the wallet
        to be unlocked.
        """
        return self.proxy.walletlock()
    def walletpassphrasechange(self, oldpassphrase, newpassphrase, dont_raise=False):
        """
        Changes the wallet passphrase from <oldpassphrase> to <newpassphrase>.

        Returns :const:`True` on success.

        Arguments:

        - *dont_raise* -- instead of raising `~bitcoinrpc.exceptions.WalletPassphraseIncorrect`
          return False.
        """
        try:
            self.proxy.walletpassphrasechange(oldpassphrase, newpassphrase)
            return True
        except BitcoinException as exception:
            # Only a wrong old passphrase is converted to False; everything
            # else propagates to the caller.
            if dont_raise and isinstance(exception, WalletPassphraseIncorrect):
                return False
            raise exception
    def dumpprivkey(self, address):
        """
        Returns the private key belonging to <address>.

        Arguments:

        - *address* -- Bitcoin address whose private key should be returned.
        """
        return self.proxy.dumpprivkey(address)
| 35.166667 | 118 | 0.630954 |
572d43f645d213342b79a33a8babc2cd98de58f0 | 408 | py | Python | oi/loj/P6433/hack.py | Riteme/test | b511d6616a25f4ae8c3861e2029789b8ee4dcb8d | [
"BSD-Source-Code"
] | 3 | 2018-08-30T09:43:20.000Z | 2019-12-03T04:53:43.000Z | oi/loj/P6433/hack.py | Riteme/test | b511d6616a25f4ae8c3861e2029789b8ee4dcb8d | [
"BSD-Source-Code"
] | null | null | null | oi/loj/P6433/hack.py | Riteme/test | b511d6616a25f4ae8c3861e2029789b8ee4dcb8d | [
"BSD-Source-Code"
] | null | null | null | #!/usr/bin/env pypy
# Stress-test driver (Python 2 / pypy, per the `print cnt` statement): keeps
# generating random test cases, running them through the fast solution
# (main.cpp) and the brute-force reference (brute.cpp), and diffing the
# outputs until a mismatch makes sh()'s assertion abort the loop.
from os import *
def sh(x):
    # Run a shell command; abort the script if it exits non-zero.
    assert not system(x)
# Work in /tmp so compilation artifacts don't pollute the source directory.
sh('cp brute.cpp /tmp')
sh('cp main.cpp /tmp')
sh('cp gen.py /tmp')
chdir('/tmp')
sh('g++ main.cpp -O3 -o a.out')
sh('g++ brute.cpp -O3 -o b.out')
cnt = 0
while True:
    # Progress counter: how many random cases have matched so far.
    cnt += 1
    print cnt
    sh('./gen.py 9 100 > data.in')
    sh('./a.out < data.in > a.ans')
    sh('./b.out < data.in > b.ans')
    # -Bb: ignore blank-line and whitespace differences when comparing.
    sh('diff -Bb a.ans b.ans')
a8a549260fca4ab33778fb072a160631e7ec227f | 427 | py | Python | ginger/scripts/templates/project_templates/project_name/urls.py | vivsh/django-ginger | d293109becc72845a23f2aeb732ed808a7a67d69 | [
"MIT"
] | null | null | null | ginger/scripts/templates/project_templates/project_name/urls.py | vivsh/django-ginger | d293109becc72845a23f2aeb732ed808a7a67d69 | [
"MIT"
] | null | null | null | ginger/scripts/templates/project_templates/project_name/urls.py | vivsh/django-ginger | d293109becc72845a23f2aeb732ed808a7a67d69 | [
"MIT"
] | null | null | null | from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.conf import settings
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'^accounts/', include("{{project_name}}.registration.urls")),
url(r'', include("{{project_name}}.main.urls")),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 35.583333 | 70 | 0.735363 |
74ee5b7b57bda48ea65bd7cc70672e7de3e48b7b | 12,424 | py | Python | precon_project/precon/models.py | kevinr/precon-project | 12975c6c21cfc4b4114c6b88b10ae114a0cf5e89 | [
"MIT"
] | null | null | null | precon_project/precon/models.py | kevinr/precon-project | 12975c6c21cfc4b4114c6b88b10ae114a0cf5e89 | [
"MIT"
] | null | null | null | precon_project/precon/models.py | kevinr/precon-project | 12975c6c21cfc4b4114c6b88b10ae114a0cf5e89 | [
"MIT"
] | null | null | null | import string, random
from datetime import datetime
from django.db import models
from django.db.models.query import QuerySet
from django.utils.safestring import mark_safe
def id_generator(size=6, chars=string.ascii_lowercase + string.digits):
    """Return a random identifier of *size* characters drawn from *chars*."""
    return ''.join(random.choice(chars) for _ in range(size))
class Participant(models.Model):
creation_time = models.DateTimeField(auto_now_add=True, editable=False)
modification_time = models.DateTimeField(auto_now=True, editable=False)
nonce = models.CharField(default=lambda: id_generator(size=6), unique=True, editable=False, max_length=6)
name = models.CharField(max_length=50, help_text="Your name, as you would like it to appear in any published material")
email = models.EmailField(max_length=50)
phone = models.CharField("Phone number", max_length=15, null=True, blank=True, help_text="If you're interested in presenting (as a panelist etc.), please give us a phone number so we can reach you during the convention if necessary.")
panel_proposals_responded = models.ManyToManyField('PanelProposal', through='PanelProposalResponse', related_name='participants_responded', null=True, blank=True)
slots_attending = models.ManyToManyField('Slot', verbose_name="At which of these times do you expect to be in attendance at Precon?", related_name='participants_attending', null=True, blank=True)
slots_available = models.ManyToManyField('Slot', verbose_name="At which of these times would you be available AND HAPPY to sit on panels?", related_name='participants_available', null=True, blank=True)
slots_maybe = models.ManyToManyField('Slot', verbose_name="At which of these times would you be available to sit on panels?", related_name='participants_maybe', null=True, blank=True)
anything_else = models.TextField("Anything else you'd like to tell us?", max_length=1000, null=True, blank=True)
MAX_PANELS_CHOICES = (
('0', '0'),
('1', '1'),
('2', '2'),
('3', '3'),
('4', '4'),
('5', '5'),
('6', '6'),
('7', '7'),
('8', '8'),
('9', '9'),
('10', '10'),
)
max_panels = models.CharField("How many panels/other events can we schedule you to present for at MAXIMUM?", max_length=10, choices=MAX_PANELS_CHOICES, default='0')
def responses(self):
return PanelProposalResponse.objects.filter(participant=self)
# XXX TODO FIXME
def max_panels_as_int(self):
"Yes this is a hack."
return int(self.max_panels)
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
class Panelist(models.Model):
    """A presenter; may optionally be linked to a survey Participant."""
    name = models.CharField(max_length=50, unique=True)
    # SET_NULL keeps the panelist record when the linked participant is deleted.
    participant = models.ForeignKey(Participant, default=None, null=True, blank=True, related_name='panelists', on_delete=models.SET_NULL)
    def __unicode__(self):
        # Prefer the linked participant's name when one exists.
        return self.participant and self.participant.name or self.name
    def name_nbsp(self):
        """Return the display name with spaces replaced by &nbsp; (for HTML)."""
        return mark_safe('&nbsp;'.join(unicode(self).split(' ')))
    def as_email_html(self):
        """Return 'Name <email>'; assumes a linked participant exists."""
        return mark_safe("%s &lt;%s&gt;" % (self.name, self.participant.email))
    def panels_by_slot(self):
        """Return [(slot, panels in that slot), ...] for every Slot."""
        return [ (slot, self.panels.filter(slot=slot)) for slot in Slot.objects.all() ]
    def panels_moderating_by_slot(self):
        """Return [(slot, panels moderated in that slot), ...] for every Slot."""
        return [ (slot, self.panels_moderating.filter(slot=slot)) for slot in Slot.objects.all() ]
    class Meta:
        ordering = ['name']
class PanelProposal(models.Model):
    """A proposed event (panel, talk, etc.) that participants vote on."""
    PANEL = 'Panel'
    TALK = 'Talk'
    WORKSHOP = 'Workshop'
    DISCUSSION = 'Discussion'
    TABLETOP = 'Tabletop Game'
    TYPE_CHOICES = (
        (PANEL, PANEL),
        (TALK, TALK),
        (WORKSHOP, WORKSHOP),
        (DISCUSSION, DISCUSSION),
        (TABLETOP, TABLETOP),
    )
    name = models.CharField(max_length=100, unique=True)
    type = models.CharField(max_length=50, choices=TYPE_CHOICES, default=PANEL)
    blurb = models.TextField(max_length=4000)
    needs_panelists = models.BooleanField(default=True)
    panelists = models.ManyToManyField(Panelist, related_name='panelproposals_panelist', null=True, blank=True)
    suggested_by = models.ForeignKey(Panelist, related_name='panelproposals_suggested', null=True, blank=True)
    def responses(self):
        """Return all PanelProposalResponse rows for this proposal."""
        return PanelProposalResponse.objects.filter(panel_proposal=self)
    def attending_score(self):
        """Weighted interest score: definitely=3, interested=2, maybe=1."""
        rs = self.responses()
        return (rs.attending_definitely_interesteds().count() * 3) + (rs.attending_interesteds().count() * 2) + rs.attending_potentially_interesteds().count()
    def negativity(self):
        """Number of respondents actively disinterested in attending."""
        return self.responses().attending_actively_disinteresteds().count()
    def __unicode__(self):
        return "%s Proposal: \"%s\"" % (self.type, self.name,)
    class Meta:
        ordering = ['name']
class PanelProposalResponseQuerySet(QuerySet):
    """QuerySet with one chainable filter per interest level (bound as lambdas)."""
    # Presenting-interest filters.
    presenting_not_interesteds = lambda x: x.filter(presenting_interest=PanelProposalResponse.PRESENTING_NOT_INTERESTED)
    presenting_all = lambda x: x.exclude(presenting_interest=PanelProposalResponse.PRESENTING_NOT_INTERESTED)
    presenting_if_neededs = lambda x: x.filter(presenting_interest=PanelProposalResponse.PRESENTING_IF_NEEDED)
    presenting_interesteds = lambda x: x.filter(presenting_interest=PanelProposalResponse.PRESENTING_INTERESTED)
    presenting_pick_mes = lambda x: x.filter(presenting_interest=PanelProposalResponse.PRESENTING_PICK_ME)
    presenting_suggesters = lambda x: x.filter(presenting_interest=PanelProposalResponse.PRESENTING_SUGGESTER)
    # Attending-interest filters.
    attending_actively_disinteresteds = lambda x: x.filter(attending_interest=PanelProposalResponse.ATTENDING_ACTIVELY_DISINTERESTED)
    attending_not_interesteds = lambda x: x.filter(attending_interest=PanelProposalResponse.ATTENDING_NOT_INTERESTED)
    attending_potentially_interesteds = lambda x: x.filter(attending_interest=PanelProposalResponse.ATTENDING_POTENTIALLY_INTERESTED)
    attending_interesteds = lambda x: x.filter(attending_interest=PanelProposalResponse.ATTENDING_INTERESTED)
    attending_definitely_interesteds = lambda x: x.filter(attending_interest=PanelProposalResponse.ATTENDING_DEFINITELY_INTERESTED)
class PanelProposalResponseManager(models.Manager):
    """Manager that exposes the custom queryset's filter helpers directly."""
    def get_query_set(self):
        return PanelProposalResponseQuerySet(self.model)
    def __getattr__(self, name):
        # Delegate unknown attributes (e.g. attending_interesteds) to the
        # queryset so they can be called straight off the manager.
        return getattr(self.get_query_set(), name)
class PanelProposalResponse(models.Model):
    """One participant's survey answer for one panel proposal
    (the through-model of Participant.panel_proposals_responded)."""
    # Presenting-interest levels, ordered from least to most interested.
    PRESENTING_NOT_INTERESTED = 'not interested in presenting'
    PRESENTING_IF_NEEDED = 'could be a presenter if needed'
    PRESENTING_INTERESTED = 'would be interested in presenting'
    PRESENTING_PICK_ME = 'would like to present'
    PRESENTING_SUGGESTER = 'I suggested this, and I would like to present'
    PRESENTING_INTEREST_CHOICES = (
        (PRESENTING_NOT_INTERESTED, PRESENTING_NOT_INTERESTED),
        (PRESENTING_IF_NEEDED, PRESENTING_IF_NEEDED),
        (PRESENTING_INTERESTED, PRESENTING_INTERESTED),
        (PRESENTING_PICK_ME, PRESENTING_PICK_ME),
        (PRESENTING_SUGGESTER, PRESENTING_SUGGESTER),
    )
    # Attending-interest levels, ordered from least to most interested.
    ATTENDING_ACTIVELY_DISINTERESTED = 'actively disinterested in attending'
    ATTENDING_NOT_INTERESTED = 'not interested in attending'
    ATTENDING_POTENTIALLY_INTERESTED = 'might attend'
    ATTENDING_INTERESTED = 'will likely attend'
    ATTENDING_DEFINITELY_INTERESTED = 'will definitely attend'
    ATTENDING_INTEREST_CHOICES = (
        (ATTENDING_ACTIVELY_DISINTERESTED, ATTENDING_ACTIVELY_DISINTERESTED),
        (ATTENDING_NOT_INTERESTED, ATTENDING_NOT_INTERESTED),
        (ATTENDING_POTENTIALLY_INTERESTED, ATTENDING_POTENTIALLY_INTERESTED),
        (ATTENDING_INTERESTED, ATTENDING_INTERESTED),
        (ATTENDING_DEFINITELY_INTERESTED, ATTENDING_DEFINITELY_INTERESTED),
    )
    creation_time = models.DateTimeField(auto_now_add=True, editable=False)
    modification_time = models.DateTimeField(auto_now=True, editable=False)
    participant = models.ForeignKey(Participant)
    panel_proposal = models.ForeignKey(PanelProposal)
    attending_interest = models.CharField("How interested would you be in attending this event?", max_length=50, choices=ATTENDING_INTEREST_CHOICES, default=ATTENDING_NOT_INTERESTED)
    presenting_interest = models.CharField("How interested would you be in presenting at this event?", max_length=50, choices=PRESENTING_INTEREST_CHOICES, default=PRESENTING_NOT_INTERESTED)
    presenting_comments = models.TextField("What (if applicable) makes you interested in presenting at this event?", max_length=1000, null=True, blank=True, help_text="If you suggested this and said so in the field above, you don't need to fill this out.")
    attending_comments = models.TextField("Any comments?", max_length=1000, null=True, blank=True)
    # managers
    objects = PanelProposalResponseManager()
    def __unicode__(self):
        return "Response: \"%s\": %s" % (self.panel_proposal.name, self.participant)
class Panel(models.Model):
    """A scheduled event built from an accepted proposal: has a room,
    one or more slots, panelists and an optional moderator."""
    PANEL = 'Panel'
    PANEL_PRESENTER = 'Panelist'
    TALK = 'Talk'
    TALK_PRESENTER = 'Speaker'
    WORKSHOP = 'Workshop'
    WORKSHOP_PRESENTER = 'Leader'
    DISCUSSION = 'Discussion'
    DISCUSSION_PRESENTER = 'Facilitator'
    TABLETOP = 'Tabletop Game'
    TABLETOP_PRESENTER = 'GM'
    TYPE_CHOICES = (
        (PANEL, PANEL),
        (TALK, TALK),
        (WORKSHOP, WORKSHOP),
        (DISCUSSION, DISCUSSION),
        (TABLETOP, TABLETOP),
    )
    # Maps event type -> what its presenters are called in published material.
    PRESENTER_TYPES = {
        PANEL: PANEL_PRESENTER,
        TALK: TALK_PRESENTER,
        WORKSHOP: WORKSHOP_PRESENTER,
        DISCUSSION: DISCUSSION_PRESENTER,
        TABLETOP: TABLETOP_PRESENTER,
    }
    type = models.CharField(max_length=50, choices=TYPE_CHOICES, default=PANEL)
    name = models.CharField(max_length=100, unique=True)
    blurb = models.TextField(max_length=4000)
    panelists = models.ManyToManyField(Panelist, related_name='panels', null=True, blank=True)
    # ManyToMany because a long event can span multiple consecutive slots.
    slot = models.ManyToManyField('Slot', related_name='panels', null=True, blank=True)
    room = models.ForeignKey('Room', related_name='panels', null=True, blank=True)
    panel_proposal = models.ForeignKey('PanelProposal', related_name='panels_accepted', null=True, blank=True)
    moderator = models.ForeignKey(Panelist, related_name='panels_moderating', null=True, blank=True)
    needs_projector = models.BooleanField('Needs a projector?')
    def __unicode__(self):
        return "\"%s\"" % (self.name,)
    def anchor(self):
        """HTML anchor id for linking to this panel on the schedule page."""
        return "%d" % (self.id,)
    def panelists_nbsp(self):
        """Return panelist display names (nbsp-joined), moderator first
        and marked with '(m)'."""
        panelists = [panelist for panelist in self.panelists.all()]
        panelist_names = []
        if self.moderator and self.moderator in panelists:
            panelists.remove(self.moderator)
            panelist_names.append(mark_safe("%s&nbsp;(m)" % (self.moderator.name_nbsp(),)))
        panelist_names.extend([panelist.name_nbsp() for panelist in panelists])
        return panelist_names
    def panelists_as_email_html(self):
        """Return 'Name <email>' strings for all panelists."""
        return [panelist.as_email_html() for panelist in self.panelists.all()]
    def presenter_type(self):
        """Return the presenter title for this panel's type (e.g. 'Speaker')."""
        return self.PRESENTER_TYPES[self.type]
    def length(self):
        """Length in slots (number of slots this panel occupies)."""
        return self.slot.count()
    class Meta:
        ordering = ['name']
class Schedule(models.Model):
    """A named schedule grouping rooms and slots (see SiteConfig.current_schedule)."""
    name = models.CharField(max_length=20, unique=True)
    def __unicode__(self):
        return self.name
class Day(models.Model):
    """A convention day; slots are grouped under days."""
    name = models.CharField(max_length=20)
    def __unicode__(self):
        return self.name
class Slot(models.Model):
    """A time slot within a schedule, optionally attached to a Day."""
    schedule = models.ForeignKey(Schedule, related_name='slots')
    name = models.CharField(max_length=20)
    day = models.ForeignKey('Day', related_name='slots', null=True, blank=True)
    def __unicode__(self):
        return self.name
    def get_panel_for_room(self, room):
        """Return the panel scheduled in *room* during this slot, or None."""
        return next(
            (panel for panel in self.panels.all() if panel.room == room),
            None)
class Room(models.Model):
    """A room within a schedule where panels take place."""
    schedule = models.ForeignKey(Schedule, related_name='rooms')
    name = models.CharField(max_length=20)
    def __unicode__(self):
        return self.name
class Change(models.Model):
    """A free-text changelog entry, listed newest first."""
    description = models.TextField(max_length=4000)
    def __unicode__(self):
        return self.description
    class Meta:
        ordering = ['-id']
class SiteConfig(models.Model):
    """Singleton-style site settings: which schedule is currently published."""
    current_schedule = models.ForeignKey(Schedule, default=None, null=True, blank=True, on_delete=models.SET_NULL)
| 42.258503 | 256 | 0.721265 |
5758e56521d675cf05a3f30368bddcb2ed79c429 | 924 | py | Python | app/main/forms.py | cecibarasa/JVUNE | 6fb8d40538852f12b4b683a45d8f4457a85a3657 | [
"MIT"
] | 2 | 2020-06-19T18:48:47.000Z | 2021-06-12T22:08:12.000Z | app/main/forms.py | cecibarasa/JVUNE | 6fb8d40538852f12b4b683a45d8f4457a85a3657 | [
"MIT"
] | null | null | null | app/main/forms.py | cecibarasa/JVUNE | 6fb8d40538852f12b4b683a45d8f4457a85a3657 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField,TextAreaField,SubmitField,SelectField
from wtforms.validators import Required, Email, Length
class BlogForm(FlaskForm):
    """Form for posting a new blog entry."""
    author = StringField('Author', validators = [Required()])
    text = TextAreaField('Blog',validators = [Required()])
    submit = SubmitField('Post')
class UpdateProfile(FlaskForm):
    """Form for updating a user's profile bio."""
    bio = TextAreaField('Update Bio', validators = [Required()])
    submit = SubmitField('Submit')
class CommentForm(FlaskForm):
    """Form for leaving a comment on a blog post (name limited to 3-20 chars)."""
    name = StringField('Your name', validators = [Required(), Length(min = 3, max = 20)])
    text = TextAreaField('Leave a Comment',validators = [Required()])
    submit = SubmitField('Add Comment')
class SubscriberForm(FlaskForm):
    """Form for subscribing to the mailing list (email is validated)."""
    name = StringField('Your name', validators = [Required()])
    email = StringField('Your email address', validators = [Required(), Email()])
    submit = SubmitField('Subscribe')
f42a2c66d69161d42c0e397980388505590e78b0 | 18,305 | py | Python | sdk/python/pulumi_azure_native/network/virtual_network_peering.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/virtual_network_peering.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/virtual_network_peering.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['VirtualNetworkPeering']
class VirtualNetworkPeering(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allow_forwarded_traffic: Optional[pulumi.Input[bool]] = None,
allow_gateway_transit: Optional[pulumi.Input[bool]] = None,
allow_virtual_network_access: Optional[pulumi.Input[bool]] = None,
do_not_verify_remote_gateways: Optional[pulumi.Input[bool]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
peering_state: Optional[pulumi.Input[Union[str, 'VirtualNetworkPeeringState']]] = None,
remote_address_space: Optional[pulumi.Input[pulumi.InputType['AddressSpaceArgs']]] = None,
remote_bgp_communities: Optional[pulumi.Input[pulumi.InputType['VirtualNetworkBgpCommunitiesArgs']]] = None,
remote_virtual_network: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
use_remote_gateways: Optional[pulumi.Input[bool]] = None,
virtual_network_name: Optional[pulumi.Input[str]] = None,
virtual_network_peering_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Peerings in a virtual network resource.
API Version: 2020-08-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] allow_forwarded_traffic: Whether the forwarded traffic from the VMs in the local virtual network will be allowed/disallowed in remote virtual network.
:param pulumi.Input[bool] allow_gateway_transit: If gateway links can be used in remote virtual networking to link to this virtual network.
:param pulumi.Input[bool] allow_virtual_network_access: Whether the VMs in the local virtual network space would be able to access the VMs in remote virtual network space.
:param pulumi.Input[bool] do_not_verify_remote_gateways: If we need to verify the provisioning state of the remote gateway.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[Union[str, 'VirtualNetworkPeeringState']] peering_state: The status of the virtual network peering.
:param pulumi.Input[pulumi.InputType['AddressSpaceArgs']] remote_address_space: The reference to the remote virtual network address space.
:param pulumi.Input[pulumi.InputType['VirtualNetworkBgpCommunitiesArgs']] remote_bgp_communities: The reference to the remote virtual network's Bgp Communities.
:param pulumi.Input[pulumi.InputType['SubResourceArgs']] remote_virtual_network: The reference to the remote virtual network. The remote virtual network can be in the same or different region (preview). See here to register for the preview and learn more (https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-create-peering).
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] type: Resource type.
:param pulumi.Input[bool] use_remote_gateways: If remote gateways can be used on this virtual network. If the flag is set to true, and allowGatewayTransit on remote peering is also true, virtual network will use gateways of remote virtual network for transit. Only one peering can have this flag set to true. This flag cannot be set if virtual network already has a gateway.
:param pulumi.Input[str] virtual_network_name: The name of the virtual network.
:param pulumi.Input[str] virtual_network_peering_name: The name of the peering.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['allow_forwarded_traffic'] = allow_forwarded_traffic
__props__['allow_gateway_transit'] = allow_gateway_transit
__props__['allow_virtual_network_access'] = allow_virtual_network_access
__props__['do_not_verify_remote_gateways'] = do_not_verify_remote_gateways
__props__['id'] = id
__props__['name'] = name
__props__['peering_state'] = peering_state
__props__['remote_address_space'] = remote_address_space
__props__['remote_bgp_communities'] = remote_bgp_communities
__props__['remote_virtual_network'] = remote_virtual_network
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['type'] = type
__props__['use_remote_gateways'] = use_remote_gateways
if virtual_network_name is None and not opts.urn:
raise TypeError("Missing required property 'virtual_network_name'")
__props__['virtual_network_name'] = virtual_network_name
__props__['virtual_network_peering_name'] = virtual_network_peering_name
__props__['etag'] = None
__props__['provisioning_state'] = None
__props__['resource_guid'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/latest:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/latest:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/v20160601:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20160601:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/v20160901:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20160901:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/v20161201:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20161201:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/v20170301:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20170301:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/v20170601:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20170601:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/v20170801:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20170801:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/v20170901:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20170901:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/v20171001:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20171001:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/v20171101:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20171101:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/v20180101:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180101:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/v20180201:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180201:VirtualNetworkPeering"), 
pulumi.Alias(type_="azure-native:network/v20180401:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180401:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/v20180601:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180601:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/v20180701:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180701:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/v20180801:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180801:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/v20181001:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20181001:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/v20181101:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20181101:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/v20181201:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20181201:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/v20190201:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190201:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/v20190401:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190401:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/v20190601:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190601:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/v20190701:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190701:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/v20190801:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190801:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/v20190901:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190901:VirtualNetworkPeering"), 
pulumi.Alias(type_="azure-native:network/v20191101:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20191101:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/v20191201:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20191201:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/v20200301:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200301:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/v20200401:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200401:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/v20200501:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200501:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/v20200601:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200601:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/v20200701:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200701:VirtualNetworkPeering"), pulumi.Alias(type_="azure-native:network/v20200801:VirtualNetworkPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200801:VirtualNetworkPeering")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(VirtualNetworkPeering, __self__).__init__(
'azure-native:network:VirtualNetworkPeering',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'VirtualNetworkPeering':
"""
Get an existing VirtualNetworkPeering resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["allow_forwarded_traffic"] = None
__props__["allow_gateway_transit"] = None
__props__["allow_virtual_network_access"] = None
__props__["do_not_verify_remote_gateways"] = None
__props__["etag"] = None
__props__["name"] = None
__props__["peering_state"] = None
__props__["provisioning_state"] = None
__props__["remote_address_space"] = None
__props__["remote_bgp_communities"] = None
__props__["remote_virtual_network"] = None
__props__["resource_guid"] = None
__props__["type"] = None
__props__["use_remote_gateways"] = None
return VirtualNetworkPeering(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="allowForwardedTraffic")
def allow_forwarded_traffic(self) -> pulumi.Output[Optional[bool]]:
"""
Whether the forwarded traffic from the VMs in the local virtual network will be allowed/disallowed in remote virtual network.
"""
return pulumi.get(self, "allow_forwarded_traffic")
@property
@pulumi.getter(name="allowGatewayTransit")
def allow_gateway_transit(self) -> pulumi.Output[Optional[bool]]:
"""
If gateway links can be used in remote virtual networking to link to this virtual network.
"""
return pulumi.get(self, "allow_gateway_transit")
@property
@pulumi.getter(name="allowVirtualNetworkAccess")
def allow_virtual_network_access(self) -> pulumi.Output[Optional[bool]]:
"""
Whether the VMs in the local virtual network space would be able to access the VMs in remote virtual network space.
"""
return pulumi.get(self, "allow_virtual_network_access")
@property
@pulumi.getter(name="doNotVerifyRemoteGateways")
def do_not_verify_remote_gateways(self) -> pulumi.Output[Optional[bool]]:
"""
If we need to verify the provisioning state of the remote gateway.
"""
return pulumi.get(self, "do_not_verify_remote_gateways")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="peeringState")
def peering_state(self) -> pulumi.Output[Optional[str]]:
"""
The status of the virtual network peering.
"""
return pulumi.get(self, "peering_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the virtual network peering resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="remoteAddressSpace")
def remote_address_space(self) -> pulumi.Output[Optional['outputs.AddressSpaceResponse']]:
"""
The reference to the remote virtual network address space.
"""
return pulumi.get(self, "remote_address_space")
@property
@pulumi.getter(name="remoteBgpCommunities")
def remote_bgp_communities(self) -> pulumi.Output[Optional['outputs.VirtualNetworkBgpCommunitiesResponse']]:
"""
The reference to the remote virtual network's Bgp Communities.
"""
return pulumi.get(self, "remote_bgp_communities")
@property
@pulumi.getter(name="remoteVirtualNetwork")
def remote_virtual_network(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
"""
The reference to the remote virtual network. The remote virtual network can be in the same or different region (preview). See here to register for the preview and learn more (https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-create-peering).
"""
return pulumi.get(self, "remote_virtual_network")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> pulumi.Output[str]:
"""
The resourceGuid property of the Virtual Network Peering resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def type(self) -> pulumi.Output[Optional[str]]:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="useRemoteGateways")
def use_remote_gateways(self) -> pulumi.Output[Optional[bool]]:
"""
If remote gateways can be used on this virtual network. If the flag is set to true, and allowGatewayTransit on remote peering is also true, virtual network will use gateways of remote virtual network for transit. Only one peering can have this flag set to true. This flag cannot be set if virtual network already has a gateway.
"""
return pulumi.get(self, "use_remote_gateways")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 70.949612 | 5,163 | 0.727178 |
f50c3bca2d549b75454aab78cbf6ae29310ef460 | 4,380 | py | Python | exercise2.py | ILS-Z399/05b-Exercises-pygame-physics | b8807ae1eedabe41e9e69e29b1a87cacb444b948 | [
"MIT"
] | 1 | 2020-12-30T12:15:52.000Z | 2020-12-30T12:15:52.000Z | exercise2.py | ILS-Z399/05b-Exercises-pygame-physics | b8807ae1eedabe41e9e69e29b1a87cacb444b948 | [
"MIT"
] | null | null | null | exercise2.py | ILS-Z399/05b-Exercises-pygame-physics | b8807ae1eedabe41e9e69e29b1a87cacb444b948 | [
"MIT"
] | 9 | 2018-03-30T17:05:40.000Z | 2018-04-18T00:55:24.000Z | #!/usr/bin/env python
'''
For every line in the collide method (lines 58-91), please add a comment describing what it does.
Try to describe each line within the context of the program as a whole, rather than just mechanically
Feel free to alter the parameters to see how things change. That can be a great way to be able to intuit what is supposed to be happening
I will do a few lines for you as an example
'''
import sys, logging, math, pygame, random as r
assert sys.version_info >= (3,4), 'This script requires at least Python 3.4'
logging.basicConfig(format='[%(filename)s:%(lineno)d] %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
screen_size = (WIDTH,HEIGHT) = (600,600)
FPS = 60
black = (0,0,0)
class Ball(pygame.sprite.Sprite):
def __init__(self, label, size, mass, color, position, direction):
pygame.sprite.Sprite.__init__(self)
self.label = label
self.size = size
self.image = pygame.Surface(size)
self.rect = self.image.get_rect()
pygame.draw.ellipse(self.image, color, self.rect)
self.image.set_colorkey((0,0,0))
(self.rect.x,self.rect.y) = position
self.direction = direction
self.mass = mass
self.collided = False
def update(self):
(dx,dy) = self.direction
self.rect.x += dx
self.rect.y += dy
(WIDTH,HEIGHT) = screen_size
if self.rect.right > WIDTH:
self.rect.right = WIDTH
dx *= -1
if self.rect.left < 0:
self.rect.left = 0
dx *= -1
if self.rect.top < 0:
self.rect.top = 0
dy *= -1
if self.rect.bottom > HEIGHT:
self.rect.bottom = HEIGHT
dy *= -1
self.direction = (dx,dy)
def collide(self, other_object):
'''
Checks to see if the object has collided with another object. Assumes that each collision will be calculated pairwise.
If there has been a collision, and the objects are still moving toward each other, the direction attribute of both objects is updated
'''
(dx,dy) = self.direction # the x and y components of the direction
(odx,ody) = other_object.direction # the x and y components of the other object's direction
(cx,cy) = self.rect.center
(ocx,ocy) = other_object.rect.center
radius = self.rect.width/2
oradius = other_object.rect.width/2
#find the hypotenuse
distance = math.sqrt(abs(cx-ocx)**2 + abs(cy-ocy)**2)
if distance <= 0:
distance = 0.1
combined_distance = (radius+oradius)
if distance <= combined_distance: #collision
normal = ((cx-ocx)/distance,(cy-ocy)/distance) # a vector tangent to the plane of collision
velocity_delta = ((odx-dx),(ody-dy)) #the relative difference between the speed of the two objects
(nx,ny) = normal
(vdx,vdy) = velocity_delta
dot_product = nx*vdx + ny*vdy
if dot_product >= 0: #check if the objects are moving toward each other
impulse_strength = dot_product * (self.mass / other_object.mass)
impulse = (ix,iy) = (impulse_strength * nx, impulse_strength * ny)
dx += ix * (other_object.mass/self.mass)
dy += iy * (other_object.mass/self.mass)
self.direction = (dx,dy)
odx -= ix * (self.mass/other_object.mass)
ody -= iy * (self.mass/other_object.mass)
other_object.direction = (odx,ody)
def draw(self,screen):
self.image.blit(screen,(0,0),self.rect)
def get_energy(self):
(dx,dy) = self.direction
return math.sqrt(abs(dx)**2 + abs(dy)**2)/self.mass
def main():
pygame.init()
screen = pygame.display.set_mode(screen_size)
clock = pygame.time.Clock()
balls = []
colors = [(255,212,59),(34,139,230),(240,62,62),(174,62,201),(253,126,20),(64,192,87),(194,37,92),(73,80,87)]
positions = [(260,180),(180,100),(260,100),(340,100),(220,60),(220,140),(300,140),(300,60)]
size = (50,50)
mass = 30
initial_velocity = (0,0)
for c in range(len(colors)):
initial_position = positions[c]
ball = Ball('{0}'.format(c+1),size,mass,colors[c],initial_position,initial_velocity)
balls.append(ball)
ball = Ball('Cue',size,mass,(255,255,255),(260,500),(0,-20))
balls.append(ball)
ball_group = pygame.sprite.Group()
for b in balls:
ball_group.add(b)
while True:
clock.tick(FPS)
screen.fill(black)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit(0)
for b in balls:
for c in balls:
if b.label != c.label:
b.collide(c)
ball_group.update()
ball_group.draw(screen)
pygame.display.flip()
if __name__ == '__main__':
main() | 31.06383 | 137 | 0.681963 |
1212371e350bc3f756f520e2e4e508df3d05cee5 | 7,871 | py | Python | contracts/vote.py | lucas7788/decentralized-voting-box | 62af6561bf917b7732e6c671463dcc1305b46e0c | [
"MIT"
] | null | null | null | contracts/vote.py | lucas7788/decentralized-voting-box | 62af6561bf917b7732e6c671463dcc1305b46e0c | [
"MIT"
] | null | null | null | contracts/vote.py | lucas7788/decentralized-voting-box | 62af6561bf917b7732e6c671463dcc1305b46e0c | [
"MIT"
] | 1 | 2018-11-18T03:13:46.000Z | 2018-11-18T03:13:46.000Z | """
A sample of OEP5 smart contract
"""
from boa.interop.System.Storage import GetContext, Get, Put, Delete
from boa.interop.System.Runtime import CheckWitness, Notify, Serialize, Deserialize
from boa.interop.System.ExecutionEngine import GetExecutingScriptHash
from boa.builtins import ToScriptHash, sha256, concat
# 举办选举活动
KEY_VOTE_ACTION = 'VoteAction'
# 候选人
KEY_CANDIDATE = 'Candidate' # 候选人
KEY_CANDIDATE_APPLY = 'Applier' # 申请人
KEY_POLL = 'Poll' # 票数
KEY_VICTOR = 'Victor' #竞选成功的人
ctx = GetContext()
selfAddr = GetExecutingScriptHash()
def Main(operation, args):
if operation == 'createVoteAction':
if len(args) != 2:
return False
actionName = args[0]
admin = args[1]
return createVoteAction(actionName, admin)
if operation == 'getVoteAction':
if len(args) != 1:
return False
actionName = args[0]
return getVoteAction(actionName)
if operation == 'applyToCandidate':
if len(args) != 2:
return False
actionName = args[0]
address = args[1]
return applyToCandidate(actionName, address)
if operation == 'getApplyInfo':
if len(args) != 1:
return False
actionName = args[0]
return getApplyInfo(actionName)
if operation == 'approveApply':
if len(args) != 3:
return False
actionName = args[0]
admin = args[1]
address = args[2]
return approveApply(actionName, admin, address)
if operation == 'getCandadite':
if len(args) != 1:
return False
actionName = args[0]
return getCandadite(actionName)
if operation == "vote":
if len(args) != 3:
return False
actionName = args[0]
voter = args[1]
candidate = args[2]
return vote(actionName, voter, candidate)
if operation == "getPoll":
actionName = args[0]
candadite = args[1]
return getPoll(actionName, candadite)
if operation == "endAction":
actionName = args[0]
admin = args[1]
return endAction(actionName, admin)
if operation == "getVictor":
actionName = args[0]
return getVictor(actionName)
def createVoteAction(actionName, admin):
'''
create a vote action
:return:
'''
if Get(ctx, concat(KEY_VOTE_ACTION, actionName)):
Notify(["action name have existed"])
return False
Notify([admin])
if not CheckWitness(admin):
Notify(["admin CheckWitness failed"])
return False
# 0表示投票进行中 1表示投票结束
actionInfo = [actionName, admin, 0]
info = Serialize(actionInfo)
Put(ctx, concat(KEY_VOTE_ACTION, actionName), info)
Notify(["create action success"])
return True
def getVoteAction(actionName):
'''
query vote action
:return:
'''
info = Get(ctx, concat(KEY_VOTE_ACTION, actionName))
if info is None or info == "":
return False
return Deserialize(info)
def applyToCandidate(actionName, address):
'''
apply to be candidate of a vote action
:return:
'''
#if not CheckWitness(address):
# return False
if not Get(ctx, concat(KEY_CANDIDATE_APPLY, actionName)):
appliesList = []
else:
appliers = Get(ctx, concat(KEY_CANDIDATE_APPLY, actionName))
appliesList = Deserialize(appliers)
for addr in appliesList:
if addr == address:
return False
appliesList.append(address)
appliers = Serialize(appliesList)
Put(ctx, concat(KEY_CANDIDATE_APPLY, actionName), appliers)
return True
def getApplyInfo(actionName):
'''
query apllier information
:return:
'''
appliers = Get(ctx, concat(KEY_CANDIDATE_APPLY, actionName))
applierList = Deserialize(appliers)
return applierList
def approveApply(actionName, admin, address):
'''
admin of a vote action approve one's apply
:return:
'''
if not Get(ctx, concat(KEY_CANDIDATE_APPLY, actionName)):
return False
if not CheckWitness(admin):
return False
Notify(["111111"])
info = Get(ctx, concat(KEY_VOTE_ACTION, actionName))
actionInfo = Deserialize(info)
if actionInfo[1] != admin:
return False
appliers = Get(ctx, concat(KEY_CANDIDATE_APPLY, actionName))
if appliers is None or appliers == "":
return False
applierList = Deserialize(appliers)
hasApplier = False
for applier in applierList:
if applier == address:
hasApplier = True
if not hasApplier:
Notify(["no applier", address])
return False
candidate = Get(ctx, concat(KEY_CANDIDATE, actionName))
if candidate is None or candidate == "":
candidateList = []
else:
candidateList = Deserialize(candidate)
if len(candidateList) != 0:
for candidateTemp in candidateList:
if candidateTemp == address:
Notify(["have been a candidate", address])
return False
candidateList.append(address)
Put(ctx, concat(KEY_CANDIDATE, actionName), Serialize(candidateList))
applierList2 = []
for applier in applierList:
if applier != address:
applierList2.append(applier)
Put(ctx, concat(KEY_CANDIDATE_APPLY, actionName), Serialize(applierList2))
Notify(["end"])
return True
def getCandadite(actionName):
'''
query candidate info
:return:
'''
candidate = Get(ctx, concat(KEY_CANDIDATE, actionName))
if candidate is None or candidate == "":
return False
candidateList = Deserialize(candidate)
Notify([candidateList])
return candidateList
def vote(actionName, voter, candidate):
'''
vote to candidate
:return:
'''
if not CheckWitness(voter):
return False
candidates = Get(ctx, concat(KEY_CANDIDATE, actionName))
if candidates is None or candidates == "":
return False
candidateList = Deserialize(candidates)
hasCandidate = False
for candi in candidateList:
if candi == candidate:
hasCandidate = True
if not hasCandidate:
return False
num = Get(ctx, concat(concat(KEY_POLL, actionName), candidate))
if num is None or num == "":
num = 0
num = num + 1
Put(ctx, concat(concat(KEY_POLL, actionName), candidate), num)
return True
def getPoll(actionName, candidate):
'''
query poll
:return:
'''
num = Get(ctx, concat(concat(KEY_POLL, actionName), candidate))
if num is None or num == "":
return 0
return num
def endAction(actionName, admin):
if not CheckWitness(admin):
return False
info = Get(ctx, concat(KEY_VOTE_ACTION, actionName))
if info is None or info == "":
return False
actionInfo = Deserialize(info)
if actionInfo[1] != admin:
return False
candidates = Get(ctx, concat(KEY_CANDIDATE, actionName))
if candidates is None or candidates == "":
return False
candidateList = Deserialize(candidates)
victor = ""
victorNum = 0
for candidate in candidateList:
num = Get(ctx, concat(concat(KEY_POLL, actionName), candidate))
if num is None or num == "":
n = 0
else:
n = num
if n > victorNum:
victorNum = n
victor = candidate
actionInfo = [actionName, admin, 1]
# update action state
Put(ctx, concat(KEY_VOTE_ACTION, actionName), Serialize(actionInfo))
resList = []
resList.append(victor)
resList.append(victorNum)
Put(ctx, concat(KEY_VICTOR, actionName), Serialize(resList))
return True
def getVictor(actionName):
victor = Get(ctx, concat(KEY_VICTOR, actionName))
if victor is None or victor == "":
return False
else:
return Deserialize(victor)
| 28.31295 | 83 | 0.628383 |
0302c30914b65e0703879324e022207a119be610 | 33,258 | py | Python | Lib/mhlib.py | M-Spencer-94/configNOW | 56828587253202089e77cfdfcf5329f2a7f09b3f | [
"PSF-2.0",
"Apache-2.0",
"MIT"
] | 3 | 2019-07-09T20:02:48.000Z | 2021-11-21T20:00:37.000Z | Lib/mhlib.py | M-Spencer-94/configNOW | 56828587253202089e77cfdfcf5329f2a7f09b3f | [
"PSF-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | Lib/mhlib.py | M-Spencer-94/configNOW | 56828587253202089e77cfdfcf5329f2a7f09b3f | [
"PSF-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2019-04-11T11:27:01.000Z | 2019-04-11T11:27:01.000Z | """MH interface -- purely object-oriented (well, almost)
Executive summary:
import mhlib
mh = mhlib.MH() # use default mailbox directory and profile
mh = mhlib.MH(mailbox) # override mailbox location (default from profile)
mh = mhlib.MH(mailbox, profile) # override mailbox and profile
mh.error(format, ...) # print error message -- can be overridden
s = mh.getprofile(key) # profile entry (None if not set)
path = mh.getpath() # mailbox pathname
name = mh.getcontext() # name of current folder
mh.setcontext(name) # set name of current folder
list = mh.listfolders() # names of top-level folders
list = mh.listallfolders() # names of all folders, including subfolders
list = mh.listsubfolders(name) # direct subfolders of given folder
list = mh.listallsubfolders(name) # all subfolders of given folder
mh.makefolder(name) # create new folder
mh.deletefolder(name) # delete folder -- must have no subfolders
f = mh.openfolder(name) # new open folder object
f.error(format, ...) # same as mh.error(format, ...)
path = f.getfullname() # folder's full pathname
path = f.getsequencesfilename() # full pathname of folder's sequences file
path = f.getmessagefilename(n) # full pathname of message n in folder
list = f.listmessages() # list of messages in folder (as numbers)
n = f.getcurrent() # get current message
f.setcurrent(n) # set current message
list = f.parsesequence(seq) # parse msgs syntax into list of messages
n = f.getlast() # get last message (0 if no messagse)
f.setlast(n) # set last message (internal use only)
dict = f.getsequences() # dictionary of sequences in folder {name: list}
f.putsequences(dict) # write sequences back to folder
f.createmessage(n, fp) # add message from file f as number n
f.removemessages(list) # remove messages in list from folder
f.refilemessages(list, tofolder) # move messages in list to other folder
f.movemessage(n, tofolder, ton) # move one message to a given destination
f.copymessage(n, tofolder, ton) # copy one message to a given destination
m = f.openmessage(n) # new open message object (costs a file descriptor)
m is a derived class of mimetools.Message(rfc822.Message), with:
s = m.getheadertext() # text of message's headers
s = m.getheadertext(pred) # text of message's headers, filtered by pred
s = m.getbodytext() # text of message's body, decoded
s = m.getbodytext(0) # text of message's body, not decoded
"""
# XXX To do, functionality:
# - annotate messages
# - send messages
#
# XXX To do, organization:
# - move IntSet to separate file
# - move most Message functionality to module mimetools
# Customizable defaults
MH_PROFILE = '~/.mh_profile'
PATH = '~/Mail'
MH_SEQUENCES = '.mh_sequences'
FOLDER_PROTECT = 0700
# Imported modules
import os
import sys
from stat import ST_NLINK
import re
import mimetools
import multifile
import shutil
from bisect import bisect
__all__ = ["MH","Error","Folder","Message"]
# Exported constants
class Error(Exception):
pass
class MH:
"""Class representing a particular collection of folders.
Optional constructor arguments are the pathname for the directory
containing the collection, and the MH profile to use.
If either is omitted or empty a default is used; the default
directory is taken from the MH profile if it is specified there."""
def __init__(self, path = None, profile = None):
"""Constructor."""
if not profile: profile = MH_PROFILE
self.profile = os.path.expanduser(profile)
if not path: path = self.getprofile('Path')
if not path: path = PATH
if not os.path.isabs(path) and path[0] != '~':
path = os.path.join('~', path)
path = os.path.expanduser(path)
if not os.path.isdir(path): raise Error, 'MH() path not found'
self.path = path
def __repr__(self):
"""String representation."""
return 'MH(%s, %s)' % (`self.path`, `self.profile`)
def error(self, msg, *args):
"""Routine to print an error. May be overridden by a derived class."""
sys.stderr.write('MH error: %s\n' % (msg % args))
def getprofile(self, key):
"""Return a profile entry, None if not found."""
return pickline(self.profile, key)
def getpath(self):
"""Return the path (the name of the collection's directory)."""
return self.path
def getcontext(self):
"""Return the name of the current folder."""
context = pickline(os.path.join(self.getpath(), 'context'),
'Current-Folder')
if not context: context = 'inbox'
return context
def setcontext(self, context):
"""Set the name of the current folder."""
fn = os.path.join(self.getpath(), 'context')
f = open(fn, "w")
f.write("Current-Folder: %s\n" % context)
f.close()
def listfolders(self):
"""Return the names of the top-level folders."""
folders = []
path = self.getpath()
for name in os.listdir(path):
fullname = os.path.join(path, name)
if os.path.isdir(fullname):
folders.append(name)
folders.sort()
return folders
def listsubfolders(self, name):
"""Return the names of the subfolders in a given folder
(prefixed with the given folder name)."""
fullname = os.path.join(self.path, name)
# Get the link count so we can avoid listing folders
# that have no subfolders.
st = os.stat(fullname)
nlinks = st[ST_NLINK]
if nlinks <= 2:
return []
subfolders = []
subnames = os.listdir(fullname)
for subname in subnames:
fullsubname = os.path.join(fullname, subname)
if os.path.isdir(fullsubname):
name_subname = os.path.join(name, subname)
subfolders.append(name_subname)
# Stop looking for subfolders when
# we've seen them all
nlinks = nlinks - 1
if nlinks <= 2:
break
subfolders.sort()
return subfolders
def listallfolders(self):
"""Return the names of all folders and subfolders, recursively."""
return self.listallsubfolders('')
def listallsubfolders(self, name):
"""Return the names of subfolders in a given folder, recursively."""
fullname = os.path.join(self.path, name)
# Get the link count so we can avoid listing folders
# that have no subfolders.
st = os.stat(fullname)
nlinks = st[ST_NLINK]
if nlinks <= 2:
return []
subfolders = []
subnames = os.listdir(fullname)
for subname in subnames:
if subname[0] == ',' or isnumeric(subname): continue
fullsubname = os.path.join(fullname, subname)
if os.path.isdir(fullsubname):
name_subname = os.path.join(name, subname)
subfolders.append(name_subname)
if not os.path.islink(fullsubname):
subsubfolders = self.listallsubfolders(
name_subname)
subfolders = subfolders + subsubfolders
# Stop looking for subfolders when
# we've seen them all
nlinks = nlinks - 1
if nlinks <= 2:
break
subfolders.sort()
return subfolders
def openfolder(self, name):
"""Return a new Folder object for the named folder."""
return Folder(self, name)
def makefolder(self, name):
"""Create a new folder (or raise os.error if it cannot be created)."""
protect = pickline(self.profile, 'Folder-Protect')
if protect and isnumeric(protect):
mode = int(protect, 8)
else:
mode = FOLDER_PROTECT
os.mkdir(os.path.join(self.getpath(), name), mode)
def deletefolder(self, name):
"""Delete a folder. This removes files in the folder but not
subdirectories. Raise os.error if deleting the folder itself fails."""
fullname = os.path.join(self.getpath(), name)
for subname in os.listdir(fullname):
fullsubname = os.path.join(fullname, subname)
try:
os.unlink(fullsubname)
except os.error:
self.error('%s not deleted, continuing...' %
fullsubname)
os.rmdir(fullname)
numericprog = re.compile('^[1-9][0-9]*$')
def isnumeric(str):
return numericprog.match(str) is not None
class Folder:
"""Class representing a particular folder."""
def __init__(self, mh, name):
"""Constructor."""
self.mh = mh
self.name = name
if not os.path.isdir(self.getfullname()):
raise Error, 'no folder %s' % name
def __repr__(self):
"""String representation."""
return 'Folder(%s, %s)' % (`self.mh`, `self.name`)
def error(self, *args):
"""Error message handler."""
apply(self.mh.error, args)
def getfullname(self):
"""Return the full pathname of the folder."""
return os.path.join(self.mh.path, self.name)
def getsequencesfilename(self):
"""Return the full pathname of the folder's sequences file."""
return os.path.join(self.getfullname(), MH_SEQUENCES)
def getmessagefilename(self, n):
"""Return the full pathname of a message in the folder."""
return os.path.join(self.getfullname(), str(n))
def listsubfolders(self):
"""Return list of direct subfolders."""
return self.mh.listsubfolders(self.name)
def listallsubfolders(self):
"""Return list of all subfolders."""
return self.mh.listallsubfolders(self.name)
def listmessages(self):
"""Return the list of messages currently present in the folder.
As a side effect, set self.last to the last message (or 0)."""
messages = []
match = numericprog.match
append = messages.append
for name in os.listdir(self.getfullname()):
if match(name):
append(name)
messages = map(int, messages)
messages.sort()
if messages:
self.last = messages[-1]
else:
self.last = 0
return messages
def getsequences(self):
"""Return the set of sequences for the folder."""
sequences = {}
fullname = self.getsequencesfilename()
try:
f = open(fullname, 'r')
except IOError:
return sequences
while 1:
line = f.readline()
if not line: break
fields = line.split(':')
if len(fields) != 2:
self.error('bad sequence in %s: %s' %
(fullname, line.strip()))
key = fields[0].strip()
value = IntSet(fields[1].strip(), ' ').tolist()
sequences[key] = value
return sequences
def putsequences(self, sequences):
"""Write the set of sequences back to the folder."""
fullname = self.getsequencesfilename()
f = None
for key in sequences.keys():
s = IntSet('', ' ')
s.fromlist(sequences[key])
if not f: f = open(fullname, 'w')
f.write('%s: %s\n' % (key, s.tostring()))
if not f:
try:
os.unlink(fullname)
except os.error:
pass
else:
f.close()
def getcurrent(self):
"""Return the current message. Raise Error when there is none."""
seqs = self.getsequences()
try:
return max(seqs['cur'])
except (ValueError, KeyError):
raise Error, "no cur message"
def setcurrent(self, n):
"""Set the current message."""
updateline(self.getsequencesfilename(), 'cur', str(n), 0)
def parsesequence(self, seq):
"""Parse an MH sequence specification into a message list.
Attempt to mimic mh-sequence(5) as close as possible.
Also attempt to mimic observed behavior regarding which
conditions cause which error messages."""
# XXX Still not complete (see mh-format(5)).
# Missing are:
# - 'prev', 'next' as count
# - Sequence-Negation option
all = self.listmessages()
# Observed behavior: test for empty folder is done first
if not all:
raise Error, "no messages in %s" % self.name
# Common case first: all is frequently the default
if seq == 'all':
return all
# Test for X:Y before X-Y because 'seq:-n' matches both
i = seq.find(':')
if i >= 0:
head, dir, tail = seq[:i], '', seq[i+1:]
if tail[:1] in '-+':
dir, tail = tail[:1], tail[1:]
if not isnumeric(tail):
raise Error, "bad message list %s" % seq
try:
count = int(tail)
except (ValueError, OverflowError):
# Can't use sys.maxint because of i+count below
count = len(all)
try:
anchor = self._parseindex(head, all)
except Error, msg:
seqs = self.getsequences()
if not seqs.has_key(head):
if not msg:
msg = "bad message list %s" % seq
raise Error, msg, sys.exc_info()[2]
msgs = seqs[head]
if not msgs:
raise Error, "sequence %s empty" % head
if dir == '-':
return msgs[-count:]
else:
return msgs[:count]
else:
if not dir:
if head in ('prev', 'last'):
dir = '-'
if dir == '-':
i = bisect(all, anchor)
return all[max(0, i-count):i]
else:
i = bisect(all, anchor-1)
return all[i:i+count]
# Test for X-Y next
i = seq.find('-')
if i >= 0:
begin = self._parseindex(seq[:i], all)
end = self._parseindex(seq[i+1:], all)
i = bisect(all, begin-1)
j = bisect(all, end)
r = all[i:j]
if not r:
raise Error, "bad message list %s" % seq
return r
# Neither X:Y nor X-Y; must be a number or a (pseudo-)sequence
try:
n = self._parseindex(seq, all)
except Error, msg:
seqs = self.getsequences()
if not seqs.has_key(seq):
if not msg:
msg = "bad message list %s" % seq
raise Error, msg
return seqs[seq]
else:
if n not in all:
if isnumeric(seq):
raise Error, "message %d doesn't exist" % n
else:
raise Error, "no %s message" % seq
else:
return [n]
def _parseindex(self, seq, all):
"""Internal: parse a message number (or cur, first, etc.)."""
if isnumeric(seq):
try:
return int(seq)
except (OverflowError, ValueError):
return sys.maxint
if seq in ('cur', '.'):
return self.getcurrent()
if seq == 'first':
return all[0]
if seq == 'last':
return all[-1]
if seq == 'next':
n = self.getcurrent()
i = bisect(all, n)
try:
return all[i]
except IndexError:
raise Error, "no next message"
if seq == 'prev':
n = self.getcurrent()
i = bisect(all, n-1)
if i == 0:
raise Error, "no prev message"
try:
return all[i-1]
except IndexError:
raise Error, "no prev message"
raise Error, None
def openmessage(self, n):
"""Open a message -- returns a Message object."""
return Message(self, n)
def removemessages(self, list):
"""Remove one or more messages -- may raise os.error."""
errors = []
deleted = []
for n in list:
path = self.getmessagefilename(n)
commapath = self.getmessagefilename(',' + str(n))
try:
os.unlink(commapath)
except os.error:
pass
try:
os.rename(path, commapath)
except os.error, msg:
errors.append(msg)
else:
deleted.append(n)
if deleted:
self.removefromallsequences(deleted)
if errors:
if len(errors) == 1:
raise os.error, errors[0]
else:
raise os.error, ('multiple errors:', errors)
def refilemessages(self, list, tofolder, keepsequences=0):
"""Refile one or more messages -- may raise os.error.
'tofolder' is an open folder object."""
errors = []
refiled = {}
for n in list:
ton = tofolder.getlast() + 1
path = self.getmessagefilename(n)
topath = tofolder.getmessagefilename(ton)
try:
os.rename(path, topath)
except os.error:
# Try copying
try:
shutil.copy2(path, topath)
os.unlink(path)
except (IOError, os.error), msg:
errors.append(msg)
try:
os.unlink(topath)
except os.error:
pass
continue
tofolder.setlast(ton)
refiled[n] = ton
if refiled:
if keepsequences:
tofolder._copysequences(self, refiled.items())
self.removefromallsequences(refiled.keys())
if errors:
if len(errors) == 1:
raise os.error, errors[0]
else:
raise os.error, ('multiple errors:', errors)
def _copysequences(self, fromfolder, refileditems):
"""Helper for refilemessages() to copy sequences."""
fromsequences = fromfolder.getsequences()
tosequences = self.getsequences()
changed = 0
for name, seq in fromsequences.items():
try:
toseq = tosequences[name]
new = 0
except:
toseq = []
new = 1
for fromn, ton in refileditems:
if fromn in seq:
toseq.append(ton)
changed = 1
if new and toseq:
tosequences[name] = toseq
if changed:
self.putsequences(tosequences)
def movemessage(self, n, tofolder, ton):
"""Move one message over a specific destination message,
which may or may not already exist."""
path = self.getmessagefilename(n)
# Open it to check that it exists
f = open(path)
f.close()
del f
topath = tofolder.getmessagefilename(ton)
backuptopath = tofolder.getmessagefilename(',%d' % ton)
try:
os.rename(topath, backuptopath)
except os.error:
pass
try:
os.rename(path, topath)
except os.error:
# Try copying
ok = 0
try:
tofolder.setlast(None)
shutil.copy2(path, topath)
ok = 1
finally:
if not ok:
try:
os.unlink(topath)
except os.error:
pass
os.unlink(path)
self.removefromallsequences([n])
def copymessage(self, n, tofolder, ton):
"""Copy one message over a specific destination message,
which may or may not already exist."""
path = self.getmessagefilename(n)
# Open it to check that it exists
f = open(path)
f.close()
del f
topath = tofolder.getmessagefilename(ton)
backuptopath = tofolder.getmessagefilename(',%d' % ton)
try:
os.rename(topath, backuptopath)
except os.error:
pass
ok = 0
try:
tofolder.setlast(None)
shutil.copy2(path, topath)
ok = 1
finally:
if not ok:
try:
os.unlink(topath)
except os.error:
pass
def createmessage(self, n, txt):
"""Create a message, with text from the open file txt."""
path = self.getmessagefilename(n)
backuppath = self.getmessagefilename(',%d' % n)
try:
os.rename(path, backuppath)
except os.error:
pass
ok = 0
BUFSIZE = 16*1024
try:
f = open(path, "w")
while 1:
buf = txt.read(BUFSIZE)
if not buf:
break
f.write(buf)
f.close()
ok = 1
finally:
if not ok:
try:
os.unlink(path)
except os.error:
pass
def removefromallsequences(self, list):
"""Remove one or more messages from all sequences (including last)
-- but not from 'cur'!!!"""
if hasattr(self, 'last') and self.last in list:
del self.last
sequences = self.getsequences()
changed = 0
for name, seq in sequences.items():
if name == 'cur':
continue
for n in list:
if n in seq:
seq.remove(n)
changed = 1
if not seq:
del sequences[name]
if changed:
self.putsequences(sequences)
def getlast(self):
"""Return the last message number."""
if not hasattr(self, 'last'):
messages = self.listmessages()
return self.last
def setlast(self, last):
"""Set the last message number."""
if last is None:
if hasattr(self, 'last'):
del self.last
else:
self.last = last
class Message(mimetools.Message):
def __init__(self, f, n, fp = None):
"""Constructor."""
self.folder = f
self.number = n
if not fp:
path = f.getmessagefilename(n)
fp = open(path, 'r')
mimetools.Message.__init__(self, fp)
def __repr__(self):
"""String representation."""
return 'Message(%s, %s)' % (repr(self.folder), self.number)
def getheadertext(self, pred = None):
"""Return the message's header text as a string. If an
argument is specified, it is used as a filter predicate to
decide which headers to return (its argument is the header
name converted to lower case)."""
if not pred:
return ''.join(self.headers)
headers = []
hit = 0
for line in self.headers:
if not line[0].isspace():
i = line.find(':')
if i > 0:
hit = pred(line[:i].lower())
if hit: headers.append(line)
return ''.join(headers)
def getbodytext(self, decode = 1):
"""Return the message's body text as string. This undoes a
Content-Transfer-Encoding, but does not interpret other MIME
features (e.g. multipart messages). To suppress decoding,
pass 0 as an argument."""
self.fp.seek(self.startofbody)
encoding = self.getencoding()
if not decode or encoding in ('', '7bit', '8bit', 'binary'):
return self.fp.read()
from StringIO import StringIO
output = StringIO()
mimetools.decode(self.fp, output, encoding)
return output.getvalue()
def getbodyparts(self):
"""Only for multipart messages: return the message's body as a
list of SubMessage objects. Each submessage object behaves
(almost) as a Message object."""
if self.getmaintype() != 'multipart':
raise Error, 'Content-Type is not multipart/*'
bdry = self.getparam('boundary')
if not bdry:
raise Error, 'multipart/* without boundary param'
self.fp.seek(self.startofbody)
mf = multifile.MultiFile(self.fp)
mf.push(bdry)
parts = []
while mf.next():
n = str(self.number) + '.' + `1 + len(parts)`
part = SubMessage(self.folder, n, mf)
parts.append(part)
mf.pop()
return parts
def getbody(self):
"""Return body, either a string or a list of messages."""
if self.getmaintype() == 'multipart':
return self.getbodyparts()
else:
return self.getbodytext()
class SubMessage(Message):
def __init__(self, f, n, fp):
"""Constructor."""
Message.__init__(self, f, n, fp)
if self.getmaintype() == 'multipart':
self.body = Message.getbodyparts(self)
else:
self.body = Message.getbodytext(self)
self.bodyencoded = Message.getbodytext(self, decode=0)
# XXX If this is big, should remember file pointers
def __repr__(self):
"""String representation."""
f, n, fp = self.folder, self.number, self.fp
return 'SubMessage(%s, %s, %s)' % (f, n, fp)
def getbodytext(self, decode = 1):
if not decode:
return self.bodyencoded
if type(self.body) == type(''):
return self.body
def getbodyparts(self):
if type(self.body) == type([]):
return self.body
def getbody(self):
return self.body
class IntSet:
"""Class implementing sets of integers.
This is an efficient representation for sets consisting of several
continuous ranges, e.g. 1-100,200-400,402-1000 is represented
internally as a list of three pairs: [(1,100), (200,400),
(402,1000)]. The internal representation is always kept normalized.
The constructor has up to three arguments:
- the string used to initialize the set (default ''),
- the separator between ranges (default ',')
- the separator between begin and end of a range (default '-')
The separators must be strings (not regexprs) and should be different.
The tostring() function yields a string that can be passed to another
IntSet constructor; __repr__() is a valid IntSet constructor itself.
"""
# XXX The default begin/end separator means that negative numbers are
# not supported very well.
#
# XXX There are currently no operations to remove set elements.
def __init__(self, data = None, sep = ',', rng = '-'):
self.pairs = []
self.sep = sep
self.rng = rng
if data: self.fromstring(data)
def reset(self):
self.pairs = []
def __cmp__(self, other):
return cmp(self.pairs, other.pairs)
def __hash__(self):
return hash(self.pairs)
def __repr__(self):
return 'IntSet(%s, %s, %s)' % (`self.tostring()`,
`self.sep`, `self.rng`)
def normalize(self):
self.pairs.sort()
i = 1
while i < len(self.pairs):
alo, ahi = self.pairs[i-1]
blo, bhi = self.pairs[i]
if ahi >= blo-1:
self.pairs[i-1:i+1] = [(alo, max(ahi, bhi))]
else:
i = i+1
def tostring(self):
s = ''
for lo, hi in self.pairs:
if lo == hi: t = `lo`
else: t = `lo` + self.rng + `hi`
if s: s = s + (self.sep + t)
else: s = t
return s
def tolist(self):
l = []
for lo, hi in self.pairs:
m = range(lo, hi+1)
l = l + m
return l
def fromlist(self, list):
for i in list:
self.append(i)
def clone(self):
new = IntSet()
new.pairs = self.pairs[:]
return new
def min(self):
return self.pairs[0][0]
def max(self):
return self.pairs[-1][-1]
def contains(self, x):
for lo, hi in self.pairs:
if lo <= x <= hi: return 1
return 0
def append(self, x):
for i in range(len(self.pairs)):
lo, hi = self.pairs[i]
if x < lo: # Need to insert before
if x+1 == lo:
self.pairs[i] = (x, hi)
else:
self.pairs.insert(i, (x, x))
if i > 0 and x-1 == self.pairs[i-1][1]:
# Merge with previous
self.pairs[i-1:i+1] = [
(self.pairs[i-1][0],
self.pairs[i][1])
]
return
if x <= hi: # Already in set
return
i = len(self.pairs) - 1
if i >= 0:
lo, hi = self.pairs[i]
if x-1 == hi:
self.pairs[i] = lo, x
return
self.pairs.append((x, x))
def addpair(self, xlo, xhi):
if xlo > xhi: return
self.pairs.append((xlo, xhi))
self.normalize()
def fromstring(self, data):
new = []
for part in data.split(self.sep):
list = []
for subp in part.split(self.rng):
s = subp.strip()
list.append(int(s))
if len(list) == 1:
new.append((list[0], list[0]))
elif len(list) == 2 and list[0] <= list[1]:
new.append((list[0], list[1]))
else:
raise ValueError, 'bad data passed to IntSet'
self.pairs = self.pairs + new
self.normalize()
# Subroutines to read/write entries in .mh_profile and .mh_sequences
def pickline(file, key, casefold = 1):
try:
f = open(file, 'r')
except IOError:
return None
pat = re.escape(key) + ':'
prog = re.compile(pat, casefold and re.IGNORECASE)
while 1:
line = f.readline()
if not line: break
if prog.match(line):
text = line[len(key)+1:]
while 1:
line = f.readline()
if not line or not line[0].isspace():
break
text = text + line
return text.strip()
return None
def updateline(file, key, value, casefold = 1):
try:
f = open(file, 'r')
lines = f.readlines()
f.close()
except IOError:
lines = []
pat = re.escape(key) + ':(.*)\n'
prog = re.compile(pat, casefold and re.IGNORECASE)
if value is None:
newline = None
else:
newline = '%s: %s\n' % (key, value)
for i in range(len(lines)):
line = lines[i]
if prog.match(line):
if newline is None:
del lines[i]
else:
lines[i] = newline
break
else:
if newline is not None:
lines.append(newline)
tempfile = file + "~"
f = open(tempfile, 'w')
for line in lines:
f.write(line)
f.close()
os.rename(tempfile, file)
# Test program
def test():
global mh, f
os.system('rm -rf $HOME/Mail/@test')
mh = MH()
def do(s): print s; print eval(s)
do('mh.listfolders()')
do('mh.listallfolders()')
testfolders = ['@test', '@test/test1', '@test/test2',
'@test/test1/test11', '@test/test1/test12',
'@test/test1/test11/test111']
for t in testfolders: do('mh.makefolder(%s)' % `t`)
do('mh.listsubfolders(\'@test\')')
do('mh.listallsubfolders(\'@test\')')
f = mh.openfolder('@test')
do('f.listsubfolders()')
do('f.listallsubfolders()')
do('f.getsequences()')
seqs = f.getsequences()
seqs['foo'] = IntSet('1-10 12-20', ' ').tolist()
print seqs
f.putsequences(seqs)
do('f.getsequences()')
testfolders.reverse()
for t in testfolders: do('mh.deletefolder(%s)' % `t`)
do('mh.getcontext()')
context = mh.getcontext()
f = mh.openfolder(context)
do('f.getcurrent()')
for seq in ['first', 'last', 'cur', '.', 'prev', 'next',
'first:3', 'last:3', 'cur:3', 'cur:-3',
'prev:3', 'next:3',
'1:3', '1:-3', '100:3', '100:-3', '10000:3', '10000:-3',
'all']:
try:
do('f.parsesequence(%s)' % `seq`)
except Error, msg:
print "Error:", msg
stuff = os.popen("pick %s 2>/dev/null" % `seq`).read()
list = map(int, stuff.split())
print list, "<-- pick"
do('f.listmessages()')
if __name__ == '__main__':
test()
| 33.125498 | 79 | 0.530699 |
8b18fc9f1a3234399d95889f3726aa325f5cd189 | 2,227 | py | Python | pydis_site/apps/api/models/bot/message.py | Hotdogszbg/site | 8071847742f39258781105bb3cfe19fc8c8c967c | [
"MIT"
] | null | null | null | pydis_site/apps/api/models/bot/message.py | Hotdogszbg/site | 8071847742f39258781105bb3cfe19fc8c8c967c | [
"MIT"
] | 10 | 2021-03-19T12:46:42.000Z | 2022-03-12T00:52:11.000Z | pydis_site/apps/api/models/bot/message.py | wookie184/site | 923cbeae0079b4a542fffda19bf3bce3daf15205 | [
"MIT"
] | null | null | null | from datetime import datetime
from django.contrib.postgres import fields as pgfields
from django.core.validators import MinValueValidator
from django.db import models
from django.utils import timezone
from pydis_site.apps.api.models.bot.tag import validate_tag_embed
from pydis_site.apps.api.models.bot.user import User
from pydis_site.apps.api.models.utils import ModelReprMixin
class Message(ModelReprMixin, models.Model):
"""A message, sent somewhere on the Discord server."""
id = models.BigIntegerField(
primary_key=True,
help_text="The message ID as taken from Discord.",
validators=(
MinValueValidator(
limit_value=0,
message="Message IDs cannot be negative."
),
)
)
author = models.ForeignKey(
User,
on_delete=models.CASCADE,
help_text="The author of this message."
)
channel_id = models.BigIntegerField(
help_text=(
"The channel ID that this message was "
"sent in, taken from Discord."
),
validators=(
MinValueValidator(
limit_value=0,
message="Channel IDs cannot be negative."
),
)
)
content = models.CharField(
max_length=2_000,
help_text="The content of this message, taken from Discord.",
blank=True
)
embeds = pgfields.ArrayField(
pgfields.JSONField(
validators=(validate_tag_embed,)
),
help_text="Embeds attached to this message."
)
attachments = pgfields.ArrayField(
models.URLField(
max_length=512
),
blank=True,
help_text="Attachments attached to this message."
)
@property
def timestamp(self) -> datetime:
"""Attribute that represents the message timestamp as derived from the snowflake id."""
tz_naive_datetime = datetime.utcfromtimestamp(((self.id >> 22) + 1420070400000) / 1000)
tz_aware_datetime = timezone.make_aware(tz_naive_datetime, timezone=timezone.utc)
return tz_aware_datetime
class Meta:
"""Metadata provided for Django's ORM."""
abstract = True
| 30.506849 | 95 | 0.633139 |
b768d4ad53cf5b9139d925df51ce8b06ee3c3af0 | 3,317 | py | Python | applications/FluidDynamicsApplication/python_scripts/check_and_prepare_model_process_fluid.py | AndreaVoltan/MyKratos7.0 | e977752722e8ef1b606f25618c4bf8fd04c434cc | [
"BSD-4-Clause"
] | 2 | 2020-04-30T19:13:08.000Z | 2021-04-14T19:40:47.000Z | applications/FluidDynamicsApplication/python_scripts/check_and_prepare_model_process_fluid.py | AndreaVoltan/MyKratos7.0 | e977752722e8ef1b606f25618c4bf8fd04c434cc | [
"BSD-4-Clause"
] | 1 | 2020-04-30T19:19:09.000Z | 2020-05-02T14:22:36.000Z | applications/FluidDynamicsApplication/python_scripts/check_and_prepare_model_process_fluid.py | AndreaVoltan/MyKratos7.0 | e977752722e8ef1b606f25618c4bf8fd04c434cc | [
"BSD-4-Clause"
] | 1 | 2020-06-12T08:51:24.000Z | 2020-06-12T08:51:24.000Z | import KratosMultiphysics
import KratosMultiphysics.FluidDynamicsApplication
def Factory(settings, Model):
if(type(settings) != KratosMultiphysics.Parameters):
raise Exception("expected input shall be a Parameters object, encapsulating a json string")
return CheckAndPrepareModelProcess(Model, settings["Parameters"])
## All the processes python should be derived from "Process"
class CheckAndPrepareModelProcess(KratosMultiphysics.Process):
def __init__(self, main_model_part, Parameters ):
self.main_model_part = main_model_part
default_parameters = KratosMultiphysics.Parameters(r'''{
"volume_model_part_name" : "",
"skin_parts" : [],
"assign_neighbour_elements_to_conditions" : false
}''')
Parameters.ValidateAndAssignDefaults(default_parameters)
if Parameters["volume_model_part_name"].GetString() == "":
raise Exception("Please define the \"volume_model_part_name\" (string) argument.")
self.volume_model_part_name = Parameters["volume_model_part_name"].GetString()
self.skin_name_list = Parameters["skin_parts"]
self.assign_neighbour_elements = Parameters["assign_neighbour_elements_to_conditions"].GetBool()
#self.volume_model_part_name = Parameters["volume_model_part_name"].GetString()
#self.list_of_inlets = Parameters["list_of_inlets"]
#self.list_of_slip = Parameters["list_of_inlets"]
#self.list_of_inlets = Parameters["list_of_inlets"]
def Execute(self):
self.volume_model_part = self.main_model_part.GetSubModelPart(self.volume_model_part_name)
skin_parts = []
for i in range(self.skin_name_list.size()):
skin_parts.append(self.main_model_part.GetSubModelPart(self.skin_name_list[i].GetString()))
#construct a model part which contains both the skin and the volume
#temporarily we call it "fluid_computational_model_part"
self.main_model_part.CreateSubModelPart("fluid_computational_model_part")
fluid_computational_model_part= self.main_model_part.GetSubModelPart("fluid_computational_model_part")
fluid_computational_model_part.ProcessInfo = self.main_model_part.ProcessInfo
for node in self.volume_model_part.Nodes:
fluid_computational_model_part.AddNode(node,0)
for elem in self.volume_model_part.Elements:
fluid_computational_model_part.AddElement(elem,0)
#do some gymnastics to have this done fast. - create an ordered list to be added
list_of_ids = set()
for part in skin_parts:
for cond in part.Conditions:
list_of_ids.add(cond.Id)
fluid_computational_model_part.AddConditions(list(list_of_ids))
#verify the orientation of the skin
tmoc = KratosMultiphysics.TetrahedralMeshOrientationCheck
throw_errors = False
flags = tmoc.NOT_COMPUTE_NODAL_NORMALS | tmoc.NOT_COMPUTE_CONDITION_NORMALS
if self.assign_neighbour_elements:
flags |= tmoc.ASSIGN_NEIGHBOUR_ELEMENTS_TO_CONDITIONS
else:
flags |= tmoc.NOT_ASSIGN_NEIGHBOUR_ELEMENTS_TO_CONDITIONS
KratosMultiphysics.TetrahedralMeshOrientationCheck(fluid_computational_model_part,throw_errors, flags).Execute()
| 46.069444 | 120 | 0.730479 |
7d816d76a54e59cd05a1faf61e243701b9c832a6 | 81,227 | py | Python | venv/lib/python3.6/site-packages/tensorflow_core/compiler/tf2xla/ops/gen_xla_ops.py | databill86/HyperFoods | 9267937c8c70fd84017c0f153c241d2686a356dd | [
"MIT"
] | 2 | 2020-09-30T00:11:09.000Z | 2021-10-04T13:00:38.000Z | venv/lib/python3.6/site-packages/tensorflow_core/compiler/tf2xla/ops/gen_xla_ops.py | databill86/HyperFoods | 9267937c8c70fd84017c0f153c241d2686a356dd | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/tensorflow_core/compiler/tf2xla/ops/gen_xla_ops.py | databill86/HyperFoods | 9267937c8c70fd84017c0f153c241d2686a356dd | [
"MIT"
] | null | null | null | """Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
Original C++ source file: gen_xla_ops.cc
"""
import collections
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.deprecation import deprecated_endpoints
from tensorflow.python.util import dispatch as _dispatch
from tensorflow.python.util.tf_export import tf_export
_XlaBroadcastHelperOutput = collections.namedtuple(
"XlaBroadcastHelper",
["lhs_output", "rhs_output"])
@_dispatch.add_dispatch_list
@tf_export('xla_broadcast_helper')
def xla_broadcast_helper(lhs, rhs, broadcast_dims, name=None):
r"""Helper operator for performing XLA-style broadcasts
Broadcasts `lhs` and `rhs` to the same rank, by adding size 1 dimensions to
whichever of `lhs` and `rhs` has the lower rank, using XLA's broadcasting rules
for binary operators.
Args:
lhs: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
the LHS input tensor
rhs: A `Tensor`. Must have the same type as `lhs`. the RHS input tensor
broadcast_dims: A `Tensor`. Must be one of the following types: `int32`, `int64`.
an XLA-style broadcast dimension specification
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (lhs_output, rhs_output).
lhs_output: A `Tensor`. Has the same type as `lhs`. the broadcasted LHS tensor
rhs_output: A `Tensor`. Has the same type as `lhs`. the broadcasted RHS tensor
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "XlaBroadcastHelper", name,
tld.op_callbacks, lhs, rhs, broadcast_dims)
_result = _XlaBroadcastHelperOutput._make(_result)
return _result
except _core._FallbackException:
try:
return xla_broadcast_helper_eager_fallback(
lhs, rhs, broadcast_dims, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
xla_broadcast_helper, lhs=lhs, rhs=rhs,
broadcast_dims=broadcast_dims, name=name)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
# Add nodes to the TensorFlow graph.
try:
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"XlaBroadcastHelper", lhs=lhs, rhs=rhs, broadcast_dims=broadcast_dims,
name=name)
except (TypeError, ValueError):
result = _dispatch.dispatch(
xla_broadcast_helper, lhs=lhs, rhs=rhs,
broadcast_dims=broadcast_dims, name=name)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "Tindices",
_op._get_attr_type("Tindices"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"XlaBroadcastHelper", _inputs_flat, _attrs, _result)
_result = _XlaBroadcastHelperOutput._make(_result)
return _result
XlaBroadcastHelper = tf_export("raw_ops.XlaBroadcastHelper")(_ops.to_raw_op(xla_broadcast_helper))
def xla_broadcast_helper_eager_fallback(lhs, rhs, broadcast_dims, name, ctx):
_attr_T, _inputs_T = _execute.args_to_matching_eager([lhs, rhs], ctx)
(lhs, rhs) = _inputs_T
_attr_Tindices, (broadcast_dims,) = _execute.args_to_matching_eager([broadcast_dims], ctx)
_inputs_flat = [lhs, rhs, broadcast_dims]
_attrs = ("T", _attr_T, "Tindices", _attr_Tindices)
_result = _execute.execute(b"XlaBroadcastHelper", 2, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"XlaBroadcastHelper", _inputs_flat, _attrs, _result)
_result = _XlaBroadcastHelperOutput._make(_result)
return _result
@_dispatch.add_dispatch_list
@tf_export('xla_conv')
def xla_conv(lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation, feature_group_count, dimension_numbers, precision_config, name=None):
  r"""Wraps the XLA ConvGeneralDilated operator, documented at
  https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution
  .

  Args:
    lhs: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      the input tensor
    rhs: A `Tensor`. Must have the same type as `lhs`. the kernel tensor
    window_strides: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      the inter-window strides
    padding: A `Tensor`. Must have the same type as `window_strides`.
      the padding to apply at the start and end of each input dimensions
    lhs_dilation: A `Tensor`. Must have the same type as `window_strides`.
      dilation to apply between input elements
    rhs_dilation: A `Tensor`. Must have the same type as `window_strides`.
      dilation to apply between kernel elements
    feature_group_count: A `Tensor`. Must have the same type as `window_strides`.
      number of feature groups for grouped convolution.
    dimension_numbers: A `string`.
      a serialized xla::ConvolutionDimensionNumbers proto.
    precision_config: A `string`. a serialized xla::PrecisionConfig proto.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `lhs`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager fast path: run the op through the C API; on failure fall back
    # to the Python eager path, then to the dispatch registry.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "XlaConv", name,
        tld.op_callbacks, lhs, rhs, window_strides, padding, lhs_dilation,
        rhs_dilation, feature_group_count, "dimension_numbers",
        dimension_numbers, "precision_config", precision_config)
      return _result
    except _core._FallbackException:
      try:
        return xla_conv_eager_fallback(
            lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
            feature_group_count, dimension_numbers=dimension_numbers,
            precision_config=precision_config, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              xla_conv, lhs=lhs, rhs=rhs, window_strides=window_strides,
                        padding=padding, lhs_dilation=lhs_dilation,
                        rhs_dilation=rhs_dilation,
                        feature_group_count=feature_group_count,
                        dimension_numbers=dimension_numbers,
                        precision_config=precision_config, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  # Canonicalize string attrs before graph construction.
  dimension_numbers = _execute.make_str(dimension_numbers, "dimension_numbers")
  precision_config = _execute.make_str(precision_config, "precision_config")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaConv", lhs=lhs, rhs=rhs, window_strides=window_strides,
                   padding=padding, lhs_dilation=lhs_dilation,
                   rhs_dilation=rhs_dilation,
                   feature_group_count=feature_group_count,
                   dimension_numbers=dimension_numbers,
                   precision_config=precision_config, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          xla_conv, lhs=lhs, rhs=rhs, window_strides=window_strides,
                    padding=padding, lhs_dilation=lhs_dilation,
                    rhs_dilation=rhs_dilation,
                    feature_group_count=feature_group_count,
                    dimension_numbers=dimension_numbers,
                    precision_config=precision_config, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "Tindices",
              _op._get_attr_type("Tindices"), "dimension_numbers",
              _op.get_attr("dimension_numbers"), "precision_config",
              _op.get_attr("precision_config"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "XlaConv", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Expose the op under tf.raw_ops.
XlaConv = tf_export("raw_ops.XlaConv")(_ops.to_raw_op(xla_conv))
def xla_conv_eager_fallback(lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation, feature_group_count, dimension_numbers, precision_config, name, ctx):
  """Slow-path eager execution of XlaConv, used when the C fast path fails."""
  dimension_numbers = _execute.make_str(dimension_numbers, "dimension_numbers")
  precision_config = _execute.make_str(precision_config, "precision_config")
  # Infer the T / Tindices attrs from the actual argument dtypes.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([lhs, rhs], ctx)
  (lhs, rhs) = _inputs_T
  _attr_Tindices, _inputs_Tindices = _execute.args_to_matching_eager([window_strides, padding, lhs_dilation, rhs_dilation, feature_group_count], ctx)
  (window_strides, padding, lhs_dilation, rhs_dilation, feature_group_count) = _inputs_Tindices
  _inputs_flat = [lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation, feature_group_count]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "dimension_numbers",
  dimension_numbers, "precision_config", precision_config)
  _result = _execute.execute(b"XlaConv", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "XlaConv", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
@_dispatch.add_dispatch_list
@tf_export('xla_dequantize')
def xla_dequantize(input, min_range, max_range, mode, transpose_output, name=None):
  r"""Takes the packed uint32 input and unpacks the input to uint8 to do
  Dequantization on device.

  Args:
    input: A `Tensor` of type `uint32`.
      Input tensors whose type is uint32, shape is [d0, ..., dn].
    min_range: A `float`.
      The minimum scalar value possibly produced for the input.
    max_range: A `float`.
      The maximum scalar value possibly produced for the input.
    mode: A `string`.
      String to determine the dequantize mode in {"MIN_COMBINED", "MIN_FIRST", "SCALED"}.
    transpose_output: A `bool`.
      Boolean to determine if output is transposed. transpose_output
      is faster when input is large and rank of input is higher than 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bfloat16`.
    Output tensors whose type is bfloat16. If transpose_output is true,
    output shape is [dn * 4, dn-1, ..., d1, d0]. If transpose_output
    is false, output shape is [d0,..., dn * 4].
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager fast path: run the op through the C API; on failure fall back
    # to the Python eager path, then to the dispatch registry.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "XlaDequantize", name,
        tld.op_callbacks, input, "min_range", min_range, "max_range",
        max_range, "mode", mode, "transpose_output", transpose_output)
      return _result
    except _core._FallbackException:
      try:
        return xla_dequantize_eager_fallback(
            input, min_range=min_range, max_range=max_range, mode=mode,
            transpose_output=transpose_output, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              xla_dequantize, input=input, min_range=min_range,
                              max_range=max_range, mode=mode,
                              transpose_output=transpose_output, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  # Canonicalize attrs before graph construction.
  min_range = _execute.make_float(min_range, "min_range")
  max_range = _execute.make_float(max_range, "max_range")
  mode = _execute.make_str(mode, "mode")
  transpose_output = _execute.make_bool(transpose_output, "transpose_output")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaDequantize", input=input, min_range=min_range,
                         max_range=max_range, mode=mode,
                         transpose_output=transpose_output, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          xla_dequantize, input=input, min_range=min_range,
                          max_range=max_range, mode=mode,
                          transpose_output=transpose_output, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("min_range", _op.get_attr("min_range"), "max_range",
              _op.get_attr("max_range"), "mode", _op.get_attr("mode"),
              "transpose_output", _op._get_attr_bool("transpose_output"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "XlaDequantize", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Expose the op under tf.raw_ops.
XlaDequantize = tf_export("raw_ops.XlaDequantize")(_ops.to_raw_op(xla_dequantize))
def xla_dequantize_eager_fallback(input, min_range, max_range, mode, transpose_output, name, ctx):
  """Slow-path eager execution of XlaDequantize, used when the C fast path fails."""
  # Canonicalize attrs; the input dtype is fixed to uint32 by the op def.
  min_range = _execute.make_float(min_range, "min_range")
  max_range = _execute.make_float(max_range, "max_range")
  mode = _execute.make_str(mode, "mode")
  transpose_output = _execute.make_bool(transpose_output, "transpose_output")
  input = _ops.convert_to_tensor(input, _dtypes.uint32)
  _inputs_flat = [input]
  _attrs = ("min_range", min_range, "max_range", max_range, "mode", mode,
  "transpose_output", transpose_output)
  _result = _execute.execute(b"XlaDequantize", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "XlaDequantize", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
@_dispatch.add_dispatch_list
@tf_export('xla_dot')
def xla_dot(lhs, rhs, dimension_numbers, precision_config, name=None):
  r"""Wraps the XLA DotGeneral operator, documented at
  https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral
  .

  Args:
    lhs: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      the LHS tensor
    rhs: A `Tensor`. Must have the same type as `lhs`. the RHS tensor
    dimension_numbers: A `string`.
      a serialized xla::DotDimensionNumbers proto.
    precision_config: A `string`. a serialized xla::PrecisionConfig proto.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `lhs`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager fast path: run the op through the C API; on failure fall back
    # to the Python eager path, then to the dispatch registry.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "XlaDot", name,
        tld.op_callbacks, lhs, rhs, "dimension_numbers", dimension_numbers,
        "precision_config", precision_config)
      return _result
    except _core._FallbackException:
      try:
        return xla_dot_eager_fallback(
            lhs, rhs, dimension_numbers=dimension_numbers,
            precision_config=precision_config, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              xla_dot, lhs=lhs, rhs=rhs, dimension_numbers=dimension_numbers,
                       precision_config=precision_config, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  # Canonicalize string attrs before graph construction.
  dimension_numbers = _execute.make_str(dimension_numbers, "dimension_numbers")
  precision_config = _execute.make_str(precision_config, "precision_config")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaDot", lhs=lhs, rhs=rhs, dimension_numbers=dimension_numbers,
                  precision_config=precision_config, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          xla_dot, lhs=lhs, rhs=rhs, dimension_numbers=dimension_numbers,
                   precision_config=precision_config, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "dimension_numbers",
              _op.get_attr("dimension_numbers"), "precision_config",
              _op.get_attr("precision_config"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "XlaDot", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Expose the op under tf.raw_ops.
XlaDot = tf_export("raw_ops.XlaDot")(_ops.to_raw_op(xla_dot))
def xla_dot_eager_fallback(lhs, rhs, dimension_numbers, precision_config, name, ctx):
  """Slow-path eager execution of XlaDot, used when the C fast path fails."""
  dimension_numbers = _execute.make_str(dimension_numbers, "dimension_numbers")
  precision_config = _execute.make_str(precision_config, "precision_config")
  # Infer the T attr from the operand dtypes.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([lhs, rhs], ctx)
  (lhs, rhs) = _inputs_T
  _inputs_flat = [lhs, rhs]
  _attrs = ("T", _attr_T, "dimension_numbers", dimension_numbers,
  "precision_config", precision_config)
  _result = _execute.execute(b"XlaDot", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "XlaDot", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
@_dispatch.add_dispatch_list
@tf_export('xla_dynamic_slice')
def xla_dynamic_slice(input, start_indices, size_indices, name=None):
  r"""Wraps the XLA DynamicSlice operator, documented at
  https://www.tensorflow.org/performance/xla/operation_semantics#dynamicslice
  .

  DynamicSlice extracts a sub-array from the input array at dynamic
  start_indices. The size of the slice in each dimension is passed in
  size_indices, which specify the end point of exclusive slice intervals in each
  dimension -- [start, start + size). The shape of start_indices must have rank 1,
  with dimension size equal to the rank of operand.

  Args:
    input: A `Tensor`. A `Tensor` of type T.
    start_indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      Rank-1 tensor of N integers containing the starting indices of the
      slice for each dimension.
    size_indices: A `Tensor`. Must have the same type as `start_indices`.
      List of N integers containing the slice size for each
      dimension. Each value must be strictly greater than zero, and start + size
      must be less than or equal to the size of the dimension to avoid
      implementation defined behavior.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager fast path: run the op through the C API; on failure fall back
    # to the Python eager path, then to the dispatch registry.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "XlaDynamicSlice", name,
        tld.op_callbacks, input, start_indices, size_indices)
      return _result
    except _core._FallbackException:
      try:
        return xla_dynamic_slice_eager_fallback(
            input, start_indices, size_indices, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              xla_dynamic_slice, input=input, start_indices=start_indices,
                                 size_indices=size_indices, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaDynamicSlice", input=input, start_indices=start_indices,
                           size_indices=size_indices, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          xla_dynamic_slice, input=input, start_indices=start_indices,
                             size_indices=size_indices, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "Tindices",
              _op._get_attr_type("Tindices"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "XlaDynamicSlice", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Expose the op under tf.raw_ops.
XlaDynamicSlice = tf_export("raw_ops.XlaDynamicSlice")(_ops.to_raw_op(xla_dynamic_slice))
def xla_dynamic_slice_eager_fallback(input, start_indices, size_indices, name, ctx):
  """Slow-path eager execution of XlaDynamicSlice, used when the C fast path fails."""
  # Infer the T / Tindices attrs from the actual argument dtypes.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx)
  _attr_Tindices, _inputs_Tindices = _execute.args_to_matching_eager([start_indices, size_indices], ctx)
  (start_indices, size_indices) = _inputs_Tindices
  _inputs_flat = [input, start_indices, size_indices]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices)
  _result = _execute.execute(b"XlaDynamicSlice", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "XlaDynamicSlice", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
@_dispatch.add_dispatch_list
@tf_export('xla_dynamic_update_slice')
def xla_dynamic_update_slice(input, update, indices, name=None):
  r"""Wraps the XLA DynamicUpdateSlice operator, documented at
  https://www.tensorflow.org/performance/xla/operation_semantics#dynamicupdateslice
  .

  XlaDynamicUpdateSlice generates a result which is the value of the `input`
  operand, with a slice update overwritten at `indices`. The shape of `update`
  determines the shape of the sub-array of the result which is updated. The shape
  of indices must be rank == 1, with dimension size equal to the rank of `input`.

  Handling of out-of-bounds slice indices is implementation-defined.

  Args:
    input: A `Tensor`. A `Tensor` of type T.
    update: A `Tensor`. Must have the same type as `input`.
      A `Tensor` of type T. Same rank as `input`.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A vector of indices into `input`. Must have length equal to the rank of
      `input`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`. A `Tensor` of type T.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager fast path: run the op through the C API; on failure fall back
    # to the Python eager path, then to the dispatch registry.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "XlaDynamicUpdateSlice", name,
        tld.op_callbacks, input, update, indices)
      return _result
    except _core._FallbackException:
      try:
        return xla_dynamic_update_slice_eager_fallback(
            input, update, indices, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              xla_dynamic_update_slice, input=input, update=update,
                                        indices=indices, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaDynamicUpdateSlice", input=input, update=update, indices=indices,
                                 name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          xla_dynamic_update_slice, input=input, update=update,
                                    indices=indices, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "Tindices",
              _op._get_attr_type("Tindices"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "XlaDynamicUpdateSlice", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Expose the op under tf.raw_ops.
XlaDynamicUpdateSlice = tf_export("raw_ops.XlaDynamicUpdateSlice")(_ops.to_raw_op(xla_dynamic_update_slice))
def xla_dynamic_update_slice_eager_fallback(input, update, indices, name, ctx):
  """Slow-path eager execution of XlaDynamicUpdateSlice, used when the C fast path fails."""
  # Infer the T / Tindices attrs from the actual argument dtypes.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, update], ctx)
  (input, update) = _inputs_T
  _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx)
  _inputs_flat = [input, update, indices]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices)
  _result = _execute.execute(b"XlaDynamicUpdateSlice", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "XlaDynamicUpdateSlice", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
@_dispatch.add_dispatch_list
@tf_export('xla_einsum')
def xla_einsum(a, b, equation, name=None):
  r"""An op which supports basic einsum op with 2 inputs and 1 output.

  This op has better TPU performance since it doesn't have explicit reshape and
  transpose operations as tf.einsum does.

  Args:
    a: A `Tensor`. Must be one of the following types: `complex64`, `bfloat16`, `float32`.
    b: A `Tensor`. Must have the same type as `a`.
    equation: A `string`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `a`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager fast path: run the op through the C API; on failure fall back
    # to the Python eager path, then to the dispatch registry.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "XlaEinsum", name,
        tld.op_callbacks, a, b, "equation", equation)
      return _result
    except _core._FallbackException:
      try:
        return xla_einsum_eager_fallback(
            a, b, equation=equation, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              xla_einsum, a=a, b=b, equation=equation, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  equation = _execute.make_str(equation, "equation")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaEinsum", a=a, b=b, equation=equation, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          xla_einsum, a=a, b=b, equation=equation, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("equation", _op.get_attr("equation"), "T",
              _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "XlaEinsum", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Expose the op under tf.raw_ops.
XlaEinsum = tf_export("raw_ops.XlaEinsum")(_ops.to_raw_op(xla_einsum))
def xla_einsum_eager_fallback(a, b, equation, name, ctx):
  """Slow-path eager execution of XlaEinsum, used when the C fast path fails."""
  equation = _execute.make_str(equation, "equation")
  # Infer the T attr from the operand dtypes.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([a, b], ctx)
  (a, b) = _inputs_T
  _inputs_flat = [a, b]
  _attrs = ("equation", equation, "T", _attr_T)
  _result = _execute.execute(b"XlaEinsum", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "XlaEinsum", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
@_dispatch.add_dispatch_list
@tf_export('xla_if')
def xla_if(cond, inputs, then_branch, else_branch, Tout, name=None):
  r"""output = cond ? then_branch(inputs) : else_branch(inputs).

  Args:
    cond: A `Tensor`. A boolean scalar.
    inputs: A list of `Tensor` objects. A list of input tensors.
    then_branch: A function decorated with @Defun.
      A function takes 'inputs' and returns a list of tensors,
      whose types are the same as what else_branch returns.
    else_branch: A function decorated with @Defun.
      A function takes 'inputs' and returns a list of tensors,
      whose types are the same as what then_branch returns.
    Tout: A list of `tf.DTypes`.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `Tout`.
    A list of tensors returned by either then_branch(inputs) or
    else_branch(inputs). The input shapes of the then_branch and
    else_branch must match.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager fast path: run the op through the C API; on failure fall back
    # to the Python eager path, then to the dispatch registry.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "XlaIf", name,
        tld.op_callbacks, cond, inputs, "then_branch", then_branch,
        "else_branch", else_branch, "Tout", Tout)
      return _result
    except _core._FallbackException:
      try:
        return xla_if_eager_fallback(
            cond, inputs, then_branch=then_branch, else_branch=else_branch,
            Tout=Tout, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              xla_if, cond=cond, inputs=inputs, then_branch=then_branch,
                      else_branch=else_branch, Tout=Tout, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  # Validate and canonicalize the Tout dtype list before graph construction.
  if not isinstance(Tout, (list, tuple)):
    raise TypeError(
        "Expected list for 'Tout' argument to "
        "'xla_if' Op, not %r." % Tout)
  Tout = [_execute.make_type(_t, "Tout") for _t in Tout]
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaIf", cond=cond, inputs=inputs, then_branch=then_branch,
                 else_branch=else_branch, Tout=Tout, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          xla_if, cond=cond, inputs=inputs, then_branch=then_branch,
                  else_branch=else_branch, Tout=Tout, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  # With an empty Tout the op has no outputs; return the Operation itself.
  if not _result:
    return _op
  if _execute.must_record_gradient():
    _attrs = ("Tcond", _op._get_attr_type("Tcond"), "then_branch",
              _op.get_attr("then_branch"), "else_branch",
              _op.get_attr("else_branch"), "Tin", _op.get_attr("Tin"), "Tout",
              _op.get_attr("Tout"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "XlaIf", _inputs_flat, _attrs, _result)
  return _result

# Expose the op under tf.raw_ops.
XlaIf = tf_export("raw_ops.XlaIf")(_ops.to_raw_op(xla_if))
def xla_if_eager_fallback(cond, inputs, then_branch, else_branch, Tout, name, ctx):
  """Slow-path eager execution of XlaIf, used when the C fast path fails."""
  if not isinstance(Tout, (list, tuple)):
    raise TypeError(
        "Expected list for 'Tout' argument to "
        "'xla_if' Op, not %r." % Tout)
  Tout = [_execute.make_type(_t, "Tout") for _t in Tout]
  # Infer Tcond from cond and Tin from the (possibly mixed-dtype) inputs.
  _attr_Tcond, (cond,) = _execute.args_to_matching_eager([cond], ctx)
  _attr_Tin, inputs = _execute.convert_to_mixed_eager_tensors(inputs, ctx)
  _inputs_flat = [cond] + list(inputs)
  _attrs = ("Tcond", _attr_Tcond, "then_branch", then_branch, "else_branch",
  else_branch, "Tin", _attr_Tin, "Tout", Tout)
  # Number of outputs is determined by the length of Tout.
  _result = _execute.execute(b"XlaIf", len(Tout), inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "XlaIf", _inputs_flat, _attrs, _result)
  return _result
# Structured return type for xla_key_value_sort: (sorted_keys, sorted_values).
_XlaKeyValueSortOutput = collections.namedtuple(
    "XlaKeyValueSort",
    ["sorted_keys", "sorted_values"])
@_dispatch.add_dispatch_list
@tf_export('xla_key_value_sort')
def xla_key_value_sort(keys, values, name=None):
  r"""Wraps the XLA Sort operator, documented at
  https://www.tensorflow.org/performance/xla/operation_semantics#sort
  .

  Sorts a tensor. Currently only sorts in ascending order are supported.

  Args:
    keys: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      A `Tensor` of type K.
    values: A `Tensor`. A `Tensor` of type V.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (sorted_keys, sorted_values).

    sorted_keys: A `Tensor`. Has the same type as `keys`. A `Tensor` of type K.
    sorted_values: A `Tensor`. Has the same type as `values`. A `Tensor` of type V.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager fast path: run the op through the C API; on failure fall back
    # to the Python eager path, then to the dispatch registry.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "XlaKeyValueSort", name,
        tld.op_callbacks, keys, values)
      _result = _XlaKeyValueSortOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        return xla_key_value_sort_eager_fallback(
            keys, values, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              xla_key_value_sort, keys=keys, values=values, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaKeyValueSort", keys=keys, values=values, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          xla_key_value_sort, keys=keys, values=values, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("K", _op._get_attr_type("K"), "V", _op._get_attr_type("V"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "XlaKeyValueSort", _inputs_flat, _attrs, _result)
  _result = _XlaKeyValueSortOutput._make(_result)
  return _result

# Expose the op under tf.raw_ops.
XlaKeyValueSort = tf_export("raw_ops.XlaKeyValueSort")(_ops.to_raw_op(xla_key_value_sort))
def xla_key_value_sort_eager_fallback(keys, values, name, ctx):
  """Slow-path eager execution of XlaKeyValueSort, used when the C fast path fails."""
  # Infer the K / V attrs from the actual argument dtypes.
  _attr_K, (keys,) = _execute.args_to_matching_eager([keys], ctx)
  _attr_V, (values,) = _execute.args_to_matching_eager([values], ctx)
  _inputs_flat = [keys, values]
  _attrs = ("K", _attr_K, "V", _attr_V)
  # The op has 2 outputs (sorted_keys, sorted_values).
  _result = _execute.execute(b"XlaKeyValueSort", 2, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "XlaKeyValueSort", _inputs_flat, _attrs, _result)
  _result = _XlaKeyValueSortOutput._make(_result)
  return _result
@_dispatch.add_dispatch_list
@tf_export('xla_pad')
def xla_pad(input, padding_value, padding_low, padding_high, padding_interior, name=None):
  r"""Wraps the XLA Pad operator, documented at
  https://www.tensorflow.org/performance/xla/operation_semantics#pad
  .

  Args:
    input: A `Tensor`. A `Tensor` of type T.
    padding_value: A `Tensor`. Must have the same type as `input`.
      A scalar `Tensor` of type T.
    padding_low: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      the padding to apply at the start of each input dimensions
    padding_high: A `Tensor`. Must have the same type as `padding_low`.
      the padding to apply at the end of each input dimension.
    padding_interior: A `Tensor`. Must have the same type as `padding_low`.
      the padding to apply between each input element.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`. A `Tensor` of type T.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager fast path: run the op through the C API; on failure fall back
    # to the Python eager path, then to the dispatch registry.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "XlaPad", name,
        tld.op_callbacks, input, padding_value, padding_low, padding_high,
        padding_interior)
      return _result
    except _core._FallbackException:
      try:
        return xla_pad_eager_fallback(
            input, padding_value, padding_low, padding_high, padding_interior,
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              xla_pad, input=input, padding_value=padding_value,
                       padding_low=padding_low, padding_high=padding_high,
                       padding_interior=padding_interior, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaPad", input=input, padding_value=padding_value,
                  padding_low=padding_low, padding_high=padding_high,
                  padding_interior=padding_interior, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          xla_pad, input=input, padding_value=padding_value,
                   padding_low=padding_low, padding_high=padding_high,
                   padding_interior=padding_interior, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "Tindices",
              _op._get_attr_type("Tindices"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "XlaPad", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Expose the op under tf.raw_ops.
XlaPad = tf_export("raw_ops.XlaPad")(_ops.to_raw_op(xla_pad))
def xla_pad_eager_fallback(input, padding_value, padding_low, padding_high, padding_interior, name, ctx):
  """Slow-path eager execution of XlaPad, used when the C fast path fails."""
  # Infer the T / Tindices attrs from the actual argument dtypes.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, padding_value], ctx)
  (input, padding_value) = _inputs_T
  _attr_Tindices, _inputs_Tindices = _execute.args_to_matching_eager([padding_low, padding_high, padding_interior], ctx)
  (padding_low, padding_high, padding_interior) = _inputs_Tindices
  _inputs_flat = [input, padding_value, padding_low, padding_high, padding_interior]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices)
  _result = _execute.execute(b"XlaPad", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "XlaPad", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
@_dispatch.add_dispatch_list
@tf_export('xla_recv')
def xla_recv(dtype, tensor_name, shape, name=None):
  r"""Receives the named tensor from another XLA computation. Wraps the XLA Recv

  operator documented at
  https://www.tensorflow.org/performance/xla/operation_semantics#recv .

  Args:
    dtype: A `tf.DType`. The type of the tensor.
    tensor_name: A `string`. A string key that identifies the channel.
    shape: A `tf.TensorShape` or list of `ints`. The shape of the tensor.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`. The tensor to receive.
  """
  # Eager mode: try the C fast path first; on fallback exceptions retry via
  # the Python eager helper, then via the dispatch registry.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "XlaRecv", name,
        tld.op_callbacks, "dtype", dtype, "tensor_name", tensor_name, "shape",
        shape)
      return _result
    except _core._FallbackException:
      try:
        return xla_recv_eager_fallback(
            dtype=dtype, tensor_name=tensor_name, shape=shape, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        # Give any registered dispatchers a chance to handle the call.
        result = _dispatch.dispatch(
              xla_recv, dtype=dtype, tensor_name=tensor_name, shape=shape,
                        name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  # Canonicalize attributes before building the graph node.
  dtype = _execute.make_type(dtype, "dtype")
  tensor_name = _execute.make_str(tensor_name, "tensor_name")
  shape = _execute.make_shape(shape, "shape")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaRecv", dtype=dtype, tensor_name=tensor_name, shape=shape,
                   name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          xla_recv, dtype=dtype, tensor_name=tensor_name, shape=shape,
                    name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Record the op so gradients can be computed when a tape is active.
    _attrs = ("dtype", _op._get_attr_type("dtype"), "tensor_name",
              _op.get_attr("tensor_name"), "shape", _op.get_attr("shape"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "XlaRecv", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Raw-op endpoint: exposes xla_recv as `tf.raw_ops.XlaRecv`.
XlaRecv = tf_export("raw_ops.XlaRecv")(_ops.to_raw_op(xla_recv))
def xla_recv_eager_fallback(dtype, tensor_name, shape, name, ctx):
  """Eager-mode fallback for XlaRecv: canonicalize attrs and execute."""
  dtype = _execute.make_type(dtype, "dtype")
  tensor_name = _execute.make_str(tensor_name, "tensor_name")
  shape = _execute.make_shape(shape, "shape")
  flat_inputs = []  # XlaRecv takes no tensor inputs.
  op_attrs = ("dtype", dtype, "tensor_name", tensor_name, "shape", shape)
  outputs = _execute.execute(b"XlaRecv", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("XlaRecv", flat_inputs, op_attrs, outputs)
  received, = outputs
  return received
@_dispatch.add_dispatch_list
@tf_export('xla_reduce')
def xla_reduce(input, init_value, dimensions_to_reduce, reducer, name=None):
  r"""Wraps the XLA Reduce operator, documented at

  https://www.tensorflow.org/performance/xla/operation_semantics#reduce .

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      the input tensor
    init_value: A `Tensor`. Must have the same type as `input`.
      a scalar representing the initial value for the reduction
    dimensions_to_reduce: A list of `ints`.
      dimension numbers over which to reduce
    reducer: A function decorated with @Defun. a reducer function to apply
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Eager mode: try the C fast path first; on fallback exceptions retry via
  # the Python eager helper, then via the dispatch registry.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "XlaReduce", name,
        tld.op_callbacks, input, init_value, "dimensions_to_reduce",
        dimensions_to_reduce, "reducer", reducer)
      return _result
    except _core._FallbackException:
      try:
        return xla_reduce_eager_fallback(
            input, init_value, dimensions_to_reduce=dimensions_to_reduce,
            reducer=reducer, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              xla_reduce, input=input, init_value=init_value,
                          dimensions_to_reduce=dimensions_to_reduce,
                          reducer=reducer, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  # Validate and canonicalize the list attribute before graph construction.
  if not isinstance(dimensions_to_reduce, (list, tuple)):
    raise TypeError(
        "Expected list for 'dimensions_to_reduce' argument to "
        "'xla_reduce' Op, not %r." % dimensions_to_reduce)
  dimensions_to_reduce = [_execute.make_int(_i, "dimensions_to_reduce") for _i in dimensions_to_reduce]
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaReduce", input=input, init_value=init_value,
                     dimensions_to_reduce=dimensions_to_reduce,
                     reducer=reducer, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          xla_reduce, input=input, init_value=init_value,
                      dimensions_to_reduce=dimensions_to_reduce,
                      reducer=reducer, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Record the op so gradients can be computed when a tape is active.
    _attrs = ("T", _op._get_attr_type("T"), "dimensions_to_reduce",
              _op.get_attr("dimensions_to_reduce"), "reducer",
              _op.get_attr("reducer"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "XlaReduce", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Raw-op endpoint: exposes xla_reduce as `tf.raw_ops.XlaReduce`.
XlaReduce = tf_export("raw_ops.XlaReduce")(_ops.to_raw_op(xla_reduce))
def xla_reduce_eager_fallback(input, init_value, dimensions_to_reduce, reducer, name, ctx):
  """Eager-mode fallback for XlaReduce: validate attrs and execute."""
  if not isinstance(dimensions_to_reduce, (list, tuple)):
    raise TypeError(
        "Expected list for 'dimensions_to_reduce' argument to "
        "'xla_reduce' Op, not %r." % dimensions_to_reduce)
  dimensions_to_reduce = [
      _execute.make_int(_i, "dimensions_to_reduce")
      for _i in dimensions_to_reduce]
  # Match input and init_value to a single dtype T.
  data_type, (input, init_value) = _execute.args_to_matching_eager(
      [input, init_value], ctx)
  flat_inputs = [input, init_value]
  op_attrs = ("T", data_type, "dimensions_to_reduce", dimensions_to_reduce,
              "reducer", reducer)
  outputs = _execute.execute(b"XlaReduce", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("XlaReduce", flat_inputs, op_attrs, outputs)
  reduced, = outputs
  return reduced
@_dispatch.add_dispatch_list
@tf_export('xla_reduce_window')
def xla_reduce_window(input, init_value, window_dimensions, window_strides, base_dilations, window_dilations, padding, computation, name=None):
  r"""Wraps the XLA ReduceWindow operator, documented at

  https://www.tensorflow.org/performance/xla/operation_semantics#reducewindow .

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      the input tensor
    init_value: A `Tensor`. Must have the same type as `input`.
      a scalar representing the initial value for the reduction
    window_dimensions: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      the shape of the window
    window_strides: A `Tensor`. Must have the same type as `window_dimensions`.
      the inter-window strides
    base_dilations: A `Tensor`. Must have the same type as `window_dimensions`.
    window_dilations: A `Tensor`. Must have the same type as `window_dimensions`.
    padding: A `Tensor`. Must have the same type as `window_dimensions`.
      the padding to apply at the start and end of each input dimensions
    computation: A function decorated with @Defun. a reducer function to apply
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Eager mode: try the C fast path first; on fallback exceptions retry via
  # the Python eager helper, then via the dispatch registry.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "XlaReduceWindow", name,
        tld.op_callbacks, input, init_value, window_dimensions,
        window_strides, base_dilations, window_dilations, padding,
        "computation", computation)
      return _result
    except _core._FallbackException:
      try:
        return xla_reduce_window_eager_fallback(
            input, init_value, window_dimensions, window_strides,
            base_dilations, window_dilations, padding,
            computation=computation, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              xla_reduce_window, input=input, init_value=init_value,
                                 window_dimensions=window_dimensions,
                                 window_strides=window_strides,
                                 base_dilations=base_dilations,
                                 window_dilations=window_dilations,
                                 padding=padding, computation=computation,
                                 name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaReduceWindow", input=input, init_value=init_value,
                           window_dimensions=window_dimensions,
                           window_strides=window_strides,
                           base_dilations=base_dilations,
                           window_dilations=window_dilations, padding=padding,
                           computation=computation, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          xla_reduce_window, input=input, init_value=init_value,
                             window_dimensions=window_dimensions,
                             window_strides=window_strides,
                             base_dilations=base_dilations,
                             window_dilations=window_dilations,
                             padding=padding, computation=computation,
                             name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Record the op so gradients can be computed when a tape is active.
    _attrs = ("T", _op._get_attr_type("T"), "Tindices",
              _op._get_attr_type("Tindices"), "computation",
              _op.get_attr("computation"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "XlaReduceWindow", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Raw-op endpoint: exposes xla_reduce_window as `tf.raw_ops.XlaReduceWindow`.
XlaReduceWindow = tf_export("raw_ops.XlaReduceWindow")(_ops.to_raw_op(xla_reduce_window))
def xla_reduce_window_eager_fallback(input, init_value, window_dimensions, window_strides, base_dilations, window_dilations, padding, computation, name, ctx):
  """Eager-mode fallback for XlaReduceWindow."""
  # Resolve one dtype for the data operands and one for the index operands.
  data_type, (input, init_value) = _execute.args_to_matching_eager(
      [input, init_value], ctx)
  index_type, index_args = _execute.args_to_matching_eager(
      [window_dimensions, window_strides, base_dilations, window_dilations,
       padding], ctx)
  (window_dimensions, window_strides, base_dilations, window_dilations,
   padding) = index_args
  flat_inputs = [input, init_value, window_dimensions, window_strides,
                 base_dilations, window_dilations, padding]
  op_attrs = ("T", data_type, "Tindices", index_type, "computation",
              computation)
  outputs = _execute.execute(b"XlaReduceWindow", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("XlaReduceWindow", flat_inputs, op_attrs,
                             outputs)
  reduced, = outputs
  return reduced
@_dispatch.add_dispatch_list
@tf_export('xla_replica_id')
def xla_replica_id(name=None):
  r"""Replica ID.

  Args:
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  # Eager mode: try the C fast path first; on fallback exceptions retry via
  # the Python eager helper, then via the dispatch registry.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "XlaReplicaId", name,
        tld.op_callbacks)
      return _result
    except _core._FallbackException:
      try:
        return xla_replica_id_eager_fallback(
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              xla_replica_id, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaReplicaId", name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          xla_replica_id, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # XlaReplicaId has no attrs; record with an empty attr tuple.
    _attrs = ()
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "XlaReplicaId", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Raw-op endpoint: exposes xla_replica_id as `tf.raw_ops.XlaReplicaId`.
XlaReplicaId = tf_export("raw_ops.XlaReplicaId")(_ops.to_raw_op(xla_replica_id))
def xla_replica_id_eager_fallback(name, ctx):
  """Eager-mode fallback for XlaReplicaId (no inputs, no attrs)."""
  flat_inputs = []
  op_attrs = None
  outputs = _execute.execute(b"XlaReplicaId", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("XlaReplicaId", flat_inputs, op_attrs, outputs)
  replica_id, = outputs
  return replica_id
@_dispatch.add_dispatch_list
@tf_export('xla_select_and_scatter')
def xla_select_and_scatter(operand, window_dimensions, window_strides, padding, source, init_value, select, scatter, name=None):
  r"""Wraps the XLA SelectAndScatter operator, documented at

  https://www.tensorflow.org/performance/xla/operation_semantics#selectandscatter
  .

  Args:
    operand: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      the input tensor
    window_dimensions: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      the shape of the window
    window_strides: A `Tensor`. Must have the same type as `window_dimensions`.
      the inter-window strides
    padding: A `Tensor`. Must have the same type as `window_dimensions`.
      the padding to apply at the start and end of each input dimensions
    source: A `Tensor`. Must have the same type as `operand`.
      a tensor of values to scatter
    init_value: A `Tensor`. Must have the same type as `operand`.
      a scalar representing the initial value for the output tensor
    select: A function decorated with @Defun. a selection function to apply
    scatter: A function decorated with @Defun. a scatter function to apply
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `operand`.
  """
  # Eager mode: try the C fast path first; on fallback exceptions retry via
  # the Python eager helper, then via the dispatch registry.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "XlaSelectAndScatter", name,
        tld.op_callbacks, operand, window_dimensions, window_strides, padding,
        source, init_value, "select", select, "scatter", scatter)
      return _result
    except _core._FallbackException:
      try:
        return xla_select_and_scatter_eager_fallback(
            operand, window_dimensions, window_strides, padding, source,
            init_value, select=select, scatter=scatter, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              xla_select_and_scatter, operand=operand,
                                      window_dimensions=window_dimensions,
                                      window_strides=window_strides,
                                      padding=padding, source=source,
                                      init_value=init_value, select=select,
                                      scatter=scatter, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaSelectAndScatter", operand=operand,
                               window_dimensions=window_dimensions,
                               window_strides=window_strides, padding=padding,
                               source=source, init_value=init_value,
                               select=select, scatter=scatter, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          xla_select_and_scatter, operand=operand,
                                  window_dimensions=window_dimensions,
                                  window_strides=window_strides,
                                  padding=padding, source=source,
                                  init_value=init_value, select=select,
                                  scatter=scatter, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Record the op so gradients can be computed when a tape is active.
    _attrs = ("T", _op._get_attr_type("T"), "Tindices",
              _op._get_attr_type("Tindices"), "select",
              _op.get_attr("select"), "scatter", _op.get_attr("scatter"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "XlaSelectAndScatter", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Raw-op endpoint: exposes xla_select_and_scatter as
# `tf.raw_ops.XlaSelectAndScatter`.
XlaSelectAndScatter = tf_export("raw_ops.XlaSelectAndScatter")(_ops.to_raw_op(xla_select_and_scatter))
def xla_select_and_scatter_eager_fallback(operand, window_dimensions, window_strides, padding, source, init_value, select, scatter, name, ctx):
  """Eager-mode fallback for XlaSelectAndScatter."""
  # Resolve one dtype for the data operands and one for the index operands.
  data_type, (operand, source, init_value) = _execute.args_to_matching_eager(
      [operand, source, init_value], ctx)
  index_type, (window_dimensions, window_strides, padding) = (
      _execute.args_to_matching_eager(
          [window_dimensions, window_strides, padding], ctx))
  flat_inputs = [operand, window_dimensions, window_strides, padding, source,
                 init_value]
  op_attrs = ("T", data_type, "Tindices", index_type, "select", select,
              "scatter", scatter)
  outputs = _execute.execute(b"XlaSelectAndScatter", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("XlaSelectAndScatter", flat_inputs, op_attrs,
                             outputs)
  scattered, = outputs
  return scattered
_XlaSelfAdjointEigOutput = collections.namedtuple(
"XlaSelfAdjointEig",
["w", "v"])
@_dispatch.add_dispatch_list
@tf_export('xla_self_adjoint_eig')
def xla_self_adjoint_eig(a, lower, max_iter, epsilon, name=None):
  r"""Computes the eigen decomposition of a batch of self-adjoint matrices

  (Note: Only real inputs are supported).

  Computes the eigenvalues and eigenvectors of the innermost N-by-N matrices in
  tensor such that tensor[...,:,:] * v[..., :,i] = e[..., i] * v[...,:,i], for
  i=0...N-1.

  Args:
    a: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      the input tensor.
    lower: A `bool`.
      a boolean specifies whether the calculation is done with the lower
      triangular part or the upper triangular part.
    max_iter: An `int`.
      maximum number of sweep update, i.e., the whole lower triangular
      part or upper triangular part based on parameter lower. Heuristically, it has
      been argued that approximatly logN sweeps are needed in practice (Ref: Golub &
      van Loan "Matrix Computation").
    epsilon: A `float`. the tolerance ratio.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (w, v).

    w: A `Tensor`. Has the same type as `a`. The eigenvalues in ascending order, each repeated according to its
      multiplicity.
    v: A `Tensor`. Has the same type as `a`. The column v[..., :, i] is the normalized eigenvector corresponding to the
      eigenvalue w[..., i].
  """
  # Eager mode: try the C fast path first; on fallback exceptions retry via
  # the Python eager helper, then via the dispatch registry.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "XlaSelfAdjointEig", name,
        tld.op_callbacks, a, "lower", lower, "max_iter", max_iter, "epsilon",
        epsilon)
      _result = _XlaSelfAdjointEigOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        return xla_self_adjoint_eig_eager_fallback(
            a, lower=lower, max_iter=max_iter, epsilon=epsilon, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              xla_self_adjoint_eig, a=a, lower=lower, max_iter=max_iter,
                                    epsilon=epsilon, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  # Canonicalize attributes before building the graph node.
  lower = _execute.make_bool(lower, "lower")
  max_iter = _execute.make_int(max_iter, "max_iter")
  epsilon = _execute.make_float(epsilon, "epsilon")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaSelfAdjointEig", a=a, lower=lower, max_iter=max_iter,
                             epsilon=epsilon, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          xla_self_adjoint_eig, a=a, lower=lower, max_iter=max_iter,
                                epsilon=epsilon, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Record the op so gradients can be computed when a tape is active.
    _attrs = ("lower", _op._get_attr_bool("lower"), "max_iter",
              _op._get_attr_int("max_iter"), "epsilon",
              _op.get_attr("epsilon"), "T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "XlaSelfAdjointEig", _inputs_flat, _attrs, _result)
  _result = _XlaSelfAdjointEigOutput._make(_result)
  return _result

# Raw-op endpoint: exposes xla_self_adjoint_eig as
# `tf.raw_ops.XlaSelfAdjointEig`.
XlaSelfAdjointEig = tf_export("raw_ops.XlaSelfAdjointEig")(_ops.to_raw_op(xla_self_adjoint_eig))
def xla_self_adjoint_eig_eager_fallback(a, lower, max_iter, epsilon, name, ctx):
  """Eager-mode fallback for XlaSelfAdjointEig; returns a (w, v) namedtuple."""
  lower = _execute.make_bool(lower, "lower")
  max_iter = _execute.make_int(max_iter, "max_iter")
  epsilon = _execute.make_float(epsilon, "epsilon")
  matrix_type, (a,) = _execute.args_to_matching_eager([a], ctx)
  flat_inputs = [a]
  op_attrs = ("lower", lower, "max_iter", max_iter, "epsilon", epsilon, "T",
              matrix_type)
  outputs = _execute.execute(b"XlaSelfAdjointEig", 2, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("XlaSelfAdjointEig", flat_inputs, op_attrs,
                             outputs)
  return _XlaSelfAdjointEigOutput._make(outputs)
@_dispatch.add_dispatch_list
@tf_export('xla_send')
def xla_send(tensor, tensor_name, name=None):
  r"""Sends the named tensor to another XLA computation. Wraps the XLA Send operator

  documented at
  https://www.tensorflow.org/performance/xla/operation_semantics#send .

  Args:
    tensor: A `Tensor`. The tensor to send.
    tensor_name: A `string`. A string key that identifies the channel.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  # Eager mode: try the C fast path first; on fallback exceptions retry via
  # the Python eager helper, then via the dispatch registry.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "XlaSend", name,
        tld.op_callbacks, tensor, "tensor_name", tensor_name)
      return _result
    except _core._FallbackException:
      try:
        return xla_send_eager_fallback(
            tensor, tensor_name=tensor_name, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              xla_send, tensor=tensor, tensor_name=tensor_name, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  tensor_name = _execute.make_str(tensor_name, "tensor_name")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaSend", tensor=tensor, tensor_name=tensor_name, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          xla_send, tensor=tensor, tensor_name=tensor_name, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  # XlaSend produces no outputs; return the Operation itself.
  return _op

# Raw-op endpoint: exposes xla_send as `tf.raw_ops.XlaSend`.
XlaSend = tf_export("raw_ops.XlaSend")(_ops.to_raw_op(xla_send))
def xla_send_eager_fallback(tensor, tensor_name, name, ctx):
  """Eager-mode fallback for XlaSend; produces no outputs."""
  tensor_name = _execute.make_str(tensor_name, "tensor_name")
  payload_type, (tensor,) = _execute.args_to_matching_eager([tensor], ctx)
  flat_inputs = [tensor]
  op_attrs = ("T", payload_type, "tensor_name", tensor_name)
  _execute.execute(b"XlaSend", 0, inputs=flat_inputs, attrs=op_attrs,
                   ctx=ctx, name=name)
  return None
@_dispatch.add_dispatch_list
@tf_export('xla_sharding')
def xla_sharding(input, name=None):
  r"""An op which shards the input based on the given sharding attribute.

  Args:
    input: A `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Eager mode: try the C fast path first; on fallback exceptions retry via
  # the Python eager helper, then via the dispatch registry.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "XlaSharding", name,
        tld.op_callbacks, input)
      return _result
    except _core._FallbackException:
      try:
        return xla_sharding_eager_fallback(
            input, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              xla_sharding, input=input, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaSharding", input=input, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          xla_sharding, input=input, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Record the op so gradients can be computed when a tape is active.
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "XlaSharding", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Raw-op endpoint: exposes xla_sharding as `tf.raw_ops.XlaSharding`.
XlaSharding = tf_export("raw_ops.XlaSharding")(_ops.to_raw_op(xla_sharding))
def xla_sharding_eager_fallback(input, name, ctx):
  """Eager-mode fallback for XlaSharding."""
  input_type, (input,) = _execute.args_to_matching_eager([input], ctx)
  flat_inputs = [input]
  op_attrs = ("T", input_type)
  outputs = _execute.execute(b"XlaSharding", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("XlaSharding", flat_inputs, op_attrs, outputs)
  sharded, = outputs
  return sharded
@_dispatch.add_dispatch_list
@tf_export('xla_sort')
def xla_sort(input, name=None):
  r"""Wraps the XLA Sort operator, documented at

  https://www.tensorflow.org/performance/xla/operation_semantics#sort
  .

  Sorts a tensor. Currently only sorts in ascending order are supported.

  Args:
    input: A `Tensor`. A `Tensor` of type T.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`. A `Tensor` of type T.
  """
  # Eager mode: try the C fast path first; on fallback exceptions retry via
  # the Python eager helper, then via the dispatch registry.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "XlaSort", name,
        tld.op_callbacks, input)
      return _result
    except _core._FallbackException:
      try:
        return xla_sort_eager_fallback(
            input, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              xla_sort, input=input, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaSort", input=input, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          xla_sort, input=input, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Record the op so gradients can be computed when a tape is active.
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "XlaSort", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Raw-op endpoint: exposes xla_sort as `tf.raw_ops.XlaSort`.
XlaSort = tf_export("raw_ops.XlaSort")(_ops.to_raw_op(xla_sort))
def xla_sort_eager_fallback(input, name, ctx):
  """Eager-mode fallback for XlaSort."""
  input_type, (input,) = _execute.args_to_matching_eager([input], ctx)
  flat_inputs = [input]
  op_attrs = ("T", input_type)
  outputs = _execute.execute(b"XlaSort", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("XlaSort", flat_inputs, op_attrs, outputs)
  sorted_tensor, = outputs
  return sorted_tensor
_XlaSvdOutput = collections.namedtuple(
"XlaSvd",
["s", "u", "v"])
@_dispatch.add_dispatch_list
@tf_export('xla_svd')
def xla_svd(a, max_iter, epsilon, precision_config, name=None):
  r"""Computes the eigen decomposition of a batch of self-adjoint matrices

  (Note: Only real inputs are supported).

  Computes the eigenvalues and eigenvectors of the innermost M-by-N matrices in
  tensor such that tensor[...,:,:] = u[..., :, :] * Diag(s[..., :]) * Transpose(v[...,:,:]).

  Args:
    a: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      the input tensor.
    max_iter: An `int`.
      maximum number of sweep update, i.e., the whole lower triangular
      part or upper triangular part based on parameter lower. Heuristically, it has
      been argued that approximatly log(min (M, N)) sweeps are needed in practice
      (Ref: Golub & van Loan "Matrix Computation").
    epsilon: A `float`. the tolerance ratio.
    precision_config: A `string`. a serialized xla::PrecisionConfig proto.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (s, u, v).

    s: A `Tensor`. Has the same type as `a`. Singular values. The values are sorted in reverse order of magnitude, so
      s[..., 0] is the largest value, s[..., 1] is the second largest, etc.
    u: A `Tensor`. Has the same type as `a`. Left singular vectors.
    v: A `Tensor`. Has the same type as `a`. Right singular vectors.
  """
  # Eager mode: try the C fast path first; on fallback exceptions retry via
  # the Python eager helper, then via the dispatch registry.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "XlaSvd", name,
        tld.op_callbacks, a, "max_iter", max_iter, "epsilon", epsilon,
        "precision_config", precision_config)
      _result = _XlaSvdOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        return xla_svd_eager_fallback(
            a, max_iter=max_iter, epsilon=epsilon,
            precision_config=precision_config, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              xla_svd, a=a, max_iter=max_iter, epsilon=epsilon,
                       precision_config=precision_config, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  # Canonicalize attributes before building the graph node.
  max_iter = _execute.make_int(max_iter, "max_iter")
  epsilon = _execute.make_float(epsilon, "epsilon")
  precision_config = _execute.make_str(precision_config, "precision_config")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaSvd", a=a, max_iter=max_iter, epsilon=epsilon,
                  precision_config=precision_config, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          xla_svd, a=a, max_iter=max_iter, epsilon=epsilon,
                   precision_config=precision_config, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Record the op so gradients can be computed when a tape is active.
    _attrs = ("max_iter", _op._get_attr_int("max_iter"), "epsilon",
              _op.get_attr("epsilon"), "precision_config",
              _op.get_attr("precision_config"), "T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "XlaSvd", _inputs_flat, _attrs, _result)
  _result = _XlaSvdOutput._make(_result)
  return _result

# Raw-op endpoint: exposes xla_svd as `tf.raw_ops.XlaSvd`.
XlaSvd = tf_export("raw_ops.XlaSvd")(_ops.to_raw_op(xla_svd))
def xla_svd_eager_fallback(a, max_iter, epsilon, precision_config, name, ctx):
  """Eager-mode fallback for XlaSvd; returns an (s, u, v) namedtuple."""
  max_iter = _execute.make_int(max_iter, "max_iter")
  epsilon = _execute.make_float(epsilon, "epsilon")
  precision_config = _execute.make_str(precision_config, "precision_config")
  matrix_type, (a,) = _execute.args_to_matching_eager([a], ctx)
  flat_inputs = [a]
  op_attrs = ("max_iter", max_iter, "epsilon", epsilon, "precision_config",
              precision_config, "T", matrix_type)
  outputs = _execute.execute(b"XlaSvd", 3, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("XlaSvd", flat_inputs, op_attrs, outputs)
  return _XlaSvdOutput._make(outputs)
# Machine-generated TensorFlow op wrapper. Control flow: try the fast C++
# eager path; on fallback, try the Python eager fallback; on type errors, try
# the dispatch registry; otherwise build a graph node.
@_dispatch.add_dispatch_list
@tf_export('xla_while')
def xla_while(input, cond, body, name=None):
  r"""output = input; While (Cond(output)) { output = Body(output) }

  Args:
    input: A list of `Tensor` objects.
      A list of input tensors whose types are T.
    cond: A function decorated with @Defun.
      A function takes 'input' and returns a tensor. If the tensor is
      a scalar of non-boolean, the scalar is converted to a boolean
      according to the following rule: if the scalar is a numerical
      value, non-zero means True and zero means False; if the scalar is
      a string, non-empty means True and empty means False. If the
      tensor is not a scalar, non-emptiness means True and False
      otherwise.
    body: A function decorated with @Defun.
      A function that takes a list of tensors and returns another
      list of tensors. Both lists have the same types as specified by T.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects. Has the same type as `input`.
    A list of output tensors whose types are T.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute directly through the C++ eager runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "XlaWhile", name,
        tld.op_callbacks, input, "cond", cond, "body", body)
      return _result
    except _core._FallbackException:
      try:
        return xla_while_eager_fallback(
            input, cond=cond, body=body, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      # Give registered dispatchers (e.g. for custom tensor-like types) a
      # chance to handle the call before re-raising.
      result = _dispatch.dispatch(
            xla_while, input=input, cond=cond, body=body, name=name)
      if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return result
      raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaWhile", input=input, cond=cond, body=body, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          xla_while, input=input, cond=cond, body=body, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if not _result:
    # Ops with no outputs return the Operation itself.
    return _op
  if _execute.must_record_gradient():
    _attrs = ("T", _op.get_attr("T"), "cond", _op.get_attr("cond"), "body",
              _op.get_attr("body"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "XlaWhile", _inputs_flat, _attrs, _result)
  return _result
XlaWhile = tf_export("raw_ops.XlaWhile")(_ops.to_raw_op(xla_while))
def xla_while_eager_fallback(input, cond, body, name, ctx):
  """Slow-path eager execution of XlaWhile; the output count equals the
  number of input tensors."""
  # Convert the heterogeneous input list to eager tensors, capturing their
  # dtypes as the T attribute list.
  _attr_T, input = _execute.convert_to_mixed_eager_tensors(input, ctx)
  _inputs_flat = list(input)
  _attrs = ("T", _attr_T, "cond", cond, "body", body)
  _result = _execute.execute(b"XlaWhile", len(input), inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "XlaWhile", _inputs_flat, _attrs, _result)
  return _result
| 42.064733 | 232 | 0.689487 |
7eb6fbe8096107c9f6a8116bfdeadb94cf5f1c75 | 855 | py | Python | Database/SQLIte/14.ADD/U1.py | sarincr/Business-analytics-Course-with-Python- | 10e577fdb3cf90bb87c97cd23ee3ecd6a083bfc4 | [
"MIT"
] | 3 | 2022-01-18T05:35:52.000Z | 2022-03-25T06:13:54.000Z | Database/SQLIte/14.ADD/U1.py | sarincr/Business-analytics-Course-with-Python- | 10e577fdb3cf90bb87c97cd23ee3ecd6a083bfc4 | [
"MIT"
] | null | null | null | Database/SQLIte/14.ADD/U1.py | sarincr/Business-analytics-Course-with-Python- | 10e577fdb3cf90bb87c97cd23ee3ecd6a083bfc4 | [
"MIT"
] | 2 | 2022-01-17T08:23:59.000Z | 2022-01-17T08:28:18.000Z | import sqlite3
# Example script: create an EMPLOYEE table, insert sample rows, and give one
# employee a raise.
X = sqlite3.connect('NeDB.db')  # opens (and creates on first run) the DB file
Y = X.cursor()
# IF NOT EXISTS makes table creation safe on repeated runs.
Y.execute('''CREATE TABLE IF NOT EXISTS EMPLOYEE (
ID integer,
Name text NOT NULL,
Date_Join text,
Place text,
Age integer,
Salary real);''')
# NOTE(review): these rows are inserted on every run, so repeated executions
# accumulate duplicate employees — acceptable for a demo, not for real use.
Y.execute("INSERT INTO Employee VALUES (1,'John','2020-03-01','Kerala',32,25000),(2,'Adam','2020-01-01','TN',22,30000),(3,'Mary','2022-01-01','Karnataka',24,120000)")
# Show the table contents before the update.
data = Y.execute("SELECT* from Employee");
for k in data:
    print (k)
print("..........................................................................")
# Give employee 3 a 50000.0 raise.
Y.execute('''UPDATE Employee SET Salary = Salary + 50000.0 WHERE ID=3;''')
X.commit()
# Show the table contents after the update.
data = Y.execute("SELECT* from Employee");
for k in data:
    print (k)
X.commit()
Y.close()
| 19.883721 | 167 | 0.495906 |
8596085ce781bc8005fe85b04050a2d44b3ea943 | 3,868 | py | Python | python/test/lib/main_test.py | cyrill-k/netsec-scion | 4698f6057d1f4851d6bd24d9c925f9e6201ce371 | [
"Apache-2.0"
] | null | null | null | python/test/lib/main_test.py | cyrill-k/netsec-scion | 4698f6057d1f4851d6bd24d9c925f9e6201ce371 | [
"Apache-2.0"
] | null | null | null | python/test/lib/main_test.py | cyrill-k/netsec-scion | 4698f6057d1f4851d6bd24d9c925f9e6201ce371 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`lib_main_test` --- lib.main unit tests
============================================
"""
# Stdlib
from unittest.mock import patch
# External packages
import nose
import nose.tools as ntools
# SCION
from lib.main import main_default, main_wrapper
from test.testcommon import create_mock
class TestMainWrapper(object):
    """
    Unit tests for lib.main.main_wrapper
    """
    def test_basic(self):
        # A plain callable should be invoked once with the given arguments.
        fake_main = create_mock()
        main_wrapper(fake_main, "arg1", arg2="arg2")
        fake_main.assert_called_once_with("arg1", arg2="arg2")
    def test_sysexit(self):
        # SystemExit raised by the wrapped callable must propagate unchanged.
        fake_main = create_mock()
        fake_main.side_effect = SystemExit
        ntools.assert_raises(SystemExit, main_wrapper, fake_main)
    @patch("lib.main.sys.exit", autospec=True)
    @patch("lib.main.log_exception", autospec=True)
    def test_excp(self, mock_log_exc, mock_exit):
        # Any other exception is logged and turned into a process exit.
        fake_main = create_mock()
        fake_main.side_effect = KeyError
        main_wrapper(fake_main)
        ntools.ok_(mock_log_exc.called)
        ntools.ok_(mock_exit.called)
class TestMainDefault(object):
    """
    Unit tests for lib.main.main_default
    """
    # NOTE: @patch decorators inject mocks bottom-up, so the parameter order
    # (signals, argparse, init_log, trace) mirrors the reversed decorator stack.
    @patch("lib.main.trace", autospec=True)
    @patch("lib.main.init_logging", autospec=True)
    @patch("lib.main.argparse.ArgumentParser", autospec=True)
    @patch("lib.main.handle_signals", autospec=True)
    def test_trace(self, signals, argparse, init_log, trace):
        type_ = create_mock()
        inst = type_.return_value = create_mock(["id", "run"])
        # Stub out the parsed CLI arguments main_default reads.
        parser = argparse.return_value
        args = parser.parse_args.return_value
        args.log_dir = "logging"
        args.server_id = "srvid"
        args.conf_dir = "confdir"
        args.prom = "prom"
        args.spki_cache_dir = "gen-cache"
        # Call
        main_default(type_, trace_=True, kwarg1="kwarg1")
        # Tests
        signals.assert_called_once_with()
        argparse.assert_called_once_with()
        ntools.ok_(parser.add_argument.called)
        parser.parse_args.assert_called_once_with()
        init_log.assert_called_once_with("logging/srvid")
        type_.assert_called_once_with("srvid", "confdir", spki_cache_dir="gen-cache",
                                      prom_export="prom", kwarg1="kwarg1")
        trace.assert_called_once_with(inst.id)
        inst.run.assert_called_once_with()
    # Parameterized helper: checks that exactly one of the core/local types is
    # instantiated depending on whether the topology reports a core AS.
    @patch("lib.main.Topology.from_file", new_callable=create_mock)
    @patch("lib.main.init_logging", autospec=True)
    @patch("lib.main.argparse.ArgumentParser", autospec=True)
    @patch("lib.main.handle_signals", autospec=True)
    def _check_core_local(self, is_core, core_called, local_called, signals,
                          argparse, init_log, topo):
        core_type = create_mock()
        local_type = create_mock()
        topo.return_value = create_mock(["is_core_as"])
        topo.return_value.is_core_as = is_core
        # Call
        main_default(core_type, local_type)
        # Tests
        ntools.eq_(core_type.called, core_called)
        ntools.eq_(local_type.called, local_called)
    def test_core_local(self):
        # nose-style generator test: yields (callable, *args) tuples.
        yield self._check_core_local, True, True, False
        yield self._check_core_local, False, False, True
if __name__ == "__main__":
    # Run this module's tests through nose's test runner.
    nose.run(defaultTest=__name__)
| 34.535714 | 85 | 0.661841 |
f87567114b0b1a04aec24bfe9b304276960c32ef | 5,198 | py | Python | feline/jobposts/migrations/0001_initial.py | Jeanluis019/feline | f802f7a57490b9425b32d88fa10d9cd7234e3a1f | [
"MIT"
] | 6 | 2021-10-13T01:05:27.000Z | 2021-11-10T03:15:15.000Z | feline/jobposts/migrations/0001_initial.py | Jeanluis019/feline | f802f7a57490b9425b32d88fa10d9cd7234e3a1f | [
"MIT"
] | 1 | 2022-03-30T21:26:16.000Z | 2022-03-30T21:26:16.000Z | feline/jobposts/migrations/0001_initial.py | Jeanluis019/feline | f802f7a57490b9425b32d88fa10d9cd7234e3a1f | [
"MIT"
] | 1 | 2021-11-10T13:32:21.000Z | 2021-11-10T13:32:21.000Z | # Generated by Django 3.1.13 on 2021-10-15 04:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_countries.fields
import django_extensions.db.fields
import taggit.managers
class Migration(migrations.Migration):
    # Auto-generated initial migration for the jobposts app: creates the
    # Category, Company and JobPost tables. Do not hand-edit field definitions;
    # generate a follow-up migration instead.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('taggit', '0003_taggeditem_add_unique_index'),
    ]
    operations = [
        # Category: simple named lookup table, ordered by name.
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
                ('name', models.CharField(max_length=255, verbose_name='nombre')),
                ('description', models.TextField(blank=True, null=True, verbose_name='descripción')),
                ('slug', django_extensions.db.fields.AutoSlugField(blank=True, editable=False, populate_from='name', verbose_name='slug')),
            ],
            options={
                'verbose_name_plural': 'categories',
                'ordering': ['name'],
            },
        ),
        # Company: owned by an auth user; holds contact / social URLs.
        migrations.CreateModel(
            name='Company',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
                ('logo', models.ImageField(blank=True, null=True, upload_to='')),
                ('name', models.CharField(max_length=255, verbose_name='nombre')),
                ('description', models.TextField(blank=True, null=True, verbose_name='descripción')),
                ('slug', django_extensions.db.fields.AutoSlugField(blank=True, editable=False, populate_from='name', verbose_name='slug')),
                ('email', models.EmailField(max_length=254)),
                ('verified', models.BooleanField()),
                ('company_url', models.URLField(blank=True)),
                ('twitter_url', models.URLField(blank=True)),
                # NOTE(review): field name is misspelled ("lindkedin"); renaming
                # now would require a schema migration, so it is kept as-is here.
                ('lindkedin_url', models.URLField(blank=True)),
                ('country', django_countries.fields.CountryField(max_length=2)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'companies',
            },
        ),
        # JobPost: the actual listing; links a Company and a Category, tagged
        # via django-taggit.
        migrations.CreateModel(
            name='JobPost',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
                ('title', models.CharField(max_length=255, verbose_name='title')),
                ('description', models.TextField(blank=True, null=True, verbose_name='description')),
                ('slug', django_extensions.db.fields.AutoSlugField(blank=True, editable=False, populate_from='title', verbose_name='slug')),
                ('location', django_countries.fields.CountryField(max_length=2)),
                ('how_to_apply', models.TextField()),
                ('application_url', models.URLField(blank=True, null=True)),
                ('application_email', models.EmailField(blank=True, max_length=254, null=True)),
                ('status', models.CharField(choices=[('new', 'New'), ('approved', 'Approved'), ('deleted', 'Deleted'), ('expired', 'Expired')], default='new', max_length=20)),
                ('job_type', models.CharField(choices=[('Part Time', 'Part Time'), ('Full Time', 'Full Time'), ('Contract', 'Contract'), ('Internship', 'Internship')], max_length=20)),
                ('currency', models.CharField(choices=[('DOP', 'Pesos'), ('USD', 'Dollars'), ('EUR', 'Euros')], max_length=20)),
                ('salary_range_start_at', models.IntegerField(blank=True, null=True)),
                ('salary_range_end_at', models.IntegerField(blank=True, null=True)),
                ('sponsor_relocation', models.BooleanField(default=False)),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='jobposts.category')),
                ('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='jobposts.company')),
                ('tags', taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| 60.44186 | 184 | 0.620623 |
ef9beb0bf67db79893b7b966d7de5583cf6dc557 | 817 | py | Python | tests/test_api_v1_services_dpinger_start.py | jaredhendrickson13/pfsense-api | 72d85801673eaba66bdc4a698fbed561c61130d6 | [
"Apache-2.0"
] | 311 | 2020-04-13T16:38:56.000Z | 2022-03-28T12:56:12.000Z | tests/test_api_v1_services_dpinger_start.py | jaredhendrickson13/pfsense-api | 72d85801673eaba66bdc4a698fbed561c61130d6 | [
"Apache-2.0"
] | 171 | 2020-04-23T21:41:06.000Z | 2022-03-31T19:55:12.000Z | tests/test_api_v1_services_dpinger_start.py | jaredhendrickson13/pfsense-api | 72d85801673eaba66bdc4a698fbed561c61130d6 | [
"Apache-2.0"
] | 48 | 2020-07-19T22:43:42.000Z | 2022-03-25T16:20:17.000Z | # Copyright 2022 Jared Hendrickson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unit_test_framework
class APIUnitTestServicesDpingerStart(unit_test_framework.APIUnitTest):
    """Unit test for the dpinger service start endpoint."""
    # Endpoint under test.
    uri = "/api/v1/services/dpinger/start"
    # POST payloads to exercise; presumably executed by the APIUnitTest
    # base class — confirm against unit_test_framework.
    post_tests = [{"name": "Start the dpinger service"}]
# Instantiating the class appears to trigger the test run (framework
# convention — NOTE(review): confirm against unit_test_framework).
APIUnitTestServicesDpingerStart()
| 37.136364 | 74 | 0.772338 |
cdf96353664206ccfbeae1cad0e6470f8345de30 | 1,697 | py | Python | scripts/train_yaml.py | Ramsha04/kits19-2d-reproduce | 66678f1eda3688d6dc64389e9a80ae0b754a3052 | [
"Apache-2.0"
] | null | null | null | scripts/train_yaml.py | Ramsha04/kits19-2d-reproduce | 66678f1eda3688d6dc64389e9a80ae0b754a3052 | [
"Apache-2.0"
] | null | null | null | scripts/train_yaml.py | Ramsha04/kits19-2d-reproduce | 66678f1eda3688d6dc64389e9a80ae0b754a3052 | [
"Apache-2.0"
] | null | null | null | from catalyst.dl import SupervisedRunner
import sys
sys.path.append(".")
from kits19cnn.experiments import TrainSegExperiment2D, seed_everything
from kits19cnn.visualize import plot_metrics, save_figs
def main(config):
    """
    Main code for training a classification/seg/classification+seg model.

    Seeds all RNGs from config["io_params"]["split_seed"], builds the 2D
    segmentation experiment, trains it with catalyst's SupervisedRunner, and
    optionally saves metric plots.

    Args:
        config (dict): dictionary read from a yaml file
            i.e. script/configs/train.yml
    Returns:
        None
    """
    # setting up the train/val split with filenames
    seed = config["io_params"]["split_seed"]
    seed_everything(seed)
    # The experiment object bundles model, loss, optimizer, scheduler,
    # dataloaders and callbacks derived from the config.
    exp = TrainSegExperiment2D(config)
    output_key = "logits"
    print(f"Seed: {seed}")
    runner = SupervisedRunner(output_key=output_key)
    runner.train(model=exp.model, criterion=exp.criterion, optimizer=exp.opt,
                 scheduler=exp.lr_scheduler, loaders=exp.loaders,
                 callbacks=exp.cb_list, **config["runner_params"])
    # Not saving plots if plot_params not specified in config
    if config.get("plot_params"):
        figs = plot_metrics(logdir=config["runner_params"]["logdir"],
                            metrics=config["plot_params"]["metrics"])
        save_figs(figs, save_dir=config["plot_params"]["save_dir"])
if __name__ == "__main__":
    import yaml
    import argparse
    # Parse the path to the YAML training config from the command line.
    parser = argparse.ArgumentParser(description="For training.")
    parser.add_argument("--yml_path", type=str, required=True,
                        help="Path to the .yml config.")
    args = parser.parse_args()
    with open(args.yml_path, 'r') as stream:
        try:
            config = yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            # BUG FIX: previously the error was only printed and main(config)
            # still ran, crashing with NameError because `config` was never
            # bound. Abort with a non-zero exit code instead.
            print(exc)
            raise SystemExit(1)
    main(config)
| 31.425926 | 77 | 0.657631 |
79dff137e5cbee9d0806dbae4dd69ac205eecb3e | 15,066 | py | Python | venv/Lib/site-packages/pip/_vendor/urllib3/connection.py | RiccardoCherchi/Barcode-Stock | 699b977fa70ea14a7ac4d33bb7bb2f107aa2ca20 | [
"MIT"
] | 1 | 2020-10-21T04:51:46.000Z | 2020-10-21T04:51:46.000Z | venv/Lib/site-packages/pip/_vendor/urllib3/connection.py | RiccardoCherchi/Barcode-Stock | 699b977fa70ea14a7ac4d33bb7bb2f107aa2ca20 | [
"MIT"
] | 2 | 2020-10-23T06:51:04.000Z | 2020-11-12T07:03:37.000Z | venv/Lib/site-packages/pip/_vendor/urllib3/connection.py | RiccardoCherchi/Barcode-Stock | 699b977fa70ea14a7ac4d33bb7bb2f107aa2ca20 | [
"MIT"
] | 1 | 2020-10-24T05:21:20.000Z | 2020-10-24T05:21:20.000Z | from __future__ import absolute_import
import datetime
import logging
import os
import socket
from socket import error as SocketError, timeout as SocketTimeout
import warnings
from .packages import six
from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection
from .packages.six.moves.http_client import HTTPException # noqa: F401
try: # Compiled with SSL?
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
ssl = None
class BaseSSLError(BaseException):
pass
try:
# Python 3: not a no-op, we're adding this to the namespace so it can be imported.
ConnectionError = ConnectionError
except NameError:
# Python 2
class ConnectionError(Exception):
pass
from .exceptions import (
NewConnectionError,
ConnectTimeoutError,
SubjectAltNameWarning,
SystemTimeWarning,
)
from .packages.ssl_match_hostname import match_hostname, CertificateError
from .util.ssl_ import (
resolve_cert_reqs,
resolve_ssl_version,
assert_fingerprint,
create_urllib3_context,
ssl_wrap_socket
)
from .util import connection
from ._collections import HTTPHeaderDict
# Module-level logger for this vendored urllib3 module.
log = logging.getLogger(__name__)
# Default TCP ports per URL scheme.
port_by_scheme = {
    'http': 80,
    'https': 443,
}
# When updating RECENT_DATE, move it to within two years of the current date,
# and not less than 6 months ago.
# Example: if Today is 2018-01-01, then RECENT_DATE should be any date on or
# after 2016-01-01 (today - 2 years) AND before 2017-07-01 (today - 6 months)
# Used by VerifiedHTTPSConnection.connect to warn when the system clock is so
# far behind that certificate validation is likely to fail.
RECENT_DATE = datetime.date(2017, 6, 30)
class DummyConnection(object):
    """Used to detect a failed ConnectionCls import."""
    # Exported as HTTPSConnection when the `ssl` module is unavailable (see
    # the `if ssl:` block at the bottom of this module).
    pass
class HTTPConnection(_HTTPConnection, object):
    """
    Based on httplib.HTTPConnection but provides an extra constructor
    backwards-compatibility layer between older and newer Pythons.
    Additional keyword parameters are used to configure attributes of the connection.
    Accepted parameters include:
    - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
    - ``source_address``: Set the source address for the current connection.
    - ``socket_options``: Set specific options on the underlying socket. If not specified, then
    defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
    Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
    For example, if you wish to enable TCP Keep Alive in addition to the defaults,
    you might pass::
    HTTPConnection.default_socket_options + [
    (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
    ]
    Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
    """
    default_port = port_by_scheme['http']
    #: Disable Nagle's algorithm by default.
    #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
    default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
    #: Whether this connection verifies the host's certificate.
    is_verified = False
    def __init__(self, *args, **kw):
        if six.PY3:
            # Python 3's http.client does not accept `strict`; drop it.
            kw.pop('strict', None)
        # Pre-set source_address.
        self.source_address = kw.get('source_address')
        #: The socket options provided by the user. If no options are
        #: provided, we use the default options.
        self.socket_options = kw.pop('socket_options', self.default_socket_options)
        _HTTPConnection.__init__(self, *args, **kw)
    @property
    def host(self):
        """
        Getter method to remove any trailing dots that indicate the hostname is an FQDN.
        In general, SSL certificates don't include the trailing dot indicating a
        fully-qualified domain name, and thus, they don't validate properly when
        checked against a domain name that includes the dot. In addition, some
        servers may not expect to receive the trailing dot when provided.
        However, the hostname with trailing dot is critical to DNS resolution; doing a
        lookup with the trailing dot will properly only resolve the appropriate FQDN,
        whereas a lookup without a trailing dot will search the system's search domain
        list. Thus, it's important to keep the original host around for use only in
        those cases where it's appropriate (i.e., when doing DNS lookup to establish the
        actual TCP connection across which we're going to send HTTP requests).
        """
        return self._dns_host.rstrip('.')
    @host.setter
    def host(self, value):
        """
        Setter for the `host` property.
        We assume that only urllib3 uses the _dns_host attribute; httplib itself
        only uses `host`, and it seems reasonable that other libraries follow suit.
        """
        self._dns_host = value
    def _new_conn(self):
        """ Establish a socket connection and set nodelay settings on it.
        :return: New socket connection.
        """
        extra_kw = {}
        if self.source_address:
            extra_kw['source_address'] = self.source_address
        if self.socket_options:
            extra_kw['socket_options'] = self.socket_options
        try:
            # Note: uses the un-stripped _dns_host for DNS resolution (see the
            # `host` property docstring above).
            conn = connection.create_connection(
                (self._dns_host, self.port), self.timeout, **extra_kw)
        except SocketTimeout:
            raise ConnectTimeoutError(
                self, "Connection to %s timed out. (connect timeout=%s)" %
                (self.host, self.timeout))
        except SocketError as e:
            raise NewConnectionError(
                self, "Failed to establish a new connection: %s" % e)
        return conn
    def _prepare_conn(self, conn):
        self.sock = conn
        # Google App Engine's httplib does not define _tunnel_host
        if getattr(self, '_tunnel_host', None):
            # TODO: Fix tunnel so it doesn't depend on self.sock state.
            self._tunnel()
            # Mark this connection as not reusable
            self.auto_open = 0
    def connect(self):
        conn = self._new_conn()
        self._prepare_conn(conn)
    def request_chunked(self, method, url, body=None, headers=None):
        """
        Alternative to the common request method, which sends the
        body with chunked encoding and not as one block
        """
        headers = HTTPHeaderDict(headers if headers is not None else {})
        skip_accept_encoding = 'accept-encoding' in headers
        skip_host = 'host' in headers
        self.putrequest(
            method,
            url,
            skip_accept_encoding=skip_accept_encoding,
            skip_host=skip_host
        )
        for header, value in headers.items():
            self.putheader(header, value)
        if 'transfer-encoding' not in headers:
            self.putheader('Transfer-Encoding', 'chunked')
        self.endheaders()
        if body is not None:
            stringish_types = six.string_types + (bytes,)
            if isinstance(body, stringish_types):
                body = (body,)
            for chunk in body:
                if not chunk:
                    continue
                if not isinstance(chunk, bytes):
                    chunk = chunk.encode('utf8')
                # Each chunk is framed as: <hex length>\r\n<data>\r\n
                len_str = hex(len(chunk))[2:]
                self.send(len_str.encode('utf-8'))
                self.send(b'\r\n')
                self.send(chunk)
                self.send(b'\r\n')
        # After the if clause, to always have a closed body
        self.send(b'0\r\n\r\n')
class HTTPSConnection(HTTPConnection):
    """HTTPConnection wrapped in TLS, without certificate verification.

    The verifying variant is VerifiedHTTPSConnection below; when the `ssl`
    module is available this class is re-exported as UnverifiedHTTPSConnection.
    """
    default_port = port_by_scheme['https']
    ssl_version = None
    def __init__(self, host, port=None, key_file=None, cert_file=None,
                 key_password=None, strict=None,
                 timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
                 ssl_context=None, server_hostname=None, **kw):
        HTTPConnection.__init__(self, host, port, strict=strict,
                                timeout=timeout, **kw)
        self.key_file = key_file
        self.cert_file = cert_file
        self.key_password = key_password
        self.ssl_context = ssl_context
        self.server_hostname = server_hostname
        # Required property for Google AppEngine 1.9.0 which otherwise causes
        # HTTPS requests to go out as HTTP. (See Issue #356)
        self._protocol = 'https'
    def connect(self):
        conn = self._new_conn()
        self._prepare_conn(conn)
        # Wrap socket using verification with the root certs in
        # trusted_root_certs
        default_ssl_context = False
        if self.ssl_context is None:
            default_ssl_context = True
            self.ssl_context = create_urllib3_context(
                ssl_version=resolve_ssl_version(self.ssl_version),
                cert_reqs=resolve_cert_reqs(self.cert_reqs),
            )
        # Try to load OS default certs if none are given.
        # Works well on Windows (requires Python3.4+)
        context = self.ssl_context
        if (not self.ca_certs and not self.ca_cert_dir and default_ssl_context
                and hasattr(context, 'load_default_certs')):
            context.load_default_certs()
        self.sock = ssl_wrap_socket(
            sock=conn,
            keyfile=self.key_file,
            certfile=self.cert_file,
            key_password=self.key_password,
            ssl_context=self.ssl_context,
            server_hostname=self.server_hostname
        )
class VerifiedHTTPSConnection(HTTPSConnection):
    """
    Based on httplib.HTTPSConnection but wraps the socket with
    SSL certification.
    """
    cert_reqs = None
    ca_certs = None
    ca_cert_dir = None
    ssl_version = None
    assert_fingerprint = None
    def set_cert(self, key_file=None, cert_file=None,
                 cert_reqs=None, key_password=None, ca_certs=None,
                 assert_hostname=None, assert_fingerprint=None,
                 ca_cert_dir=None):
        """
        This method should only be called once, before the connection is used.
        """
        # If cert_reqs is not provided we'll assume CERT_REQUIRED unless we also
        # have an SSLContext object in which case we'll use its verify_mode.
        if cert_reqs is None:
            if self.ssl_context is not None:
                cert_reqs = self.ssl_context.verify_mode
            else:
                cert_reqs = resolve_cert_reqs(None)
        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.key_password = key_password
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint
        # Expand "~" so CA paths given relative to the user's home work.
        self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
        self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)
    def connect(self):
        # Add certificate verification
        conn = self._new_conn()
        hostname = self.host
        # Google App Engine's httplib does not define _tunnel_host
        if getattr(self, '_tunnel_host', None):
            self.sock = conn
            # Calls self._set_hostport(), so self.host is
            # self._tunnel_host below.
            self._tunnel()
            # Mark this connection as not reusable
            self.auto_open = 0
            # Override the host with the one we're requesting data from.
            hostname = self._tunnel_host
        server_hostname = hostname
        if self.server_hostname is not None:
            server_hostname = self.server_hostname
        is_time_off = datetime.date.today() < RECENT_DATE
        if is_time_off:
            warnings.warn((
                'System time is way off (before {0}). This will probably '
                'lead to SSL verification errors').format(RECENT_DATE),
                SystemTimeWarning
            )
        # Wrap socket using verification with the root certs in
        # trusted_root_certs
        default_ssl_context = False
        if self.ssl_context is None:
            default_ssl_context = True
            self.ssl_context = create_urllib3_context(
                ssl_version=resolve_ssl_version(self.ssl_version),
                cert_reqs=resolve_cert_reqs(self.cert_reqs),
            )
        context = self.ssl_context
        context.verify_mode = resolve_cert_reqs(self.cert_reqs)
        # Try to load OS default certs if none are given.
        # Works well on Windows (requires Python3.4+)
        if (not self.ca_certs and not self.ca_cert_dir and default_ssl_context
                and hasattr(context, 'load_default_certs')):
            context.load_default_certs()
        self.sock = ssl_wrap_socket(
            sock=conn,
            keyfile=self.key_file,
            certfile=self.cert_file,
            key_password=self.key_password,
            ca_certs=self.ca_certs,
            ca_cert_dir=self.ca_cert_dir,
            server_hostname=server_hostname,
            ssl_context=context)
        if self.assert_fingerprint:
            # Pin by fingerprint instead of (or in addition to) CA validation.
            assert_fingerprint(self.sock.getpeercert(binary_form=True),
                               self.assert_fingerprint)
        elif context.verify_mode != ssl.CERT_NONE \
                and not getattr(context, 'check_hostname', False) \
                and self.assert_hostname is not False:
            # While urllib3 attempts to always turn off hostname matching from
            # the TLS library, this cannot always be done. So we check whether
            # the TLS Library still thinks it's matching hostnames.
            cert = self.sock.getpeercert()
            if not cert.get('subjectAltName', ()):
                warnings.warn((
                    'Certificate for {0} has no `subjectAltName`, falling back to check for a '
                    '`commonName` for now. This feature is being removed by major browsers and '
                    'deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 '
                    'for details.)'.format(hostname)),
                    SubjectAltNameWarning
                )
            _match_hostname(cert, self.assert_hostname or server_hostname)
        self.is_verified = (
            context.verify_mode == ssl.CERT_REQUIRED or
            self.assert_fingerprint is not None
        )
def _match_hostname(cert, asserted_hostname):
    """Check *cert* against *asserted_hostname*; on mismatch, log the failure,
    attach the peer certificate to the exception, and re-raise."""
    try:
        match_hostname(cert, asserted_hostname)
    except CertificateError as cert_err:
        log.error(
            'Certificate did not match expected hostname: %s. '
            'Certificate: %s', asserted_hostname, cert
        )
        # Add cert to exception and reraise so client code can inspect
        # the cert when catching the exception, if they want to
        cert_err._peer_cert = cert
        raise
# Export the verifying connection class as HTTPSConnection when the ssl
# module is available; otherwise export a dummy that marks the import failure.
if ssl:
    # Make a copy for testing.
    UnverifiedHTTPSConnection = HTTPSConnection
    HTTPSConnection = VerifiedHTTPSConnection
else:
    HTTPSConnection = DummyConnection
| 36.129496 | 99 | 0.634209 |
e8867c469b6f2bda131b5b68c40b498378ba6277 | 165 | py | Python | tutorial/snippets/urls.py | itaweb/DRF_Doc_Project | 3806b94123f36e36f84de9f73bbfd8500d0814e4 | [
"MIT"
] | null | null | null | tutorial/snippets/urls.py | itaweb/DRF_Doc_Project | 3806b94123f36e36f84de9f73bbfd8500d0814e4 | [
"MIT"
] | null | null | null | tutorial/snippets/urls.py | itaweb/DRF_Doc_Project | 3806b94123f36e36f84de9f73bbfd8500d0814e4 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
# URL routes for the snippets app.
urlpatterns = [
    # Snippet collection endpoint (handled by views.snippet_list).
    path('snippets/', views.snippet_list),
    # Single-snippet endpoint, addressed by integer primary key.
    path('snippets/<int:pk>/', views.snippet_detail),
]
| 20.625 | 53 | 0.69697 |
f0c8b53e162ffc6b66282adb777dbfe91a203804 | 1,008 | py | Python | com/LimePencil/Q25097/Controlled_Inflation.py | LimePencil/baekjoonProblems | 61eeeeb875585d165d9e39ecdb3d905b4ba6aa87 | [
"MIT"
] | null | null | null | com/LimePencil/Q25097/Controlled_Inflation.py | LimePencil/baekjoonProblems | 61eeeeb875585d165d9e39ecdb3d905b4ba6aa87 | [
"MIT"
] | null | null | null | com/LimePencil/Q25097/Controlled_Inflation.py | LimePencil/baekjoonProblems | 61eeeeb875585d165d9e39ecdb3d905b4ba6aa87 | [
"MIT"
] | null | null | null | import sys
input = sys.stdin.readline  # fast stdin reader (intentionally shadows the builtin)
# Controlled-Inflation style DP: each customer contributes a [lo, hi] span;
# we track the cheapest total cost ending at the current span's low or high
# endpoint after sweeping the whole span.
for case_no in range(1, int(input()) + 1):
    n_customers, _ = map(int, input().split())  # second value is unused here
    spans = []
    for _ in range(n_customers):
        vals = sorted(map(int, input().split()))
        lo, hi = vals[0], vals[-1]
        spans.append((lo, hi, hi - lo))
    # First customer: starting from 0, sweeping up ends at the high endpoint
    # for a cost equal to that endpoint. The "end at low" state is blocked out
    # on the first transition below.
    cost_end_hi = spans[0][1]
    cost_end_lo = spans[0][1]
    for i in range(1, n_customers):
        lo, hi, width = spans[i]
        prev_lo, prev_hi = spans[i - 1][0], spans[i - 1][1]
        # Arriving at `hi` and sweeping down ends at `lo`; arriving at `lo`
        # and sweeping up ends at `hi`.
        via_hi_to_lo = abs(hi - prev_hi) + width + cost_end_hi
        via_hi_to_hi = abs(lo - prev_hi) + width + cost_end_hi
        if i == 1:
            via_lo_to_lo = float('inf')
            via_lo_to_hi = float('inf')
        else:
            via_lo_to_lo = abs(hi - prev_lo) + width + cost_end_lo
            via_lo_to_hi = abs(lo - prev_lo) + width + cost_end_lo
        cost_end_lo = min(via_hi_to_lo, via_lo_to_lo)
        cost_end_hi = min(via_hi_to_hi, via_lo_to_hi)
    print("Case #{}: {}".format(case_no, min(cost_end_lo, cost_end_hi)))
0dc66ab298917ff532da131b3936a41fa48ac3f3 | 1,619 | py | Python | modules/dbnd/src/targets/config.py | turbaszek/dbnd | 6efbf3e7ecd175645e8e58d0d015d32fe9e95ea0 | [
"Apache-2.0"
] | null | null | null | modules/dbnd/src/targets/config.py | turbaszek/dbnd | 6efbf3e7ecd175645e8e58d0d015d32fe9e95ea0 | [
"Apache-2.0"
] | null | null | null | modules/dbnd/src/targets/config.py | turbaszek/dbnd | 6efbf3e7ecd175645e8e58d0d015d32fe9e95ea0 | [
"Apache-2.0"
] | null | null | null | import abc
import logging
import os.path
import random
import tempfile
from dbnd._core.current import try_get_databand_context, try_get_databand_run
logger = logging.getLogger(__name__)
_CONFIG_PARSER = None
_DEFAULT_VALUE_PREVIEW_MAX_LEN = 10000
class TargetConfigProvider(object):
    """Abstract interface for supplying target-related configuration values.

    A concrete provider is installed globally via :func:`set_config_provider`
    and consumed by :func:`get_config_section_values`.
    """
    @abc.abstractmethod
    def get_config_section_values(self):
        # Return all values of a configuration section.
        pass
    @abc.abstractmethod
    def get_config_value(self, section, value):
        # Return a single value from the given configuration section.
        pass
def set_config_provider(config_parser):
    # Install the module-level TargetConfigProvider used by config lookups.
    global _CONFIG_PARSER
    _CONFIG_PARSER = config_parser
def get_config_section_values(section):
    """Return all config values for *section*, or {} when no provider is set."""
    provider = _CONFIG_PARSER
    return provider.get_config_section_values(section) if provider else {}
def get_local_tempfile(*path):
    """Build a unique local temp-file path and create its parent directory.

    Uses the active databand run's local root when available, otherwise the
    system temp dir.  Directory creation is best-effort: failures are logged
    and the path is returned anyway.
    """
    run = try_get_databand_run()
    if run:
        tempdir = run.get_current_dbnd_local_root().partition("tmp").path
    else:
        tempdir = tempfile.gettempdir()

    # randrange requires integer bounds; the previous 1e10 (a float) is
    # deprecated and rejected on newer Python versions.
    path = os.path.join(tempdir, "databand-tmp-%09d" % random.randrange(0, 10 ** 10), *path)
    base_dir = os.path.dirname(path)
    try:
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)
    except Exception as ex:
        logger.info("Failed to create temp dir %s: %s", base_dir, ex)
    return path
def is_in_memory_cache_target_value():
    """True if the active databand context enables in-memory target caching."""
    context = try_get_databand_context()
    if not context:
        return False
    return context.settings.features.in_memory_cache_target_value
def get_value_preview_max_len():
    """Max value-preview length from the active context, else the module default."""
    context = try_get_databand_context()
    return (
        context.settings.core.value_preview_max_len
        if context
        else _DEFAULT_VALUE_PREVIEW_MAX_LEN
    )
| 23.808824 | 88 | 0.726992 |
0b387469ffde1d2bbe5af1877d830ec430b51694 | 614 | py | Python | tests/chart_tests/test_nginx_networkpolicy.py | astronomerio/astronomer | 939a71f097f3ca1491273a3dce40bdbe04a09a4a | [
"Apache-2.0"
] | 81 | 2018-01-15T21:48:39.000Z | 2018-11-15T07:35:11.000Z | tests/chart_tests/test_nginx_networkpolicy.py | astronomerio/astronomer | 939a71f097f3ca1491273a3dce40bdbe04a09a4a | [
"Apache-2.0"
] | 127 | 2018-01-15T21:13:34.000Z | 2018-11-13T17:19:58.000Z | tests/chart_tests/test_nginx_networkpolicy.py | astronomerio/astronomer | 939a71f097f3ca1491273a3dce40bdbe04a09a4a | [
"Apache-2.0"
] | 15 | 2018-01-19T18:39:57.000Z | 2018-10-26T06:00:12.000Z | from tests.chart_tests.helm_template_generator import render_chart
class TestNginxNetworkPolicy:
    """Sanity checks for the nginx chart's NetworkPolicy manifests."""

    def test_nginx_networkpolicy_basics(self):
        templates = [
            "charts/nginx/templates/nginx-metrics-networkpolicy.yaml",
            "charts/nginx/templates/nginx-networkpolicy.yaml",
        ]
        rendered = render_chart(show_only=templates)
        assert len(rendered) == 2
        for manifest in rendered:
            assert manifest["kind"] == "NetworkPolicy"
            assert manifest["apiVersion"] == "networking.k8s.io/v1"
            assert manifest["spec"]["podSelector"]["matchLabels"]["tier"] == "nginx"
| 36.117647 | 79 | 0.609121 |
1bec40ac29c29bb51765a501e8b3f4f784a17add | 733 | py | Python | metadata_service/api/utils.py | ferras/metaflow-service | c5a45e915aca58f346e91de576240854c1742af6 | [
"Apache-2.0"
] | null | null | null | metadata_service/api/utils.py | ferras/metaflow-service | c5a45e915aca58f346e91de576240854c1742af6 | [
"Apache-2.0"
] | null | null | null | metadata_service/api/utils.py | ferras/metaflow-service | c5a45e915aca58f346e91de576240854c1742af6 | [
"Apache-2.0"
] | null | null | null | import json
import sys
import traceback
async def read_body(request_content):
    """Read a request content stream to EOF and parse the body as JSON.

    *request_content* must expose ``at_eof()`` and ``async read(n)``
    (e.g. an aiohttp StreamReader).
    """
    byte_array = bytearray()
    while not request_content.at_eof():
        # Read in reasonably sized chunks; the previous 4-byte reads caused
        # one coroutine round-trip per 4 bytes of payload.
        data = await request_content.read(4096)
        byte_array.extend(data)
    return json.loads(byte_array.decode("utf-8"))
def get_traceback_str():
    """Format the currently handled exception as a traceback string.

    Unlike ``traceback.format_exc`` this also includes the frames that led
    up to the current call, not only those below the ``try`` block.
    """
    exc_type, exc_value, exc_tb = sys.exc_info()
    frames = traceback.extract_stack()[:-1] + traceback.extract_tb(exc_tb)
    rendered_frames = "".join(traceback.format_list(frames))
    rendered_exc = "".join(traceback.format_exception_only(exc_type, exc_value))
    return "\n".join(
        ["Traceback (most recent call last):", rendered_frames, rendered_exc]
    )
| 24.433333 | 61 | 0.631651 |
e147819a12137b1a0ec6ff733e57609c4c02619f | 560 | py | Python | gui/compile_gui.py | marty0678/BT-Auto-Patcher | 4930407d0fefa35d78e357234f70e93bd2ed507d | [
"MIT"
] | 1 | 2020-09-10T05:09:49.000Z | 2020-09-10T05:09:49.000Z | gui/compile_gui.py | marty0678/BT-Auto-Patcher | 4930407d0fefa35d78e357234f70e93bd2ed507d | [
"MIT"
] | null | null | null | gui/compile_gui.py | marty0678/BT-Auto-Patcher | 4930407d0fefa35d78e357234f70e93bd2ed507d | [
"MIT"
] | null | null | null | import subprocess
# Regenerate the PySide2 GUI modules from the Qt Designer sources.
ui_file = "gui\\mainwindow.ui"
ui_python = "gui\\mainwindow.py"
# pyside2-uic writes the generated module to stdout; shell=True is required
# so the ">" redirection is interpreted by the shell.
subprocess.call(['pyside2-uic', ui_file, ">", ui_python], shell=True)
resource_path = "gui\\resources.qrc"
resource_python = "gui\\resources_rc.py"
subprocess.call(["pyside2-rcc", "-o", resource_python, resource_path], shell=True)
# Update import for repo directory
with open(ui_python, 'rt') as read_file:
    read = read_file.read()
    new = read.replace("resources_rc", "gui.resources_rc")
with open(ui_python, 'wt') as write_file:
    write_file.writelines(new)
| 32.941176 | 82 | 0.719643 |
aaed479bddab5a079447c5939f55c6ad3e8d1a85 | 2,784 | py | Python | lib/python2.7/site-packages/pyami/fileutil.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | [
"MIT"
] | null | null | null | lib/python2.7/site-packages/pyami/fileutil.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | [
"MIT"
] | null | null | null | lib/python2.7/site-packages/pyami/fileutil.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | [
"MIT"
] | 1 | 2019-09-05T20:58:37.000Z | 2019-09-05T20:58:37.000Z | #!/usr/bin/env python
import inspect
import os
import sys
import errno
def getMyFilename(up=1):
    '''
    Return the absolute filename of a calling frame.

    up=1 (default) is the direct caller of this function, up=2 is the
    caller's caller, and so on.
    '''
    caller_record = inspect.stack()[up]
    # second item of the frame record tuple is the filename
    return os.path.abspath(caller_record[1])
def getMyDir(up=1):
    '''
    Like getMyFilename, but return the directory containing the calling file.
    '''
    return os.path.dirname(getMyFilename(up=up + 1))
def getMyLineno(up=1):
    '''
    Like getMyFilename, but return the line number in the calling file.
    '''
    record = inspect.stack()[up]
    # third item of the frame record tuple is the line number
    return record[2]
# Here is a replacement for os.mkdirs that won't complain if dir
# already exists (from Python Cookbook, Recipe 4.17)
def mkdirs(newdir):
    # NOTE(review): Python 2 only — the `02` octal literal and the
    # `except OSError, err` clause are syntax errors on Python 3.
    # Relax the umask so created directories come out group-writable.
    originalumask = os.umask(02)
    try:
        os.makedirs(newdir)
    except OSError, err:
        os.umask(originalumask)
        # Re-raise unless the failure was simply "directory already exists".
        if err.errno != errno.EEXIST or not os.path.isdir(newdir) and os.path.splitdrive(newdir)[1]:
            raise
    os.umask(originalumask)
def get_config_dirs(module=None):
    '''
    Determine a list of directories where config files may be located.
    One of the directories will be the installed module directory, but
    this only works automatically if this function is called from that
    module.  If you want to force a certain module, pass it to this
    function in the optional argument.
    '''
    # system config location is /etc/myami on unix like systems or
    # under PROGRAMFILES on windows
    if sys.platform == 'win32':
        system_dir = os.path.join(os.environ['PROGRAMFILES'], 'myami')
    else:
        system_dir = '/etc/myami'

    # installed module directory, specified by argument, or auto detected
    if module is None:
        # not this function, but the caller of this function, so up=2
        installed_dir = getMyDir(up=2)
    else:
        installed_dir = os.path.dirname(os.path.abspath(module.__file__))

    # user home dir
    user_dir = os.path.expanduser('~')

    confdirs = [system_dir, installed_dir, user_dir]

    # module config environment variable, e.g. PYAMI_CFG_PATH.
    # `in` works on both Python 2 and 3; dict.has_key was removed in Python 3.
    installed_dir_basename = os.path.basename(installed_dir)
    config_environ_name = '%s_CFG_PATH' % (installed_dir_basename.upper())
    if config_environ_name in os.environ:
        confdirs.append(os.environ[config_environ_name])  # allow an extra config file location
    return confdirs
def open_if_not_exists(filename):
    '''Creates a new file for read/write access. Raises exception if file exists'''
    # O_EXCL makes creation atomic: the call fails instead of truncating
    # an existing file.
    descriptor = os.open(filename, os.O_CREAT | os.O_EXCL | os.O_RDWR)
    return os.fdopen(descriptor, 'r+')
if __name__ == '__main__':
    # Smoke test: print this module's own absolute path (Python 2 print statement).
    print getMyFilename()
| 32 | 108 | 0.748204 |
3b192a4d5631125e99d6c59b57820e5e5364793e | 14,105 | py | Python | python/tests/test_client.py | 18730298725/darabonba-openapi-util | d3f91d5a43bc6de732f4ea0a69d253460962c3eb | [
"Apache-2.0"
] | 1 | 2021-07-13T08:18:16.000Z | 2021-07-13T08:18:16.000Z | python/tests/test_client.py | 18730298725/darabonba-openapi-util | d3f91d5a43bc6de732f4ea0a69d253460962c3eb | [
"Apache-2.0"
] | null | null | null | python/tests/test_client.py | 18730298725/darabonba-openapi-util | d3f91d5a43bc6de732f4ea0a69d253460962c3eb | [
"Apache-2.0"
] | null | null | null | import unittest
import os
import binascii
from alibabacloud_openapi_util.client import Client, signature_method, get_canonical_query_string
from Tea.request import TeaRequest
from Tea.model import TeaModel
# Directory containing this test module; used to locate the test.txt fixture.
module_path = os.path.dirname(__file__)
class TestClient(unittest.TestCase):
    """Unit tests for the alibabacloud_openapi_util Client helpers.

    The expected signature/digest strings below are pinned fixtures: they
    were produced by a known-good implementation and must not change.
    """

    # Minimal TeaModel used as conversion input in the tests below.
    class TestConvertModel(TeaModel):
        def __init__(self):
            self.requestId = "test"
            self.dic = {}
            self.no_map = 1
            self.sub_model = None
            self.file = None

        def to_map(self):
            dic = {
                'requestId': self.requestId,
                'dic': self.dic,
                'no_map': self.no_map,
                'sub_model': self.sub_model,
                'file': self.file
            }
            return dic

    # Nested model used as a sub_model value.
    class TestConvertSubModel(TeaModel):
        def __init__(self):
            self.requestId = "subTest"
            self.id = 2

        def to_map(self):
            dic = {
                'requestId': self.requestId,
                'id': self.id
            }
            return dic

    # Destination model for Client.convert (supports from_map).
    class TestConvertMapModel(TeaModel):
        def __init__(self):
            self.requestId = ""
            self.extendId = 0
            self.dic = {}
            self.sub_model = None

        def to_map(self):
            dic = {
                'requestId': self.requestId,
                'dic': self.dic,
                'extendId': self.extendId,
                'sub_model': self.sub_model,
            }
            return dic

        def from_map(self, dic):
            self.requestId = dic.get("requestId") or ""
            self.extendId = dic.get("extendId") or 0
            self.dic = dic.get("dic")
            self.sub_model = dic.get("sub_model")

    def test_get_rpc_signature(self):
        query = {
            'query': 'test',
            'body': 'test'
        }
        result = Client.get_rpcsignature(query, 'GET', 'secret')
        self.assertEqual("XlUyV4sXjOuX5FnjUz9IF9tm5rU=", result)

    def test_get_timestamp(self):
        self.assertIsNotNone(Client.get_timestamp())
        self.assertIn("T", Client.get_timestamp())
        self.assertIn("Z", Client.get_timestamp())

    def test_query(self):
        # Client.query flattens nested dicts/lists/models into dotted string
        # keys with stringified values; None values are dropped.
        result = Client.query(None)
        self.assertEqual(0, len(result))
        dic = {
            'str_test': 'test',
            'none_test': None,
            'int_test': 1
        }
        result = Client.query(dic)
        self.assertEqual('test', result.get('str_test'))
        self.assertIsNone(result.get("none_test"))
        self.assertEqual("1", result.get("int_test"))
        with open(os.path.join(module_path, "test.txt")) as f:
            fl = [1, None]
            sub_dict_fl = {
                'none_test': None,
                'int_test': 2,
                'str_test': 'test',
                'file_test': f
            }
            fl.append(sub_dict_fl)
            sl = [1, None]
            fl.append(sl)
            dic['list'] = fl
            result = Client.query(dic)
            self.assertEqual("1", result.get("list.1"))
            self.assertIsNone(result.get("list.2"))
            self.assertEqual("1", result.get("int_test"))
            self.assertEqual("2", result.get("list.3.int_test"))
            self.assertEqual(None, result.get("list.3.file_test"))
            self.assertIsNone(result.get("list.3.none_test"))
            self.assertEqual("test", result.get("list.3.str_test"))
            self.assertEqual("1", result.get("list.4.1"))
            sub_map_fd = {
                'none_test': None,
                'int_test': 2,
                'str_test': 'test'
            }
            fd = {
                'first_map_map': sub_map_fd,
                'first_map_list': sl,
                'none_test': None,
                'int_test': 2,
                'str_test': 'test',
                'model_test': self.TestConvertModel()
            }
            dic['map'] = fd
            result = Client.query(dic)
            self.assertEqual("1", result.get("map.first_map_list.1"))
            self.assertIsNone(result.get("map.none_test"))
            self.assertEqual("2", result.get("map.int_test"))
            self.assertEqual("test", result.get("map.str_test"))
            self.assertEqual('1', result.get("map.model_test.no_map"))
            self.assertIsNone(result.get("map.first_map_map.none_test"))
            self.assertEqual("2", result.get("map.first_map_map.int_test"))
            self.assertEqual("test", result.get("map.first_map_map.str_test"))

    def test_get_string_to_sign(self):
        request = TeaRequest()
        str_to_sign = Client.get_string_to_sign(request)
        self.assertEqual('GET\n\n\n\n\n', str_to_sign)
        request = TeaRequest()
        request.method = "POST"
        request.query = {
            'test': 'tests'
        }
        str_to_sign = Client.get_string_to_sign(request)
        self.assertEqual('POST\n\n\n\n\n?test=tests', str_to_sign)
        request = TeaRequest()
        request.headers = {
            'content-md5': 'md5',
        }
        str_to_sign = Client.get_string_to_sign(request)
        self.assertEqual('GET\n\nmd5\n\n\n', str_to_sign)
        request = TeaRequest()
        request.pathname = "Pathname"
        request.query = {
            'ccp': 'ok',
            'test': 'tests',
            'test1': ''
        }
        request.headers = {
            'x-acs-meta': 'user',
            "accept": "application/json",
            'content-md5': 'md5',
            'content-type': 'application/json',
            'date': 'date'
        }
        str_to_sign = Client.get_string_to_sign(request)
        s = 'GET\napplication/json\nmd5\napplication/json\ndate\nx-acs-meta:user\nPathname?ccp=ok&test=tests&test1'
        self.assertEqual(s, str_to_sign)

    def test_get_roa_signature(self):
        request = TeaRequest()
        str_to_sign = Client.get_string_to_sign(request)
        signature = Client.get_roasignature(str_to_sign, 'secret')
        self.assertEqual('GET\n\n\n\n\n', str_to_sign)
        self.assertEqual('XGXDWA78AEvx/wmfxKoVCq/afWw=', signature)

    def test_to_form(self):
        filter = {
            'client': 'test',
            'client1': None,
            'strs': ['str1', 'str2'],
            'tag': {
                'key': 'value'
            }
        }
        result = Client.to_form(filter)
        self.assertEqual('client=test&strs.1=str1&strs.2=str2&tag.key=value', result)

    def test_convert(self):
        filename = module_path + "/test.txt"
        with open(filename) as f:
            model = TestClient.TestConvertModel()
            model.dic["key"] = "value"
            model.dic["testKey"] = "testValue"
            sub_model = TestClient.TestConvertSubModel()
            model.sub_model = sub_model
            model.file = f
            map_model = TestClient.TestConvertMapModel()
            Client.convert(model, map_model)
            self.assertIsNotNone(map_model)
            self.assertEqual("test", map_model.requestId)
            self.assertEqual(0, map_model.extendId)

    def test_array_to_string_with_specified_style(self):
        array = ['ok', 'test', 2, 3]
        prefix = 'instance'
        t1 = Client.array_to_string_with_specified_style(array, prefix, 'repeatList')
        t2 = Client.array_to_string_with_specified_style(array, prefix, 'json')
        t3 = Client.array_to_string_with_specified_style(array, prefix, 'simple')
        t4 = Client.array_to_string_with_specified_style(array, prefix, 'spaceDelimited')
        t5 = Client.array_to_string_with_specified_style(array, prefix, 'pipeDelimited')
        t6 = Client.array_to_string_with_specified_style(array, prefix, 'piDelimited')
        t7 = Client.array_to_string_with_specified_style(None, prefix, 'pipeDelimited')
        self.assertEqual('instance.1=ok&&instance.2=test&&instance.3=2&&instance.4=3', t1)
        self.assertEqual('["ok", "test", 2, 3]', t2)
        self.assertEqual('ok,test,2,3', t3)
        self.assertEqual('ok test 2 3', t4)
        self.assertEqual('ok|test|2|3', t5)
        # Unknown style or None input yields the empty string.
        self.assertEqual('', t6)
        self.assertEqual('', t7)

    def test_parse_to_map(self):
        self.assertIsNone(Client.parse_to_map(None))
        filename = module_path + "/test.txt"
        with open(filename) as f:
            res = Client.parse_to_map({'file': f})
            self.assertIsNone(res)
        res = Client.parse_to_map({"key": "value"})
        self.assertEqual('value', res['key'])
        model = self.TestConvertSubModel()
        res = Client.parse_to_map(model)
        self.assertEqual('subTest', res['requestId'])
        self.assertEqual(2, res['id'])
        res = Client.parse_to_map({
            "key": "value",
            'model': model
        })
        self.assertEqual('value', res['key'])
        self.assertEqual('subTest', res['model']['requestId'])
        self.assertEqual(2, res['model']['id'])
        res = Client.parse_to_map({
            'model_list': [model, model, 'model'],
            'model_dict': {"model1": model, "model2": model}
        })
        self.assertEqual('subTest', res['model_list'][0]['requestId'])
        self.assertEqual(2, res['model_list'][1]['id'])
        self.assertEqual('model', res['model_list'][2])
        self.assertEqual('subTest', res['model_dict']['model1']['requestId'])
        self.assertEqual(2, res['model_dict']['model2']['id'])

    def test_get_endpoint(self):
        self.assertEqual("test", Client.get_endpoint("test", False, ""))
        self.assertEqual("test-internal.endpoint", Client.get_endpoint("test.endpoint", False, "internal"))
        self.assertEqual("oss-accelerate.aliyuncs.com", Client.get_endpoint("test", True, "accelerate"))

    def test_hex_encode(self):
        # ACS3 - HMAC - SHA256
        res = Client.hex_encode(
            Client.hash(b'test', 'ACS3-HMAC-SHA256')
        )
        self.assertEqual(
            '9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08',
            res
        )
        # ACS3 - RSA - SHA256
        res = Client.hex_encode(
            Client.hash(b'test', 'ACS3-RSA-SHA256')
        )
        self.assertEqual(
            '9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08',
            res
        )
        # ACS3 - HMAC - SM3
        res = Client.hex_encode(
            Client.hash(b'test', 'ACS3-HMAC-SM3')
        )
        self.assertEqual(
            '55e12e91650d2fec56ec74e1d3e4ddbfce2ef3a65890c2a19ecf88a307e76a23',
            res
        )
        # Unsupported algorithm names hash to nothing.
        res = Client.hex_encode(
            Client.hash(b'test', 'ACS3-SHA256')
        )
        self.assertEqual(
            None,
            res
        )

    def test_get_authorization(self):
        # request method is 'GET'
        request = TeaRequest()
        request.query = {
            'test': 'ok',
            'empty': ''
        }
        request.headers = {
            'x-acs-test': 'http',
            'x-acs-TEST': 'https'
        }
        res = Client.get_authorization(
            request,
            'ACS3-HMAC-SHA256',
            '55e12e91650d2fec56ec74e1d3e4ddbfce2ef3a65890c2a19ecf88a307e76a23',
            'acesskey',
            'secret'
        )
        self.assertEqual(
            'ACS3-HMAC-SHA256 Credential=acesskey,SignedHea'
            'ders=x-acs-test,Signature=d16b30a7699ae9e43875b13195b2f81bcc3ed10c14a9b5eb780e51619aa50be1',
            res
        )

    def test_get_encode_path(self):
        res = Client.get_encode_path('/path/ test')
        self.assertEqual('/path/%20test', res)

    def test_signature_method(self):
        # Test-only throwaway RSA key; never use outside these tests.
        pri_key = '-----BEGIN RSA PRIVATE KEY-----\nMIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAo' \
                  'GBAKzSQmrnH0YnezZ98NK50WjMuci0hgGVcSthIZOTWMIy' \
                  'SznY9Jj1hlvek7W0uYagtFHz03BHQnHAb5Xs0DZm0Sj9+5' \
                  'r79GggwEzTJDYEsLyFwXM3ZOIxqxL4sRg94MHsa81M9NXG' \
                  'HMyMvvffQTn1OBVLTVz5jgJ48foMn7j7r9kRAgMBAAECgY' \
                  'EAnZppw3/ef2XF8Z3Mnv+iP0ZkLuqiQpN8TykXK7P1/7NJ' \
                  '8wktlshhrSo/3jdf8axghVQsgHob2Ay8Nidugg4lsxILAU' \
                  'BHvfQsQp1MAWvxslsVj+ddw01MQnt8kHmC/qhok+YuNqqA' \
                  'GBcoD6cthRUjEri6hfs599EfPs2DcWW06qECQQDfNqUUhc' \
                  'DQ/SQHRhfY9UIlaSEs2CVagDrSYFG1wyG+PXDSMes9ZRHs' \
                  'vVVBmNGmtUTg/jioTU3yuPsis5s9ppbVAkEAxjTAQxv5lBB' \
                  'm/ikMTzPShljxDZnXh6lKWG9gR1p5fKoQTzLyyhHzkBSFe' \
                  '848sMm68HWCX2wgIpQLHj0GccYPTQJAduMKBeY/jpBlkiI' \
                  '5LWtj8b0O2G2/Z3aI3ehDXQYzgLoEz0+bNbYRWAB32lpkv' \
                  '+AocZW1455Y+ACichcrhiimiQJAW/6L5hoL4u8h/oFq1zAE' \
                  'XJrXdyqaYLrwaM947mVN0dDVNQ0+pw9h7tO3iNkWTi+zdnv' \
                  '0APociDASYPyOCyyUWQJACMNRM1/rboXuKfMmVjmmz0XhaD' \
                  'UC/JkqSwIiaZi+47M21e9BTp1218NA6VaPgJJHeJr4sNOnY' \
                  'sx+1cwXO5cuZg==\n-----END RSA PRIVATE KEY-----'
        res = signature_method("secret", "source", "ACS3-HMAC-SM3")
        self.assertEqual(b'b9ff646822f41ef647c1416fa2b8408923828abc0464af6706e18db3e8553da8', binascii.b2a_hex(res))
        res = signature_method(pri_key, "source", "ACS3-RSA-SHA256")
        self.assertEqual(b'a00b88ae04f651a8ab645e724949ff435bbb2cf9a'
                         b'37aa54323024477f8031f4e13dc948484c5c5a81ba'
                         b'53a55eb0571dffccc1e953c93269d6da23ed319e0f'
                         b'1ef699bcc9823a646574628ae1b70ed569b5a07d13'
                         b'9dda28996b5b9231f5ba96141f0893deec2fbf54a0'
                         b'fa2c203b8ae74dd26f457ac29c873745a5b88273d2b3d12', binascii.b2a_hex(res))

    def test_get_canonical_query_string(self):
        self.assertEqual('test=%20~%2F%2A-%2B', get_canonical_query_string({'test': ' ~/*-+'}))
| 38.538251 | 117 | 0.563843 |
ea03390b18a8a7743bdf7fb4bc38fd715c5b2ac1 | 7,283 | py | Python | trankit/adapter_transformers/hf_argparser.py | jsteggink/trankit | 61ef593999bfa29751990d0d4bcf259daed05db4 | [
"Apache-2.0"
] | 613 | 2021-01-12T14:21:13.000Z | 2022-03-29T19:51:47.000Z | trankit/adapter_transformers/hf_argparser.py | jsteggink/trankit | 61ef593999bfa29751990d0d4bcf259daed05db4 | [
"Apache-2.0"
] | 38 | 2021-01-13T12:01:15.000Z | 2022-03-31T14:13:44.000Z | trankit/adapter_transformers/hf_argparser.py | jsteggink/trankit | 61ef593999bfa29751990d0d4bcf259daed05db4 | [
"Apache-2.0"
] | 77 | 2021-01-13T07:33:26.000Z | 2022-03-29T19:51:50.000Z | import dataclasses
import json
import sys
from argparse import ArgumentParser
from enum import Enum
from pathlib import Path
from typing import Any, Iterable, List, NewType, Tuple, Union
# Type aliases used for readability: a dataclass *instance* vs. a dataclass *type*.
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
class HfArgumentParser(ArgumentParser):
    """
    This subclass of `argparse.ArgumentParser` uses type hints on dataclasses
    to generate arguments.
    The class is designed to play well with the native argparse. In particular,
    you can add more (non-dataclass backed) arguments to the parser after initialization
    and you'll get the output back after parsing as an additional namespace.
    """

    # Dataclass types whose fields are mirrored as CLI arguments.
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        """
        Args:
            dataclass_types:
                Dataclass type, or list of dataclass types for which we will "fill" instances
                with the parsed args.
            kwargs:
                (Optional) Passed to `argparse.ArgumentParser()` in the regular way.
        """
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = dataclass_types
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)

    def _add_dataclass_arguments(self, dtype: DataClassType):
        # Translate each dataclass field into an add_argument() call.
        for field in dataclasses.fields(dtype):
            field_name = f"--{field.name}"
            kwargs = field.metadata.copy()
            # field.metadata is not used at all by Data Classes,
            # it is provided as a third-party extension mechanism.
            if isinstance(field.type, str):
                raise ImportError(
                    "This implementation is not compatible with Postponed Evaluation of Annotations (PEP 563),"
                    "which can be opted in from Python 3.7 with `from __future__ import annotations`."
                    "We will add compatibility when Python 3.9 is released."
                )
            # Unwrap Optional[X] (i.e. Union[X, NoneType]) to the bare type so
            # the branches below see int/float/str/List[...] directly.
            typestring = str(field.type)
            for prim_type in (int, float, str):
                for collection in (List,):
                    if typestring == f"typing.Union[{collection[prim_type]}, NoneType]":
                        field.type = collection[prim_type]
                if typestring == f"typing.Union[{prim_type.__name__}, NoneType]":
                    field.type = prim_type
            if isinstance(field.type, type) and issubclass(field.type, Enum):
                kwargs["choices"] = list(field.type)
                kwargs["type"] = field.type
                if field.default is not dataclasses.MISSING:
                    kwargs["default"] = field.default
            elif field.type is bool:
                # A True default becomes an opt-out "--no-<name>" flag;
                # otherwise a plain "--<name>" store_true flag.
                kwargs["action"] = "store_false" if field.default is True else "store_true"
                if field.default is True:
                    field_name = f"--no-{field.name}"
                    kwargs["dest"] = field.name
            elif hasattr(field.type, "__origin__") and issubclass(field.type.__origin__, List):
                kwargs["nargs"] = "+"
                kwargs["type"] = field.type.__args__[0]
                assert all(
                    x == kwargs["type"] for x in field.type.__args__
                ), "{} cannot be a List of mixed types".format(field.name)
                if field.default_factory is not dataclasses.MISSING:
                    kwargs["default"] = field.default_factory()
            else:
                kwargs["type"] = field.type
                if field.default is not dataclasses.MISSING:
                    kwargs["default"] = field.default
                else:
                    kwargs["required"] = True
            self.add_argument(field_name, **kwargs)

    def parse_args_into_dataclasses(
        self, args=None, return_remaining_strings=False, look_for_args_file=True
    ) -> Tuple[DataClass, ...]:
        """
        Parse command-line args into instances of the specified dataclass types.
        This relies on argparse's `ArgumentParser.parse_known_args`.
        See the doc at:
        docs.python.org/3.7/library/argparse.html#argparse.ArgumentParser.parse_args
        Args:
            args:
                List of strings to parse. The default is taken from sys.argv.
                (same as argparse.ArgumentParser)
            return_remaining_strings:
                If true, also return a list of remaining argument strings.
            look_for_args_file:
                If true, will look for a ".args" file with the same base name
                as the entry point script for this process, and will append its
                potential content to the command line args.
        Returns:
            Tuple consisting of:
                - the dataclass instances in the same order as they
                  were passed to the initializer.abspath
                - if applicable, an additional namespace for more
                  (non-dataclass backed) arguments added to the parser
                  after initialization.
                - The potential list of remaining argument strings.
                  (same as argparse.ArgumentParser.parse_known_args)
        """
        if look_for_args_file and len(sys.argv):
            args_file = Path(sys.argv[0]).with_suffix(".args")
            if args_file.exists():
                fargs = args_file.read_text().split()
                args = fargs + args if args is not None else fargs + sys.argv[1:]
                # in case of duplicate arguments the first one has precedence
                # so we append rather than prepend.
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            # Keep only the namespace entries belonging to this dataclass,
            # then remove them so they don't leak into the extra namespace.
            keys = {f.name for f in dataclasses.fields(dtype)}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")
            return (*outputs,)

    def parse_json_file(self, json_file: str) -> Tuple[DataClass, ...]:
        """
        Alternative helper method that does not use `argparse` at all,
        instead loading a json file and populating the dataclass types.
        """
        data = json.loads(Path(json_file).read_text())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype)}
            inputs = {k: v for k, v in data.items() if k in keys}
            obj = dtype(**inputs)
            outputs.append(obj)
        return (*outputs,)
| 45.805031 | 117 | 0.581354 |
5c226ce9cbc5503ecb03ba0ec1b45ece3162760e | 916 | py | Python | helper/misc.py | pasan1992/Human-Pose-Transfer | a7febc632d4fbf627ba05740d2048accb25575f2 | [
"MIT"
] | 64 | 2019-06-13T01:01:44.000Z | 2022-03-20T08:09:18.000Z | helper/misc.py | pasan1992/Human-Pose-Transfer | a7febc632d4fbf627ba05740d2048accb25575f2 | [
"MIT"
] | 10 | 2019-06-20T15:07:42.000Z | 2021-11-13T11:47:31.000Z | helper/misc.py | pasan1992/Human-Pose-Transfer | a7febc632d4fbf627ba05740d2048accb25575f2 | [
"MIT"
] | 17 | 2019-08-01T02:28:30.000Z | 2022-02-03T10:27:33.000Z | from torchvision.utils import make_grid
def custom_global_step_transform(custom_period):
    """
    Build a ``global_step_transform`` for ``ignite.contrib.handlers.BaseOutputHandler``.

    When events fire through a ``CustomPeriodicEvent`` the raw event counter is
    divided by the period; multiplying it back restores the true iteration or
    epoch number.

    :return: func:global_step_transform
    """
    def _transform(engine, event_name):
        step = engine.state.get_event_attrib_value(event_name)
        return step * custom_period
    return _transform
def make_2d_grid(tensors, padding=0, normalize=True, range=None, scale_each=False, pad_value=0):
    """Tile batches of images into a single 2-D grid.

    Each batch is first stacked vertically (nrow=1); the resulting per-batch
    columns are then placed side by side.
    """
    columns = []
    for img_batch in tensors:
        # merge image in a batch in `y` direction first.
        columns.append(make_grid(
            img_batch, padding=padding, nrow=1, normalize=normalize,
            range=range, scale_each=scale_each, pad_value=pad_value))
    # merge images in `x` direction.
    return make_grid(columns, padding=0, nrow=len(columns))
| 35.230769 | 100 | 0.741266 |
7e0612095a7f0875721c79d9bf3027829a4ad17a | 911 | py | Python | example_project/app/migrations/0001_initial.py | EugeneFadeev/django3-robokassa | caeb61c3a55d9e73529869ae39facba43b624241 | [
"MIT"
] | 10 | 2018-05-04T07:28:47.000Z | 2021-07-19T10:55:08.000Z | example_project/app/migrations/0001_initial.py | EugeneFadeev/django3-robokassa | caeb61c3a55d9e73529869ae39facba43b624241 | [
"MIT"
] | null | null | null | example_project/app/migrations/0001_initial.py | EugeneFadeev/django3-robokassa | caeb61c3a55d9e73529869ae39facba43b624241 | [
"MIT"
] | 6 | 2018-10-24T08:37:16.000Z | 2021-06-22T21:16:59.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-04-26 13:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial schema migration: creates the Order table for
    # the django-robokassa example app. Generated migrations should not be
    # hand-edited once applied.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('total', models.DecimalField(decimal_places=2, max_digits=15)),
                ('status', models.CharField(blank=True, max_length=255, null=True)),
                ('paid_sum', models.DecimalField(blank=True, decimal_places=2, max_digits=15, null=True)),
                ('extra_param', models.CharField(blank=True, max_length=255, null=True)),
            ],
        ),
    ]
| 32.535714 | 114 | 0.60483 |
ba1d5e50cc5c60d0006fba99661eb817d06ca1fe | 8,358 | py | Python | scripts/voice_change_with_second_stage.py | m95music/yukarin | 87e4e813e1b846720ef7a89162edf1c379700619 | [
"MIT"
] | 139 | 2018-02-24T21:33:47.000Z | 2022-03-19T03:59:05.000Z | scripts/voice_change_with_second_stage.py | m95music/yukarin | 87e4e813e1b846720ef7a89162edf1c379700619 | [
"MIT"
] | 73 | 2018-02-17T14:27:11.000Z | 2021-06-05T18:11:09.000Z | scripts/voice_change_with_second_stage.py | m95music/yukarin | 87e4e813e1b846720ef7a89162edf1c379700619 | [
"MIT"
] | 31 | 2018-03-05T18:08:18.000Z | 2022-03-28T05:23:16.000Z | import argparse
import glob
import multiprocessing
import re
from functools import partial
from pathlib import Path
import librosa
import matplotlib.pyplot as plt
import numpy
from become_yukarin import SuperResolution
from become_yukarin.config.sr_config import create_from_json as create_sr_config
from become_yukarin.data_struct import AcousticFeature as BYAcousticFeature
from yukarin import AcousticConverter
from yukarin.config import create_from_json as create_config
from yukarin.f0_converter import F0Converter
from yukarin.utility.json_utility import save_arguments
# CLI setup for running a trained yukarin voice changer together with the
# become-yukarin second-stage (super-resolution) model.
parser = argparse.ArgumentParser()
parser.add_argument('--voice_changer_model_dir', '-vcmd', type=Path)
parser.add_argument('--voice_changer_model_iteration', '-vcmi', type=int)
parser.add_argument('--voice_changer_config', '-vcc', type=Path)
parser.add_argument('--input_wave_scale', '-iws', type=float, default=1.0)
parser.add_argument('--out_sampling_rate', '-osr', type=int)
parser.add_argument('--filter_size', '-fs', type=int)
parser.add_argument('--threshold', '-t', type=float)
parser.add_argument('--f0_trans_model_dir', '-ftmd', type=Path)
parser.add_argument('--f0_trans_model_iteration', '-ftmi', type=int)
parser.add_argument('--f0_trans_config', '-ftc', type=Path)
parser.add_argument('--super_resolution_model', '-srm', type=Path)
parser.add_argument('--super_resolution_config', '-src', type=Path)
parser.add_argument('--input_statistics', '-is', type=Path)
parser.add_argument('--target_statistics', '-ts', type=Path)
parser.add_argument('--output_dir', '-o', type=Path, default='./output/')
parser.add_argument('--disable_dataset_test', '-ddt', action='store_false')
parser.add_argument('--dataset_input_wave_dir', '-diwd', type=Path)
parser.add_argument('--dataset_target_wave_dir', '-dtwd', type=Path)
parser.add_argument('--test_wave_dir', '-twd', type=Path)
parser.add_argument('--gpu', type=int)
arguments = parser.parse_args()
# Unpack parsed arguments into typed module-level names used below.
voice_changer_model_dir: Path = arguments.voice_changer_model_dir
voice_changer_model_iteration: int = arguments.voice_changer_model_iteration
voice_changer_config: Path = arguments.voice_changer_config
input_wave_scale: float = arguments.input_wave_scale
filter_size: int = arguments.filter_size
threshold: float = arguments.threshold
super_resolution_model: Path = arguments.super_resolution_model
super_resolution_config: Path = arguments.super_resolution_config
f0_trans_model_dir: Path = arguments.f0_trans_model_dir
f0_trans_model_iteration: int = arguments.f0_trans_model_iteration
f0_trans_config: Path = arguments.f0_trans_config
input_statistics: Path = arguments.input_statistics
target_statistics: Path = arguments.target_statistics
output_dir: Path = arguments.output_dir
disable_dataset_test: bool = arguments.disable_dataset_test
dataset_input_wave_dir: Path = arguments.dataset_input_wave_dir
dataset_target_wave_dir: Path = arguments.dataset_target_wave_dir
test_wave_dir: Path = arguments.test_wave_dir
gpu: int = arguments.gpu
# Output directory is named after the model dir(s) so different model
# combinations don't overwrite each other's results.
output_dir.mkdir(exist_ok=True)
output = output_dir / voice_changer_model_dir.name
if f0_trans_model_dir is not None:
    output = output.parent / (output.name + '+' + f0_trans_model_dir.name)
output.mkdir(exist_ok=True)
def _extract_number(f):
s = re.findall("\d+", str(f))
return int(s[-1]) if s else -1
def _get_predictor_model_path(model_dir: Path, iteration: int = None, prefix: str = 'predictor_'):
if iteration is None:
paths = model_dir.glob(prefix + '*.npz')
model_path = list(sorted(paths, key=_extract_number))[-1]
else:
fn = prefix + '{}.npz'.format(iteration)
model_path = model_dir / fn
return model_path
def process(p_in: Path, acoustic_converter: AcousticConverter, super_resolution: SuperResolution):
    """Convert one input wave to the target voice and save a figure and a wav.

    Reads module-level globals set from CLI arguments (input_wave_scale,
    threshold, filter_size, dataset_input_wave_dir, dataset_target_wave_dir,
    output). Exceptions are printed, not raised, so a pool worker never dies.
    """
    try:
        # Feature files (.npy/.npz) are mapped back to the matching source wave.
        if p_in.suffix in ['.npy', '.npz']:
            p_in = Path(glob.glob(str(dataset_input_wave_dir / p_in.stem) + '.*')[0])
        w_in = acoustic_converter.load_wave(p_in)
        w_in.wave *= input_wave_scale
        f_in = acoustic_converter.extract_acoustic_feature(w_in)
        # Only "effective" (non-silent, per threshold) frames are converted;
        # the silent frames are merged back afterwards.
        f_in_effective, effective = acoustic_converter.separate_effective(wave=w_in, feature=f_in, threshold=threshold)
        f_low = acoustic_converter.convert_loop(f_in_effective)
        f_low = acoustic_converter.combine_silent(effective=effective, feature=f_low)
        if filter_size is not None:
            f_low.f0 = AcousticConverter.filter_f0(f_low.f0, filter_size=filter_size)
        f_low = acoustic_converter.decode_spectrogram(f_low)
        s_high = super_resolution.convert_loop(f_low.sp.astype(numpy.float32))
        # target
        # If a ground-truth target wave exists, its log-spectrogram range is
        # used as a common color scale for all subplots.
        paths = glob.glob(str(dataset_target_wave_dir / p_in.stem) + '.*')
        has_true = len(paths) > 0
        if has_true:
            p_true = Path(paths[0])
            w_true = acoustic_converter.load_wave(p_true)
            f_true = acoustic_converter.extract_acoustic_feature(w_true)
            vmin, vmax = numpy.log(f_true.sp).min(), numpy.log(f_true.sp).max()
        else:
            vmin, vmax = None, None
        # save figure
        # Rows: input / converted / super-resolved / (optional) ground truth.
        fig = plt.figure(figsize=[36, 22])
        plt.subplot(4, 1, 1)
        plt.imshow(numpy.log(f_in.sp).T, aspect='auto', origin='reverse')
        plt.plot(f_in.f0, 'w')
        plt.colorbar()
        plt.clim(vmin=vmin, vmax=vmax)
        plt.subplot(4, 1, 2)
        plt.imshow(numpy.log(f_low.sp).T, aspect='auto', origin='reverse')
        plt.plot(f_low.f0, 'w')
        plt.colorbar()
        plt.clim(vmin=vmin, vmax=vmax)
        plt.subplot(4, 1, 3)
        plt.imshow(numpy.log(s_high).T, aspect='auto', origin='reverse')
        plt.colorbar()
        plt.clim(vmin=vmin, vmax=vmax)
        if has_true:
            plt.subplot(4, 1, 4)
            plt.imshow(numpy.log(f_true.sp).T, aspect='auto', origin='reverse')
            plt.plot(f_true.f0, 'w')
            plt.colorbar()
            plt.clim(vmin=vmin, vmax=vmax)
        fig.savefig(output / (p_in.stem + '.png'))
        # save wave
        f_low_sr = BYAcousticFeature(
            f0=f_low.f0,
            spectrogram=f_low.sp,
            aperiodicity=f_low.ap,
            mfcc=f_low.mc,
            voiced=f_low.voiced,
        )
        rate = acoustic_converter.out_sampling_rate
        wave = super_resolution.convert_to_audio(s_high, acoustic_feature=f_low_sr, sampling_rate=rate)
        librosa.output.write_wav(y=wave.wave, path=str(output / (p_in.stem + '.wav')), sr=rate)
    except:
        # NOTE(review): bare except deliberately left as-is here — it keeps
        # multiprocessing workers alive — but it also swallows KeyboardInterrupt.
        import traceback
        traceback.print_exc()
def main():
    """Build the converters from CLI globals and run conversion on test waves."""
    save_arguments(arguments, output / 'arguments.json')
    # f0 converter
    # Priority: trained f0-translation model > statistics-based converter > none.
    if f0_trans_model_dir is not None:
        model = _get_predictor_model_path(f0_trans_model_dir, f0_trans_model_iteration)
        f0_converter = AcousticConverter(create_config(f0_trans_config), model, gpu=gpu)
    elif input_statistics is not None:
        f0_converter = F0Converter(input_statistics=input_statistics, target_statistics=target_statistics)
    else:
        f0_converter = None
    # acoustic converter
    config = create_config(voice_changer_config)
    model = _get_predictor_model_path(voice_changer_model_dir, voice_changer_model_iteration)
    acoustic_converter = AcousticConverter(
        config,
        model,
        gpu=gpu,
        f0_converter=f0_converter,
        out_sampling_rate=arguments.out_sampling_rate,
    )
    print(f'Loaded acoustic converter model "{model}"')
    # super resolution
    sr_config = create_sr_config(super_resolution_config)
    super_resolution = SuperResolution(sr_config, super_resolution_model, gpu=gpu)
    print(f'Loaded super resolution model "{super_resolution_model}"')
    # dataset's test
    # Reproduces the training-time split: shuffle with the dataset seed and
    # take the trailing num_test paths.
    if not disable_dataset_test:
        input_paths = list(sorted([Path(p) for p in glob.glob(str(config.dataset.input_glob))]))
        numpy.random.RandomState(config.dataset.seed).shuffle(input_paths)
        paths_test = input_paths[-config.dataset.num_test:]
    else:
        paths_test = []
    # test data
    if test_wave_dir is not None:
        paths_test += list(test_wave_dir.glob('*.wav'))
    process_partial = partial(process, acoustic_converter=acoustic_converter, super_resolution=super_resolution)
    # CPU mode can fan out across processes; GPU mode must stay single-process.
    if gpu is None:
        list(multiprocessing.Pool().map(process_partial, paths_test))
    else:
        list(map(process_partial, paths_test))
# Script entry point: run the conversion pipeline configured above.
if __name__ == '__main__':
    main()
| 39.990431 | 119 | 0.715363 |
d584445a3a90558e6178a5a900a9511a2b8b8d61 | 3,911 | py | Python | b_train_ae.py | MiguelSimao/UC2017_Classification | 024c003571e3fc75fadf4430c069c284b18a032b | [
"MIT"
] | 1 | 2021-05-11T09:43:59.000Z | 2021-05-11T09:43:59.000Z | b_train_ae.py | MiguelSimao/UC2017_Classification | 024c003571e3fc75fadf4430c069c284b18a032b | [
"MIT"
] | null | null | null | b_train_ae.py | MiguelSimao/UC2017_Classification | 024c003571e3fc75fadf4430c069c284b18a032b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
SCRIPT TO TEST NOVELTY DETECTION FUNCTIONALITY
@author: simao
"""
import h5py
import numpy as np
from matplotlib import pyplot as plt
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn import preprocessing
np.random.seed(1337)
from keras.models import Model
from keras.layers import Input, Dense, GaussianNoise
from keras.optimizers import SGD
from keras.callbacks import EarlyStopping
from keras import losses
dir_dataset_sg = './dataset/SG24_dataset.h5'
dir_dataset_dg = './dataset/DG10_dataset.h5'
# Open H5 files to read
f1 = h5py.File(dir_dataset_sg,'r')
f2 = h5py.File(dir_dataset_dg,'r')
# Load static gesture data set
X = f1['Predictors']
T = f1['Target']
U = f1['User']
X = np.array(X).transpose()
T = np.array(T).transpose()
U = np.array(U).transpose()
T = T[:,0]
U = U[:,0]
# Shuffle dataset
np.random.seed(0)
indShuf = np.random.permutation(X.shape[0])
X = X[indShuf]
T = T[indShuf]
U = U[indShuf]
X[np.isnan(X)] = 0
# Dataset statistics
num_users = np.unique(U).shape[0]
#%% FEATURE EXTRACTION
# Variable selection
def variable_subset(X):
    """Keep only columns 5..28 of the Mx29 numpy sample matrix ``X``."""
    selected_columns = list(range(5, 29))
    return X[:, selected_columns]
X = variable_subset(X)
#%% NOVELTY DETECTION SPECIFIC PREPROCESSING
# Change of classes 19+ to outlier (Classes 1-18 are gestures, 19,20,21,22,23,24(,25) are outliers)
# Separate the outliers (unsupervised learning)
# NOTE(review): only class 6 is treated as inlier here, not 1-18 as the
# comment above suggests — confirm which split is intended.
inlierInd = np.isin(T,[6])
outlierInd = np.invert(inlierInd)
Xin = X[inlierInd]
Tin = T[inlierInd]
Uin = U[inlierInd]
#X = X[np.invert(outlierInd)]
#T = T[np.invert(outlierInd)]
#U = U[np.invert(outlierInd)]
#%% SET SPLITTTING
# Data splitting : all -> train and validation
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, val_index in sss.split(Xin,Tin):
    X_train, X_val = Xin[train_index], Xin[val_index]
    t_train, t_val = Tin[train_index], Tin[val_index]
    u_train, u_val = Uin[train_index], Uin[val_index]
#%% FEATURE EXTRACTION
# Transformations
# Scaler is fit on training data only, then applied to both splits.
scaler = preprocessing.StandardScaler().fit(X_train)
X_train = scaler.transform(X_train)
X_val = scaler.transform(X_val)
# One hot-encoding
#enc = preprocessing.OneHotEncoder(sparse=False).fit(t_train)
#t_train = enc.transform(t_train)
#t_val = enc.transform(t_val)
#%% TRAIN FFNN
# Denoising autoencoder: input -> noise -> 25 -> 2 (bottleneck) -> 25 -> input.
lr = 0.05
momentum = 0.9
inputs = Input(shape=(X.shape[1],))
x = GaussianNoise(1.0)(inputs)
x = Dense(25, activation='linear')(x)
encoded = Dense(2, activation='linear')(x)
x = Dense(25, activation='linear')(encoded)
decoded = Dense(X_train.shape[1], activation='linear')(x)
autoencoder = Model(inputs,decoded,name='Autoencoder')
encoder = Model(inputs, encoded)
# Optimizer
sgd = SGD(lr=lr,
          momentum=momentum,
          nesterov=True)
# NOTE(review): the EarlyStopping callback below is created but not passed to
# fit() (callbacks=[]) — confirm whether it should be used.
es = EarlyStopping(monitor='val_loss', patience=12)
autoencoder.compile(optimizer=sgd,
                    loss=losses.mean_absolute_error,
                    metrics=['mae'])
autoencoder.fit(x=X_train, y=X_train,
                validation_data=(X_val,X_val),
                epochs=50,
                callbacks=[],
                verbose=1)
def MAE(X, Y):
    """Row-wise mean absolute error between two NxD numpy arrays."""
    # Shapes must match before taking the element-wise difference.
    np.testing.assert_equal(X.shape, Y.shape, err_msg='MAE:Input matrices have different shapes.')
    return np.abs(X - Y).mean(axis=1)
#%% PLOTTING RESULTS
# Reconstruction error per sample over the whole (scaled) dataset; outliers
# are expected to reconstruct worse than inliers.
Xp = scaler.transform(X)
Y = autoencoder.predict(Xp)
L = MAE(Xp,Y)
x = np.arange(Xp.shape[0])
plt.figure()
# Plot inliers on dataset
plt.scatter(x[inlierInd],L[inlierInd],s=8,c='b',marker='.')
# Plot outliers on dataset
plt.scatter(x[outlierInd],L[outlierInd],s=8,c='r',marker='.')
# 2-D bottleneck embedding, colored by inlier (blue) / outlier (red).
Y = encoder.predict(Xp)
plt.figure()
plt.scatter(Y[inlierInd,0],Y[inlierInd,1],s=8,c='b',marker='.')
plt.scatter(Y[outlierInd,0],Y[outlierInd,1],s=8,c='r',marker='.')
1005504b888dcf3cf744ce3668e4b5332cf06c83 | 1,919 | py | Python | setup.py | jamieleecho/milliluk-tools | 9d77fd1640956723dbdd43f6675c01ce700b445b | [
"ClArtistic"
] | 3 | 2018-01-03T02:12:41.000Z | 2019-05-28T02:33:00.000Z | setup.py | jamieleecho/milliluk-tools | 9d77fd1640956723dbdd43f6675c01ce700b445b | [
"ClArtistic"
] | null | null | null | setup.py | jamieleecho/milliluk-tools | 9d77fd1640956723dbdd43f6675c01ce700b445b | [
"ClArtistic"
] | 2 | 2016-12-03T02:22:12.000Z | 2021-04-17T15:38:47.000Z | #!/usr/bin/env python
import setuptools
# Keep VERSION near the top of the file; some release tooling may read it
# directly from here (originally documented as "must be defined on line 6").
VERSION = "0.1"
test_deps = [
    "black",
    "pycodestyle",
    "pylint",
    "pytest",
    "pytest-cov",
    "tox",
]
extras = {
    "test": test_deps,
}
# Long description comes straight from the README for PyPI rendering.
with open("README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name="milliluk-tools",
    version=VERSION,
    description="TRS-80 Color Computer Tools",
    long_description=long_description,
    long_description_content_type="text/markdown",
    # The project's main homepage.
    url="https://github.com/milliluk/milliluk-tools",
    # Author details
    author="Erik Gavriluk",
    # Choose your license
    license="CC BY-NC-ND 4.0",
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        "Development Status :: 3 - Alpha",
        # Indicate who your project is intended for
        "Intended Audience :: Developers",
        "Topic :: Software Development :: Build Tools",
        # Pick your license as you wish (should match "license" above)
        "OSI Approved :: Common Public License",
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        "Programming Language :: Python :: 3",
    ],
    install_requires=[
        "pypng",
    ],
    tests_require=test_deps,
    extras_require=extras,
    python_requires=">=3.3",
    # What does your project relate to?
    keywords="coco image conversion trs-80 tandy",
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=setuptools.find_packages(where="src"),
    package_dir={
        "": "src",
    },
    entry_points={
        "console_scripts": [
            "max2png=milliluk.max2png.max2png:main",
            "cgp220=milliluk.cgp220.cgp220:main",
        ],
    },
)
| 28.220588 | 77 | 0.634706 |
1aa2d6de681b6a36d07f72b3e11f7132208fed3e | 742 | py | Python | profiles_project/profiles_api/permissions.py | amin72/profiles-rest-api | 8800e11a574f7330c2d8b7cd814da1c85f3d926e | [
"MIT"
] | null | null | null | profiles_project/profiles_api/permissions.py | amin72/profiles-rest-api | 8800e11a574f7330c2d8b7cd814da1c85f3d926e | [
"MIT"
] | 5 | 2021-03-19T09:28:00.000Z | 2022-02-10T13:54:51.000Z | profiles_project/profiles_api/permissions.py | amin72/profiles-rest-api | 8800e11a574f7330c2d8b7cd814da1c85f3d926e | [
"MIT"
] | null | null | null | from rest_framework import permissions
class UpdateOwnProfile(permissions.BasePermission):
    """Object-level permission: a profile may only be modified by its owner."""

    def has_object_permission(self, request, view, obj):
        """Allow read access to anyone; write access only to the profile owner."""
        is_read_only = request.method in permissions.SAFE_METHODS
        return is_read_only or obj.id == request.user.id
class UpdateOwnStatus(permissions.BasePermission):
    """Object-level permission: a status may only be modified by its author."""

    def has_object_permission(self, request, view, obj):
        """Allow read access to anyone; write access only to the status author."""
        if request.method not in permissions.SAFE_METHODS:
            return obj.user.id == request.user.id
        return True
cd5a7501d45be784dabfb939ed8343b86b01bac6 | 4,187 | py | Python | virl/cli/ssh/commands.py | tombry/virlutils | e98136b4e88c456828f2d0496c14f851f2627a46 | [
"MIT"
] | 133 | 2018-07-01T06:08:49.000Z | 2022-03-26T15:22:21.000Z | virl/cli/ssh/commands.py | tombry/virlutils | e98136b4e88c456828f2d0496c14f851f2627a46 | [
"MIT"
] | 76 | 2018-06-28T16:41:57.000Z | 2022-03-26T17:23:06.000Z | virl/cli/ssh/commands.py | tombry/virlutils | e98136b4e88c456828f2d0496c14f851f2627a46 | [
"MIT"
] | 43 | 2018-06-27T20:40:52.000Z | 2022-02-22T06:16:11.000Z | import click
from virl.api import VIRLServer
from subprocess import call
from virl import helpers
from virl.helpers import get_mgmt_lxc_ip, get_node_from_roster, get_cml_client, get_current_lab, safe_join_existing_lab, get_node_mgmt_ip
from virl2_client.exceptions import NodeNotFound
@click.command()
@click.argument("node", nargs=1)
def ssh(node):
    """
    ssh to a node
    """
    # (Docstring above is the click help text; left unchanged on purpose.)
    # CML2 path: resolve the node inside the currently-selected lab and exec
    # ssh (or a user-supplied command template) toward its management IP.
    server = VIRLServer()
    client = get_cml_client(server)
    # Default ssh username can be overridden via config.
    username = server.config.get("VIRL_SSH_USERNAME", "cisco")
    current_lab = get_current_lab()
    if current_lab:
        lab = safe_join_existing_lab(current_lab, client)
        if lab:
            try:
                node_obj = lab.get_node_by_label(node)
            except NodeNotFound:
                click.secho("Node {} was not found in lab {}".format(node, current_lab), fg="red")
                exit(1)
            if node_obj.is_active():
                mgmtip = get_node_mgmt_ip(node_obj)
                if mgmtip:
                    # VIRL_SSH_COMMAND is a template with {host}/{username}
                    # placeholders; when set it replaces the plain ssh call.
                    if "VIRL_SSH_COMMAND" in server.config:
                        cmd = server.config["VIRL_SSH_COMMAND"]
                        cmd = cmd.format(host=mgmtip, username=username)
                        print("Calling user specified command: {}".format(cmd))
                        exit(call(cmd.split()))
                    else:
                        # NOTE(review): "Attemping" typo in the user message.
                        click.secho("Attemping ssh connection to {} at {}".format(node_obj.label, mgmtip))
                        exit(call(["ssh", "{}@{}".format(username, mgmtip)]))
                else:
                    click.secho("Node {} does not have an external management IP".format(node_obj.label))
            else:
                click.secho("Node {} is not active".format(node_obj.label), fg="yellow")
        else:
            click.secho("Unable to find lab {}".format(current_lab), fg="red")
            exit(1)
    else:
        click.secho("No current lab set", fg="red")
        exit(1)
@click.command()
@click.argument("node", nargs=-1)
def ssh1(node):
    """
    ssh to a node
    """
    # (Docstring above is the click help text; left unchanged on purpose.)
    # Legacy VIRL1 path. Accepts either "<env> <node>" or just "<node>"
    # (default env); anything else prints the help and exits.
    if len(node) == 2:
        # we received env and node name
        env = node[0]
        running = helpers.check_sim_running(env)
        node = node[1]
    elif len(node) == 1:
        # assume default env
        env = "default"
        running = helpers.check_sim_running(env)
        node = node[0]
    else:
        exit(call(["virl", "ssh", "--help"]))
    if running:
        sim_name = running
        server = VIRLServer()
        details = server.get_sim_roster(sim_name)
        # default ssh username can be overriden
        username = server.config.get("VIRL_SSH_USERNAME", "cisco")
        if node:
            try:
                node_dict = get_node_from_roster(node, details)
                node_name = node_dict.get("NodeName")
                ip = node_dict["managementIP"]
                proxy = node_dict.get("managementProxy")
                # Optional user-supplied command template, as in `ssh` above.
                if "VIRL_SSH_COMMAND" in server.config:
                    cmd = server.config["VIRL_SSH_COMMAND"]
                    cmd = cmd.format(host=ip, username=username)
                    print("Calling user specified command: {}".format(cmd))
                    exit(call(cmd.split()))
                # LXC-proxied management: hop through the management LXC with
                # a ProxyCommand; otherwise connect directly ("flat" network).
                if proxy == "lxc":
                    lxc = get_mgmt_lxc_ip(details)
                    if lxc:
                        # NOTE(review): adjacent string literals below are
                        # missing a space ("connection" "to") and "Attemping"
                        # is a typo — user-visible message only.
                        click.secho("Attemping ssh connection" "to {} at {} via {}".format(node_name, ip, lxc))
                        cmd = 'ssh -o "ProxyCommand ssh -W %h:%p {}@{}" {}@{}'
                        cmd = cmd.format(server.user, lxc, username, ip)
                        exit(call(cmd, shell=True))
                else:
                    # handle the "flat" networking case
                    click.secho("Attemping ssh connection" "to {} at {}".format(node_name, ip))
                    exit(call(["ssh", "{}@{}".format(username, ip)]))
            except AttributeError:
                click.secho("Could not find management info" " for {}:{}".format(env, node), fg="red")
            except KeyError:
                click.secho("Unknown node {}:{}".format(env, node), fg="red")
        else:
            # No node given: return the whole roster instead of connecting.
            return details.json()
5f7b62430be24daa84c58a45aa13d87ad8e3d245 | 3,787 | py | Python | server.py | kylehiroyasu/bert-extractive-summarizer | 71d122aeb91db64336a2ae4b017532f74387be5f | [
"MIT"
] | null | null | null | server.py | kylehiroyasu/bert-extractive-summarizer | 71d122aeb91db64336a2ae4b017532f74387be5f | [
"MIT"
] | null | null | null | server.py | kylehiroyasu/bert-extractive-summarizer | 71d122aeb91db64336a2ae4b017532f74387be5f | [
"MIT"
] | 1 | 2020-03-25T18:12:26.000Z | 2020-03-25T18:12:26.000Z | from flask import Flask
from flask import request, jsonify, abort, make_response
from flask_cors import CORS
import nltk
nltk.download('punkt')
from nltk import tokenize
from typing import List
import argparse
from summarizer import Summarizer, TransformerSummarizer
app = Flask(__name__)
CORS(app)
class Parser(object):
    """Turn raw transcript-style bytes into clean sentences.

    Lines that are bare integers, lines containing '-->' (timestamp-like
    markers), and bare newlines are dropped; '>' quote markers are stripped
    from the remaining text before sentence tokenization.
    """

    def __init__(self, raw_text: bytes):
        # Decode as UTF-8 and keep the individual lines.
        self.all_data = str(raw_text, 'utf-8').split('\n')

    def __isint(self, v) -> bool:
        """Return True if ``v`` parses as an int (e.g. a cue/index line)."""
        try:
            int(v)
            return True
        except (ValueError, TypeError):
            # BUG FIX: was a bare ``except`` which swallowed every exception
            # (including KeyboardInterrupt); only conversion failures mean
            # "not an int".
            return False

    def __should_skip(self, v) -> bool:
        """True for lines that carry no prose: numbers, newlines, '-->' markers."""
        return self.__isint(v) or v == '\n' or '-->' in v

    def __process_sentences(self, v) -> List[str]:
        """Split accumulated text into sentences with NLTK's tokenizer."""
        sentence = tokenize.sent_tokenize(v)
        return sentence

    def save_data(self, save_path, sentences) -> None:
        """Write ``sentences`` to ``save_path``, one per line."""
        with open(save_path, 'w') as f:
            for sentence in sentences:
                f.write("%s\n" % sentence)

    def run(self) -> List[str]:
        """Return the cleaned input as a list of sentences."""
        total: str = ''
        for data in self.all_data:
            if not self.__should_skip(data):
                cleaned = data.replace('>', '').replace('\n', '').strip()
                if cleaned:
                    total += ' ' + cleaned
        sentences = self.__process_sentences(total)
        return sentences

    def convert_to_paragraphs(self) -> str:
        """Return the cleaned input as a single space-joined paragraph."""
        sentences: List[str] = self.run()
        return ' '.join([sentence.strip() for sentence in sentences]).strip()
@app.route('/summarize', methods=['POST'])
def convert_raw_text():
    """POST /summarize: summarize the raw request body.

    Query params: ratio (default 0.2), min_length (25), max_length (500).
    Returns JSON {"summary": ...}; 400 when the body is empty.
    """
    ratio = float(request.args.get('ratio', 0.2))
    min_length = int(request.args.get('min_length', 25))
    max_length = int(request.args.get('max_length', 500))
    data = request.data
    if not data:
        abort(make_response(jsonify(message="Request must have raw text"), 400))
    # Clean the transcript-style input, then run the module-level summarizer
    # (constructed in the __main__ block below).
    parsed = Parser(data).convert_to_paragraphs()
    summary = summarizer(parsed, ratio=ratio, min_length=min_length, max_length=max_length)
    return jsonify({
        'summary': summary
    })
# Entry point: parse CLI options, build the global ``summarizer`` used by the
# /summarize route, and start the Flask server.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('-model', dest='model', default='bert-base-uncased', help='The model to use')
    parser.add_argument('-transformer-type',
                        dest='transformer_type', default=None,
                        help='Huggingface transformer class key')
    parser.add_argument('-transformer-key', dest='transformer_key', default=None,
                        help='The transformer key for huggingface. For example bert-base-uncased for Bert Class')
    parser.add_argument('-greediness', dest='greediness', help='', default=0.45)
    parser.add_argument('-reduce', dest='reduce', help='', default='mean')
    parser.add_argument('-hidden', dest='hidden', help='', default=-2)
    parser.add_argument('-port', dest='port', help='', default=5000)
    parser.add_argument('-host', dest='host', help='', default='0.0.0.0')
    args = parser.parse_args()
    # A transformer type selects the generic TransformerSummarizer and then
    # requires a matching model key; otherwise fall back to the Bert-style
    # Summarizer with -model.
    if args.transformer_type is not None:
        print(f"Using Model: {args.transformer_type}")
        assert args.transformer_key is not None, 'Transformer Key cannot be none with the transformer type'
        summarizer = TransformerSummarizer(
            transformer_type=args.transformer_type,
            transformer_model_key=args.transformer_key,
            hidden=int(args.hidden),
            reduce_option=args.reduce,
            greedyness=float(args.greediness)
        )
    else:
        print(f"Using Model: {args.model}")
        summarizer = Summarizer(
            model=args.model,
            hidden=int(args.hidden),
            reduce_option=args.reduce,
            greedyness=float(args.greediness)
        )
    app.run(host=args.host, port=int(args.port))
bccae0a22e1d71771e7c7f126936fd5528fff405 | 51 | py | Python | samples/lint/__init__.py | btrekkie/file-builder | e85726ed647ad7a73839c7410618ef3f118c96c9 | [
"MIT"
] | 1 | 2020-05-29T17:13:26.000Z | 2020-05-29T17:13:26.000Z | samples/lint/__init__.py | btrekkie/file-builder | e85726ed647ad7a73839c7410618ef3f118c96c9 | [
"MIT"
] | 4 | 2021-06-14T18:42:55.000Z | 2022-03-27T13:36:54.000Z | samples/lint/__init__.py | btrekkie/file-builder | e85726ed647ad7a73839c7410618ef3f118c96c9 | [
"MIT"
] | null | null | null | from .lint import lint_dir
__all__ = ['lint_dir']
| 12.75 | 26 | 0.72549 |
c8f00af828cea4b4fe96a961a6085e4081adc4ac | 2,422 | py | Python | components/manager/scripts/configure_manager.py | cloudify-cosmo/cloudify-manager-blueprints | 1908c1a0615fb15cbb118335aa2f9e055b9e5779 | [
"Apache-2.0"
] | 35 | 2015-03-07T13:30:58.000Z | 2022-02-14T11:44:48.000Z | components/manager/scripts/configure_manager.py | cloudify-cosmo/cloudify-manager-blueprints | 1908c1a0615fb15cbb118335aa2f9e055b9e5779 | [
"Apache-2.0"
] | 101 | 2015-03-18T03:07:57.000Z | 2019-02-07T12:06:42.000Z | components/manager/scripts/configure_manager.py | cloudify-cosmo/cloudify-manager-blueprints | 1908c1a0615fb15cbb118335aa2f9e055b9e5779 | [
"Apache-2.0"
] | 76 | 2015-01-08T10:33:03.000Z | 2021-05-11T08:45:50.000Z | #!/usr/bin/env python
#########
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from os.path import join, dirname
from cloudify import ctx
ctx.download_resource(
join('components', 'utils.py'),
join(dirname(__file__), 'utils.py')
)
import utils # NOQA
NODE_NAME = 'manager-config'
ctx_properties = ctx.node.properties.get_all()
def configure_security_properties():
    """Record the external REST port/protocol in runtime properties from SSL config."""
    runtime_props = ctx.instance.runtime_properties
    ssl_enabled = ctx_properties['security']['ssl']['enabled']
    if ssl_enabled:
        # manager SSL settings
        ctx.logger.info('SSL is enabled, setting rest port to 443 and '
                        'rest protocol to https...')
        port, protocol = 443, 'https'
    else:
        ctx.logger.info('SSL is disabled, setting rest port '
                        'to 80 and rest protocols to http...')
        port, protocol = 80, 'http'
    runtime_props['external_rest_port'] = port
    runtime_props['external_rest_protocol'] = protocol
def create_cloudify_user():
    """Create the Cloudify service user/group and its home directory."""
    home_dir = utils.CLOUDIFY_HOME_DIR
    utils.create_service_user(user=utils.CLOUDIFY_USER,
                              group=utils.CLOUDIFY_GROUP,
                              home=home_dir)
    utils.mkdir(home_dir)
def create_sudoers_file_and_disable_sudo_requiretty():
    """Provision the Cloudify sudoers file and exempt the service user from requiretty."""
    sudoers_file = utils.CLOUDIFY_SUDOERS_FILE
    utils.sudo(['touch', sudoers_file])
    utils.chmod('440', sudoers_file)
    # requiretty would break non-interactive sudo invocations for this user.
    sudoers_entry = 'Defaults:{user} !requiretty'.format(user=utils.CLOUDIFY_USER)
    reason = 'Disable sudo requiretty for {0}'.format(utils.CLOUDIFY_USER)
    utils.add_entry_to_sudoers(sudoers_entry, reason)
def init_cloudify_user():
    """Full user setup: create the service user, then its sudoers exemption."""
    create_cloudify_user()
    create_sudoers_file_and_disable_sudo_requiretty()
# Executed at script load (this is a Cloudify lifecycle script, not a module).
init_cloudify_user()
configure_security_properties()
| 31.454545 | 79 | 0.71924 |
5e9c60a466fba874adedeab35be75fce5c5b5271 | 9,343 | py | Python | buildAdvGear2.py | BurcinSayin/pf2 | bcd362dc0a750b8ee59cd19ecff9cf5be4f34b19 | [
"MIT"
] | 25 | 2019-09-13T19:30:24.000Z | 2022-03-14T21:57:17.000Z | buildAdvGear2.py | BurcinSayin/pf2 | bcd362dc0a750b8ee59cd19ecff9cf5be4f34b19 | [
"MIT"
] | 13 | 2019-09-26T20:03:04.000Z | 2021-09-14T23:21:03.000Z | buildAdvGear2.py | BurcinSayin/pf2 | bcd362dc0a750b8ee59cd19ecff9cf5be4f34b19 | [
"MIT"
] | 12 | 2019-09-13T19:47:57.000Z | 2022-03-13T03:46:16.000Z | from bs4 import BeautifulSoup
import requests
import json
import datetime
import codecs
import time
# Top-level result document; get_all() fills in 'itemList' before it is
# serialized to JSON at the bottom of the script.
wornHolder = {}
wornHolder['name'] = 'Pathfinder 2.0 Adventuring Gear list'
wornHolder['date'] = datetime.date.today().strftime("%B %d, %Y")
def get_multi(link):
    """Scrape a multi-item detail page and return a list of item dicts.

    The page holds several item variants under one set of shared ("parent")
    details; each "title" <h2> starts a new item. Parent traits/fields are
    merged into every item.
    """
    items = []
    res2 = requests.get(link)
    res2.raise_for_status()
    soup2 = BeautifulSoup(res2.text, 'lxml')
    main = soup2.find("span", {'id':'ctl00_MainContent_DetailedOutput'})
    traits = main.find_all("span", {"class" : lambda L: L and L.startswith('trai')})
    traitHolder = []
    for trait in traits:
        traitHolder.append(trait.text)
    children = main.contents
    # State machine over the flat child list:
    #   reachedBreak - past an <hr>, collecting shared parent details
    #   reachedItem  - past a "title" <h2>, collecting the current item
    #   inHeader     - inside the <h1> page header
    #   tagType      - lowercased <b> label waiting for its text value
    reachedBreak = False
    reachedItem = False
    detailHolder = []
    notFirstH2 = False
    inHeader = False
    parentDetails = {}
    parentDetails['traits'] = traitHolder
    item = {}
    item['link'] = link
    tagType = ""
    itemDetailHolder = []
    for child in children:
        stringContents = str(child)
        if stringContents.startswith("<"):
            #print(stringContents)
            if child.name == "img":
                parentDetails['actions'] = child['alt']
            if child.name == "hr":
                tagType = ""
                reachedBreak = True
                inHeader = False
            if child.name == "img":
                item['actions'] = child['alt']
            if child.name == "h1":
                inHeader = True
            if child.name == "h2":
                #print(child.text)
                className = ""
                try:
                    className = child['class'][0]
                except (KeyError, IndexError):
                    # Narrowed from bare except: no class attribute / empty list.
                    className = ""
                if className == "title":
                    if notFirstH2:
                        # Flush the finished item before starting the next one.
                        # BUG FIX: join to a string here, matching the final
                        # item below (previously this stored a raw list).
                        item['text'] = " ".join(detailHolder + itemDetailHolder)
                        for key in parentDetails.keys():
                            item[key] = parentDetails[key]
                        items.append(item)
                        item = {}
                        item['link'] = link
                        itemDetailHolder = []
                    else:
                        notFirstH2 = True
                    reachedBreak = False
                    reachedItem = True
                    inHeader = False
                    name = child.text
                    start = child.text.find("Item")
                    item['name'] = child.text[0:start]
            if child.name == "b":
                if(child.text != "Source"):
                    tagType = child.text.lower()
            if child.name == "a":
                try:
                    if child['class'][0] == "external-link" :
                        item['source'] = child.text
                except (KeyError, IndexError):
                    # Narrowed from bare except: anchor without a class attribute.
                    pass
                tagType = ""
            if child.name == "ul":
                #print(child.text)
                lis = child.find_all("li")
                if(len(lis) > 0):
                    spellHolder = []
                    for li in lis:
                        spellHolder.append(li.text)
                    item['spells'] = spellHolder
        else:
            # Plain text node: route it to parent details, header fields, or
            # the current item depending on the state flags.
            if reachedBreak:
                if(tagType != ""):
                    if not stringContents.isspace():
                        parentDetails[tagType] = stringContents
                        tagType = ""
                else:
                    detailHolder.append(stringContents)
            if inHeader:
                if tagType != "":
                    parentDetails[tagType] = stringContents
                    tagType = ""
            if reachedItem:
                if tagType != "":
                    item[tagType] = stringContents
                    tagType = ""
                else:
                    if not stringContents.isspace():
                        itemDetailHolder.append(stringContents)
                        #print(stringContents)
    # Flush the last item.
    for key in parentDetails.keys():
        item[key] = parentDetails[key]
    string = " "
    item['text'] = string.join(detailHolder + itemDetailHolder)
    items.append(item)
    return items
def get_single(link):
    """Scrape a single-item detail page and return one item dict.

    Walks the flat child list of the detail span: <b> labels name the field
    whose value is the following text node; text after the first <hr> is
    accumulated into the item's 'text'.
    """
    details = {}
    itemDetails = {}  # NOTE(review): never used — candidate for removal.
    res2 = requests.get(link)
    res2.raise_for_status()
    soup2 = BeautifulSoup(res2.text, 'lxml')
    detail = soup2.find(lambda tag: tag.name=='span' and tag.has_attr('id') and tag['id']=="ctl00_MainContent_DetailedOutput")
    traits = detail.find_all("span", {"class" : lambda L: L and L.startswith('trai')})
    traitHolder = []
    for trait in traits:
        traitHolder.append(trait.text)
    details['traits'] = traitHolder
    children = detail.contents
    # reachedBreak: past the first <hr>, i.e. into the description body.
    # tagType: lowercased <b> label waiting for its text value.
    reachedBreak = False
    detailHolder = []
    tagType = ""
    details['link'] = link
    for child in children:
        stringContents = str(child)
        if stringContents.startswith("<"):
            if child.name == "h1":
                # Item name is the header text before the "Item <level>" suffix.
                name = child.text
                start = name.find("Item")
                details['name'] = name[0:start].strip()
            if child.name == "hr":
                tagType = ""
                reachedBreak = True
            if child.name == "a":
                try:
                    if child['class'][0] == "external-link" :
                        details['source'] = child.text
                except:
                    # NOTE(review): bare except — anchors without a class
                    # attribute land here (KeyError); consider narrowing.
                    pass
                tagType = ""
            if child.name == "b":
                if(child.text != "Source"):
                    tagType = child.text.lower()
            if child.name == "img":
                details['actions'] = child['alt']
            if child.name == "i":
                if(reachedBreak):
                    detailHolder.append(child.text)
            if child.name == "ul":
                #print(child.text)
                lis = child.find_all("li")
                if(len(lis) > 0):
                    spellHolder = []
                    for li in lis:
                        spellHolder.append(li.text)
                    details['spells'] = spellHolder
            #else:
                #if not stringContents.isspace() :
                    #detailHolder.append(child.text)
        else:
            # Plain text node: either the value of a pending <b> label, or
            # (after the <hr>) part of the free-text description.
            if reachedBreak:
                if tagType != "":
                    if not stringContents.isspace():
                        details[tagType] = stringContents.strip()
                else:
                    if not stringContents.isspace() :
                        detailHolder.append(stringContents.strip())
            else:
                if tagType != "":
                    if not stringContents.isspace():
                        details[tagType] = stringContents.strip()
    #print(child)
    string = " "
    details['text'] = string.join(detailHolder)
    return details
def get_all():
    """Scrape the adventuring-gear index, then every item page; fill wornHolder.

    First pass collects (name, link, level) from the index table, marking links
    that appear more than once as 'multi'. Second pass fetches each link with
    get_multi()/get_single() accordingly.
    """
    listOfLinks = []
    listOfLinks.append("https://2e.aonprd.com/Equipment.aspx?Category=1")
    itemHolder = []
    for link in listOfLinks:
        res2 = requests.get(link)
        res2.raise_for_status()
        soup2 = BeautifulSoup(res2.text, 'lxml')
        table = soup2.find(lambda tag: tag.name=='table' and tag.has_attr('id') and tag['id']=="ctl00_MainContent_TreasureElement")
        rows = table.findAll(lambda tag: tag.name=='tr')
        t = 0
        for row in rows:
            t += 1
            #print(row)
            #print("-----------------------------------")
            item = {}
            entries = row.find_all(lambda tag: tag.name=='td')
            if entries is not None:
                if len(entries) > 0:
                    name = entries[0].find("a").text
                    item['name'] = name
                    item['link'] = "https://2e.aonprd.com/"+entries[0].find("a")['href']
                    # An em-dash level cell means "no level" -> 0.
                    if entries[1].text == "—":
                        item['level'] = 0
                    else:
                        item['level'] = int(entries[1].text)
                    # A repeated link means the detail page holds several
                    # variants; flag the first occurrence as multi.
                    if any(x['link'] == item['link'] for x in itemHolder):
                        #print("shortName:", shortName)
                        for item2 in itemHolder:
                            if item2['link'] == item['link']:
                                item2['multi'] = True
                    else:
                        item['multi'] = False
                        itemHolder.append(item)
            #if t >6:
                #break
    items = []
    for item in itemHolder:
        #print(item)
        print("Getting adv gear item :", item['name'],"This url:", item['link'],"|is it multi:",item['multi'])
        if item['multi'] == True:
            multiHolder = get_multi(item['link'])
            for multi in multiHolder:
                multi['category'] = "adventuring gear"
                items.append(multi)
        else:
            single = get_single(item['link'])
            single['category'] = "adventuring gear"
            single['level'] = item['level']
            items.append(single)
    wornHolder['itemList'] = items
    return wornHolder
#print(get_all())
# Scrape everything and serialize the result document to disk.
json_data = json.dumps(get_all(), indent=4)
#print(json_data)
filename = "adv-gear-v2-pf2.json"
# BUG FIX: the original ended with ``f.close`` (missing parentheses), which
# never closed the file; the context manager guarantees flush + close.
with open(filename, "w") as f:
    f.write(json_data)
7d8ee95bf5b933dc766678f4fce23796f07a9299 | 12,753 | py | Python | zoopt/algos/opt_algorithms/racos/racos_common.py | hongkahjun/ZOOpt-1 | 3dd0f81076f7b464ac1ec77117021621d314cdcb | [
"MIT"
] | null | null | null | zoopt/algos/opt_algorithms/racos/racos_common.py | hongkahjun/ZOOpt-1 | 3dd0f81076f7b464ac1ec77117021621d314cdcb | [
"MIT"
] | null | null | null | zoopt/algos/opt_algorithms/racos/racos_common.py | hongkahjun/ZOOpt-1 | 3dd0f81076f7b464ac1ec77117021621d314cdcb | [
"MIT"
] | null | null | null | """
This module contains the class RacosCommon, which is a common part in Racos, SRacos and SSRacos.
Author:
Yu-Ren Liu
Updated by:
Ze-Wen Li
"""
import copy, math
from zoopt.utils.tool_function import ToolFunction
from zoopt.solution import Solution
from multiprocessing import Queue
class RacosCommon:
"""
This class contains common attributes and methods shared by Racos, SRacos and SSRacos.
"""
def __init__(self):
"""
Initialization.
"""
self._parameter = None
self._objective = None
# Solution set
# Random sampled solutions construct self._data
self._data = []
# Save solutions with distinct x for tune init
self._init_data = []
self._need_copy = True
# self._positive_data are best-positive_size solutions set
self._positive_data = []
# self._negative_data are the other solutions
self._negative_data = []
# Solution
self._best_solution = None
self._possible_solution_list = []
return
def clear(self):
"""
Clear RacosCommon.
:return: no return value
"""
self._parameter = None
self._objective = None
# Solution
self._data = []
self._positive_data = []
self._negative_data = []
# value
self._best_solution = None
    def init_attribute(self):
        """
        Init self._data, self._positive_data, self._negative_data by sampling.

        Uses user-supplied initial samples first (evaluating only those whose
        value is still NaN), then fills the pool with distinct random samples
        up to train_size, and finally splits it via self.selection().

        :return: no return value
        """
        self._parameter.set_negative_size(self._parameter.get_train_size() - self._parameter.get_positive_size())
        # check if the initial solutions have been set
        data_temp = self._parameter.get_init_samples()
        i = 0
        iteration_num = self._parameter.get_train_size()
        if data_temp is not None and self._best_solution is None:
            # Cap at train_size in case more init samples were supplied.
            size = len(data_temp)
            if iteration_num < size:
                size = iteration_num
            for j in range(size):
                if isinstance(data_temp[j], Solution) is False:
                    x = self._objective.construct_solution(data_temp[j])
                else:
                    x = data_temp[j]
                # NaN value means "not evaluated yet".
                if math.isnan(x.get_value()):
                    self._objective.eval(x)
                self._data.append(x)
                ToolFunction.log("init solution %s, value: %s" % (i, x.get_value()))
                i += 1
        # otherwise generate random solutions
        while i < iteration_num:
            # distinct_flag: True means the sample is distinct (can be used),
            # False means the sample duplicates an existing one; sample again.
            x, distinct_flag = self.distinct_sample(self._objective.get_dim(), self._data,
                                                   data_num=iteration_num)
            # panic stop
            if x is None:
                break
            if distinct_flag:
                self._objective.eval(x)
                self._data.append(x)
                i += 1
        self.selection()
        return
    def parallel_init_attribute(self, unevaluated_queue, evaluated_queue):
        """
        Init self._data, self._positive_data, self._negative_data by sampling.

        Same job as ``init_attribute`` but evaluations are delegated to
        workers: solutions needing evaluation are pushed onto
        ``unevaluated_queue`` and collected back from ``evaluated_queue``.

        :param unevaluated_queue: queue to submit solutions for evaluation
        :param evaluated_queue: queue evaluated solutions come back on
        :return: no return value
        """
        self._parameter.set_negative_size(self._parameter.get_train_size() - self._parameter.get_positive_size())
        # check if the initial solutions have been set
        data_temp = self._parameter.get_init_samples()
        sampled_data = []
        ini_size = 0
        if data_temp is not None:
            ini_size = len(data_temp)
        eval_num = 0
        iteration_num = self._parameter.get_train_size()
        if data_temp is not None and self._best_solution is None:
            for j in range(min(ini_size, iteration_num)):
                if isinstance(data_temp[j], Solution) is False:
                    sol = self._objective.construct_solution(data_temp[j])
                else:
                    sol = data_temp[j]
                # NaN value == unevaluated: hand it to the workers;
                # already-evaluated solutions go straight into the pool.
                if math.isnan(sol.get_value()):
                    unevaluated_queue.put(sol, block=True, timeout=None)
                    eval_num += 1
                else:
                    self._data.append(sol)
            # Block until every submitted initial solution has come back.
            for i in range(0, eval_num):
                sol = evaluated_queue.get(block=True, timeout=None)
                # ToolFunction.log("init solution %s, value: %s" % (i, sol.get_value()))
                self._data.append(sol)
                sampled_data.append(sol)
        # otherwise generate random solutions
        t = ini_size
        while t < iteration_num:
            # distinct_flag: True means the sample is distinct (usable);
            # False means it duplicates an earlier sample, so sample again.
            sol, distinct_flag = self.distinct_sample(self._objective.get_dim(), sampled_data,
                                                      data_num=iteration_num)
            # panic stop: the finite sample space was fully enumerated
            if sol is None:
                break
            if distinct_flag:
                unevaluated_queue.put(sol, block=True, timeout=None)
                sampled_data.append(sol)
                t += 1
        # Collect the evaluated random samples back from the workers.
        t = ini_size
        while t < iteration_num:
            sol = evaluated_queue.get(block=True, timeout=None)
            self._data.append(sol)
            t += 1
        self.selection()
        return
    def tune_init_attribute(self):
        """
        Init samples for Tune.

        Pops one user-provided initial sample at a time (the provided list is
        deep-copied once, guarded by ``self._need_copy``); falls back to
        random distinct sampling once the provided samples run out.

        :return: (solution, distinct_flag) -- flag True when the solution is
                 distinct / still needs evaluation
        """
        self._parameter.set_negative_size(self._parameter.get_train_size() - self._parameter.get_positive_size())
        if self._need_copy:
            # Deep-copy so that popping items below cannot mutate the
            # caller-owned init_samples list.
            self._data_temp = copy.deepcopy(self._parameter.get_init_samples())
            self._need_copy = False
            self._iteration_num = self._parameter.get_train_size()
        if self._data_temp is not None and self._best_solution is None:
            size = min(len(self._data_temp), self._iteration_num)
            if size > 0:
                if isinstance(self._data_temp[0], Solution) is False:
                    x = self._objective.construct_solution(self._data_temp[0])
                else:
                    x = self._data_temp[0]
                del self._data_temp[0]
                self._iteration_num -= 1
                self._init_data.append(x)
                # NaN value: solution still needs evaluation by the caller.
                if math.isnan(x.get_value()):
                    return x, True
                else:
                    # Already evaluated: recurse to fetch the next candidate.
                    return self.tune_init_attribute()
        x, distinct_flag = self.distinct_sample(self._objective.get_dim(), self._init_data, data_num=1)
        if distinct_flag:
            self._init_data.append(x)
        return x, distinct_flag
def selection(self):
"""
This function sequentially does:
Sort self._data
Choose [first, train_size )solutions as the new self._data
Choose first positive_size solutions as self._positive_data
Choose [positive_size, train_size) solutions as self._negative_data
:return: no return value
"""
new_data = sorted(self._data, key=lambda x: x.get_value())
self._data = new_data[0: self._parameter.get_train_size()]
self._positive_data = self._data[0: self._parameter.get_positive_size()]
self._negative_data = self._data[self._parameter.get_positive_size():]
self._best_solution = self._positive_data[0]
return
def distinct_sample(self, dim, data_list, check_distinct=True, data_num=0):
"""
Sample a distinct solution(compared with solutions in set) from dim.
:param dim: a Dimension object
:param set: a list containing other solutions
:param check_distinct: whether to check the sampled solution is distinct
:param data_num: the maximum number to sample
:return: sampled solution and distinct_flag(True if distinct)
"""
objective = self._objective
x = objective.construct_solution(dim.rand_sample())
times = 1
distinct_flag = True
if check_distinct is True:
while self.is_distinct(data_list, x) is False:
x = objective.construct_solution(dim.rand_sample())
times += 1
if times % 10 == 0:
limited, number = dim.limited_space()
if limited is True:
if number <= data_num:
ToolFunction.log(
'racos_common.py: WARNING -- sample space has been fully enumerated. Stop early')
return None, None
if times > 100:
distinct_flag = False
break
return x, distinct_flag
    # Distinct sample from a classifier, return a solution
    # if check_distinct is False, you don't need to sample distinctly
    def distinct_sample_classifier(self, classifier, data_list, check_distinct=True, data_num=0):
        """
        Sample a distinct solution from a classifier.

        :param classifier: model exposing rand_sample() and get_sample_space()
        :param data_list: solutions the new sample must differ from
        :param check_distinct: whether to enforce distinctness
        :param data_num: size of the data set, used to detect exhaustion
        :return: (solution, distinct_flag); (None, None) when the sample
                 space has been fully explored
        """
        x = classifier.rand_sample()
        sol = self._objective.construct_solution(x)
        times = 1
        distinct_flag = True
        if check_distinct is True:
            while self.is_distinct(data_list, sol) is False:
                x = classifier.rand_sample()
                sol = self._objective.construct_solution(x)
                times += 1
                if times % 10 == 0:
                    # The sample space is fetched only once (at times == 10)
                    # and the same object is reused for later checks.
                    if times == 10:
                        space = classifier.get_sample_space()
                    limited, number = space.limited_space()
                    if limited is True:
                        if number <= data_num:
                            ToolFunction.log(
                                'racos_common: WARNING -- sample space has been fully explored. Stop early')
                            return None, None
                if times > 100:
                    # Too many collisions: return the last sample anyway.
                    distinct_flag = False
                    break
        return sol, distinct_flag
def show_best_solution(self, intermediate_print=False, times=0, freq=100):
"""
Show intermediate best solutions every 'freq' evaluation.
:param intermediate_print: whether to show
:param times: current iteration time
:param freq: frequency
:return: no return value
"""
if intermediate_print is True and times % freq == 0:
ToolFunction.log(("budget %d, fx result: " % times) + str(self._best_solution.get_value()))
ToolFunction.log("x: " + str(self._best_solution.get_x()))
@staticmethod
def extend(seta, setb):
"""
Concatenate two list.
"""
result = copy.deepcopy(seta)
for x in setb:
result.append(copy.deepcopy(x))
return result
@staticmethod
def is_distinct(sol_list, sol):
"""
Check if x is distinct from each solution in seta.
:param seta: a list
:param x: a Solution object
:return: True or False
"""
for ins in sol_list:
if sol.is_equal(ins):
return False
return True
    def set_parameters(self, parameter):
        # Inject the run configuration (a Parameter object).
        self._parameter = parameter
        return
    def get_parameters(self):
        # Current run configuration, or None before set_parameters().
        return self._parameter
    def set_objective(self, objective):
        # Inject the optimization target (an Objective object).
        self._objective = objective
        return
    def get_objective(self):
        # Current optimization target, or None before set_objective().
        return self._objective
# For debugging
    def print_positive_data(self):
        """Log the positive (best) solution set; debugging aid."""
        ToolFunction.log('------print positive_data------')
        ToolFunction.log('the size of positive_data is: %d' %
                         (len(self._positive_data)))
        for x in self._positive_data:
            x.print_solution()
    def print_negative_data(self):
        """Log the negative (remaining) solution set; debugging aid."""
        ToolFunction.log('------print negative_data------')
        ToolFunction.log('the size of negative_data is: %d' %
                         (len(self._negative_data)))
        for x in self._negative_data:
            x.print_solution()
    def print_data(self):
        """Log the whole solution pool; debugging aid."""
        ToolFunction.log('------print b------')
        ToolFunction.log('the size of b is: %d' % (len(self._data)))
        for x in self._data:
            x.print_solution()
    def set_best_solution(self, solution):
        # Overwrite the tracked best solution (no value comparison is done).
        self._best_solution = solution
    def get_best_solution(self):
        # Best solution found so far, or None before any selection().
        return self._best_solution
    def get_data(self):
        # The full solution pool.
        return self._data
    def get_positive_data(self):
        # The best positive_size solutions.
        return self._positive_data
    def get_negative_data(self):
        # The remaining (non-positive) solutions.
        return self._negative_data
| 36.965217 | 113 | 0.569827 |
8a6b44dab0a69ec86d56c3cb1dc944ab24a6e4da | 8,216 | py | Python | code/Experiments/neon-master/examples/nmt/data.py | matthijsvk/convNets | 7e65db7857a4e6abfbcab264953eb7741319de6c | [
"Apache-2.0"
] | 53 | 2017-04-18T10:06:20.000Z | 2021-12-29T21:26:07.000Z | examples/nmt/data.py | anlthms/neon | cba318c9f0a2acf2ab8a3d7725b588b2a8b17cb9 | [
"Apache-2.0"
] | null | null | null | examples/nmt/data.py | anlthms/neon | cba318c9f0a2acf2ab8a3d7725b588b2a8b17cb9 | [
"Apache-2.0"
] | 20 | 2017-05-03T03:27:09.000Z | 2022-03-24T07:07:45.000Z | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Utilities for handling the bilingual text dataset used for
Neural Machine Translation.
"""
import os
import numpy as np
from collections import Counter
import h5py
import tarfile
import gzip
from neon.data.datasets import Dataset
from neon.util.argparser import NeonArgparser
def get_data():
    """
    Download bilingual text dataset for Machine translation example.

    Fetches the bitexts archive, extracts the selected gzipped corpora
    (UN2000 and Europarl v7), decompresses them, and converts each into
    train/valid h5 files via ``create_h5py``.
    """
    # vocab_size and time_steps are hard coded here
    vocab_size = 16384
    time_steps = 20

    # download dataset
    url = 'http://www-lium.univ-lemans.fr/~schwenk/cslm_joint_paper/data/'
    filename = 'bitexts.tgz'
    size = 1313280000
    parser = NeonArgparser(__doc__)
    args = parser.parse_args(gen_be=False)
    data_dir = os.path.join(args.data_dir, 'nmt')
    _, filepath = Dataset._valid_path_append(data_dir, '', filename)
    if not os.path.exists(filepath):
        Dataset.fetch_dataset(url, filename, filepath, size)

    # extract selected datasets
    datafiles = dict()
    datafiles['un2000'] = ('un2000_pc34.en.gz', 'un2000_pc34.fr.gz')
    datafiles['europarl7'] = ('ep7_pc45.en.gz', 'ep7_pc45.fr.gz')

    extractpath = os.path.join(data_dir, 'bitexts.selected')
    with tarfile.open(filepath, 'r') as tar_ref:
        for dset, files in datafiles.items():
            datasetpath = os.path.join(data_dir, dset)
            # extract the files for dataset, if not already there
            for zipped in files:
                fname = '.'.join(zipped.split('.')[:-1])
                fpath = os.path.join(datasetpath, fname)
                if not os.path.exists(fpath):
                    gzpath = os.path.join(extractpath, zipped)
                    if not os.path.exists(gzpath):
                        select = [ti for ti in tar_ref if os.path.split(ti.name)[1] == zipped]
                        tar_ref.extractall(path=data_dir, members=select)
                    # get contents of gz files
                    if not os.path.exists(datasetpath):
                        os.makedirs(datasetpath)
                    # BUGFIX: gzip.open yields bytes, so the target file must
                    # be opened in binary mode; text mode ('w') raises
                    # TypeError on Python 3.
                    with gzip.open(gzpath, 'rb') as fin, open(fpath, 'wb') as fout:
                        fout.write(fin.read())
                    os.remove(gzpath)
    if os.path.exists(extractpath):
        os.rmdir(extractpath)

    # process data and save to h5 file
    # loop through all datasets and get train and valid splits
    for dataset in datafiles.keys():
        s_vocab, t_vocab = create_h5py(data_dir, dataset, 'train',
                                       vocab_size=vocab_size, time_steps=time_steps)
        create_h5py(data_dir, dataset, 'valid', s_vocab=s_vocab, t_vocab=t_vocab,
                    time_steps=time_steps)
def parse_vocab(path, vocab_size):
    """Build a vocabulary of the ``vocab_size`` most frequent tokens.

    Lines are lowercased, stripped of '.'/',' and of a stray utf-8
    apostrophe+'s' byte sequence, then whitespace-tokenized.  The reserved
    tokens '<eos>' and '<unk>' occupy the first two slots, so only
    ``vocab_size - 2`` data tokens are kept.
    """
    counts = Counter()
    with open(path, 'r') as handle:
        for line in handle:
            cleaned = line.lower().replace('.', '').replace(',', '')
            cleaned = cleaned.replace('\xe2\x80\x99s', '')
            counts.update(cleaned.split())
    frequent = [token for token, _ in counts.most_common(vocab_size - 2)]
    return ['<eos>', '<unk>'] + frequent
def vocab_to_dicts(vocab):
    """Return (token->index, index->token) lookup dicts for ``vocab``."""
    indexed = list(enumerate(vocab))
    t2i = {token: index for index, token in indexed}
    i2t = dict(indexed)
    return t2i, i2t
def get_lengths(path, split, time_steps, num_train, num_valid, max_sentence):
    """Collect sentence lengths for the chosen split.

    :param path: text file with one sentence per line
    :param split: 'train' (first ``num_train`` lines) or 'valid'
                  (last ``num_valid`` lines before ``max_sentence``)
    :param time_steps: maximum usable sentence length
    :param num_train: number of training sentences
    :param num_valid: number of validation sentences
    :param max_sentence: total number of sentences in the file
    :return: (lengths, num_short): token counts of the selected lines and
             how many of them fit within ``time_steps``
    """
    with open(path, 'r') as f:
        lengths = []
        num_short = 0
        for ii, sentence in enumerate(f):
            # BUGFIX: compare strings with '==' instead of 'is'; identity
            # comparison of string literals only works by CPython interning
            # accident and emits a SyntaxWarning on modern Pythons.
            if (split == 'train' and ii < num_train) or (split == 'valid' and
                                                         ii >= max_sentence - num_valid):
                tokens = sentence.split()
                lengths.append(len(tokens))
                if lengths[-1] <= time_steps:
                    num_short += 1
    return lengths, num_short
def create_data(path, time_steps, t2i, vocab, lengths, split, s_num_short,
                num_train, num_valid, max_sentence):
    """Build the (s_num_short x time_steps) index matrix for one split.

    Sentences longer than ``time_steps`` are skipped; shorter ones are
    right-aligned, padded on the left with 0 (<eos>).  Out-of-vocabulary
    tokens map to index 1 (<unk>).

    :param t2i: token -> index dict built from ``vocab``
    :param vocab: retained for interface compatibility; lookups now go
                  through ``t2i`` directly
    :return: numpy array of token indices
    """
    X = np.zeros((s_num_short, time_steps))  # 0-filled == <eos> padding
    with open(path, 'r') as f:
        i_sent = 0
        idx = 0
        for ii, sentence in enumerate(f):
            # BUGFIX: '==' instead of the identity comparison 'is' on
            # string literals (interning accident, SyntaxWarning).
            if (split == 'train' and ii < num_train) or (split == 'valid' and
                                                         ii >= max_sentence - num_valid):
                sentence = sentence.lower().replace('.', '').replace(',', '')
                sentence = sentence.replace('\xe2\x80\x99s', '')
                token = sentence.split()
                length = len(token)
                if lengths[idx] <= time_steps:
                    trunc_len = min(length, time_steps)
                    for j in range(trunc_len):
                        j_prime = j + time_steps - trunc_len  # right-align sentences
                        # O(1) dict lookup with <unk>=1 fallback; replaces the
                        # original O(|vocab|) list membership test per token
                        # (flagged "VERY SLOW" in the original).  Equivalent
                        # because t2i's keys are exactly the vocab entries.
                        X[i_sent, j_prime] = t2i.get(token[j], 1)
                    i_sent += 1
                idx += 1
    return X
def create_h5py(data_dir, dataset, split, s_vocab=None, t_vocab=None,
                vocab_size=16384, time_steps=20):
    """Parse one corpus split and cache it as an h5 file.

    :param data_dir: root data directory
    :param dataset: 'europarl7' or 'un2000'
    :param split: 'train' or 'valid'
    :param s_vocab: source vocabulary; built from the data when None
    :param t_vocab: target vocabulary; built from the data when None
    :param vocab_size: vocabulary size used when building vocabularies
    :param time_steps: maximum sentence length kept
    :return: (s_vocab, t_vocab), or (None, None) when the h5 already exists
    """
    print("processing {} dataset - {}".format(dataset, split))
    # Dataset-specific basenames and sentence counts.
    # NOTE(review): an unrecognized `dataset` falls through with `basename`
    # undefined (NameError) -- confirm callers only pass these two names.
    if dataset == 'europarl7':
        basename = 'ep7_pc45'
        num_train = 900000
        num_valid = 2000
        max_sentence = 982178
    elif dataset == 'un2000':
        basename = 'un2000_pc34'
        num_train = 5200000
        num_valid = 2000
        max_sentence = 5259899
    # Source language is French, target is English.
    sourcefile = basename + '.fr'
    targetfile = basename + '.en'
    # if h5 data file already exists, do not recreate
    path = os.path.join(data_dir, dataset)
    processed_file = os.path.join(path, dataset + '-' + split + '.h5')
    if os.path.exists(processed_file):
        print("{} already exists, skipping".format(processed_file))
        return None, None
    source = os.path.join(path, sourcefile)
    target = os.path.join(path, targetfile)
    if s_vocab is not None:
        vocab_size = len(s_vocab)
    # if vocab is not given, create from dataset
    s_vocab = parse_vocab(source, vocab_size) if s_vocab is None else s_vocab
    t_vocab = parse_vocab(target, vocab_size) if t_vocab is None else t_vocab
    s_token_to_index, s_index_to_token = vocab_to_dicts(s_vocab)
    t_token_to_index, t_index_to_token = vocab_to_dicts(t_vocab)
    # source sentence lengths (shared by both sides: filtering is driven by
    # the source lengths only)
    lengths, s_num_short = get_lengths(source, split, time_steps,
                                       num_train, num_valid, max_sentence)
    # create data matrices
    X = create_data(source, time_steps, s_token_to_index, s_vocab, lengths,
                    split, s_num_short, num_train, num_valid, max_sentence)
    y = create_data(target, time_steps, t_token_to_index, t_vocab, lengths,
                    split, s_num_short, num_train, num_valid, max_sentence)
    # save parsed data
    print("Saving parsed data to {}".format(processed_file))
    with h5py.File(processed_file, 'w') as f:
        f.create_dataset("s_vocab", data=s_vocab)
        f.create_dataset("t_vocab", data=t_vocab)
        f.create_dataset("X", data=X)
        f.create_dataset("y", data=y)
    return s_vocab, t_vocab
# Script entry point: download and preprocess the NMT corpora.
if __name__ == "__main__":
    get_data()
| 38.754717 | 94 | 0.591529 |
d8d65a25726ecba4e1f09c149c777992e32a5cab | 1,281 | py | Python | examples/debug_converter.py | anthonykgross/ffmpeg-streams-manager | 417e48370f9454ac45ccac73f8742d065a0b00d6 | [
"MIT"
] | null | null | null | examples/debug_converter.py | anthonykgross/ffmpeg-streams-manager | 417e48370f9454ac45ccac73f8742d065a0b00d6 | [
"MIT"
] | null | null | null | examples/debug_converter.py | anthonykgross/ffmpeg-streams-manager | 417e48370f9454ac45ccac73f8742d065a0b00d6 | [
"MIT"
] | null | null | null | from ffmpeg_streams_manager import *
# Build inputs: the video plus two external subtitle track fixtures.
input1 = Input("../fixtures/sintel.mp4")
input2 = Input("../fixtures/en.srt")
input3 = Input("../fixtures/es.srt")
# Mux all inputs into a single Matroska output.
converter = Converter('output.mkv')
converter.add_input(input1)
converter.add_input(input2)
converter.add_input(input3)
# debug() only prints the planned stream mapping (shown below);
# uncomment run() to actually perform the conversion.
converter.debug()
# converter.run()
"""
Result :
{'language': 'und', 'map': 0, 'codec': 'h264'}
{'language': 'eng', 'map': 1, 'codec': 'aac'}
{'language': None, 'map': 0, 'codec': 'subrip'}
{'language': None, 'map': 0, 'codec': 'subrip'}
Input : sintel.mp4
Mapping :
{'language': 'und', 'map': 0, 'codec': 'h264'}
{'language': 'eng', 'map': 1, 'codec': 'aac'}
---- debug ----
Video streams :
{'language': 'und', 'map': 0, 'codec': 'h264'}
Audio streams :
{'language': 'eng', 'map': 1, 'codec': 'aac'}
Subtitle streams :
Input : en.srt
Mapping :
{'language': None, 'map': 0, 'codec': 'subrip'}
---- debug ----
Video streams :
Audio streams :
Subtitle streams :
{'language': None, 'map': 0, 'codec': 'subrip'}
Input : es.srt
Mapping :
{'language': None, 'map': 0, 'codec': 'subrip'}
---- debug ----
Video streams :
Audio streams :
Subtitle streams :
{'language': None, 'map': 0, 'codec': 'subrip'}
""" | 26.142857 | 51 | 0.589383 |
4e66810370ac85b76bcf1e5fc9092490d1fd53d7 | 2,922 | py | Python | setup.py | eumiro/osmnx | 8d9e6af04a380931f0426b6ac3ab8cc0d708cfff | [
"MIT"
] | null | null | null | setup.py | eumiro/osmnx | 8d9e6af04a380931f0426b6ac3ab8cc0d708cfff | [
"MIT"
] | null | null | null | setup.py | eumiro/osmnx | 8d9e6af04a380931f0426b6ac3ab8cc0d708cfff | [
"MIT"
] | null | null | null | """
OSMnx setup script.
See license in LICENSE.txt.
"""
import os
from setuptools import setup
# provide a long description using reStructuredText
LONG_DESCRIPTION = r"""
**OSMnx** is a Python package that lets you download spatial geometries and
model, project, visualize, and analyze real-world street networks from
OpenStreetMap's APIs. Users can download and model walkable, drivable, or
bikeable urban networks with a single line of Python code, and then easily
analyze and visualize them. You can just as easily download and work with
amenities/points of interest, building footprints, elevation data, street
bearings/orientations, speed/travel time, and network routing.
Citation info: Boeing, G. 2017. "`OSMnx: New Methods for Acquiring,
Constructing, Analyzing, and Visualizing Complex Street Networks`_."
*Computers, Environment and Urban Systems* 65, 126-139.
doi:10.1016/j.compenvurbsys.2017.05.004
Read the `docs`_ or see usage examples and demos on `GitHub`_.
.. _GitHub: https://github.com/gboeing/osmnx-examples
.. _docs: https://osmnx.readthedocs.io
.. _OSMnx\: New Methods for Acquiring, Constructing, Analyzing, and Visualizing Complex Street Networks: http://geoffboeing.com/publications/osmnx-complex-street-networks/
"""
# list of classifiers from the PyPI classifiers trove
CLASSIFIERS = [
    "Development Status :: 5 - Production/Stable",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
    "Intended Audience :: Science/Research",
    "Topic :: Scientific/Engineering :: GIS",
    "Topic :: Scientific/Engineering :: Visualization",
    "Topic :: Scientific/Engineering :: Physics",
    "Topic :: Scientific/Engineering :: Mathematics",
    "Topic :: Scientific/Engineering :: Information Analysis",
    "Programming Language :: Python",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.6",
    "Programming Language :: Python :: 3.7",
    "Programming Language :: Python :: 3.8",
    "Programming Language :: Python :: 3.9",
]
# short description shown on PyPI
DESC = (
    "Retrieve, model, analyze, and visualize OpenStreetMap street networks and other spatial data"
)
# only specify install_requires if not in RTD environment
# (Read the Docs builds the docs without installing the dependency stack)
if os.getenv("READTHEDOCS") == "True":
    INSTALL_REQUIRES = []
else:
    with open("requirements.txt") as f:
        INSTALL_REQUIRES = [line.strip() for line in f.readlines()]
# now call setup
setup(
    name="osmnx",
    version="1.0.0",
    description=DESC,
    long_description=LONG_DESCRIPTION,
    classifiers=CLASSIFIERS,
    url="https://github.com/gboeing/osmnx",
    author="Geoff Boeing",
    author_email="boeing@usc.edu",
    license="MIT",
    platforms="any",
    packages=["osmnx"],
    python_requires=">=3.6",
    install_requires=INSTALL_REQUIRES,
    # optional extras for interactive maps and spatial indexing backends
    extras_require={
        "folium": ["folium>=0.11"],
        "kdtree": ["scipy>=1.5"],
        "balltree": ["scikit-learn>=0.23"],
    },
)
| 34.785714 | 171 | 0.708077 |
f17aa680f344a7fbec8c88d91ac0ca797b73e011 | 333 | py | Python | head_first_python/set/vowels4.py | goxhaj/python | c4dd40d69062444361ca0cd81722c491b989a1cd | [
"Apache-2.0"
] | null | null | null | head_first_python/set/vowels4.py | goxhaj/python | c4dd40d69062444361ca0cd81722c491b989a1cd | [
"Apache-2.0"
] | null | null | null | head_first_python/set/vowels4.py | goxhaj/python | c4dd40d69062444361ca0cd81722c491b989a1cd | [
"Apache-2.0"
] | null | null | null | vowels = ['a', 'e', 'i', 'o', 'u']
word = input("Provide a word to search for vowels: ")
found = {}
found['a'] = 0
found['e'] = 0
found['i'] = 0
found['o'] = 0
found['u'] = 0
for letter in word:
if letter in vowels:
found[letter]+=1
for k, v in sorted(found.items()):
print (k, 'was found', v, ' times(s).')
| 22.2 | 53 | 0.528529 |
32146291c72aae31fa51014036152deb3a8a4fd7 | 23,035 | py | Python | awsume/awsumepy/default_plugins.py | icyfork/awsume | 524c667599b8bfba521f0397214bb363f1b706fa | [
"MIT"
] | null | null | null | awsume/awsumepy/default_plugins.py | icyfork/awsume | 524c667599b8bfba521f0397214bb363f1b706fa | [
"MIT"
] | null | null | null | awsume/awsumepy/default_plugins.py | icyfork/awsume | 524c667599b8bfba521f0397214bb363f1b706fa | [
"MIT"
] | null | null | null | import argparse
import configparser
import json
import os
import colorama
from . lib import exceptions
from . hookimpl import hookimpl
from .. import __data__
from ..autoawsume.process import kill
from . lib import aws as aws_lib
from . lib import aws_files as aws_files_lib
from . lib.logger import logger
from . lib.safe_print import safe_print
from . lib import config_management as config_lib
from . lib import profile as profile_lib
from . lib import cache as cache_lib
from . lib.profile import VALID_CREDENTIAL_SOURCES
from . lib.profile import get_role_chain, get_profile_name
def custom_duration_argument_type(string):
    """argparse type-converter for the --role-duration option.

    :param string: raw CLI token
    :return: the duration as an int when it lies in [0, 43200]
    :raises argparse.ArgumentTypeError: when the value is out of range
    """
    number = int(string)
    # BUGFIX: 43200 seconds (12 hours) is the documented maximum; the old
    # bound of 43201 contradicted the error message below.
    if 0 <= number <= 43200:
        return number
    raise argparse.ArgumentTypeError('Custom Duration must be between 0 and 43200')
@hookimpl(tryfirst=True)
def add_arguments(config: dict, parser: argparse.ArgumentParser):
    """Register awsume's core command-line options.

    Runs tryfirst so these options exist before other plugin hooks add
    their own.

    :param config: awsume configuration dict (unused here)
    :param parser: the argparse parser to populate
    """
    logger.info('Adding arguments')
    # --- general / informational flags ---
    parser.add_argument('-v', '--version',
        action='store_true',
        dest='version',
        help='Display the current version of awsume',
    )
    parser.add_argument('-o', '--output-profile',
        action='store',
        dest='output_profile',
        metavar='output_profile',
        help='A profile to output credentials to',
    )
    parser.add_argument('--clean',
        action='store_true',
        dest='clean',
        help='Clean expired output profiles',
    )
    # --- profile targeting ---
    parser.add_argument('profile_name',
        nargs='?',
        action='store',
        metavar='profile_name',
        help='The target profile name',
    )
    parser.add_argument('-r', '--refresh',
        action='store_true',
        dest='force_refresh',
        help='Force refresh credentials',
    )
    parser.add_argument('-s', '--show-commands',
        action='store_true',
        dest='show_commands',
        help='Show the commands to set the credentials',
    )
    parser.add_argument('-u', '--unset',
        action='store_true',
        dest='unset_variables',
        help='Unset your aws environment variables',
    )
    # --- autoawsume (background refresher) control ---
    parser.add_argument('-a', '--auto-refresh',
        action='store_true',
        dest='auto_refresh',
        help='Auto refresh credentials',
    )
    parser.add_argument('-k', '--kill-refresher',
        action='store_true',
        default=False,
        dest='kill',
        help='Kill autoawsume',
    )
    parser.add_argument('-l', '--list-profiles',
        nargs='?',
        action='store',
        default=None,
        const='list',
        choices=['more', 'list', None],
        metavar='more',
        dest='list_profiles',
        help='List profiles, "more" for detail (slow)',
    )
    parser.add_argument('--refresh-autocomplete',
        action='store_true',
        dest='refresh_autocomplete',
        help='Refresh all plugin autocomplete profiles',
    )
    # --- ad-hoc role assumption (no configured profile required) ---
    parser.add_argument('--role-arn',
        action='store',
        dest='role_arn',
        metavar='role_arn',
        help='Role ARN or <partition>:<account_id>:<role_name>',
    )
    parser.add_argument('--principal-arn',
        action='store',
        dest='principal_arn',
        metavar='principal_arn',
        help='Principal ARN or <partition>:<account_id>:<provider_name>',
    )
    parser.add_argument('--source-profile',
        action='store',
        dest='source_profile',
        metavar='source_profile',
        help='source_profile to use (role-arn only)',
    )
    parser.add_argument('--external-id',
        action='store',
        dest='external_id',
        metavar='external_id',
        help='External ID to pass to the assume_role',
    )
    parser.add_argument('--mfa-token',
        action='store',
        dest='mfa_token',
        metavar='mfa_token',
        help='Your mfa token',
    )
    parser.add_argument('--region',
        action='store',
        dest='region',
        metavar='region',
        help='The region you want to awsume into',
    )
    parser.add_argument('--session-name',
        action='store',
        dest='session_name',
        metavar='session_name',
        help='Set a custom role session name',
    )
    parser.add_argument('--role-duration',
        action='store',
        dest='role_duration',
        type=custom_duration_argument_type,
        metavar='role_duration',
        help='Seconds to get role creds for',
    )
    # saml and web-identity flows are mutually exclusive
    assume_role_method = parser.add_mutually_exclusive_group()
    assume_role_method.add_argument('--with-saml',
        action='store_true',
        dest='with_saml',
        help='Use saml (requires plugin)',
    )
    assume_role_method.add_argument('--with-web-identity',
        action='store_true',
        dest='with_web_identity',
        help='Use web identity (requires plugin)',
    )
    parser.add_argument('--json',
        action='store',
        dest='json',
        metavar='json',
        help='Use json credentials',
    )
    # --- alternate file locations and configuration ---
    parser.add_argument('--credentials-file',
        action='store',
        dest='credentials_file',
        metavar='credentials_file',
        help='Target a shared credentials file',
    )
    parser.add_argument('--config-file',
        action='store',
        dest='config_file',
        metavar='config_file',
        help='Target a config file',
    )
    parser.add_argument('--config',
        nargs='*',
        dest='config',
        action='store',
        metavar='option',
        help='Configure awsume',
    )
    parser.add_argument('--list-plugins',
        action='store_true',
        dest='list_plugins',
        help='List installed plugins',
    )
    # --- logging verbosity ---
    parser.add_argument('--info',
        action='store_true',
        dest='info',
        help='Print any info logs to stderr',
    )
    parser.add_argument('--debug',
        action='store_true',
        dest='debug',
        help='Print any debug logs to stderr',
    )
@hookimpl(tryfirst=True)
def post_add_arguments(config: dict, arguments: argparse.Namespace, parser: argparse.ArgumentParser):
    """Validate parsed arguments and handle early-exit actions.

    Handles the --clean/--version/--unset/--config/--kill shortcuts,
    validates saml flag combinations, expands short-hand role/principal
    ARNs, and derives ``arguments.target_profile_name``.

    :raises exceptions.ValidationException: invalid autoawsume combinations
    :raises exceptions.EarlyExit: when an early-exit action was performed
    """
    logger.debug('Post add arguments')
    logger.debug(json.dumps(vars(arguments)))
    if arguments.auto_refresh:
        if arguments.role_arn:
            raise exceptions.ValidationException('Cannot use autoawsume with a given role_arn')
        if arguments.json:
            raise exceptions.ValidationException('Cannot use autoawsume with json')
    if arguments.clean:
        _, credentials_file = aws_files_lib.get_aws_files(arguments, config)
        aws_files_lib.remove_expired_output_profiles(credentials_file)
        raise exceptions.EarlyExit()
    if arguments.version:
        logger.debug('Logging version')
        safe_print(__data__.version)
        raise exceptions.EarlyExit()
    if arguments.unset_variables:
        logger.debug('Unsetting environment variables')
        print('Unset', [])
        raise exceptions.EarlyExit()
    if type(arguments.config) is list:
        config_lib.handle_config(arguments.config)
        raise exceptions.EarlyExit()
    if arguments.kill:
        kill(arguments)
        raise exceptions.EarlyExit()

    if arguments.with_saml:
        if bool(arguments.role_arn) is not bool(arguments.principal_arn):
            parser.error('both or neither --principal-arn and --role-arn must be specified with saml')
    if not arguments.with_saml and arguments.principal_arn:
        parser.error('--principal-arn can only be specified with --with-saml')

    if arguments.role_arn and not arguments.role_arn.startswith('arn:'):
        logger.debug('Using short-hand role arn syntax')
        parts = arguments.role_arn.split(':')
        if len(parts) == 2:
            partition = 'aws'
            account_id = parts[0]
            role_name = parts[1]
        elif len(parts) == 3:
            partition = parts[0]
            account_id = parts[1]
            role_name = parts[2]
        else:
            parser.error('--role-arn must be a valid role arn or follow the format "<partition>:<account_id>:<role_name>"')
        if not account_id.isnumeric() or len(account_id) != 12:
            parser.error('--role-arn account id must be valid numeric account id of length 12')
        arguments.role_arn = 'arn:{}:iam::{}:role/{}'.format(partition, account_id, role_name)

    if arguments.principal_arn and not arguments.principal_arn.startswith('arn:'):
        logger.debug('Using short-hand principal arn syntax')
        parts = arguments.principal_arn.split(':')
        if len(parts) == 2:
            partition = 'aws'
            account_id = parts[0]
            provider_name = parts[1]
        elif len(parts) == 3:
            partition = parts[0]
            account_id = parts[1]
            provider_name = parts[2]
        else:
            parser.error('--principal-arn must be a valid principal arn or follow the format "<partition>:<account_id>:<provider_name>"')
        # BUGFIX: validate the account id, not the provider name -- the old
        # check required the (free-form) provider name to be a 12-digit
        # number, making short-hand principal ARNs unusable.
        if not account_id.isnumeric() or len(account_id) != 12:
            parser.error('--principal-arn account id must be valid numeric account id of length 12')
        # BUGFIX: SAML principals are identity providers, so the IAM
        # resource type is 'saml-provider', not 'role'.
        arguments.principal_arn = 'arn:{}:iam::{}:saml-provider/{}'.format(partition, account_id, provider_name)

    if not arguments.profile_name:
        if arguments.role_arn:
            logger.debug('Role arn passed, target profile name will be role_arn')
            arguments.target_profile_name = arguments.role_arn
        else:
            logger.debug('No profile name passed, target profile name will be "default"')
            arguments.target_profile_name = 'default'
    else:
        arguments.target_profile_name = arguments.profile_name
@hookimpl(tryfirst=True)
def collect_aws_profiles(config: dict, arguments: argparse.Namespace, credentials_file: str, config_file: str):
    """Merge profiles from the shared credentials and config files.

    Config-file sections are named 'profile <name>'; the prefix is stripped
    and config values are merged over any same-named credentials entry.

    :return: dict mapping profile name -> merged profile values
    """
    logger.info('Collecting AWS profiles')
    profiles = aws_files_lib.read_aws_file(credentials_file)
    for section_name, section in aws_files_lib.read_aws_file(config_file).items():
        short_name = section_name.replace('profile ', '')
        profiles.setdefault(short_name, {}).update(section)
    logger.debug('Collected {} profiles'.format(len(profiles)))
    return profiles
@hookimpl(tryfirst=True)
def post_collect_aws_profiles(config: dict, arguments: argparse.Namespace, profiles: dict):
    """Handle --list-profiles once profiles are gathered, then exit early."""
    logger.info('Post collect AWS profiles')
    if not arguments.list_profiles:
        return
    logger.debug('Listing profiles')
    verbose = arguments.list_profiles == 'more'
    profile_lib.list_profile_data(profiles, verbose)
    raise exceptions.EarlyExit()
def assume_role_from_cli(config: dict, arguments: argparse.Namespace, profiles: dict):
    """Assume a role specified directly via --role-arn on the CLI.

    Without --source-profile, the credentials from the current environment
    are used; otherwise the named profile's credentials are used, with a
    get-session-token hop when MFA is required and no custom duration was
    requested.

    :return: the assumed-role session credentials dict
    :raises exceptions.ProfileNotFoundError: --source-profile is unknown
    """
    region = profile_lib.get_region(profiles, arguments, config, ignore_config=True, ignore_default=True)
    logger.info('Using role_arn from the CLI')
    role_duration = arguments.role_duration or int(config.get('role-duration', 0))
    session_name = arguments.session_name or 'awsume-cli-role'
    logger.debug('Session name: {}'.format(session_name))
    if not arguments.source_profile:
        logger.debug('Using current credentials to assume role')
        role_session = aws_lib.assume_role({}, arguments.role_arn, session_name, region=region, external_id=arguments.external_id, role_duration=role_duration)
    else:
        logger.debug('Using the source_profile from the cli to call assume_role')
        source_profile = profiles.get(arguments.source_profile)
        if not source_profile:
            raise exceptions.ProfileNotFoundError(profile_name=arguments.source_profile)
        source_credentials = profile_lib.profile_to_credentials(source_profile)
        mfa_serial = source_profile.get('mfa_serial')
        if role_duration:
            logger.debug('Using custom role duration')
            if mfa_serial:
                logger.debug('Requires MFA')
                # NOTE(review): with a custom duration the get-session-token
                # hop is skipped and MFA is passed straight to assume_role --
                # presumably because AWS caps chained-session durations;
                # confirm against AWS STS docs.
                logger.debug('Using custom role duration for role that needs mfa_serial, skipping get-session-token call')
                source_session = source_credentials
                role_session = aws_lib.assume_role(
                    source_session,
                    arguments.role_arn,
                    session_name,
                    region=region,
                    external_id=arguments.external_id,
                    role_duration=role_duration,
                    mfa_serial=mfa_serial,
                    mfa_token=arguments.mfa_token,
                )
            else:
                logger.debug('MFA not needed, assuming role from with profile creds')
                role_session = aws_lib.assume_role(
                    source_credentials,
                    arguments.role_arn,
                    session_name,
                    region=region,
                    external_id=arguments.external_id,
                    role_duration=role_duration,
                )
        else:
            logger.debug('Using default role duration')
            if mfa_serial:
                logger.debug('MFA required')
                # First mint a (cacheable) session token with MFA, then use
                # it to assume the role.
                source_session = aws_lib.get_session_token(
                    source_credentials,
                    region=profile_lib.get_region(profiles, arguments, config),
                    mfa_serial=mfa_serial,
                    mfa_token=arguments.mfa_token,
                    ignore_cache=arguments.force_refresh,
                    duration_seconds=config.get('debug', {}).get('session_token_duration'),
                )
            else:
                logger.debug('MFA not required')
                source_session = source_credentials
            role_session = aws_lib.assume_role(
                source_session,
                arguments.role_arn,
                session_name,
                region=region,
                external_id=arguments.external_id,
                role_duration=role_duration,
            )
    return role_session
def get_assume_role_credentials(config: dict, arguments: argparse.Namespace, profiles: dict, target_profile: dict, role_duration: int, source_credentials: dict, target_profile_name: str):
    """Assume the target profile's role without requiring MFA.

    Resolves source credentials (from the chain, or from the configured
    source profile when none were handed down), assumes the role, and
    propagates the source session's expiration onto the role session.

    :param config: awsume configuration mapping
    :param arguments: parsed CLI arguments
    :param profiles: all known AWS profiles
    :param target_profile: profile whose role_arn will be assumed
    :param role_duration: requested role session duration in seconds
    :param source_credentials: optional pre-resolved source credentials
    :param target_profile_name: name of the target profile
    :return: the assumed-role session dict
    """
    logger.info('Getting assume role credentials')
    region = profile_lib.get_region(profiles, arguments, config)
    external_id = profile_lib.get_external_id(arguments, target_profile)
    if not source_credentials:
        # No credentials handed down the chain: derive them from the
        # profile named as this profile's source_profile.
        source_profile = profile_lib.get_source_profile(profiles, target_profile_name)
        source_credentials = profile_lib.profile_to_credentials(source_profile)
    role_session = aws_lib.assume_role(
        source_credentials,
        target_profile.get('role_arn'),
        arguments.session_name or target_profile_name,
        region=region,
        external_id=external_id,
        role_duration=role_duration,
    )
    # Record when the *source* credentials expire, preferring an explicit
    # SourceExpiration over a plain Expiration.
    for expiration_key in ('SourceExpiration', 'Expiration'):
        if expiration_key in source_credentials:
            role_session['SourceExpiration'] = source_credentials[expiration_key]
            break
    return role_session
def get_assume_role_credentials_mfa_required(config: dict, arguments: argparse.Namespace, profiles: dict, target_profile: dict, role_duration: int, source_credentials: dict, target_profile_name: str):
    """Assume a role whose profile requires MFA, returning both sessions.

    Obtains a short-lived source session (via get-session-token when a plain
    source profile is configured, or the current environment when a supported
    credential_source is set) and uses it to assume the target profile's role.

    :param config: awsume configuration mapping
    :param arguments: parsed CLI arguments
    :param profiles: all known AWS profiles
    :param target_profile: profile whose role_arn will be assumed
    :param role_duration: requested role session duration in seconds
    :param source_credentials: optional pre-resolved source credentials
    :param target_profile_name: name of the target profile
    :return: tuple of (source_session, role_session)
    """
    logger.info('Getting assume role credentials MFA required')
    region = profile_lib.get_region(profiles, arguments, config)
    mfa_serial = profile_lib.get_mfa_serial(profiles, target_profile_name)
    external_id = profile_lib.get_external_id(arguments, target_profile)
    source_profile = profile_lib.get_source_profile(profiles, target_profile_name)
    if source_profile:
        if 'role_arn' not in source_profile:
            logger.debug('Calling get_session_token to assume role with')
            if not source_credentials:
                source_credentials = profile_lib.profile_to_credentials(source_profile)
            source_session = aws_lib.get_session_token(
                source_credentials,
                region=region,
                mfa_serial=mfa_serial,
                mfa_token=arguments.mfa_token,
                ignore_cache=arguments.force_refresh,
                duration_seconds=config.get('debug', {}).get('session_token_duration'),
            )
        else:
            # Source profile is itself a role: its resolved credentials were
            # handed down the chain and are used directly.
            source_session = source_credentials
    elif 'credential_source' in target_profile and target_profile['credential_source'] in VALID_CREDENTIAL_SOURCES:
        logger.debug('Using current environment to assume role')
        source_session = {}
        if arguments.auto_refresh and (os.environ.get('AWS_PROFILE', '').startswith('autoawsume-') or profiles.get(os.getenv('AWS_PROFILE'), {}).get('autoawsume')):
            # Drop the autoawsume profile from the environment so boto does not
            # resolve it recursively while assuming the role.
            # BUG FIX: pop with a default so a missing AWS_DEFAULT_PROFILE
            # (which is not guaranteed by the condition above) cannot raise
            # KeyError.
            os.environ.pop('AWS_PROFILE', None)
            os.environ.pop('AWS_DEFAULT_PROFILE', None)
    # NOTE(review): if neither branch above runs (no source profile and no
    # supported credential_source), source_session is undefined here; upstream
    # profile validation presumably guarantees one branch is taken -- confirm.
    role_session = aws_lib.assume_role(
        source_session,
        target_profile.get('role_arn'),
        arguments.session_name or target_profile_name,
        region=region,
        external_id=external_id,
        role_duration=role_duration,
    )
    if 'SourceExpiration' in source_session:
        role_session['SourceExpiration'] = source_session['SourceExpiration']
    elif 'Expiration' in source_session:
        role_session['SourceExpiration'] = source_session['Expiration']
    return source_session, role_session
def get_assume_role_credentials_mfa_required_large_custom_duration(config: dict, arguments: argparse.Namespace, profiles: dict, target_profile: dict, role_duration: int, target_profile_name: str):
    """Assume an MFA-protected role with a custom duration above one hour.

    Temporary (get-session-token) credentials cannot back role sessions longer
    than an hour, so the MFA token is passed straight to the assume-role call
    using the source profile's long-lived credentials.

    :raises exceptions.ValidationException: when autoawsume is combined with a
        role duration over one hour
    :return: the assumed-role session dict
    """
    logger.info('Getting assume role credentials MFA required, large custom duration')
    if arguments.auto_refresh and role_duration > 3600:
        raise exceptions.ValidationException('Cannot use autoawsume with custom role duration of more than 1 hour')
    logger.debug('Skipping the get_session_token call, temp creds cannot be used for custom role duration')
    region = profile_lib.get_region(profiles, arguments, config)
    mfa_serial = profile_lib.get_mfa_serial(profiles, target_profile_name)
    external_id = profile_lib.get_external_id(arguments, target_profile)
    source_profile = profile_lib.get_source_profile(profiles, target_profile_name)
    source_session = profile_lib.profile_to_credentials(source_profile)
    assume_role_kwargs = {
        'region': region,
        'external_id': external_id,
        'role_duration': role_duration,
        'mfa_serial': mfa_serial,
        'mfa_token': arguments.mfa_token,
    }
    return aws_lib.assume_role(
        source_session,
        target_profile.get('role_arn'),
        arguments.session_name or target_profile_name,
        **assume_role_kwargs,
    )
def get_credentials_no_mfa(config: dict, arguments: argparse.Namespace, profiles: dict, target_profile: dict):
    """Return the target profile's long-lived credentials plus the resolved region.

    :return: credentials dict for the profile with a 'Region' key attached
    """
    logger.info('Getting credentials MFA not required')
    session = profile_lib.profile_to_credentials(target_profile)
    session['Region'] = profile_lib.get_region(profiles, arguments, config)
    return session
def get_credentials_from_credential_source(config: dict, arguments: argparse.Namespace, profiles: dict, target_profile: dict, target_profile_name: str):
    """Build a minimal session that defers to the profile's credential_source.

    No credentials are resolved here: only the profile name and region are
    returned, so downstream tooling can let the SDK pull credentials from the
    configured source.
    """
    logger.info('Getting credentials from credential_source')
    return {
        'AwsProfile': target_profile_name,
        'Region': profile_lib.get_region(profiles, arguments, config),
    }
def get_session_token_credentials(config: dict, arguments: argparse.Namespace, profiles: dict, target_profile: dict, target_profile_name: str):
    """Call get-session-token for the target profile (handles its MFA device).

    :return: the session-token credentials dict
    """
    logger.info('Getting session token credentials')
    return aws_lib.get_session_token(
        profile_lib.profile_to_credentials(target_profile),
        region=profile_lib.get_region(profiles, arguments, config),
        mfa_serial=profile_lib.get_mfa_serial(profiles, target_profile_name),
        mfa_token=arguments.mfa_token,
        ignore_cache=arguments.force_refresh,
        duration_seconds=config.get('debug', {}).get('session_token_duration'),
    )
def get_credentials_handler(config: dict, arguments: argparse.Namespace, profiles: dict, profile_name: str, credentials: dict) -> dict:
    """Resolve credentials for a single link of the role chain.

    Dispatches to the appropriate strategy: an ad-hoc role ARN from the CLI,
    assume-role with or without MFA (with special handling for custom role
    durations over one hour), a plain session token, a credential_source, or
    the profile's long-lived credentials.

    :param credentials: credentials resolved for the previous chain link, if any
    :return: the role session when a role was assumed, else the user session
    """
    credentials = credentials or {}
    logger.info('Getting credentials')
    user_session, role_session = None, None
    if arguments.role_arn:
        role_session = assume_role_from_cli(config, arguments, profiles)
    else:
        profile_lib.validate_profile(config, arguments, profiles, profile_name)
        target_profile = profiles.get(profile_name)
        mfa_serial = profile_lib.get_mfa_serial(profiles, profile_name)
        role_duration = profile_lib.get_role_duration(config, arguments, target_profile)
        if 'role_arn' in target_profile:
            logger.debug('assume_role call needed')
            # When explicit credentials were handed down the chain, MFA has
            # already been satisfied and is not prompted for again.
            if mfa_serial and not credentials:
                if role_duration > 3600:
                    # Temp creds cannot back a role session longer than an hour.
                    role_session = get_assume_role_credentials_mfa_required_large_custom_duration(config, arguments, profiles, target_profile, role_duration, profile_name)
                else:
                    user_session, role_session = get_assume_role_credentials_mfa_required(config, arguments, profiles, target_profile, role_duration, credentials, profile_name)
            else:
                role_session = get_assume_role_credentials(config, arguments, profiles, target_profile, role_duration, credentials, profile_name)
        elif mfa_serial:
            user_session = get_session_token_credentials(config, arguments, profiles, target_profile, profile_name)
        elif 'credential_source' in target_profile:
            user_session = get_credentials_from_credential_source(config, arguments, profiles, target_profile, profile_name)
        else:
            user_session = get_credentials_no_mfa(config, arguments, profiles, target_profile)
    if config.get('is_interactive'):
        if user_session and user_session.get('Expiration'):
            safe_print('Session token will expire at {}'.format(profile_lib.parse_time(user_session['Expiration'])), colorama.Fore.GREEN)
        if role_session and role_session.get('Expiration'):
            safe_print('[{}] Role credentials will expire {}'.format(profile_name, profile_lib.parse_time(role_session['Expiration'])), colorama.Fore.GREEN)
    return role_session or user_session
@hookimpl(tryfirst=True)
def get_credentials(config: dict, arguments: argparse.Namespace, profiles: dict) -> dict:
    """Plugin hook: walk the role chain and return the final credentials.

    Each link of the chain is resolved in order, feeding the credentials of
    one link into the resolution of the next.
    """
    target_profile_name = (
        arguments.role_arn
        if arguments.role_arn
        else get_profile_name(config, profiles, arguments.target_profile_name)
    )
    credentials = None
    for profile_name in get_role_chain(profiles, target_profile_name):
        credentials = get_credentials_handler(config=config, arguments=arguments, profiles=profiles, profile_name=profile_name, credentials=credentials)
    return credentials
| 42.188645 | 200 | 0.671587 |
114dac7083f6c1a189a68e4a3c6f234587513895 | 8,319 | py | Python | rrl.py | johnnyp2587/rrl-cpp | da2a72420f3cb80ebf7e77f727110ea21619e0af | [
"MIT"
] | 2 | 2020-10-05T05:04:32.000Z | 2021-03-08T07:24:47.000Z | rrl.py | johnnyp2587/rrl-cpp | da2a72420f3cb80ebf7e77f727110ea21619e0af | [
"MIT"
] | null | null | null | rrl.py | johnnyp2587/rrl-cpp | da2a72420f3cb80ebf7e77f727110ea21619e0af | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import time
import pickle
import numpy as np
import pandas as pd
from datetime import datetime as dt
import matplotlib.pyplot as plt
def _plot_comparison(t, prices, baseline, optimized, legend_loc, fname):
    """Render the 3-panel price / signal / cumulative-reward comparison figure.

    :param t: x-axis time values
    :param prices: price series for the top panel
    :param baseline: TradingRRL agent evaluated with initial weights
    :param optimized: TradingRRL agent evaluated with optimized weights
    :param legend_loc: matplotlib legend location string
    :param fname: output PNG file name
    """
    fig, ax = plt.subplots(nrows=3, figsize=(15, 10))
    ax[0].plot(t, prices)
    ax[0].set_xlabel("time")
    ax[0].set_ylabel("USDJPY")
    ax[0].grid(True)
    ax[1].plot(t, baseline.F[:optimized.T], color="blue", label="With initial weights")
    ax[1].plot(t, optimized.F[:optimized.T], color="red", label="With optimized weights")
    ax[1].set_xlabel("time")
    ax[1].set_ylabel("F")
    ax[1].legend(loc=legend_loc)
    ax[1].grid(True)
    ax[2].plot(t, baseline.sumR, color="blue", label="With initial weights")
    ax[2].plot(t, optimized.sumR, color="red", label="With optimized weights")
    ax[2].set_xlabel("time")
    ax[2].set_ylabel("Sum of reward[yen]")
    ax[2].legend(loc=legend_loc)
    ax[2].grid(True)
    plt.savefig(fname, dpi=300)
    fig.clear()


def main():
    """Train a TradingRRL agent on USDJPY data, then evaluate the optimized
    weights on the following term, saving comparison plots as PNG files."""
    fname = "../data/USDJPY30.csv"
    init_t = 6000
    T = 1000
    M = 200
    mu = 10000
    sigma = 0.04
    rho = 1.0
    n_epoch = 10000
    # RRL agent with initial weight.
    ini_rrl = TradingRRL(T, M, init_t, mu, sigma, rho, n_epoch)
    ini_rrl.load_csv(fname)
    ini_rrl.set_t_p_r()
    ini_rrl.calc_dSdw()
    # RRL agent for training.
    rrl = TradingRRL(T, M, init_t, mu, sigma, rho, n_epoch)
    rrl.all_t = ini_rrl.all_t
    rrl.all_p = ini_rrl.all_p
    rrl.set_t_p_r()
    rrl.fit()
    # Training curve over the initial term T.
    plt.plot(range(len(rrl.epoch_S)), rrl.epoch_S)
    plt.title("Sharp's ratio optimization")
    plt.xlabel("Epoch times")
    plt.ylabel("Sharp's ratio")
    plt.grid(True)
    plt.savefig("sharp's ratio optimization.png", dpi=300)
    # BUG FIX: original code had `plt.close` without parentheses, which is a
    # no-op attribute access and never actually closed the figure.
    plt.close()
    t = np.linspace(1, rrl.T, rrl.T)[::-1]
    _plot_comparison(t, rrl.p[:rrl.T], ini_rrl, rrl, "upper left", "rrl_train.png")
    # Prediction for the next term T with the optimized weights.
    # Baseline agent with initial weights.
    ini_rrl_f = TradingRRL(T, M, init_t - T, mu, sigma, rho, n_epoch)
    ini_rrl_f.all_t = ini_rrl.all_t
    ini_rrl_f.all_p = ini_rrl.all_p
    ini_rrl_f.set_t_p_r()
    ini_rrl_f.calc_dSdw()
    # Agent evaluated with the trained weights.
    rrl_f = TradingRRL(T, M, init_t - T, mu, sigma, rho, n_epoch)
    rrl_f.all_t = ini_rrl.all_t
    rrl_f.all_p = ini_rrl.all_p
    rrl_f.set_t_p_r()
    rrl_f.w = rrl.w
    rrl_f.calc_dSdw()
    t_f = np.linspace(rrl.T + 1, rrl.T + rrl.T, rrl.T)[::-1]
    _plot_comparison(t_f, rrl_f.p[:rrl_f.T], ini_rrl_f, rrl_f, "lower right", "rrl_prediction.png")
class TradingRRL(object):
    """Recurrent reinforcement learning (RRL) trader.

    Learns a linear-tanh trading signal F_t from the last M returns (plus a
    bias and the previous signal) by directly maximizing the Sharpe ratio of
    the realized trading rewards via gradient ascent.
    """

    def __init__(self, T=1000, M=200, init_t=10000, mu=10000, sigma=0.04, rho=1.0, n_epoch=10000):
        """
        :param T: number of time steps used for training
        :param M: number of lagged returns fed into the signal
        :param init_t: offset into the loaded price series
        :param mu: position size (units traded)
        :param sigma: transaction cost per unit traded
        :param rho: learning rate for the gradient ascent
        :param n_epoch: number of training epochs
        """
        self.T = T
        self.M = M
        self.init_t = init_t
        self.mu = mu
        self.sigma = sigma
        self.rho = rho
        self.all_t = None          # full timestamp series (set by load_csv)
        self.all_p = None          # full price series (set by load_csv)
        self.t = None              # working timestamp window
        self.p = None              # working price window
        self.r = None              # returns (negative diff of p; series is reversed)
        self.x = np.zeros([T, M+2])    # feature vectors: bias, M returns, previous F
        self.F = np.zeros(T+1)         # trading signal in [-1, 1]
        self.R = np.zeros(T)           # per-step rewards
        self.w = np.ones(M+2)          # current weights
        self.w_opt = np.ones(M+2)      # best weights seen so far
        self.epoch_S = np.empty(0)     # Sharpe ratio per training epoch
        self.n_epoch = n_epoch
        self.progress_period = 100     # epochs between progress printouts
        self.q_threshold = 0.7         # dead zone for quantized positions

    def load_csv(self, fname):
        """Load a MetaTrader-style CSV (date, time, OHLC...) and store the
        reversed time/price series (newest first)."""
        tmp = pd.read_csv(fname, header=None)
        tmp_tstr = tmp[0] + " " + tmp[1]
        tmp_t = [dt.strptime(tmp_tstr[i], '%Y.%m.%d %H:%M') for i in range(len(tmp_tstr))]
        tmp_p = list(tmp[5])
        self.all_t = np.array(tmp_t[::-1])
        self.all_p = np.array(tmp_p[::-1])

    def quant(self, f):
        """Quantize a signal to {-1, 0, +1}, zeroing values below q_threshold."""
        fc = f.copy()
        fc[np.where(np.abs(fc) < self.q_threshold)] = 0
        return np.sign(fc)

    def set_t_p_r(self):
        """Slice the working window of T+M+1 timestamps/prices and compute returns."""
        self.t = self.all_t[self.init_t:self.init_t+self.T+self.M+1]
        self.p = self.all_p[self.init_t:self.init_t+self.T+self.M+1]
        # Series is stored newest-first, so the return at index i is p[i]-p[i+1]
        # expressed as -diff.
        self.r = -np.diff(self.p)

    def set_x_F(self):
        """Build feature vectors x[i] and evaluate the signal F[i] = tanh(w.x[i]),
        iterating from oldest to newest so F[i+1] is available as a feature."""
        for i in range(self.T-1, -1, -1):
            self.x[i] = np.zeros(self.M+2)
            self.x[i][0] = 1.0                     # bias term
            self.x[i][self.M+2-1] = self.F[i+1]    # previous signal (recurrence)
            for j in range(1, self.M+2-1, 1):
                self.x[i][j] = self.r[i+j-1]       # lagged returns
            self.F[i] = np.tanh(np.dot(self.w, self.x[i]))

    def calc_R(self):
        """Reward: position * return minus transaction cost on position changes."""
        self.R = self.mu * (self.F[1:] * self.r[:self.T] - self.sigma * np.abs(-np.diff(self.F)))

    def calc_sumR(self):
        """Cumulative reward and cumulative squared reward (oldest to newest)."""
        self.sumR = np.cumsum(self.R[::-1])[::-1]
        self.sumR2 = np.cumsum((self.R**2)[::-1])[::-1]

    def calc_dSdw(self):
        """Evaluate the Sharpe ratio S and its gradient dS/dw via the RRL
        recursive chain rule (Moody & Saffell)."""
        self.set_x_F()
        self.calc_R()
        self.calc_sumR()
        self.A = self.sumR[0] / self.T     # mean reward
        self.B = self.sumR2[0] / self.T    # mean squared reward
        self.S = self.A / np.sqrt(self.B - self.A**2)
        self.dSdA = self.S * (1 + self.S**2) / self.A
        self.dSdB = -self.S**3 / 2 / self.A**2
        self.dAdR = 1.0 / self.T
        self.dBdR = 2.0 / self.T * self.R
        self.dRdF = -self.mu * self.sigma * np.sign(-np.diff(self.F))
        self.dRdFp = self.mu * self.r[:self.T] + self.mu * self.sigma * np.sign(-np.diff(self.F))
        self.dFdw = np.zeros(self.M+2)
        self.dFpdw = np.zeros(self.M+2)
        self.dSdw = np.zeros(self.M+2)
        for i in range(self.T-1, -1, -1):
            if i != self.T-1:
                self.dFpdw = self.dFdw.copy()
            # Recurrent gradient: F depends on the previous step's F through x.
            self.dFdw = (1 - self.F[i]**2) * (self.x[i] + self.w[self.M+2-1] * self.dFpdw)
            self.dSdw += (self.dSdA * self.dAdR + self.dSdB * self.dBdR[i]) * (self.dRdF[i] * self.dFdw + self.dRdFp[i] * self.dFpdw)

    def update_w(self):
        """One gradient-ascent step on the weights."""
        self.w += self.rho * self.dSdw

    def fit(self):
        """Maximize the Sharpe ratio by repeated gradient ascent, keeping the
        best weights seen across epochs."""
        pre_epoch_times = len(self.epoch_S)
        self.calc_dSdw()
        # Message fix: the original printed "sharp's"/"Shape's ratio".
        print("Epoch loop start. Initial Sharpe's ratio is " + str(self.S) + ".")
        self.S_opt = self.S
        # BUG FIX: time.clock() was deprecated since 3.3 and removed in
        # Python 3.8; perf_counter() is the documented replacement.
        tic = time.perf_counter()
        for e_index in range(self.n_epoch):
            self.calc_dSdw()
            if self.S > self.S_opt:
                self.S_opt = self.S
                self.w_opt = self.w.copy()
            self.epoch_S = np.append(self.epoch_S, self.S)
            self.update_w()
            if e_index % self.progress_period == self.progress_period-1:
                toc = time.perf_counter()
                print("Epoch: " + str(e_index + pre_epoch_times + 1) + "/" + str(self.n_epoch + pre_epoch_times) + ". Sharpe's ratio: " + str(self.S) + ". Elapsed time: " + str(toc-tic) + " sec.")
        toc = time.perf_counter()
        print("Epoch: " + str(e_index + pre_epoch_times + 1) + "/" + str(self.n_epoch + pre_epoch_times) + ". Sharpe's ratio: " + str(self.S) + ". Elapsed time: " + str(toc-tic) + " sec.")
        # Restore the best weights and re-evaluate with them.
        self.w = self.w_opt.copy()
        self.calc_dSdw()
        print("Epoch loop end. Optimized Sharpe's ratio is " + str(self.S_opt) + ".")

    def save_weight(self):
        """Persist the current weights and the per-epoch Sharpe ratios to CSV."""
        pd.DataFrame(self.w).to_csv("w.csv", header=False, index=False)
        pd.DataFrame(self.epoch_S).to_csv("epoch_S.csv", header=False, index=False)

    def load_weight(self):
        """Load weights previously written by save_weight()."""
        tmp = pd.read_csv("w.csv", header=None)
        self.w = tmp.T.values[0]
def plot_hist(n_tick, R):
    """Show a bar-chart histogram of the rewards R using n_tick equal bins.

    Each bin i covers the half-open interval
    (min(R) + (i - 0.5) * width, min(R) + (i + 0.5) * width].
    """
    lo = min(R)
    width = (max(R) - lo) / n_tick
    centers = []
    counts = []
    values = np.array(R)
    for i in range(n_tick):
        lower = lo - width * 0.5 + i * width
        upper = lo + width * 0.5 + i * width
        centers.append(lo + i * width)
        counts.append(int(np.count_nonzero((lower < values) & (values <= upper))))
    plt.bar(centers, counts, width=width)
    plt.grid()
    plt.show()
# Script entry point: run the full train-and-predict workflow when executed directly.
if __name__ == "__main__":
    main()
| 34.953782 | 194 | 0.566655 |
014693251ad19ad0dd0293c84414eb78069ab003 | 980 | py | Python | kubernetes/test/test_v1_api_service_status.py | reymont/python | 02a3a31c630c305527b328af49724f348fbdae15 | [
"Apache-2.0"
] | 1 | 2018-10-20T19:37:57.000Z | 2018-10-20T19:37:57.000Z | kubernetes/test/test_v1_api_service_status.py | reymont/python | 02a3a31c630c305527b328af49724f348fbdae15 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_api_service_status.py | reymont/python | 02a3a31c630c305527b328af49724f348fbdae15 | [
"Apache-2.0"
] | 2 | 2018-07-27T19:39:34.000Z | 2020-12-25T02:48:27.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.11.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_api_service_status import V1APIServiceStatus
class TestV1APIServiceStatus(unittest.TestCase):
    """Unit-test stubs for the generated V1APIServiceStatus model."""

    def setUp(self):
        """No fixtures are required for these stub tests."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testV1APIServiceStatus(self):
        """
        Test V1APIServiceStatus

        FIXME: construct the object with mandatory attributes using example
        values, e.g.:
        model = kubernetes.client.models.v1_api_service_status.V1APIServiceStatus()
        """
        pass
# Allow the test module to be executed directly via the unittest runner.
if __name__ == '__main__':
    unittest.main()
| 21.777778 | 105 | 0.714286 |
5b9c1518912b62dca902075eb3a10dd13b88d82a | 13,785 | py | Python | sdk/python/pulumi_azure_native/maintenance/configuration_assignment.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/maintenance/configuration_assignment.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/maintenance/configuration_assignment.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = ['ConfigurationAssignmentArgs', 'ConfigurationAssignment']
@pulumi.input_type
class ConfigurationAssignmentArgs:
    def __init__(__self__, *,
                 provider_name: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 resource_name: pulumi.Input[str],
                 resource_type: pulumi.Input[str],
                 configuration_assignment_name: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 maintenance_configuration_id: Optional[pulumi.Input[str]] = None,
                 resource_id: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a ConfigurationAssignment resource.
        :param pulumi.Input[str] provider_name: Resource provider name
        :param pulumi.Input[str] resource_group_name: Resource group name
        :param pulumi.Input[str] resource_name: Resource identifier
        :param pulumi.Input[str] resource_type: Resource type
        :param pulumi.Input[str] configuration_assignment_name: Configuration assignment name
        :param pulumi.Input[str] location: Location of the resource
        :param pulumi.Input[str] maintenance_configuration_id: The maintenance configuration Id
        :param pulumi.Input[str] resource_id: The unique resourceId
        """
        # Required properties are always recorded.
        pulumi.set(__self__, "provider_name", provider_name)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "resource_name", resource_name)
        pulumi.set(__self__, "resource_type", resource_type)
        # Optional properties are only recorded when explicitly provided.
        optional_props = {
            "configuration_assignment_name": configuration_assignment_name,
            "location": location,
            "maintenance_configuration_id": maintenance_configuration_id,
            "resource_id": resource_id,
        }
        for prop_name, prop_value in optional_props.items():
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter(name="providerName")
    def provider_name(self) -> pulumi.Input[str]:
        """
        Resource provider name
        """
        return pulumi.get(self, "provider_name")

    @provider_name.setter
    def provider_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "provider_name", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        Resource group name
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="resourceName")
    def resource_name(self) -> pulumi.Input[str]:
        """
        Resource identifier
        """
        return pulumi.get(self, "resource_name")

    @resource_name.setter
    def resource_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_name", value)

    @property
    @pulumi.getter(name="resourceType")
    def resource_type(self) -> pulumi.Input[str]:
        """
        Resource type
        """
        return pulumi.get(self, "resource_type")

    @resource_type.setter
    def resource_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_type", value)

    @property
    @pulumi.getter(name="configurationAssignmentName")
    def configuration_assignment_name(self) -> Optional[pulumi.Input[str]]:
        """
        Configuration assignment name
        """
        return pulumi.get(self, "configuration_assignment_name")

    @configuration_assignment_name.setter
    def configuration_assignment_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "configuration_assignment_name", value)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        Location of the resource
        """
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)

    @property
    @pulumi.getter(name="maintenanceConfigurationId")
    def maintenance_configuration_id(self) -> Optional[pulumi.Input[str]]:
        """
        The maintenance configuration Id
        """
        return pulumi.get(self, "maintenance_configuration_id")

    @maintenance_configuration_id.setter
    def maintenance_configuration_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "maintenance_configuration_id", value)

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        The unique resourceId
        """
        return pulumi.get(self, "resource_id")

    @resource_id.setter
    def resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_id", value)
class ConfigurationAssignment(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 configuration_assignment_name: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 maintenance_configuration_id: Optional[pulumi.Input[str]] = None,
                 provider_name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 resource_id: Optional[pulumi.Input[str]] = None,
                 resource_name_: Optional[pulumi.Input[str]] = None,
                 resource_type: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Configuration Assignment
        API Version: 2021-04-01-preview.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] configuration_assignment_name: Configuration assignment name
        :param pulumi.Input[str] location: Location of the resource
        :param pulumi.Input[str] maintenance_configuration_id: The maintenance configuration Id
        :param pulumi.Input[str] provider_name: Resource provider name
        :param pulumi.Input[str] resource_group_name: Resource group name
        :param pulumi.Input[str] resource_id: The unique resourceId
        :param pulumi.Input[str] resource_name_: Resource identifier
        :param pulumi.Input[str] resource_type: Resource type
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ConfigurationAssignmentArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Configuration Assignment
        API Version: 2021-04-01-preview.

        :param str resource_name: The name of the resource.
        :param ConfigurationAssignmentArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the args-object overload and the keyword overload.
        resource_args, opts = _utilities.get_resource_args_opts(ConfigurationAssignmentArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 configuration_assignment_name: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 maintenance_configuration_id: Optional[pulumi.Input[str]] = None,
                 provider_name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 resource_id: Optional[pulumi.Input[str]] = None,
                 resource_name_: Optional[pulumi.Input[str]] = None,
                 resource_type: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ConfigurationAssignmentArgs.__new__(ConfigurationAssignmentArgs)

            # Optional inputs are recorded as-is; required inputs must be
            # present unless the resource is being looked up by URN.
            __props__.__dict__["configuration_assignment_name"] = configuration_assignment_name
            __props__.__dict__["location"] = location
            __props__.__dict__["maintenance_configuration_id"] = maintenance_configuration_id
            if provider_name is None and not opts.urn:
                raise TypeError("Missing required property 'provider_name'")
            __props__.__dict__["provider_name"] = provider_name
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["resource_id"] = resource_id
            if resource_name_ is None and not opts.urn:
                raise TypeError("Missing required property 'resource_name_'")
            __props__.__dict__["resource_name"] = resource_name_
            if resource_type is None and not opts.urn:
                raise TypeError("Missing required property 'resource_type'")
            __props__.__dict__["resource_type"] = resource_type
            # Output-only properties start out unknown.
            __props__.__dict__["name"] = None
            __props__.__dict__["system_data"] = None
            __props__.__dict__["type"] = None
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:maintenance:ConfigurationAssignment"), pulumi.Alias(type_="azure-native:maintenance/v20210401preview:ConfigurationAssignment"), pulumi.Alias(type_="azure-nextgen:maintenance/v20210401preview:ConfigurationAssignment"), pulumi.Alias(type_="azure-native:maintenance/v20210901preview:ConfigurationAssignment"), pulumi.Alias(type_="azure-nextgen:maintenance/v20210901preview:ConfigurationAssignment")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(ConfigurationAssignment, __self__).__init__(
            'azure-native:maintenance:ConfigurationAssignment',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'ConfigurationAssignment':
        """
        Get an existing ConfigurationAssignment resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = ConfigurationAssignmentArgs.__new__(ConfigurationAssignmentArgs)

        # All properties are unknown until the engine reads the resource state.
        __props__.__dict__["location"] = None
        __props__.__dict__["maintenance_configuration_id"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["resource_id"] = None
        __props__.__dict__["system_data"] = None
        __props__.__dict__["type"] = None
        return ConfigurationAssignment(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        """
        Location of the resource
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter(name="maintenanceConfigurationId")
    def maintenance_configuration_id(self) -> pulumi.Output[Optional[str]]:
        """
        The maintenance configuration Id
        """
        return pulumi.get(self, "maintenance_configuration_id")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> pulumi.Output[Optional[str]]:
        """
        The unique resourceId
        """
        return pulumi.get(self, "resource_id")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
        """
        Azure Resource Manager metadata containing createdBy and modifiedBy information.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Type of the resource
        """
        return pulumi.get(self, "type")
| 43.213166 | 484 | 0.661299 |
eaeed10c8863674de20dc3bcb00ac750650316fa | 42,485 | py | Python | test/test_utils/__init__.py | Lokiiiiii/deep-learning-containers | f54b733567fd741b12362dc71cf93a72b5da1c82 | [
"Apache-2.0"
] | 1 | 2021-07-10T14:01:23.000Z | 2021-07-10T14:01:23.000Z | test/test_utils/__init__.py | Lokiiiiii/deep-learning-containers | f54b733567fd741b12362dc71cf93a72b5da1c82 | [
"Apache-2.0"
] | null | null | null | test/test_utils/__init__.py | Lokiiiiii/deep-learning-containers | f54b733567fd741b12362dc71cf93a72b5da1c82 | [
"Apache-2.0"
] | null | null | null | import json
import logging
import os
import re
import subprocess
import sys
import time
import boto3
import git
import pytest
from botocore.exceptions import ClientError
from glob import glob
from invoke import run
from invoke.context import Context
from packaging.version import LegacyVersion, Version, parse
from packaging.specifiers import SpecifierSet
from retrying import retry
from src.config.test_config import ENABLE_BENCHMARK_DEV_MODE
# Module-level logger: INFO level, emitting to stderr so test output on stdout
# stays clean.
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
LOGGER.addHandler(logging.StreamHandler(sys.stderr))

# Constant to represent default region for boto3 commands
DEFAULT_REGION = "us-west-2"
# Constant to represent region where p3dn tests can be run
P3DN_REGION = "us-east-1"

# Base DLAMI AMI IDs used to launch EC2 test instances, per region/flavor.
UBUNTU_18_BASE_DLAMI_US_WEST_2 = "ami-0ab8a8eaef5d56ff2"
UBUNTU_18_BASE_DLAMI_US_EAST_1 = "ami-01d0263a9631d8502"
PT_GPU_PY3_BENCHMARK_IMAGENET_AMI_US_EAST_1 = "ami-0673bb31cc62485dd"
PT_GPU_PY3_BENCHMARK_IMAGENET_AMI_US_WEST_2 = "ami-02d9a47bc61a31d43"
NEURON_UBUNTU_18_BASE_DLAMI_US_WEST_2 = "ami-0b5d270a84e753c18"
# All Ubuntu-based AMIs above, grouped for membership checks in tests.
UL_AMI_LIST = [
    UBUNTU_18_BASE_DLAMI_US_EAST_1,
    UBUNTU_18_BASE_DLAMI_US_WEST_2,
    PT_GPU_PY3_BENCHMARK_IMAGENET_AMI_US_EAST_1,
    PT_GPU_PY3_BENCHMARK_IMAGENET_AMI_US_WEST_2,
    NEURON_UBUNTU_18_BASE_DLAMI_US_WEST_2,
]
# ECS-optimized Amazon Linux 2 AMIs (us-west-2) and the Neuron AL2 DLAMI.
ECS_AML2_GPU_USWEST2 = "ami-09ef8c43fa060063d"
ECS_AML2_CPU_USWEST2 = "ami-014a2e30da708ee8b"
NEURON_AL2_DLAMI = "ami-092059396c7e51f52"

# System Python interpreter path available on each base DLAMI.
DLAMI_PYTHON_MAPPING = {
    UBUNTU_18_BASE_DLAMI_US_WEST_2: "/usr/bin/python3.7",
    UBUNTU_18_BASE_DLAMI_US_EAST_1: "/usr/bin/python3.7"
}

# Used for referencing tests scripts from container_tests directory (i.e. from ECS cluster)
CONTAINER_TESTS_PREFIX = os.path.join(os.sep, "test", "bin")

# S3 Bucket to use to transfer tests into an EC2 instance
TEST_TRANSFER_S3_BUCKET = "s3://dlinfra-tests-transfer-bucket"

# S3 Bucket to use to record benchmark results for further retrieving
BENCHMARK_RESULTS_S3_BUCKET = "s3://dlinfra-dlc-cicd-performance"

# Ubuntu ami home dir
UBUNTU_HOME_DIR = "/home/ubuntu"

# Reason string for skipping tests in PR context
SKIP_PR_REASON = "Skipping test in PR context to speed up iteration time. Test will be run in nightly/release pipeline."

# Reason string for skipping tests in non-PR context
PR_ONLY_REASON = "Skipping test that doesn't need to be run outside of PR context."

# File accumulating EC2 key pairs that tests must clean up afterwards.
KEYS_TO_DESTROY_FILE = os.path.join(os.sep, "tmp", "keys_to_destroy.txt")

# Sagemaker test types
SAGEMAKER_LOCAL_TEST_TYPE = "local"
SAGEMAKER_REMOTE_TEST_TYPE = "sagemaker"

# AWS account ID hosting the public DLC ECR registry.
PUBLIC_DLC_REGISTRY = "763104351884"
class MissingPythonVersionException(Exception):
    """
    Raised when an image_uri that is expected to contain a Python version does not have one.
    """
def get_dockerfile_path_for_image(image_uri):
    """
    For a given image_uri, find the path within the repository to its corresponding dockerfile

    :param image_uri: str Image URI
    :return: str Absolute path to dockerfile
    """
    # Repo root is everything before the first "test" path component of the CWD.
    github_repo_path = os.path.abspath(os.path.curdir).split("test", 1)[0]
    framework, framework_version = get_framework_and_version_from_tag(image_uri)
    # huggingface_* frameworks live under nested directories (huggingface/<framework>).
    framework_path = framework.replace("_", os.path.sep) if "huggingface" in framework else framework
    job_type = get_job_type_from_image(image_uri)
    # Try the <major>.<minor> docker directory first; fall back to <major>.<minor>.<patch>.
    short_framework_version = re.search(r"(\d+\.\d+)", image_uri).group(1)
    long_framework_version = re.search(r"\d+(\.\d+){2}", image_uri).group()
    framework_version_path = os.path.join(github_repo_path, framework_path, job_type, "docker", short_framework_version)
    if not os.path.isdir(framework_version_path):
        framework_version_path = os.path.join(
            github_repo_path, framework_path, job_type, "docker", long_framework_version
        )
    # Same fallback for the python-version directory ("py3" is the generic directory).
    python_version = re.search(r"py\d+", image_uri).group()
    python_version_path = os.path.join(framework_version_path, python_version)
    if not os.path.isdir(python_version_path):
        python_version_path = os.path.join(framework_version_path, "py3")
    device_type = get_processor_from_image_uri(image_uri)
    cuda_version = get_cuda_version_from_tag(image_uri)
    # Collect candidate Dockerfile.<device> files, ignoring example dockerfiles.
    dockerfiles_list = [
        path
        for path in glob(os.path.join(python_version_path, "**", f"Dockerfile.{device_type}"), recursive=True)
        if "example" not in path
    ]
    if device_type in ["gpu"]:
        # Multiple GPU dockerfiles can only be disambiguated by the cuda version in the URI.
        if not cuda_version and len(dockerfiles_list) > 1:
            raise LookupError(
                f"dockerfiles_list has more than one result, and needs cuda_version to be in image_uri to "
                f"uniquely identify the right dockerfile:\n"
                f"{dockerfiles_list}"
            )
        for dockerfile_path in dockerfiles_list:
            if cuda_version in dockerfile_path:
                return dockerfile_path
        raise LookupError(f"Failed to find a dockerfile path for {cuda_version} in:\n{dockerfiles_list}")
    assert len(dockerfiles_list) == 1, f"No unique dockerfile path in:\n{dockerfiles_list}\nfor image: {image_uri}"
    return dockerfiles_list[0]
def get_python_invoker(ami_id):
    """
    Return the python interpreter path to use on the given AMI.

    :param ami_id: str AMI ID
    :return: str path to python executable; defaults to /usr/bin/python3 for AMIs
        without an entry in DLAMI_PYTHON_MAPPING
    """
    default_invoker = "/usr/bin/python3"
    return DLAMI_PYTHON_MAPPING.get(ami_id, default_invoker)
def is_tf_version(required_version, image_uri):
    """
    Check that image_uri is a tensorflow image whose framework version matches required_version.

    :param required_version: str Framework version which is required from the image_uri
    :param image_uri: str ECR Image URI for the image to be validated
    :return: bool True if image_uri has same framework version as required_version, else False
    """
    version_specifiers = SpecifierSet(f"=={required_version}.*")
    framework, framework_version = get_framework_and_version_from_tag(image_uri)
    return framework == "tensorflow" and framework_version in version_specifiers
def is_below_framework_version(version_upper_bound, image_uri, framework):
    """
    Check that image_uri belongs to `framework` with a version strictly below version_upper_bound.

    :param version_upper_bound: str Framework version that image_uri is required to be below
    :param image_uri: str ECR Image URI for the image to be validated
    :param framework: str framework name the image must match
    :return: bool True if image_uri has framework version less than version_upper_bound, else False
    """
    version_specifiers = SpecifierSet(f"<{version_upper_bound}")
    image_framework, image_version = get_framework_and_version_from_tag(image_uri)
    return image_framework == framework and image_version in version_specifiers
def is_image_incompatible_with_instance_type(image_uri, ec2_instance_type):
    """
    Check for all compatibility issues between DLC Image Types and EC2 Instance Types.
    Currently configured to fail on the following checks:
        1. p4d.24xlarge instance type is used with a cuda<11.0 image
        2. p2.8xlarge instance type is used with a cuda=11.0 image for MXNET framework

    :param image_uri: ECR Image URI in valid DLC-format
    :param ec2_instance_type: EC2 Instance Type
    :return: bool True if there are incompatibilities, False if there aren't
    """
    # BUG FIX: get_cuda_version_from_tag returns None for gpu images whose tag carries
    # no "cuXXX" component; the original called .startswith on that None and crashed.
    image_is_gpu = get_processor_from_image_uri(image_uri) == "gpu"
    cuda_version = get_cuda_version_from_tag(image_uri) if image_is_gpu else None
    framework, _ = get_framework_and_version_from_tag(image_uri)
    image_is_cuda10_on_incompatible_p4d_instance = (
        image_is_gpu
        and cuda_version is not None
        and cuda_version.startswith("cu10")
        and ec2_instance_type in ["p4d.24xlarge"]
    )
    image_is_cuda11_on_incompatible_p2_instance_mxnet = (
        framework == "mxnet"
        and image_is_gpu
        and cuda_version is not None
        and cuda_version.startswith("cu11")
        and ec2_instance_type in ["p2.8xlarge"]
    )
    return image_is_cuda10_on_incompatible_p4d_instance or image_is_cuda11_on_incompatible_p2_instance_mxnet
def get_repository_local_path():
    """
    Return the local filesystem root of the repository checkout, derived from the
    current working directory by trimming everything from "/test/" onward.

    :return: str repository root path
    """
    current_dir = os.getcwd()
    repo_root, _, _ = current_dir.partition("/test/")
    return repo_root
def get_inference_server_type(image_uri):
    """
    Determine which model server an inference image ships.

    :param image_uri: str ECR image URI
    :return: str "ts" (torchserve) or "mms" (multi-model-server)
    """
    # Non-pytorch images always use multi-model-server.
    if "pytorch" not in image_uri:
        return "mms"
    # All pytorch neuron images use torchserve.
    if "neuron" in image_uri:
        return "ts"
    # PyTorch switched from MMS to torchserve starting with 1.6.
    image_tag = image_uri.split(":")[1]
    framework_version = parse(image_tag.split("-")[0])
    if isinstance(framework_version, LegacyVersion) or framework_version < Version("1.6"):
        return "mms"
    return "ts"
def is_pr_context():
    """Return True when the current build runs in a PR context."""
    build_context = os.getenv("BUILD_CONTEXT")
    return build_context == "PR"
def is_canary_context():
    """Return True when the current build runs in a canary context."""
    build_context = os.getenv("BUILD_CONTEXT")
    return build_context == "CANARY"
def is_mainline_context():
    """Return True when the current build runs in the mainline pipeline context."""
    build_context = os.getenv("BUILD_CONTEXT")
    return build_context == "MAINLINE"
def is_nightly_context():
    """Return True when the current build runs in the nightly pipeline context."""
    build_context = os.getenv("BUILD_CONTEXT")
    return build_context == "NIGHTLY"
def is_empty_build_context():
    """Return True when BUILD_CONTEXT is unset or empty (typically local runs)."""
    build_context = os.getenv("BUILD_CONTEXT")
    return not build_context
def is_dlc_cicd_context():
    """Return True when running inside any DLC CI/CD build context."""
    return os.getenv("BUILD_CONTEXT") in ("PR", "CANARY", "NIGHTLY", "MAINLINE")
def is_benchmark_dev_context():
    """Return the flag that indicates whether benchmark dev mode is enabled for this run."""
    dev_mode_enabled = ENABLE_BENCHMARK_DEV_MODE
    return dev_mode_enabled
def is_time_for_canary_safety_scan():
    """
    Canary tests run every 15 minutes.
    Using a 20 minutes interval to make tests run only once a day around 9 am PST (10 am during winter time).
    """
    now_utc = time.gmtime()
    within_first_twenty_minutes = 0 < now_utc.tm_min < 20
    return now_utc.tm_hour == 16 and within_first_twenty_minutes
def _get_remote_override_flags():
    """
    Fetch the test-override flags JSON from this account's dlc-cicd-helper S3 bucket.

    :return: dict parsed override flags; empty dict when the S3/STS call fails
    """
    json_content = {}
    try:
        s3_client = boto3.client('s3')
        sts_client = boto3.client('sts')
        account_id = sts_client.get_caller_identity().get('Account')
        result = s3_client.get_object(Bucket=f"dlc-cicd-helper-{account_id}", Key="override_tests_flags.json")
        json_content = json.loads(result["Body"].read().decode('utf-8'))
    except ClientError as e:
        # Missing bucket/object or auth failure: fall back to "no overrides".
        LOGGER.warning("ClientError when performing S3/STS operation: {}".format(e))
    return json_content
# Now we can skip EFA tests on pipeline without making any source code change
def are_efa_tests_disabled():
    """
    Check whether EFA tests should be skipped, either via the DISABLE_EFA_TESTS env
    variable (honored only in PR context) or via the remote override flags file.

    :return: bool True when EFA tests are disabled
    """
    env_disabled = is_pr_context() and os.getenv("DISABLE_EFA_TESTS", "False").lower() == "true"
    override_flags = _get_remote_override_flags()
    remotely_disabled = override_flags.get("disable_efa_tests", "false").lower() == "true"
    return env_disabled or remotely_disabled
def is_test_disabled(test_name, build_name, version):
    """
    Check the remote override flags to see whether a test has been disabled.

    Expected format of remote_override_flags:
    {
        "CB Project Name for Test Type A": {
            "CodeBuild Resolved Source Version": ["test_type_A_test_function_1", "test_type_A_test_function_2"]
        },
        "CB Project Name for Test Type B": {
            "CodeBuild Resolved Source Version": ["test_type_B_test_function_1", "test_type_B_test_function_2"]
        }
    }

    :param test_name: str Test Function node name (includes parametrized values in string)
    :param build_name: str Build Project name of current execution
    :param version: str Source Version of current execution
    :return: bool True if test is disabled as per remote override, False otherwise
    """
    remote_override_flags = _get_remote_override_flags()
    build_overrides = remote_override_flags.get(build_name, {})
    if version not in build_overrides:
        return False
    disabled_test_keywords = build_overrides[version]
    # An empty keyword list disables every test for this build/version combination.
    if not disabled_test_keywords:
        return True
    return any(keyword in test_name for keyword in disabled_test_keywords)
def run_subprocess_cmd(cmd, failure="Command failed"):
    """
    Run a shell command, failing the active pytest test if it exits non-zero.

    :param cmd: str shell command to execute
    :param failure: str message prefix reported when the command fails
    :return: subprocess.CompletedProcess for the executed command
    """
    result = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)
    if result.returncode != 0:
        pytest.fail(f"{failure}. Error log:\n{result.stdout.decode()}")
    return result
def login_to_ecr_registry(context, account_id, region):
    """
    Function to log into an ecr registry

    :param context: either invoke context object or fabric connection object
    :param account_id: Account ID with the desired ecr registry
    :param region: i.e. us-west-2
    """
    login_cmd = (
        f"aws ecr get-login-password --region {region} | docker login --username AWS "
        f"--password-stdin {account_id}.dkr.ecr.{region}.amazonaws.com"
    )
    context.run(login_cmd)
def retry_if_result_is_false(result):
    """Return True if we should retry (in this case retry if the result is False), False otherwise"""
    should_retry = result is False
    return should_retry
@retry(
    stop_max_attempt_number=10, wait_fixed=10000, retry_on_result=retry_if_result_is_false,
)
def request_mxnet_inference(ip_address="127.0.0.1", port="80", connection=None, model="squeezenet"):
    """
    Send request to container to test inference on kitten.jpg

    :param ip_address: host serving the model
    :param port: model server port
    :param connection: ec2_connection object to run the commands remotely over ssh
    :param model: name of the served model to query
    :return: <bool> True/False based on result of inference
    """
    conn_run = run if connection is None else connection.run
    # Download the test image only when it is not already present.
    image_check = conn_run("[ -f kitten.jpg ]", warn=True)
    if image_check.return_code != 0:
        conn_run("curl -O https://s3.amazonaws.com/model-server/inputs/kitten.jpg", hide=True)
    predict_out = conn_run(f"curl -X POST http://{ip_address}:{port}/predictions/{model} -T kitten.jpg", warn=True)
    # The return code is not reliable: a predict request may "succeed" with a 404
    # body, so also check the payload for the expected "probability" field.
    return predict_out.return_code == 0 and "probability" in predict_out.stdout
@retry(stop_max_attempt_number=10, wait_fixed=10000, retry_on_result=retry_if_result_is_false)
def request_mxnet_inference_gluonnlp(ip_address="127.0.0.1", port="80", connection=None):
    """
    Send request to container to test inference for predicting sentiments.

    :param ip_address: host serving the model
    :param port: model server port
    :param connection: ec2_connection object to run the commands remotely over ssh
    :return: <bool> True/False based on result of inference
    """
    conn_run = run if connection is None else connection.run
    predict_out = conn_run(
        (
            f"curl -X POST http://{ip_address}:{port}/predictions/bert_sst/predict -F "
            '\'data=["Positive sentiment", "Negative sentiment"]\''
        ),
        warn=True,
    )
    # The return code is not reliable: a predict request may "succeed" with a 404
    # body, so also check the payload for the expected label.
    return predict_out.return_code == 0 and "1" in predict_out.stdout
@retry(
    stop_max_attempt_number=10, wait_fixed=10000, retry_on_result=retry_if_result_is_false,
)
def request_pytorch_inference_densenet(
    ip_address="127.0.0.1", port="80", connection=None, model_name="pytorch-densenet", server_type="ts"
):
    """
    Send request to container to test inference on flower.jpg

    :param ip_address: str host serving the model
    :param port: str model server port
    :param connection: obj optional ssh connection for remote execution
    :param model_name: str name of the served model to query
    :param server_type: str "ts" (torchserve) or "mms" (multi-model-server)
    :return: <bool> True/False based on result of inference
    """
    # Run locally unless an ssh connection to a remote host was provided.
    conn_run = connection.run if connection is not None else run
    # Check if image already exists
    run_out = conn_run("[ -f flower.jpg ]", warn=True)
    if run_out.return_code != 0:
        conn_run("curl -O https://s3.amazonaws.com/model-server/inputs/flower.jpg", hide=True)
    run_out = conn_run(
        f"curl -X POST http://{ip_address}:{port}/predictions/{model_name} -T flower.jpg", hide=True, warn=True
    )
    # The run_out.return_code is not reliable, since sometimes predict request may succeed but the returned result
    # is 404. Hence the extra check.
    if run_out.return_code != 0:
        LOGGER.error("run_out.return_code != 0")
        return False
    else:
        inference_output = json.loads(run_out.stdout.strip("\n"))
        # Validate the shape of the prediction payload: neuron models return a
        # 3-entry list; torchserve returns a 5-entry dict; MMS a 5-entry list.
        if not (
            ("neuron" in model_name and isinstance(inference_output, list) and len(inference_output) == 3)
            or (server_type=="ts" and isinstance(inference_output, dict) and len(inference_output) == 5)
            or (server_type=="mms" and isinstance(inference_output, list) and len(inference_output) == 5)
        ):
            return False
        LOGGER.info(f"Inference Output = {json.dumps(inference_output, indent=4)}")
    return True
@retry(stop_max_attempt_number=20, wait_fixed=10000, retry_on_result=retry_if_result_is_false)
def request_tensorflow_inference(model_name, ip_address="127.0.0.1", port="8501"):
    """
    Method to run tensorflow inference on half_plus_two model using CURL command

    :param model_name: name of the served model to query
    :param ip_address: host serving the model
    :param port: TFS REST port
    :return: <bool> True when the request succeeded and returned predictions
    """
    inference_string = "'{\"instances\": [1.0, 2.0, 5.0]}'"
    curl_out = run(
        f"curl -d {inference_string} -X POST http://{ip_address}:{port}/v1/models/{model_name}:predict", warn=True
    )
    # Return code alone is unreliable (a 404 body can still exit 0), so also
    # require the "predictions" field in the response body.
    return curl_out.return_code == 0 and "predictions" in curl_out.stdout
@retry(stop_max_attempt_number=20, wait_fixed=10000, retry_on_result=retry_if_result_is_false)
def request_tensorflow_inference_nlp(model_name, ip_address="127.0.0.1", port="8501"):
    """
    Method to run tensorflow inference on an NLP model using CURL command

    :param model_name: name of the served model to query
    :param ip_address: host serving the model
    :param port: TFS REST port
    :return: <bool> True when the request succeeded and returned predictions
    """
    inference_string = "'{\"instances\": [[2,1952,25,10901,3]]}'"
    curl_out = run(
        f"curl -d {inference_string} -X POST http://{ip_address}:{port}/v1/models/{model_name}:predict", warn=True
    )
    # Return code alone is unreliable (a 404 body can still exit 0), so also
    # require the "predictions" field in the response body.
    return curl_out.return_code == 0 and 'predictions' in curl_out.stdout
def request_tensorflow_inference_grpc(script_file_path, ip_address="127.0.0.1", port="8500", connection=None, ec2_instance_ami=None):
    """
    Method to run tensorflow inference on MNIST model using gRPC protocol

    :param script_file_path: path to the gRPC client script to execute
    :param ip_address: host serving the model
    :param port: TFS gRPC port
    :param connection: optional ssh connection for remote execution
    :param ec2_instance_ami: AMI ID, used to pick the right python interpreter
    """
    python_invoker = get_python_invoker(ec2_instance_ami)
    conn_run = run if connection is None else connection.run
    conn_run(f"{python_invoker} {script_file_path} --num_tests=1000 --server={ip_address}:{port}", hide=True)
def get_inference_run_command(image_uri, model_names, processor="cpu"):
    """
    Helper function to format run command for MMS

    :param image_uri: ECR image URI, used to infer the model server type
    :param model_names: str or list of model names to serve
    :param processor: processor type (cpu, gpu, eia, neuron)
    :return: <str> Command to start MMS server with given model
    """
    server_type = get_inference_server_type(image_uri)
    # Each server/processor combination downloads its model archives from a
    # different location; keys are the model names tests may request.
    if processor == "eia":
        multi_model_location = {
            "resnet-152-eia": "https://s3.amazonaws.com/model-server/model_archive_1.0/resnet-152-eia-1-7-0.mar",
            "resnet-152-eia-1-5-1": "https://s3.amazonaws.com/model-server/model_archive_1.0/resnet-152-eia-1-5-1.mar",
            "pytorch-densenet": "https://aws-dlc-sample-models.s3.amazonaws.com/pytorch/densenet_eia/densenet_eia_v1_5_1.mar",
            "pytorch-densenet-v1-3-1": "https://aws-dlc-sample-models.s3.amazonaws.com/pytorch/densenet_eia/densenet_eia_v1_3_1.mar",
        }
    elif server_type == "ts":
        multi_model_location = {
            "squeezenet": "https://torchserve.s3.amazonaws.com/mar_files/squeezenet1_1.mar",
            "pytorch-densenet": "https://torchserve.s3.amazonaws.com/mar_files/densenet161.mar",
            "pytorch-resnet-neuron": "https://aws-dlc-sample-models.s3.amazonaws.com/pytorch/Resnet50-neuron.mar",
        }
    else:
        multi_model_location = {
            "squeezenet": "https://s3.amazonaws.com/model-server/models/squeezenet_v1.1/squeezenet_v1.1.model",
            "pytorch-densenet": "https://dlc-samples.s3.amazonaws.com/pytorch/multi-model-server/densenet/densenet.mar",
            "bert_sst": "https://aws-dlc-sample-models.s3.amazonaws.com/bert_sst/bert_sst.mar",
            "mxnet-resnet-neuron": "https://aws-dlc-sample-models.s3.amazonaws.com/mxnet/Resnet50-neuron.mar",
        }
    if not isinstance(model_names, list):
        model_names = [model_names]
    for model_name in model_names:
        if model_name not in multi_model_location:
            raise Exception("No entry found for model {} in dictionary".format(model_name))
    # Build "<name>=<archive-url>" pairs understood by the model server CLI.
    parameters = ["{}={}".format(name, multi_model_location[name]) for name in model_names]
    if server_type == "ts":
        server_cmd = "torchserve"
    else:
        server_cmd = "multi-model-server"
    if processor != "neuron":
        mms_command = (
            f"{server_cmd} --start --{server_type}-config /home/model-server/config.properties --models "
            + " ".join(parameters)
        )
    else:
        # Neuron containers start the server through their entrypoint script instead.
        mms_command = (
            f"/usr/local/bin/entrypoint.sh -t /home/model-server/config.properties -m " + " ".join(parameters)
        )
    return mms_command
def get_tensorflow_model_name(processor, model_name):
    """
    Helper function to get tensorflow model name

    :param processor: Processor Type
    :param model_name: Name of model to be used
    :return: File name for model being used
    """
    tensorflow_models = {
        "saved_model_half_plus_two": {
            "cpu": "saved_model_half_plus_two_cpu",
            "gpu": "saved_model_half_plus_two_gpu",
            "eia": "saved_model_half_plus_two",
        },
        "albert": {
            "cpu": "albert",
            "gpu": "albert",
            "eia": "albert",
        },
        "saved_model_half_plus_three": {"eia": "saved_model_half_plus_three"},
    }
    if model_name not in tensorflow_models:
        raise Exception(f"No entry found for model {model_name} in dictionary")
    return tensorflow_models[model_name][processor]
def generate_ssh_keypair(ec2_client, key_name):
    """
    Create an EC2 key pair named key_name and write the private key to <cwd>/<key_name>.pem.

    Reuses an existing local .pem file when present, and tolerates a concurrent
    creation of the same key pair by another test worker.

    :param ec2_client: boto3 EC2 client
    :param key_name: str name for the EC2 key pair
    :return: str path of the local .pem file
    """
    pwd = run("pwd", hide=True).stdout.strip("\n")
    key_filename = os.path.join(pwd, f"{key_name}.pem")
    # Local key already exists: just ensure ssh-usable permissions and reuse it.
    if os.path.exists(key_filename):
        run(f"chmod 400 {key_filename}")
        return key_filename
    try:
        key_pair = ec2_client.create_key_pair(KeyName=key_name)
    except ClientError as e:
        if "InvalidKeyPair.Duplicate" in f"{e}":
            # Wait 10 seconds for key to be created to avoid race condition
            time.sleep(10)
            if os.path.exists(key_filename):
                run(f"chmod 400 {key_filename}")
                return key_filename
        raise e
    run(f"echo '{key_pair['KeyMaterial']}' > {key_filename}")
    run(f"chmod 400 {key_filename}")
    return key_filename
def destroy_ssh_keypair(ec2_client, key_filename):
    """
    Delete an EC2 key pair and remove its local .pem file.

    :param ec2_client: boto3 EC2 client
    :param key_filename: str path to the local <key_name>.pem file
    :return: tuple(delete_key_pair response, key name)
    """
    key_name = os.path.basename(key_filename).split(".pem")[0]
    delete_response = ec2_client.delete_key_pair(KeyName=key_name)
    run(f"rm -f {key_filename}")
    return delete_response, key_name
def upload_tests_to_s3(testname_datetime_suffix):
    """
    Upload test-related artifacts to unique s3 location.
    Allows each test to have a unique remote location for test scripts and files.
    These uploaded files and folders are copied into a container running an ECS test.

    :param testname_datetime_suffix: test name and datetime suffix that is unique to a test
    :return: <str> S3 URI the test artifacts were uploaded to
    :raises FileExistsError: when the computed S3 location already exists
    :raises EnvironmentError: when not invoked from within the dlc_tests directory tree
    """
    s3_test_location = os.path.join(TEST_TRANSFER_S3_BUCKET, testname_datetime_suffix)
    run_out = run(f"aws s3 ls {s3_test_location}", warn=True)
    if run_out.return_code == 0:
        raise FileExistsError(f"{s3_test_location} already exists. Skipping upload and failing the test.")
    path = run("pwd", hide=True).stdout.strip("\n")
    # BUG FIX: the original constructed EnvironmentError without raising it; the
    # wrong-path case then fell into the while loop below, which never terminates
    # once os.path.dirname reaches "/".
    if "dlc_tests" not in path:
        raise EnvironmentError("Test is being run from wrong path")
    while os.path.basename(path) != "dlc_tests":
        path = os.path.dirname(path)
    container_tests_path = os.path.join(path, "container_tests")
    run(f"aws s3 cp --recursive {container_tests_path}/ {s3_test_location}/")
    return s3_test_location
def delete_uploaded_tests_from_s3(s3_test_location):
    """
    Delete s3 bucket data related to current test after test is completed

    :param s3_test_location: S3 URI for test artifacts to be removed
    """
    run(f"aws s3 rm --recursive {s3_test_location}")
def get_dlc_images():
    """
    Resolve the set of DLC image URIs to test for the current build context.

    PR/local runs read DLC_IMAGES from the environment; canary runs derive images
    from git tags; pipeline runs read the "sanity" image list from the CodeBuild
    artifact JSON.

    :return: str space-separated image URIs
    :raises RuntimeError: when no sanity image list can be found in the test env file
    """
    if is_pr_context() or is_empty_build_context():
        return os.getenv("DLC_IMAGES")
    if is_canary_context():
        return parse_canary_images(os.getenv("FRAMEWORK"), os.getenv("AWS_REGION"))
    test_env_file = os.path.join(os.getenv("CODEBUILD_SRC_DIR_DLC_IMAGES_JSON"), "test_type_images.json")
    with open(test_env_file) as test_env:
        test_images = json.load(test_env)
    for dlc_test_type, images in test_images.items():
        if dlc_test_type == "sanity":
            return " ".join(images)
    # BUG FIX: original message read "Cannot find any images for in {...}".
    raise RuntimeError(f"Cannot find any images in {test_images}")
def get_canary_default_tag_py3_version(framework, version):
    """
    Currently, only TF2.2 images and above have major/minor python version in their canary tag. Creating this function
    to conditionally choose a python version based on framework version ranges. If we move up to py38, for example,
    this is the place to make the conditional change.

    :param framework: tensorflow1, tensorflow2, mxnet, pytorch
    :param version: fw major.minor version, i.e. 2.2
    :return: default tag python version
    """
    if framework in ("tensorflow2", "huggingface_tensorflow"):
        return "py37" if Version(version) >= Version("2.2") else "py3"
    if framework == "mxnet":
        return "py37" if Version(version) >= Version("1.8") else "py3"
    return "py3"
def parse_canary_images(framework, region):
    """
    Return which canary images to run canary tests on for a given framework and AWS region

    :param framework: ML framework (mxnet, tensorflow, pytorch)
    :param region: AWS region
    :return: dlc_images string (space separated string of image URIs)
    """
    # Disambiguate tf1 vs tf2 from the CodeBuild job identifiers.
    if framework == "tensorflow":
        if "tensorflow2" in os.getenv("CODEBUILD_BUILD_ID") or "tensorflow2" in os.getenv("CODEBUILD_INITIATOR"):
            framework = "tensorflow2"
        else:
            framework = "tensorflow1"
    # Release git-tag patterns per framework, used to discover released versions.
    version_regex = {
        "tensorflow1": r"tf-(1.\d+)",
        "tensorflow2": r"tf-(2.\d+)",
        "mxnet": r"mx-(\d+.\d+)",
        "pytorch": r"pt-(\d+.\d+)",
        "huggingface_pytorch": r"hf-pt-(\d+.\d+)",
        "huggingface_tensorflow": r"hf-tf-(\d+.\d+)",
    }
    # First framework version that dropped py2 images (None = py2 never dropped).
    py2_deprecated = {"tensorflow1": None, "tensorflow2": "2.2", "mxnet": "1.7", "pytorch": "1.5"}
    repo = git.Repo(os.getcwd(), search_parent_directories=True)
    versions_counter = {}
    # Scan release tags; a version qualifies once both training ("tr") and
    # inference ("inf") tags exist for it. Tags without either marker count as both.
    for tag in repo.tags:
        tag_str = str(tag)
        match = re.search(version_regex[framework], tag_str)
        if match:
            version = match.group(1)
            if not versions_counter.get(version):
                versions_counter[version] = {"tr": False, "inf": False}
            if "tr" not in tag_str and "inf" not in tag_str:
                versions_counter[version]["tr"] = True
                versions_counter[version]["inf"] = True
            elif "tr" in tag_str:
                versions_counter[version]["tr"] = True
            elif "inf" in tag_str:
                versions_counter[version]["inf"] = True
    # Adding huggingface here since we dont have inference HF containers now
    versions = []
    for v, inf_train in versions_counter.items():
        if (inf_train["inf"] and inf_train["tr"])\
                or framework.startswith("huggingface"):
            versions.append(v)
    # Sort ascending to descending, use lambda to ensure 2.2 < 2.15, for instance
    versions.sort(key=lambda version_str: [int(point) for point in version_str.split(".")], reverse=True)
    registry = PUBLIC_DLC_REGISTRY
    # Canary-test at most the three most recent framework versions.
    framework_versions = versions if len(versions) < 4 else versions[:3]
    dlc_images = []
    for fw_version in framework_versions:
        py3_version = get_canary_default_tag_py3_version(framework, fw_version)
        # Full image URI lists per framework and python major version.
        images = {
            "tensorflow1": {
                "py2": [
                    f"{registry}.dkr.ecr.{region}.amazonaws.com/tensorflow-training:{fw_version}-cpu-py2",
                    f"{registry}.dkr.ecr.{region}.amazonaws.com/tensorflow-training:{fw_version}-gpu-py2",
                ],
                "py3": [
                    f"{registry}.dkr.ecr.{region}.amazonaws.com/tensorflow-training:{fw_version}-gpu-{py3_version}",
                    f"{registry}.dkr.ecr.{region}.amazonaws.com/tensorflow-training:{fw_version}-cpu-{py3_version}",
                    f"{registry}.dkr.ecr.{region}.amazonaws.com/tensorflow-inference:{fw_version}-gpu",
                    f"{registry}.dkr.ecr.{region}.amazonaws.com/tensorflow-inference:{fw_version}-cpu",
                ],
            },
            "tensorflow2": {
                "py2": [],
                "py3": [
                    f"{registry}.dkr.ecr.{region}.amazonaws.com/tensorflow-training:{fw_version}-gpu-{py3_version}",
                    f"{registry}.dkr.ecr.{region}.amazonaws.com/tensorflow-training:{fw_version}-cpu-{py3_version}",
                    f"{registry}.dkr.ecr.{region}.amazonaws.com/tensorflow-inference:{fw_version}-gpu",
                    f"{registry}.dkr.ecr.{region}.amazonaws.com/tensorflow-inference:{fw_version}-cpu",
                ],
            },
            "mxnet": {
                "py2": [
                    f"{registry}.dkr.ecr.{region}.amazonaws.com/mxnet-training:{fw_version}-gpu-py2",
                    f"{registry}.dkr.ecr.{region}.amazonaws.com/mxnet-training:{fw_version}-cpu-py2",
                    f"{registry}.dkr.ecr.{region}.amazonaws.com/mxnet-inference:{fw_version}-gpu-py2",
                    f"{registry}.dkr.ecr.{region}.amazonaws.com/mxnet-inference:{fw_version}-cpu-py2",
                ],
                "py3": [
                    f"{registry}.dkr.ecr.{region}.amazonaws.com/mxnet-training:{fw_version}-gpu-{py3_version}",
                    f"{registry}.dkr.ecr.{region}.amazonaws.com/mxnet-training:{fw_version}-cpu-{py3_version}",
                    f"{registry}.dkr.ecr.{region}.amazonaws.com/mxnet-inference:{fw_version}-gpu-{py3_version}",
                    f"{registry}.dkr.ecr.{region}.amazonaws.com/mxnet-inference:{fw_version}-cpu-{py3_version}",
                ],
            },
            "pytorch": {
                "py2": [],
                "py3": [
                    f"{registry}.dkr.ecr.{region}.amazonaws.com/pytorch-training:{fw_version}-gpu-{py3_version}",
                    f"{registry}.dkr.ecr.{region}.amazonaws.com/pytorch-training:{fw_version}-cpu-{py3_version}",
                    f"{registry}.dkr.ecr.{region}.amazonaws.com/pytorch-inference:{fw_version}-gpu-{py3_version}",
                    f"{registry}.dkr.ecr.{region}.amazonaws.com/pytorch-inference:{fw_version}-cpu-{py3_version}",
                ],
            },
            # TODO: uncomment once cpu training and inference images become available
            "huggingface_pytorch": {
                "py2": [],
                "py3": [
                    f"{registry}.dkr.ecr.{region}.amazonaws.com/huggingface-pytorch-training:{fw_version}-gpu-{py3_version}",
                    # f"{registry}.dkr.ecr.{region}.amazonaws.com/huggingface-pytorch-training:{fw_version}-cpu-{py3_version}",
                    # f"{registry}.dkr.ecr.{region}.amazonaws.com/huggingface-pytorch-inference:{fw_version}-gpu-{py3_version}",
                    # f"{registry}.dkr.ecr.{region}.amazonaws.com/huggingface-pytorch-inference:{fw_version}-cpu-{py3_version}",
                ],
            },
            "huggingface_tensorflow": {
                "py2": [],
                "py3": [
                    f"{registry}.dkr.ecr.{region}.amazonaws.com/huggingface-tensorflow-training:{fw_version}-gpu-{py3_version}",
                    # f"{registry}.dkr.ecr.{region}.amazonaws.com/huggingface-tensorflow-training:{fw_version}-cpu-{py3_version}",
                    # f"{registry}.dkr.ecr.{region}.amazonaws.com/huggingface-tensorflow-inference:{fw_version}-gpu-{py3_version}",
                    # f"{registry}.dkr.ecr.{region}.amazonaws.com/huggingface-tensorflow-inference:{fw_version}-cpu-{py3_version}",
                ],
            },
        }
        dlc_images += images[framework]["py3"]
        # Also canary-test py2 images for versions released before py2 deprecation.
        no_py2 = py2_deprecated.get(framework)
        if no_py2 and (Version(fw_version) >= Version(no_py2)):
            continue
        else:
            dlc_images += images[framework].get("py2", [])
    return " ".join(dlc_images)
def setup_sm_benchmark_tf_train_env(resources_location, setup_tf1_env, setup_tf2_env):
    """
    Create a virtual environment for benchmark tests if it doesn't already exist, and download all necessary scripts

    :param resources_location: <str> directory in which test resources should be placed
    :param setup_tf1_env: <bool> True if tf1 resources need to be setup
    :param setup_tf2_env: <bool> True if tf2 resources need to be setup
    :return: absolute path to the location of the virtual environment
    """
    ctx = Context()
    tf_resource_dir_list = []
    if setup_tf1_env:
        tf_resource_dir_list.append("tensorflow1")
    if setup_tf2_env:
        tf_resource_dir_list.append("tensorflow2")
    # Clone benchmark script repositories into each requested tf resource dir.
    for resource_dir in tf_resource_dir_list:
        with ctx.cd(os.path.join(resources_location, resource_dir)):
            if not os.path.isdir(os.path.join(resources_location, resource_dir, "horovod")):
                # v0.19.4 is the last version for which horovod example tests are py2 compatible
                ctx.run("git clone -b v0.19.4 https://github.com/horovod/horovod.git")
            if not os.path.isdir(os.path.join(resources_location, resource_dir, "deep-learning-models")):
                # We clone branch tf2 for both 1.x and 2.x tests because tf2 branch contains all necessary files
                ctx.run(f"git clone -b tf2 https://github.com/aws-samples/deep-learning-models.git")
    venv_dir = os.path.join(resources_location, "sm_benchmark_venv")
    if not os.path.isdir(venv_dir):
        ctx.run(f"virtualenv {venv_dir}")
        with ctx.prefix(f"source {venv_dir}/bin/activate"):
            ctx.run("pip install 'sagemaker>=2,<3' awscli boto3 botocore six==1.11")
            # SageMaker TF estimator is coded to only accept framework versions up to 2.1.0 as py2 compatible.
            # Fixing this through the following changes:
            estimator_location = ctx.run(
                "echo $(pip3 show sagemaker |grep 'Location' |sed s/'Location: '//g)/sagemaker/tensorflow/estimator.py"
            ).stdout.strip("\n")
            # macOS sed requires an explicit (empty) backup-suffix argument for -i.
            system = ctx.run("uname -s").stdout.strip("\n")
            sed_input_arg = "'' " if system == "Darwin" else ""
            # In-place patch of the installed estimator: raise the py2 cutoff to 2.1.1.
            ctx.run(f"sed -i {sed_input_arg}'s/\[2, 1, 0\]/\[2, 1, 1\]/g' {estimator_location}")
    return venv_dir
def setup_sm_benchmark_mx_train_env(resources_location):
    """
    Create a virtual environment for benchmark tests if it doesn't already exist, and download all necessary scripts

    :param resources_location: <str> directory in which test resources should be placed
    :return: absolute path to the location of the virtual environment
    """
    ctx = Context()
    venv_dir = os.path.join(resources_location, "sm_benchmark_venv")
    if os.path.isdir(venv_dir):
        return venv_dir
    ctx.run(f"virtualenv {venv_dir}")
    with ctx.prefix(f"source {venv_dir}/bin/activate"):
        ctx.run("pip install sagemaker awscli boto3 botocore")
    return venv_dir
def setup_sm_benchmark_hf_infer_env(resources_location):
    """
    Create a virtual environment for benchmark tests if it doesn't already exist, and download all necessary scripts

    :param resources_location: <str> directory in which test resources should be placed
    :return: absolute path to the location of the virtual environment
    """
    ctx = Context()
    venv_dir = os.path.join(resources_location, "sm_benchmark_hf_venv")
    if os.path.isdir(venv_dir):
        return venv_dir
    ctx.run(f"python3 -m virtualenv {venv_dir}")
    with ctx.prefix(f"source {venv_dir}/bin/activate"):
        ctx.run("pip install sagemaker awscli boto3 botocore")
    return venv_dir
def get_account_id_from_image_uri(image_uri):
    """
    Find the account ID where the image is located

    :param image_uri: <str> ECR image URI
    :return: <str> AWS Account ID
    """
    account_id, _, _ = image_uri.partition(".")
    return account_id
def get_region_from_image_uri(image_uri):
    """
    Find the region where the image is located

    :param image_uri: <str> ECR image URI
    :return: <str> AWS Region Name
    """
    region_pattern = r"(us(-gov)?|ap|ca|cn|eu|sa)-(central|(north|south)?(east|west)?)-\d+"
    match = re.search(region_pattern, image_uri)
    assert match, f"{image_uri} must have region that matches {region_pattern}"
    return match.group()
def get_unique_name_from_tag(image_uri):
    """
    Return a unique, alphanumeric-only name derived from the image URI.

    :param image_uri: ECR image URI
    :return: unique name
    """
    return re.sub(r"[^A-Za-z0-9]+", "", image_uri)
def get_framework_and_version_from_tag(image_uri):
    """
    Return the framework and version from the image tag.

    :param image_uri: ECR image URI
    :return: framework name, framework version
    """
    allowed_frameworks = ("huggingface_tensorflow", "huggingface_pytorch", "tensorflow", "mxnet", "pytorch")
    framework = get_framework_from_image_uri(image_uri)
    if framework is None:
        raise RuntimeError(
            f"Cannot find framework in image uri {image_uri} from allowed frameworks {allowed_frameworks}"
        )
    version_match = re.search(r"(\d+(\.\d+){1,2})", image_uri)
    return framework, version_match.groups()[0]
def get_framework_from_image_uri(image_uri):
    """
    Identify the ML framework from an image URI.

    :param image_uri: ECR image URI
    :return: str framework name, or None when no known framework marker is present
    """
    # Order matters: huggingface URIs also contain the base framework name.
    framework_markers = (
        ("huggingface-tensorflow", "huggingface_tensorflow"),
        ("huggingface-pytorch", "huggingface_pytorch"),
        ("mxnet", "mxnet"),
        ("pytorch", "pytorch"),
        ("tensorflow", "tensorflow"),
    )
    for marker, framework in framework_markers:
        if marker in image_uri:
            return framework
    return None
def get_cuda_version_from_tag(image_uri):
    """
    Return the cuda version from the image tag.

    :param image_uri: ECR image URI
    :return: str cuda version such as "cu110", or None when the URI does not
        carry both a "gpu" and a "cu" marker
    """
    if not all(marker in image_uri for marker in ("cu", "gpu")):
        return None
    return re.search(r"(cu\d+)-", image_uri).groups()[0]
def get_job_type_from_image(image_uri):
    """
    Return the Job type from the image tag.

    :param image_uri: ECR image URI
    :return: str "training" or "inference"
    :raises RuntimeError: when no job type can be determined from the URI
    """
    allowed_job_types = ("training", "inference")
    job_type = next((jt for jt in allowed_job_types if jt in image_uri), None)
    # EIA images are inference images even when the tag omits the word.
    if job_type is None and "eia" in image_uri:
        job_type = "inference"
    if job_type is None:
        raise RuntimeError(
            f"Cannot find Job Type in image uri {image_uri} from allowed frameworks {allowed_job_types}"
        )
    return job_type
def get_repository_and_tag_from_image_uri(image_uri):
    """
    Return the repository name and tag encoded in an image URI.

    Splits the tag off at the last ":" and the registry host off at the
    first "/", so namespaced repositories ("<registry>/ns/repo:tag")
    resolve to "ns/repo" instead of raising a ValueError from the
    exactly-two-values unpacking.

    :param image_uri: URI of the image, "<registry>/<repository>:<tag>"
    :return: (repository name, tag) tuple
    """
    repository_uri, tag = image_uri.rsplit(":", 1)
    _, repository_name = repository_uri.split("/", 1)
    return repository_name, tag
def get_processor_from_image_uri(image_uri):
    """
    Return the processor type encoded in the image URI.

    Assumes the URI embeds "-<processor>" in its tag, where <processor>
    is one of eia, neuron, cpu or gpu.

    :param image_uri: ECR image URI
    :return: "eia", "neuron", "cpu" or "gpu"
    :raises RuntimeError: if no processor marker is present
    """
    # Check in fixed priority order; the matched group equals the
    # candidate itself, so returning the candidate is equivalent.
    for candidate in ("eia", "neuron", "cpu", "gpu"):
        if re.search(rf"-({candidate})", image_uri):
            return candidate
    raise RuntimeError("Cannot find processor")
def get_python_version_from_image_uri(image_uri):
    """
    Return the python version marker from an image URI.

    The bare "py3" marker is normalized to "py36".

    :param image_uri: ECR image URI
    :return: str py36, py37, py38, etc., based information available in
        image URI
    :raises MissingPythonVersionException: if the URI carries no marker
        of the form "py<digits>"
    """
    match = re.search(r"py\d+", image_uri)
    if match is None:
        raise MissingPythonVersionException(
            f"{image_uri} does not have python version in the form 'py\\d+'"
        )
    marker = match.group()
    return "py36" if marker == "py3" else marker
def get_container_name(prefix, image_uri):
    """
    Build a unique docker container name from a test prefix and image URI.

    The registry host is dropped and "." / ":" are replaced with "-" so
    the result is a valid container name.

    :param prefix: test related prefix, like "emacs" or "pip-check"
    :param image_uri: ECR image URI
    :return: container name
    """
    image_part = image_uri.split("/")[-1]
    sanitized = image_part.replace(".", "-").replace(":", "-")
    return f"{prefix}-{sanitized}"
def start_container(container_name, image_uri, context):
    """
    Start a detached local container from the given image.

    :param container_name: Name of the docker container
    :param image_uri: ECR image URI
    :param context: Invoke context object used to run the docker command
    """
    run_cmd = (
        f"docker run --entrypoint='/bin/bash' "
        f"--name {container_name} -itd {image_uri}"
    )
    context.run(run_cmd, hide=True)
def run_cmd_on_container(container_name, context, cmd, executable="bash", warn=False):
    """
    Run a command inside a locally running container.

    NOTE(review): `cmd` is wrapped in single quotes when embedded in the
    docker exec call, so commands containing single quotes will break —
    confirm callers never pass such commands.

    :param container_name: Name of the docker container
    :param context: Invoke context object used to run the docker command
    :param cmd: Command to run on the container
    :param executable: Executable to run on the container (bash or python)
    :param warn: Whether to only warn as opposed to exit if command fails
    :return: invoke output, can be used to parse stdout, etc
    """
    if executable not in ("bash", "python"):
        # Logger.warn is a deprecated alias; use warning instead.
        LOGGER.warning(f"Unrecognized executable {executable}. It will be run as {executable} -c '{cmd}'")
    return context.run(
        f"docker exec --user root {container_name} {executable} -c '{cmd}'", hide=True, warn=warn, timeout=60
    )
| 40.193945 | 133 | 0.681229 |
7acfe1038370ad06323a017c95820888f95dccc5 | 152 | py | Python | Inheritance/class_Inheritance/project_zoo/reptile.py | vasetousa/OOP | e4fedc497dd149c9800613ea11846e0e770d122c | [
"MIT"
] | null | null | null | Inheritance/class_Inheritance/project_zoo/reptile.py | vasetousa/OOP | e4fedc497dd149c9800613ea11846e0e770d122c | [
"MIT"
] | null | null | null | Inheritance/class_Inheritance/project_zoo/reptile.py | vasetousa/OOP | e4fedc497dd149c9800613ea11846e0e770d122c | [
"MIT"
] | null | null | null | from Inheritance.class_Inheritance.project_zoo.animal import Animal
class Reptile(Animal):
    """Zoo reptile; initialization is fully delegated to Animal."""

    def __init__(self, name):
        # No reptile-specific state; let the base class store the name.
        super().__init__(name)
fb26b16bfbee441b76347521fb05dae83dedbc92 | 4,390 | py | Python | core/test/database/postgresql/mixin/test_pg_group_member.py | bogonets/answer | 57f892a9841980bcbc35fa1e27521b34cd94bc25 | [
"MIT"
] | 3 | 2021-06-20T02:24:10.000Z | 2022-01-26T23:55:33.000Z | core/test/database/postgresql/mixin/test_pg_group_member.py | bogonets/answer | 57f892a9841980bcbc35fa1e27521b34cd94bc25 | [
"MIT"
] | null | null | null | core/test/database/postgresql/mixin/test_pg_group_member.py | bogonets/answer | 57f892a9841980bcbc35fa1e27521b34cd94bc25 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from unittest import main
from recc.database.struct.group_join_member import GroupJoinGroupMember
from tester.unittest.postgresql_test_case import PostgresqlTestCase
class PgGroupMemberTestCase(PostgresqlTestCase):
    """Integration tests for group-member CRUD operations against a real
    PostgreSQL database (fixture provided by PostgresqlTestCase)."""

    async def setUp(self):
        """Create two users and cache commonly used permission/group uids."""
        await super().setUp()
        # Permission and group uids are provided by the base-class fixture;
        # aliased here for brevity in the tests below.
        self.guest = self.guest_permission_uid
        self.reporter = self.reporter_permission_uid
        self.operator = self.operator_permission_uid
        self.maintainer = self.maintainer_permission_uid
        self.anonymous = self.anonymous_group_uid
        user1_name = "user1"
        user2_name = "user2"
        self.user1_uid = await self.db.insert_user(user1_name, "pass1", "salt1")
        self.user2_uid = await self.db.insert_user(user2_name, "pass2", "salt2")
        self.user1 = await self.db.select_user_by_uid(self.user1_uid)
        self.user2 = await self.db.select_user_by_uid(self.user2_uid)

    async def test_create_and_get(self):
        """Inserted members are selectable with the same group/user/permission."""
        await self.db.insert_group_member(self.anonymous, self.user1.uid, self.guest)
        await self.db.insert_group_member(self.anonymous, self.user2.uid, self.reporter)
        member1 = await self.db.select_group_member(self.anonymous, self.user1.uid)
        member2 = await self.db.select_group_member(self.anonymous, self.user2.uid)
        self.assertEqual(self.anonymous, member1.group_uid)
        self.assertEqual(self.anonymous, member2.group_uid)
        self.assertEqual(self.user1.uid, member1.user_uid)
        self.assertEqual(self.user2.uid, member2.user_uid)
        self.assertEqual(self.guest, member1.permission_uid)
        self.assertEqual(self.reporter, member2.permission_uid)

    async def test_update_permission(self):
        """Updating a member's permission is reflected on re-select."""
        await self.db.insert_group_member(self.anonymous, self.user1.uid, self.guest)
        await self.db.insert_group_member(self.anonymous, self.user2.uid, self.reporter)
        await self.db.update_group_member_permission(
            self.anonymous, self.user1.uid, self.maintainer
        )
        await self.db.update_group_member_permission(
            self.anonymous, self.user2.uid, self.operator
        )
        member1 = await self.db.select_group_member(self.anonymous, self.user1.uid)
        member2 = await self.db.select_group_member(self.anonymous, self.user2.uid)
        self.assertEqual(self.maintainer, member1.permission_uid)
        self.assertEqual(self.operator, member2.permission_uid)

    async def test_group_members(self):
        """Member listings by group, by user, and globally return expected counts."""
        await self.db.insert_group_member(self.anonymous, self.user1.uid, self.guest)
        await self.db.insert_group_member(self.anonymous, self.user2.uid, self.reporter)
        groups1 = await self.db.select_group_members_by_group_uid(self.anonymous)
        groups2 = await self.db.select_group_members_by_user_uid(self.user2.uid)
        groups3 = await self.db.select_group_members()
        self.assertEqual(2, len(groups1))
        self.assertEqual(1, len(groups2))
        self.assertEqual(2, len(groups3))

    async def test_group_members_join_group(self):
        """Joined group/member queries return only the requested user's rows."""
        test_user = self.user1.uid
        fake_user = self.user2.uid  # second member to verify filtering
        await self.db.insert_group_member(self.anonymous, test_user, self.guest)
        await self.db.insert_group_member(self.anonymous, fake_user, self.reporter)
        groups = await self.db.select_group_members_join_group_by_user_uid(test_user)
        self.assertEqual(1, len(groups))
        group0 = groups[0]
        self.assertIsInstance(group0, GroupJoinGroupMember)
        self.assertEqual(self.anonymous, group0.group_uid)
        self.assertEqual(test_user, group0.user_uid)
        self.assertEqual(self.guest, group0.permission_uid)
        # The single-row variant must agree with the list variant.
        group1 = await self.db.select_group_member_join_group_by_user_uid_and_group_uid(
            test_user, self.anonymous
        )
        self.assertEqual(group0, group1)

    async def test_delete(self):
        """Deleting both members empties the member table."""
        await self.db.insert_group_member(self.anonymous, self.user1.uid, self.guest)
        await self.db.insert_group_member(self.anonymous, self.user2.uid, self.reporter)
        self.assertEqual(2, len(await self.db.select_group_members()))
        await self.db.delete_group_member(self.anonymous, self.user1.uid)
        await self.db.delete_group_member(self.anonymous, self.user2.uid)
        self.assertEqual(0, len(await self.db.select_group_members()))
if __name__ == "__main__":
    # Allow running this test module directly with `python <file>`.
    main()
| 45.729167 | 88 | 0.719818 |
97700a2b41a02706a37d6bc8a35409a8d731f2e5 | 130,037 | py | Python | flopy/mf6/data/mfdata.py | gyanz/flopy | 282703716a01721e07905da65aa54e6017452a5a | [
"CC0-1.0",
"BSD-3-Clause"
] | 1 | 2019-11-01T00:34:14.000Z | 2019-11-01T00:34:14.000Z | flopy/mf6/data/mfdata.py | gyanz/flopy | 282703716a01721e07905da65aa54e6017452a5a | [
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null | flopy/mf6/data/mfdata.py | gyanz/flopy | 282703716a01721e07905da65aa54e6017452a5a | [
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null | from operator import itemgetter
from copy import deepcopy
import sys
import inspect
from shutil import copyfile
from collections import OrderedDict
from enum import Enum
import struct
import numpy as np
from ..mfbase import MFDataException, VerbosityLevel, \
MFInvalidTransientBlockHeaderException, FlopyException
from ..data.mfstructure import DatumType, MFDataItemStructure
from ..data import mfdatautil
from ..data.mfdatautil import DatumUtil, FileIter, MultiListIter, ArrayUtil, \
ConstIter, ArrayIndexIter, MultiList
from ..coordinates.modeldimensions import DataDimensions, DiscretizationType
class MFComment(object):
    """
    Represents a comment in a MF6 input file.

    Parameters
    ----------
    comment : string or list
        comment to be displayed in output file
    path : string
        tuple representing location in the output file
    sim_data : simulation data object
        its ``comments_on`` flag controls whether comment text is emitted
    line_number : integer
        line number to display comment in output file

    Attributes
    ----------
    text : string or list
        comment to be displayed in output file
    path : string
        tuple representing location in the output file
    line_number : integer
        line number to display comment in output file

    Raises
    ------
    FlopyException
        if ``comment`` is not a string, a list, or None
    """
    def __init__(self, comment, path, sim_data, line_number=0):
        if not (isinstance(comment, str) or isinstance(comment, list) or
                comment is None):
            raise FlopyException('Comment "{}" not valid. Comment must be '
                                 'of type str of list.'.format(comment))
        self.text = comment
        self.path = path
        self.line_number = line_number
        self.sim_data = sim_data

    def add_text(self, additional_text):
        """
        Add text to the comment string.

        Parameters
        ----------
        additional_text: string
            text to add
        """
        if additional_text:
            if isinstance(self.text, list):
                self.text.append(additional_text)
            else:
                self.text = '{} {}'.format(self.text, additional_text)

    def get_file_entry(self, eoln_suffix=True):
        """
        Get the comment text in the format to write to package files.

        Parameters
        ----------
        eoln_suffix: boolean
            have comment text end with end of line character

        Returns
        -------
        string : comment text
        """
        file_entry = ''
        if self.text and self.sim_data.comments_on:
            if not isinstance(self.text, str) and isinstance(self.text, list):
                file_entry = self._recursive_get(self.text)
            else:
                if self.text.strip():
                    file_entry = self.text
            if eoln_suffix:
                file_entry = '{}\n'.format(file_entry)
        return file_entry

    def _recursive_get(self, base_list):
        # flatten a (possibly nested) list of comment fragments into one
        # space-separated string
        file_entry = ''
        if base_list and self.sim_data.comments_on:
            for item in base_list:
                if not isinstance(item, str) and isinstance(item, list):
                    file_entry = '{}{}'.format(file_entry,
                                               self._recursive_get(item))
                else:
                    file_entry = '{} {}'.format(file_entry, item)
        return file_entry

    def write(self, fd, eoln_suffix=True):
        """
        Write the comment text to a file.

        Parameters
        ----------
        fd : file
            file to write to
        eoln_suffix: boolean
            have comment text end with end of line character
        """
        if self.text and self.sim_data.comments_on:
            if not isinstance(self.text, str) and isinstance(self.text, list):
                self._recursive_write(fd, self.text)
            else:
                if self.text.strip():
                    fd.write(self.text)
            if eoln_suffix:
                fd.write('\n')

    def _recursive_write(self, fd, base_list):
        # recursively writes a nested list to a file
        if base_list:
            for item in base_list:
                if not isinstance(item, str) and isinstance(item, list):
                    self._recursive_write(fd, item)
                else:
                    fd.write(' {}'.format(item))

    def is_empty(self, include_whitespace=True):
        """
        Check for comment text.

        Parameters
        ----------
        include_whitespace : boolean
            include whitespace as text

        Returns
        -------
        boolean : True if comment text exists
        """
        if include_whitespace:
            # bug fix: self.text is a str/list attribute, not a callable;
            # the previous self.text() always raised TypeError
            if self.text:
                return True
            return False
        else:
            if self.text.strip():
                return True
            return False

    @staticmethod
    def is_comment(text, include_empty_line=False):
        """
        Check text to see if it is valid comment text.

        Parameters
        ----------
        text : string or list
            potential comment text
        include_empty_line : boolean
            allow empty line to be valid

        Returns
        -------
        boolean : True if text is valid comment text
        """
        if not text:
            return include_empty_line
        if text and isinstance(text, list):
            # look for comment mark in first item of list
            text_clean = text[0].strip()
        else:
            text_clean = text.strip()
        if include_empty_line and not text_clean:
            return True
        # bug fix: '//' is two characters, so the old comparison
        # text_clean[0] == '//' could never be true
        if text_clean and (text_clean.startswith('#') or
                           text_clean.startswith('!') or
                           text_clean.startswith('//')):
            return True
        return False
class DataStorageType(Enum):
    """
    Enumeration of different ways that data can be stored
    """
    internal_array = 1  # full data array held in memory
    internal_constant = 2  # single constant value represents all the data
    external_file = 3  # data stored in a separate file on disk
class DataStructureType(Enum):
    """
    Enumeration of different data structures used to store data
    """
    ndarray = 1  # numpy ndarray (array/grid data)
    recarray = 2  # numpy recarray (tabular/list data)
    scalar = 3  # single scalar value
class LayerStorage(object):
    """
    Stores a single layer of data.

    Parameters
    ----------
    data_storage : DataStorage
        Parent data storage object that layer is contained in
    lay_indexes : tuple
        Index tuple locating this layer within the parent's layer list
    data_storage_type : DataStorageType
        Method used to store the data

    Attributes
    ----------
    internal_data : ndarray or recarray
        data being stored, if full data is being stored internally in memory
    data_const_value : int/float
        constant value of data being stored, if data is a constant
    data_storage_type : DataStorageType
        method used to store the data
    fname : str
        file name of external file containing the data
    factor : int/float
        factor to multiply the data by
    iprn : int
        print code
    binary : bool
        whether the data is stored in a binary file

    Methods
    -------
    get_data_const_val()
        gets the constant value of this layer; storage type should be
        "internal_constant"
    get_data() : ndarray/recarray/string
        returns the data for this layer (no multiplier applied)
    set_data(data)
        sets the data being stored for this layer, replacing all
        data for the layer

    See Also
    --------

    Notes
    -----

    Examples
    --------
    """
    def __init__(self, data_storage, lay_indexes,
                 data_storage_type=DataStorageType.internal_array):
        # parent DataStorage object that owns this layer
        self._data_storage_parent = data_storage
        # index tuple used to locate this layer in the parent
        self._lay_indexes = lay_indexes
        self.internal_data = None  # populated for internal_array storage
        self.data_const_value = None  # populated for internal_constant storage
        self.data_storage_type = data_storage_type
        self.fname = None  # populated for external_file storage
        self.factor = 1.0  # multiplier applied to the stored data
        self.iprn = None  # print code, written to the layer header
        self.binary = False  # external file is binary when True

    def __repr__(self):
        # constants are shown as "constant <value>"; everything else
        # delegates to the underlying data's repr
        if self.data_storage_type == DataStorageType.internal_constant:
            return 'constant {}'.format(self.get_data_const_val())
        else:
            return repr(self.get_data())

    def __str__(self):
        if self.data_storage_type == DataStorageType.internal_constant:
            return '{}'.format(self.get_data_const_val())
        else:
            return str(self.get_data())

    def __getattr__(self, attr):
        # 'array' returns this layer's data with the multiplier applied.
        # '__getstate__' must raise so pickling falls back to the default.
        # NOTE(review): any other missing attribute implicitly returns
        # None instead of raising AttributeError — confirm no caller
        # depends on this before changing it.
        if attr == 'array':
            return self._data_storage_parent.get_data(self._lay_indexes, True)
        elif attr == '__getstate__':
            raise AttributeError(attr)

    def set_data(self, data):
        # replace this layer's data in the parent storage, keeping the
        # current multiplier
        self._data_storage_parent.set_data(data, self._lay_indexes, [self.factor])

    def get_data(self):
        # fetch this layer's data from the parent without the multiplier
        return self._data_storage_parent.get_data(self._lay_indexes, False)

    def get_data_const_val(self):
        # constants may be stored as a one-element list; unwrap if so
        if isinstance(self.data_const_value, list):
            return self.data_const_value[0]
        else:
            return self.data_const_value
class DataStorage(object):
"""
Stores and retrieves data.
Parameters
----------
sim_data : simulation data class
reference to the simulation data class
data_dimensions : data dimensions class
a data dimensions class for the data being stored
get_file_entry : method reference
method that returns the file entry for the stored data
data_storage_type : enum
how the data will be stored (internally, as a constant, as an external
file)
data_structure_type : enum
what internal type is the data stored in (ndarray, recarray, scalar)
layer_shape : int
number of data layers
layered : boolean
is the data layered
layer_storage : MultiList<LayerStorage>
one or more dimensional list of LayerStorage
Attributes
----------
data_storage_type : list
list of data storage types, one for each layer
data_const_value : list
list of data constants, one for each layer
external_file_path : list
list of external file paths, one for each layer
multiplier : list
list of multipliers, one for each layer
print_format : list
list of print formats, one for each layer
data_structure_type :
what internal type is the data stored in (ndarray, recarray, scalar)
layered : boolean
is the data layered
pre_data_comments : string
any comments before the start of the data
comments : OrderedDict
any comments mixed in with the data, dictionary keys are data lines
post_data_comments : string
any comments after the end of the data
Methods
-------
override_data_type : (index, data_type)
overrides the data type used in a recarray at index "index" with data
type "data_type"
get_external_file_path(layer)
gets the path to an external file for layer "layer"
get_const_val(layer)
gets the constant value of a given layer. data storage type for layer
must be "internal_constant".
has_data(layer) : boolean
returns true if data exists for the specified layer, false otherwise
get_data(layer) : ndarray/recarray/string
returns the data for the specified layer
update_item(data, key_index)
updates the data in a recarray at index "key_index" with data "data".
data is a list containing all data for a single record in the
recarray. . data structure type must be recarray
append_data(data)
appends data "data" to the end of a recarray. data structure type must
be recarray
set_data(data, layer=None, multiplier=[1.0]
sets the data being stored to "data" for layer "layer", replacing all
data for that layer. a multiplier can be specified.
get_active_layer_indices() : list
returns the indices of all layers expected to contain data
store_internal(data, layer=None, const=False, multiplier=[1.0])
store data "data" at layer "layer" internally
store_external(file_path, layer=None, multiplier=[1.0], print_format=None,
data=None, do_not_verify=False) store data "data" at layer "layer"
externally in file "file_path"
external_to_external(new_external_file, multiplier=None, layer=None)
copies existing external data to the new file location and points to
the new file
external_to_internal(layer_num=None, store_internal=False) :
ndarray/recarray
loads existing external data for layer "layer_num" and returns it. if
store_internal is True it also storages the data internally,
changing the storage type for "layer_num" layer to internal.
internal_to_external(new_external_file, multiplier=None, layer=None,
print_format=None)
stores existing internal data for layer "layer" to external file
"new_external_file"
read_data_from_file(layer, fd=None, multiplier=None) : (ndarray, int)
reads in data from a given file "fd" as data from layer "layer".
returns data as an ndarray along with the size of the data
to_string(val, type, is_cellid=False, possible_cellid=False)
converts data "val" of type "type" to a string. is_cellid is True if
the data type is known to be a cellid and is treated as such. when
possible_cellid is True the data is checked to see if it matches the
shape/dimensions of a cellid before using it as one.
resolve_data_size(index) : int
resolves the size of a given data element in a recarray based on the
names in the existing rec_array. assumes repeating data element
names follow the format <data_element_name>_X. returns the number of
times the data element repeats.
convert_data(data, type) : type
converts data "data" to type "type" and returns the converted data
flatten()
converts layered data to a non-layered data
make_layered()
converts non-layered data to layered data
See Also
--------
Notes
-----
Examples
--------
"""
def __init__(self, sim_data, data_dimensions, get_file_entry,
data_storage_type=DataStorageType.internal_array,
data_structure_type=DataStructureType.ndarray,
layer_shape=(1,),
layered=False):
self.data_dimensions = data_dimensions
self._simulation_data = sim_data
self._get_file_entry = get_file_entry
self._data_type_overrides = {}
self._data_storage_type = data_storage_type
self.layer_storage = MultiList(shape=layer_shape,
callback=self._create_layer)
#self.layer_storage = [LayerStorage(self, x, data_storage_type)
# for x in range(layer_shape)]
self.data_structure_type = data_structure_type
package_dim = self.data_dimensions.package_dim
self.in_model = self.data_dimensions is not None and \
len(package_dim.package_path) > 1 and \
package_dim.model_dim[0].model_name.lower() == \
package_dim.package_path[0]
if data_structure_type == DataStructureType.recarray:
self.build_type_list(resolve_data_shape=False)
self._data_type = None
else:
self._data_type = self.data_dimensions.structure.\
get_datum_type(return_enum_type=True)
self.layered = layered
# initialize comments
self.pre_data_comments = None
self.comments = OrderedDict()
def __repr__(self):
return self.get_data_str(True)
def __str__(self):
return self.get_data_str(False)
def _create_layer(self, indexes):
return LayerStorage(self, indexes, self._data_storage_type)
def flatten(self):
self.layered = False
storage_type = self.layer_storage.first_item().data_storage_type
self.layer_storage = MultiList(mdlist=[LayerStorage(self, 0,
storage_type)])
def make_layered(self):
if not self.layered:
if self.data_structure_type != DataStructureType.ndarray:
message = 'Data structure type "{}" does not support ' \
'layered data.'.format(self.data_structure_type)
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(
self.data_dimensions.structure.get_model(),
self.data_dimensions.structure.get_package(),
self.data_dimensions.structure.path, 'making data layered',
self.data_dimensions.structure.name, inspect.stack()[0][3],
type_, value_, traceback_, message,
self._simulation_data.debug)
if self.layer_storage.first_item().data_storage_type == \
DataStorageType.external_file:
message = 'Converting external file data into layered ' \
'data currently not support.'
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(
self.data_dimensions.structure.get_model(),
self.data_dimensions.structure.get_package(),
self.data_dimensions.structure.path, 'making data layered',
self.data_dimensions.structure.name, inspect.stack()[0][3],
type_, value_, traceback_, message,
self._simulation_data.debug)
previous_storage = self.layer_storage.first_item()
data = previous_storage.get_data()
storage_type = previous_storage.data_storage_type
data_dim = self.get_data_dimensions(None)
self.layer_storage = MultiList(shape=(data_dim[0],),
callback=self._create_layer)
#self.layer_storage = [LayerStorage(self, x, storage_type)
# for x in range(data_dim[0])]
if previous_storage.data_storage_type == \
DataStorageType.internal_constant:
for storage in self.layer_storage.elements():
storage.data_const_value = \
previous_storage.data_const_value
elif previous_storage.data_storage_type == \
DataStorageType.internal_array:
data_ml = MultiList(data)
if not (data_ml.get_total_size() ==
self.layer_storage.get_total_size()):
message = 'Size of data ({}) does not match expected ' \
'value of {}' \
'.'.format(data_ml.get_total_size(),
self.layer_storage.get_total_size())
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(
self.data_dimensions.structure.get_model(),
self.data_dimensions.structure.get_package(),
self.data_dimensions.structure.path,
'making data layered',
self.data_dimensions.structure.name,
inspect.stack()[0][3],
type_, value_, traceback_, message,
self._simulation_data.debug)
for data_layer, storage in zip(data,
self.layer_storage.elements()):
storage.internal_data = data_layer
storage.factor = previous_storage.factor
storage.iprn = previous_storage.iprn
self.layered = True
def get_data_str(self, formal):
data_str = ''
# Assemble strings for internal array data
for index, storage in enumerate(self.layer_storage.elements()):
if storage.data_storage_type == DataStorageType.internal_array:
if storage.internal_data is not None:
header = self._get_layer_header_str(index)
if formal:
if self.layered:
data_str = '{}Layer_{}{{{}}}' \
'\n({})\n'.format(data_str, index + 1,
header, repr(storage))
else:
data_str = '{}{{{}}}\n({})\n'.format(data_str,
header,
repr(storage))
else:
data_str = '{}{{{}}}\n({})\n'.format(data_str, header,
str(storage))
elif storage.data_storage_type == \
DataStorageType.internal_constant:
if storage.data_const_value is not None:
data_str = '{}{{{}}}' \
'\n'.format(data_str,
self._get_layer_header_str(index))
return data_str
def _get_layer_header_str(self, layer):
header_list = []
if self.layer_storage[layer].data_storage_type == \
DataStorageType.external_file:
header_list.append('open/close '
'{}'.format(self.layer_storage[layer].fname))
elif self.layer_storage[layer].data_storage_type == \
DataStorageType.internal_constant:
header_list.append('constant {}'.format(self.layer_storage[layer]))
else:
header_list.append('internal')
if self.layer_storage[layer].factor != 1.0 and \
self.layer_storage[layer].factor != 1:
header_list.append('factor '
'{}'.format(self.layer_storage[layer].factor))
if self.layer_storage[layer].iprn is not None:
header_list.append('iprn '
'{}'.format(self.layer_storage[layer].iprn))
if len(header_list) > 0:
return ', '.join(header_list)
else:
return ''
def init_layers(self, dimensions):
self.layer_storage= MultiList(shape=dimensions,
callback=self._create_layer)
def add_layer(self, dimension=2):
self.layer_storage.increment_dimension(dimension, self._create_layer)
def override_data_type(self, index, data_type):
self._data_type_overrides[index] = data_type
def get_external_file_path(self, layer):
if layer is None:
return self.layer_storage[0].fname
else:
return self.layer_storage[layer].fname
def get_const_val(self, layer=None):
if layer is None:
if not self.layer_storage.get_total_size() >= 1:
message = 'Can not get constant value. No data is available.'
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(
self.data_dimensions.structure.get_model(),
self.data_dimensions.structure.get_package(),
self.data_dimensions.structure.path,
'getting constant value',
self.data_dimensions.structure.name,
inspect.stack()[0][3],
type_, value_, traceback_, message,
self._simulation_data.debug)
first_item = self.layer_storage.first_item()
if not first_item.data_storage_type == \
DataStorageType.internal_constant:
message = 'Can not get constant value. Storage type must be ' \
'internal_constant.'
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(
self.data_dimensions.structure.get_model(),
self.data_dimensions.structure.get_package(),
self.data_dimensions.structure.path,
'getting constant value',
self.data_dimensions.structure.name,
inspect.stack()[0][3],
type_, value_, traceback_, message,
self._simulation_data.debug)
return first_item.get_data_const_val()
else:
if not self.layer_storage.in_shape(layer):
message = 'Can not get constant value. Layer "{}" is not a ' \
'valid layer.'.format(layer)
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(
self.data_dimensions.structure.get_model(),
self.data_dimensions.structure.get_package(),
self.data_dimensions.structure.path,
'getting constant value',
self.data_dimensions.structure.name,
inspect.stack()[0][3],
type_, value_, traceback_, message,
self._simulation_data.debug)
if not self.layer_storage[layer].data_storage_type == \
DataStorageType.internal_constant:
message = 'Can not get constant value. Storage type must be ' \
'internal_constant.'.format(layer)
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(
self.data_dimensions.structure.get_model(),
self.data_dimensions.structure.get_package(),
self.data_dimensions.structure.path,
'getting constant value',
self.data_dimensions.structure.name,
inspect.stack()[0][3],
type_, value_, traceback_, message,
self._simulation_data.debug)
return self.layer_storage[layer].get_data_const_val()
def has_data(self, layer=None):
ret_val = self._access_data(layer, False)
return ret_val is not None and ret_val != False
def get_data(self, layer=None, apply_mult=True):
return self._access_data(layer, True, apply_mult=apply_mult)
def _access_data(self, layer, return_data=False, apply_mult=True):
layer_check = self._resolve_layer(layer)
if self.layer_storage[layer_check].data_storage_type == \
DataStorageType.external_file:
if return_data:
return self.external_to_internal(layer)
else:
return True
else:
if (self.layer_storage[layer_check].internal_data is None and
self.layer_storage[layer_check].data_storage_type ==
DataStorageType.internal_array) or \
(self.layer_storage[layer_check].data_const_value is None and
self.layer_storage[layer_check].data_storage_type ==
DataStorageType.internal_constant):
return None
if self.data_structure_type == DataStructureType.ndarray and \
self.layer_storage[layer_check].data_const_value is None and \
self.layer_storage[layer_check].internal_data is None:
return None
if not (layer is None or self.layer_storage.in_shape(layer)):
message = 'Layer "{}" is an invalid layer.'.format(layer)
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(
self.data_dimensions.structure.get_model(),
self.data_dimensions.structure.get_package(),
self.data_dimensions.structure.path,
'accessing data',
self.data_dimensions.structure.name,
inspect.stack()[0][3],
type_, value_, traceback_, message,
self._simulation_data.debug)
if layer is None:
if self.data_structure_type == DataStructureType.ndarray or \
self.data_structure_type == DataStructureType.scalar:
if return_data:
data = self._build_full_data(apply_mult)
if data is None:
if self.layer_storage.first_item().data_storage_type == \
DataStorageType.internal_constant:
return self.layer_storage.first_item().\
get_data()[0]
else:
return data
else:
if self.data_structure_type == DataStructureType.scalar:
return self.layer_storage.first_item().\
internal_data is not None
check_storage = self.layer_storage[layer_check]
return (check_storage.data_const_value is not None and
check_storage.data_storage_type ==
DataStorageType.internal_constant) or (
check_storage.internal_data is not None and
check_storage.data_storage_type ==
DataStorageType.internal_array)
else:
if self.layer_storage[layer_check].data_storage_type == \
DataStorageType.internal_constant:
if return_data:
# recarray stored as a constant. currently only
# support grid-based constant recarrays. build
# a recarray of all cells
data_list = []
model_grid = self.data_dimensions.get_model_grid()
structure = self.data_dimensions.structure
package_dim = self.data_dimensions.package_dim
for cellid in model_grid.get_all_model_cells():
data_line = (cellid,) + \
(self.layer_storage.first_item().
data_const_value,)
if len(structure.data_item_structures) > 2:
# append None any expected optional data
for data_item_struct in \
structure.data_item_structures[2:]:
if (data_item_struct.name !=
'boundname' or
package_dim.boundnames()):
data_line = data_line + (None,)
data_list.append(data_line)
return np.rec.array(data_list,
self._recarray_type_list)
else:
return self.layer_storage[layer_check
].data_const_value is not None
else:
if return_data:
return self.layer_storage.first_item().\
internal_data
else:
return True
elif self.layer_storage[layer].data_storage_type == \
DataStorageType.internal_array:
if return_data:
return self.layer_storage[layer].internal_data
else:
return self.layer_storage[layer].internal_data is not None
elif self.layer_storage[layer].data_storage_type == \
DataStorageType.internal_constant:
layer_storage = self.layer_storage[layer]
if return_data:
data = self._fill_const_layer(layer)
if data is None:
if layer_storage.data_storage_type == \
DataStructureType.internal_constant:
return layer_storage.data_const_value[0]
else:
return data
else:
return layer_storage.data_const_value is not None
else:
if return_data:
return self.get_external(layer)
else:
return True
    def append_data(self, data):
        """Append list data `data` (rows for a recarray) to the stored data.

        Only supported when this storage object holds a recarray
        (DataStructureType.recarray); raises MFDataException otherwise.
        The recarray type list is widened (or the incoming rows padded with
        placeholders) so the stored recarray and the new rows agree in width.
        """
        # currently only support appending to recarrays
        if not (self.data_structure_type == DataStructureType.recarray):
            message = 'Can not append to data structure "{}". Can only ' \
                      'append to a recarray datastructure' \
                      '.'.format(self.data_structure_type)
            type_, value_, traceback_ = sys.exc_info()
            raise MFDataException(
                self.data_dimensions.structure.get_model(),
                self.data_dimensions.structure.get_package(),
                self.data_dimensions.structure.path,
                'appending data',
                self.data_dimensions.structure.name,
                inspect.stack()[0][3],
                type_, value_, traceback_, message,
                self._simulation_data.debug)
        internal_data = self.layer_storage.first_item().internal_data
        if internal_data is None:
            # nothing stored yet; build the type list from the new rows
            # if the widths disagree and store the rows directly
            if len(data[0]) != len(self._recarray_type_list):
                # rebuild type list using existing data as a guide
                self.build_type_list(data=data)
            self.set_data(np.rec.array(data, self._recarray_type_list))
        else:
            if len(self.layer_storage.first_item().internal_data[0]) < \
                    len(data[0]):
                # Rebuild recarray to fit larger size
                for index in range(len(internal_data[0]), len(data[0])):
                    self._duplicate_last_item()
                internal_data_list = internal_data.tolist()
                for data_item in data:
                    internal_data_list.append(data_item)
                self._add_placeholders(internal_data_list)
                self.set_data(np.rec.array(internal_data_list,
                                           self._recarray_type_list))
            else:
                if len(self.layer_storage.first_item().internal_data[0]) \
                        > len(data[0]):
                    # Add placeholders to data
                    self._add_placeholders(data)
                # widths now agree; concatenate new rows onto stored rows
                self.set_data(np.hstack(
                    (internal_data, np.rec.array(data,
                                                 self._recarray_type_list))))
    def set_data(self, data, layer=None, multiplier=[1.0], key=None,
                 autofill=False):
        """Store `data`, dispatching on this object's data structure type.

        List/scalar structures go through ``_set_list``; array structures go
        through ``_set_array``, with special handling for the 'aux' variable
        where `data` holds one array per auxiliary variable (one layer each).

        NOTE: ``multiplier=[1.0]`` is a mutable default; it is never mutated
        here, only passed through, so it is safe as written.
        """
        if self.data_structure_type == DataStructureType.recarray or \
                self.data_structure_type == DataStructureType.scalar:
            self._set_list(data, layer, multiplier, key, autofill)
        else:
            data_dim = self.data_dimensions
            struct = data_dim.structure
            if struct.name == 'aux':
                # make a list out of a single item
                if isinstance(data, int) or isinstance(data, float) or \
                        isinstance(data, str):
                    data = [[data]]
                # handle special case of aux variables in an array
                self.layered = True
                aux_var_names = data_dim.package_dim.get_aux_variables()
                # aux_var_names[0] includes the 'aux' keyword itself, hence
                # the "- 1" when comparing against the number of data arrays
                if len(data) == len(aux_var_names[0]) - 1:
                    for layer, aux_var_data in enumerate(data):
                        if layer > 0:
                            self.add_layer()
                        self._set_array(aux_var_data, [layer], multiplier, key,
                                        autofill)
                else:
                    message = 'Unable to set data for aux variable. ' \
                              'Expected {} aux variables but got ' \
                              '{}.'.format(len(aux_var_names[0]),
                                           len(data))
                    type_, value_, traceback_ = sys.exc_info()
                    raise MFDataException(
                        self.data_dimensions.structure.get_model(),
                        self.data_dimensions.structure.get_package(),
                        self.data_dimensions.structure.path,
                        'setting aux variables', data_dim.structure.name,
                        inspect.stack()[0][3], type_, value_, traceback_,
                        message, self._simulation_data.debug)
            else:
                self._set_array(data, layer, multiplier, key, autofill)
def _set_list(self, data, layer, multiplier, key, autofill):
if isinstance(data, dict):
if 'filename' in data:
self.process_open_close_line(data, layer)
return
self.store_internal(data, layer, multiplier, key=key,
autofill=autofill)
    def _set_array(self, data, layer, multiplier, key, autofill):
        """Store array data, first as a single layer, then falling back to
        interpreting `data` as one entry per layer.

        Raises MFDataException when neither interpretation succeeds.
        """
        # make a list out of a single item
        if isinstance(data, int) or isinstance(data, float) or isinstance(data, str):
            data = [data]
        # try to set as a single layer
        if not self._set_array_layer(data, layer, multiplier, key):
            # check for possibility of multi-layered data
            success = False
            layer_num = 0
            # only attempt multi-layer storage when the list length matches
            # the total number of layers
            if layer is None and self.data_structure_type == \
                    DataStructureType.ndarray and len(data) == \
                    self.layer_storage.get_total_size():
                self.layered = True
                # loop through list and try to store each list entry as a layer
                success = True
                for layer_num, layer_data in enumerate(data):
                    if not isinstance(layer_data, list) and \
                            not isinstance(layer_data, dict) and \
                            not isinstance(layer_data, np.ndarray):
                        layer_data = [layer_data]
                    layer_index = self.layer_storage.nth_index(layer_num)
                    success = success and self._set_array_layer(layer_data,
                                                                layer_index,
                                                                multiplier,
                                                                key)
            if not success:
                message = 'Unable to set data "{}" layer {}. Data is not ' \
                          'in a valid format' \
                          '.'.format(self.data_dimensions.structure.name,
                                     layer_num)
                type_, value_, traceback_ = sys.exc_info()
                raise MFDataException(
                    self.data_dimensions.structure.get_model(),
                    self.data_dimensions.structure.get_package(),
                    self.data_dimensions.structure.path, 'setting array data',
                    self.data_dimensions.structure.name, inspect.stack()[0][3],
                    type_, value_, traceback_, message,
                    self._simulation_data.debug)
        elif layer is None:
            # stored successfully as a single (unlayered) array
            self.layered = False
            self.layer_storage.list_shape = (1,)
    def _set_array_layer(self, data, layer, multiplier, key):
        """Try to store `data` as a single layer.

        Recognizes, in order: a single constant value; a dict with
        'filename' (open/close) or 'data' (internal) keys; a list beginning
        with the 'internal' or 'open/close' keywords (or a bare file path,
        assumed open/close); and finally a plain internal array.  Returns
        True on success, False when `data` could not be interpreted.
        """
        # look for a single constant value
        data_type = self.data_dimensions.structure.\
            get_datum_type(return_enum_type=True)
        if not isinstance(data, dict) and len(data) == 1 and \
                self._is_type(data[0], data_type):
            # store data as const
            self.store_internal(data, layer, True, multiplier, key=key)
            return True
        # look for internal and open/close data
        if isinstance(data, dict):
            if 'data' in data:
                if isinstance(data['data'], int) or \
                        isinstance(data['data'], float) or \
                        isinstance(data['data'], str):
                    # data should always in in a list/array
                    data['data'] = [data['data']]
            if 'filename' in data:
                self.process_open_close_line(data, layer)
                return True
            elif 'data' in data:
                multiplier, iprn, flags_found = \
                    self.process_internal_line(data)
                if len(data['data']) == 1:
                    # merge multiplier with single value and make constant
                    if DatumUtil.is_float(multiplier):
                        mult = 1.0
                    else:
                        mult = 1
                    self.store_internal([data['data'][0] * multiplier], layer,
                                        True, [mult], key=key,
                                        print_format=iprn)
                else:
                    self.store_internal(data['data'], layer, False,
                                        [multiplier], key=key,
                                        print_format=iprn)
                return True
        elif isinstance(data[0], str):
            if data[0].lower() == 'internal':
                multiplier, iprn, \
                    flags_found = self.process_internal_line(data)
                self.store_internal(data[-1], layer, False, [multiplier],
                                    key=key, print_format=iprn)
                return True
            elif data[0].lower() != 'open/close':
                # assume open/close is just omitted, though test data file to
                # be sure
                new_data = data[:]
                new_data.insert(0, 'open/close')
            else:
                new_data = data[:]
            multiplier, iprn, binary = self.process_open_close_line(new_data,
                                                                    layer,
                                                                    False)
            model_name = \
                self.data_dimensions.package_dim.model_dim[0].model_name
            resolved_path = \
                self._simulation_data.mfpath.resolve_path(new_data[1],
                                                          model_name)
            # only accept the file if its contents verify against the
            # expected data size/types
            if self._verify_data(FileIter(resolved_path), layer):
                # store location to file
                self.store_external(new_data[1], layer, [multiplier],
                                    print_format=iprn, binary=binary,
                                    do_not_verify=True)
                return True
        # try to resolve as internal array
        layer_storage = self.layer_storage[self._resolve_layer(layer)]
        if not (layer_storage.data_storage_type ==
                DataStorageType.internal_constant and
                ArrayUtil.has_one_item(data)) and \
                self._verify_data(MultiListIter(data), layer):
            # store data as is
            self.store_internal(data, layer, False, multiplier, key=key)
            return True
        return False
def get_active_layer_indices(self):
layer_index = []
for index in self.layer_storage.indexes():
if self.layer_storage[index].fname is not None or \
self.layer_storage[index].internal_data is not None:
layer_index.append(index)
return layer_index
def get_external(self, layer=None):
if not (layer is None or self.layer_storage.in_shape(layer)):
message = 'Can not get external data for layer "{}"' \
'.'.format(layer)
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(
self.data_dimensions.structure.get_model(),
self.data_dimensions.structure.get_package(),
self.data_dimensions.structure.path,
'getting external data',
self.data_dimensions.structure.name,
inspect.stack()[0][3],
type_, value_, traceback_, message,
self._simulation_data.debug)
    def store_internal(self, data, layer=None, const=False, multiplier=[1.0],
                       key=None, autofill=False,
                       print_format=None):
        """Store `data` internally (in memory) in this storage object.

        For recarray structures, `data` is coerced into a numpy recarray
        matching ``self._recarray_type_list`` (optionally auto-filling a
        tagged leading keyword when `autofill` is set).  For scalar
        structures it is stored directly.  Otherwise `data` is stored in
        layer `layer` either as a constant (`const` True) or reshaped into
        the layer's array dimensions.
        """
        if self.data_structure_type == DataStructureType.recarray:
            if self.layer_storage.first_item().data_storage_type == \
                    DataStorageType.internal_constant:
                self.layer_storage.first_item().data_const_value = data
            else:
                self.layer_storage.first_item().data_storage_type = \
                    DataStorageType.internal_array
                if data is None or isinstance(data, np.recarray):
                    # already a recarray (or clearing); verify and store
                    self._verify_list(data)
                    self.layer_storage.first_item().internal_data = data
                else:
                    if autofill and data is not None:
                        if isinstance(data, tuple) and isinstance(data[0],
                                                                  tuple):
                            # convert to list of tuples
                            data = list(data)
                        if not isinstance(data, list):
                            # put data in a list format for recarray
                            data = [(data,)]
                        # auto-fill tagged keyword
                        structure = self.data_dimensions.structure
                        data_item_structs = structure.data_item_structures
                        if data_item_structs[0].tagged and not \
                                data_item_structs[0].type == DatumType.keyword:
                            for data_index, data_entry in enumerate(data):
                                # stop prepending once a row already starts
                                # with the keyword
                                if (data_item_structs[0].type ==
                                        DatumType.string and
                                        data_entry[0].lower() ==
                                        data_item_structs[0].name.lower()):
                                    break
                                data[data_index] = \
                                    (data_item_structs[0].name.lower(),) \
                                    + data[data_index]
                    if data is None:
                        self.set_data(None)
                    else:
                        self.build_type_list(data=data, key=key)
                        if autofill and data is not None:
                            # resolve any fields with data types that do not
                            # agree with the expected type list
                            self._resolve_multitype_fields(data)
                        if isinstance(data, list):
                            # data needs to be stored as tuples within a list.
                            # if this is not the case try to fix it
                            self._tupleize_data(data)
                            # add placeholders to data so it agrees with
                            # expected dimensions of recarray
                            self._add_placeholders(data)
                        self._verify_list(data)
                        try:
                            new_data = np.rec.array(data,
                                                    self._recarray_type_list)
                        except:
                            # recarray construction failed; report the
                            # expected field layout to the user
                            data_expected = []
                            for data_type in self._recarray_type_list:
                                data_expected.append('<{}>'.format(
                                    data_type[0]))
                            message = 'An error occurred when storing data ' \
                                      '"{}" in a recarray. {} data is a one ' \
                                      'or two dimensional list containing ' \
                                      'the variables "{}" (some variables ' \
                                      'may be optional, see MF6 ' \
                                      'documentation), but data "{}" was ' \
                                      'supplied.'.format(
                                          self.data_dimensions.structure.name,
                                          self.data_dimensions.structure.name,
                                          ' '.join(data_expected), data)
                            type_, value_, traceback_ = sys.exc_info()
                            raise MFDataException(
                                self.data_dimensions.structure.get_model(),
                                self.data_dimensions.structure.get_package(),
                                self.data_dimensions.structure.path,
                                'setting array data',
                                self.data_dimensions.structure.name,
                                inspect.stack()[0][3], type_, value_,
                                traceback_, message,
                                self._simulation_data.debug)
                        self.set_data(new_data)
        elif self.data_structure_type == DataStructureType.scalar:
            self.layer_storage.first_item().internal_data = data
        else:
            layer, multiplier = self._store_prep(layer, multiplier)
            dimensions = self.get_data_dimensions(layer)
            if const:
                self.layer_storage[layer].data_storage_type = \
                    DataStorageType.internal_constant
                self.layer_storage[layer].data_const_value = \
                    [mfdatautil.get_first_val(data)]
            else:
                self.layer_storage[layer].data_storage_type = \
                    DataStorageType.internal_array
                try:
                    self.layer_storage[layer].internal_data = \
                        np.reshape(data, dimensions)
                except:
                    message = 'An error occurred when reshaping data ' \
                              '"{}" to store. Expected data ' \
                              'dimensions: ' \
                              '{}'.format(self.data_dimensions.structure.name,
                                          dimensions)
                    type_, value_, traceback_ = sys.exc_info()
                    raise MFDataException(
                        self.data_dimensions.structure.get_model(),
                        self.data_dimensions.structure.get_package(),
                        self.data_dimensions.structure.path,
                        'setting array data', self.data_dimensions.
                        structure.name, inspect.stack()[0][3], type_,
                        value_, traceback_, message,
                        self._simulation_data.debug)
            self.layer_storage[layer].factor = multiplier
            self.layer_storage[layer].iprn = print_format
def _resolve_multitype_fields(self, data):
# find any data fields where the data is not a consistent type
itype_len = len(self._recarray_type_list)
for data_entry in data:
for index, data_val in enumerate(data_entry):
if index < itype_len and \
self._recarray_type_list[index][1] != object and \
type(data_val) != self._recarray_type_list[index][1] \
and (type(data_val) != int or
self._recarray_type_list[index][1] != float):
# for inconsistent types use generic object type
self._recarray_type_list[index] = \
(self._recarray_type_list[index][0], object)
    def store_external(self, file_path, layer=None, multiplier=[1.0],
                       print_format=None, data=None, do_not_verify=False,
                       binary=False):
        """Point layer `layer` at external file `file_path`, optionally
        writing `data` out to that file first.

        When `data` is given: recarray data is stored internally first so a
        file entry can be generated and then written; array data is written
        value-by-value (raising MFDataException when `data` is smaller than
        the expected size).  In all cases the layer is then flagged as
        externally stored and its internal data cleared.
        """
        layer, multiplier = self._store_prep(layer, multiplier)
        if data is not None:
            if self.data_structure_type == DataStructureType.recarray:
                # store data internally first so that a file entry can be generated
                self.store_internal(data, layer, False, [multiplier], None,
                                    False, print_format)
                ext_file_entry = self._get_file_entry()
                # create external file and write file entry to the file
                data_dim = self.data_dimensions
                model_name = data_dim.package_dim.model_dim[0].model_name
                fp = self._simulation_data.mfpath.resolve_path(file_path,
                                                               model_name)
                try:
                    fd = open(fp, 'w')
                except:
                    message = 'Unable to open file {}. Make sure the file ' \
                              'is not locked and the folder exists' \
                              '.'.format(fp)
                    type_, value_, traceback_ = sys.exc_info()
                    raise MFDataException(
                        self.data_dimensions.structure.get_model(),
                        self.data_dimensions.structure.get_package(),
                        self.data_dimensions.structure.path,
                        'opening external file for writing',
                        data_dim.structure.name, inspect.stack()[0][3], type_,
                        value_, traceback_, message,
                        self._simulation_data.debug)
                fd.write(ext_file_entry)
                fd.close()
                # set as external data
                self.layer_storage.first_item().internal_data = None
            else:
                # store data externally in file
                data_size = self._get_data_size(layer)
                current_size = 0
                data_dim = self.data_dimensions
                data_type = data_dim.structure.data_item_structures[0].type
                model_name = data_dim.package_dim.model_dim[0].model_name
                fp = self._simulation_data.mfpath.resolve_path(file_path,
                                                               model_name)
                try:
                    fd = open(fp, 'w')
                except:
                    message = 'Unable to open file {}. Make sure the file ' \
                              'is not locked and the folder exists' \
                              '.'.format(fp)
                    type_, value_, traceback_ = sys.exc_info()
                    raise MFDataException(
                        self.data_dimensions.structure.get_model(),
                        self.data_dimensions.structure.get_package(),
                        self.data_dimensions.structure.path,
                        'opening external file for writing',
                        data_dim.structure.name, inspect.stack()[0][3], type_,
                        value_, traceback_, message,
                        self._simulation_data.debug)
                for data_item in MultiListIter(data, True):
                    if data_item[2] and current_size > 0:
                        # new list/dimension, add appropriate formatting to
                        # the file
                        fd.write('\n')
                    fd.write('{} '.format(self.to_string(data_item[0],
                                                         data_type)))
                    current_size += 1
                if current_size != data_size:
                    message = 'Not enough data for "{}" provided for file' \
                              ' {}. Expected data size is {}, actual data ' \
                              'size is' \
                              '{}.'.format(data_dim.structure.path, fd.name,
                                           data_size, current_size)
                    type_, value_, traceback_ = sys.exc_info()
                    fd.close()
                    raise MFDataException(
                        self.data_dimensions.structure.get_model(),
                        self.data_dimensions.structure.get_package(),
                        self.data_dimensions.structure.path,
                        'storing external data', data_dim.structure.name,
                        inspect.stack()[0][3], type_, value_, traceback_,
                        message, self._simulation_data.debug)
                fd.close()
                self.layer_storage[layer].factor = multiplier
                self.layer_storage[layer].internal_data = None
        else:
            # no data supplied; just clear internal storage for the layer
            if self.data_structure_type == DataStructureType.recarray:
                self.layer_storage.first_item().internal_data = None
            else:
                self.layer_storage[layer].factor = multiplier
                self.layer_storage[layer].internal_data = None
        # point to the external file and set flags
        self.layer_storage[layer].fname = file_path
        self.layer_storage[layer].iprn = print_format
        self.layer_storage[layer].binary = binary
        self.layer_storage[layer].data_storage_type = \
            DataStorageType.external_file
    def external_to_external(self, new_external_file, multiplier=None,
                             layer=None):
        """Copy layer `layer`'s external data file to `new_external_file`
        and update this storage to reference the new file.

        Only supported for ndarray structures; raises MFDataException for
        other structure types, invalid layers, or a layer with no file name.
        Note: the `multiplier` parameter is accepted but not used; the
        existing layer factor is carried over.
        """
        # currently only support files containing ndarrays
        if not (self.data_structure_type == DataStructureType.ndarray):
            message = 'Can not copy external file of type "{}". Only ' \
                      'files containing ndarrays currently supported' \
                      '.'.format(self.data_structure_type)
            type_, value_, traceback_ = sys.exc_info()
            raise MFDataException(
                self.data_dimensions.structure.get_model(),
                self.data_dimensions.structure.get_package(),
                self.data_dimensions.structure.path,
                'copy external file',
                self.data_dimensions.structure.name,
                inspect.stack()[0][3],
                type_, value_, traceback_, message,
                self._simulation_data.debug)
        if not ((layer is None and self.layer_storage.get_total_size() == 1) or
                (layer is not None and self.layer_storage.in_shape(layer))):
            if layer is None:
                message = 'When no layer is supplied the data must contain ' \
                          'only one layer. Data contains {} layers' \
                          '.' .format(self.layer_storage.get_total_size())
            else:
                message = 'layer "{}" is not a valid layer'.format(layer)
            type_, value_, traceback_ = sys.exc_info()
            raise MFDataException(
                self.data_dimensions.structure.get_model(),
                self.data_dimensions.structure.get_package(),
                self.data_dimensions.structure.path,
                'copy external file',
                self.data_dimensions.structure.name,
                inspect.stack()[0][3],
                type_, value_, traceback_, message,
                self._simulation_data.debug)
        # get data storage
        if layer is None:
            # NOTE(review): defaulting to layer 1 (not 0) looks suspicious
            # given the single-layer check above -- confirm against
            # layer_storage's indexing convention
            layer = 1
        if self.layer_storage[layer].fname is None:
            message = 'No file name exists for layer {}.'.format(layer)
            type_, value_, traceback_ = sys.exc_info()
            raise MFDataException(
                self.data_dimensions.structure.get_model(),
                self.data_dimensions.structure.get_package(),
                self.data_dimensions.structure.path,
                'copy external file',
                self.data_dimensions.structure.name,
                inspect.stack()[0][3],
                type_, value_, traceback_, message,
                self._simulation_data.debug)
        # copy file to new location
        copyfile(self.layer_storage[layer].fname, new_external_file)
        # update
        self.store_external(new_external_file, layer,
                            [self.layer_storage[layer].factor],
                            self.layer_storage[layer].iprn,
                            binary=self.layer_storage[layer].binary)
    def external_to_internal(self, layer=None, store_internal=False):
        """Read externally-stored ndarray data back into memory and return it.

        With `layer` None the full (all-layer) array is built; otherwise the
        single layer is read from its file and its factor applied.  When
        `store_internal` is True the result is also stored internally.
        Only supported for ndarray structures.
        """
        # currently only support files containing ndarrays
        if self.data_structure_type != DataStructureType.ndarray:
            path = self.data_dimensions.structure.path
            message= 'Can not convert {} to internal data. External to ' \
                     'internal file operations currently only supported ' \
                     'for ndarrays.'.format(path[-1])
            type_, value_, traceback_ = sys.exc_info()
            raise MFDataException(
                self.data_dimensions.structure.get_model(),
                self.data_dimensions.structure.get_package(),
                self.data_dimensions.structure.path,
                'opening external file for writing',
                self.data_dimensions.structure.name, inspect.stack()[0][3],
                type_, value_, traceback_, message,
                self._simulation_data.debug)
        if layer is None:
            data_out = self._build_full_data(store_internal)
        else:
            # load data from external file
            data_out, current_size = self.read_data_from_file(layer)
            if self.layer_storage[layer].factor is not None:
                data_out = data_out * self.layer_storage[layer].factor
        if store_internal:
            self.store_internal(data_out, layer)
        return data_out
def internal_to_external(self, new_external_file, multiplier=None,
layer=None, print_format=None):
if layer is None:
self.store_external(new_external_file, layer, multiplier,
print_format,
self.layer_storage.first_item().internal_data)
else:
self.store_external(new_external_file, layer, multiplier,
print_format,
self.layer_storage[layer].internal_data)
    def read_data_from_file(self, layer, fd=None, multiplier=None,
                            print_format=None, data_item=None):
        """Read layer `layer`'s data from its external text file.

        Returns (data, size) where `data` is an ndarray reshaped to the
        layer's dimensions and `size` the number of values read.  When `fd`
        is supplied it is read from (and left open); otherwise the layer's
        file is resolved, opened and closed here.  Raises MFDataException
        when the file cannot be opened or holds too few values.

        NOTE(review): `layer` is passed to ``_get_data_size`` before the
        ``layer is None -> 0`` normalization -- presumably ``_get_data_size``
        accepts None; confirm.
        """
        if multiplier is not None:
            self.layer_storage[layer].factor = multiplier
        if print_format is not None:
            self.layer_storage[layer].iprn = print_format
        data_size = self._get_data_size(layer)
        # load variable data from file
        current_size = 0
        data_out = []
        if layer is None:
            layer = 0
        close_file = False
        if fd is None:
            close_file = True
            model_dim = self.data_dimensions.package_dim.model_dim[0]
            read_file = self._simulation_data.mfpath.resolve_path(
                self.layer_storage[layer].fname, model_dim.model_name)
            try:
                fd = open(read_file, 'r')
            except:
                message = 'Unable to open file {}. Make sure the file ' \
                          'is not locked and the folder exists' \
                          '.'.format(read_file)
                type_, value_, traceback_ = sys.exc_info()
                raise MFDataException(
                    self.data_dimensions.structure.get_model(),
                    self.data_dimensions.structure.get_package(),
                    self.data_dimensions.structure.path,
                    'opening external file for writing',
                    self.data_dimensions.structure.name, inspect.stack()[0][3],
                    type_, value_, traceback_, message,
                    self._simulation_data.debug)
        line = ' '
        ArrayUtil.reset_delimiter_used()
        while line != '':
            line = fd.readline()
            arr_line = ArrayUtil.split_data_line(line, True)
            for data in arr_line:
                if data != '':
                    if current_size == data_size:
                        # more values than expected; warn and stop reading
                        if self._simulation_data.verbosity_level.value >= \
                                VerbosityLevel.normal.value:
                            path = self.data_dimensions.structure.path
                            print('WARNING: More data found than expected in '
                                  'file {} for data '
                                  '"{}".'.format(fd.name,
                                                 path))
                        break
                    data_out.append(self.convert_data(data, self._data_type,
                                                      data_item))
                    current_size += 1
            if current_size == data_size:
                break
        if current_size != data_size:
            message = 'Not enough data in file {} for data "{}". ' \
                      'Expected data size {} but only found ' \
                      '{}.'.format(fd.name,
                                   self.data_dimensions.structure.name,
                                   data_size, current_size)
            type_, value_, traceback_ = sys.exc_info()
            if close_file:
                fd.close()
            raise MFDataException(self.data_dimensions.structure.get_model(),
                                  self.data_dimensions.structure.get_package(),
                                  self.data_dimensions.structure.path,
                                  'reading data file',
                                  self.data_dimensions.structure.name,
                                  inspect.stack()[0][3], type_, value_,
                                  traceback_, message,
                                  self._simulation_data.debug)
        if close_file:
            fd.close()
        dimensions = self.get_data_dimensions(layer)
        data_out = np.reshape(data_out, dimensions)
        return data_out, current_size
def to_string(self, val, type, is_cellid=False, possible_cellid=False,
data_item=None):
if type == DatumType.double_precision:
if data_item is not None and data_item.support_negative_index:
if val > 0:
return (str(int(val + 1)))
elif val == 0.0:
if struct.pack('>d', val) == \
b'\x80\x00\x00\x00\x00\x00\x00\x00':
# value is negative zero
return (str(int(val - 1)))
else:
# value is positive zero
return (str(int(val + 1)))
else:
return (str(int(val - 1)))
else:
try:
abs_val = abs(val)
except TypeError:
return str(val)
if (abs_val > self._simulation_data._sci_note_upper_thres or
abs_val < self._simulation_data._sci_note_lower_thres) \
and abs_val != 0:
return self._simulation_data.reg_format_str.format(val)
else:
return self._simulation_data.sci_format_str.format(val)
elif is_cellid or (possible_cellid and isinstance(val, tuple)):
if len(val) > 0 and val[0] == 'none':
# handle case that cellid is 'none'
return val[0]
if is_cellid and \
self.data_dimensions.get_model_dim(None).model_name is not \
None:
model_grid = self.data_dimensions.get_model_grid()
cellid_size = model_grid.get_num_spatial_coordinates()
if len(val) != cellid_size:
message = 'Cellid "{}" contains {} integer(s). Expected a' \
' cellid containing {} integer(s) for grid type' \
' {}.'.format(val, len(val), cellid_size,
str(model_grid.grid_type()))
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(
self.data_dimensions.structure.get_model(),
self.data_dimensions.structure.get_package(),
self.data_dimensions.structure.path,
'converting cellid to string',
self.data_dimensions.structure.name, inspect.stack()[0][3],
type_, value_, traceback_, message,
self._simulation_data.debug)
string_val = []
for item in val:
string_val.append(str(item + 1))
return ' '.join(string_val)
elif type == DatumType.integer:
if data_item is not None and data_item.numeric_index:
if isinstance(val, str):
return str(int(val) + 1)
else:
return str(val+1)
return str(val)
elif type == DatumType.string:
try:
arr_val = val.split()
except AttributeError:
return str(val)
if len(arr_val) > 1:
# quote any string with spaces
string_val = "'{}'".format(val)
if data_item is not None and data_item.ucase:
return string_val.upper()
else:
return string_val
if data_item is not None and data_item.ucase:
return str(val).upper()
else:
return str(val)
def process_internal_line(self, arr_line):
internal_modifiers_found = False
if self._data_type == DatumType.integer:
multiplier = 1
else:
multiplier = 1.0
print_format = None
if isinstance(arr_line, list):
if len(arr_line) < 2:
message = 'Data array "{}" contains an INTERNAL ' \
'that is not followed by a multiplier in line ' \
'"{}".'.format(self.data_dimensions.structure.name,
' '.join(arr_line))
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(
self.data_dimensions.structure.get_model(),
self.data_dimensions.structure.get_package(),
self.data_dimensions.structure.path,
'processing internal data header',
self.data_dimensions.structure.name,
inspect.stack()[0][3], type_, value_,
traceback_, message,
self._simulation_data.debug)
index = 1
while index < len(arr_line):
if isinstance(arr_line[index], str):
if arr_line[index].lower() == 'factor' and \
index + 1 < len(arr_line):
multiplier = self.convert_data(arr_line[index+1],
self._data_type)
internal_modifiers_found = True
index += 2
elif arr_line[index].lower() == 'iprn' and \
index + 1 < len(arr_line):
print_format = arr_line[index+1]
index += 2
internal_modifiers_found = True
else:
break
else:
break
elif isinstance(arr_line, dict):
for key, value in arr_line.items():
if key.lower() == 'factor':
multiplier = self.convert_data(value, self._data_type)
internal_modifiers_found = True
if key.lower() == 'iprn':
print_format = value
internal_modifiers_found = True
return multiplier, print_format, internal_modifiers_found
    def process_open_close_line(self, arr_line, layer, store=True):
        """Parse an OPEN/CLOSE control line (list form) or dict describing
        external file data.

        Extracts the file name plus optional 'factor', 'iprn', 'binary' and
        'data' modifiers.  When `store` is True the external reference (and
        any inline data) is stored and the file registered as an active
        external file.  Returns ``(multiplier, print_format, binary)``.
        """
        # process open/close line
        index = 2
        if self._data_type == DatumType.integer:
            multiplier = 1
        else:
            multiplier = 1.0
        print_format = None
        binary = False
        data_file = None
        data = None
        data_dim = self.data_dimensions
        if isinstance(arr_line, list):
            if len(arr_line) < 2 and store:
                message = 'Data array "{}" contains a OPEN/CLOSE ' \
                          'that is not followed by a file. ' \
                          '{}'.format(data_dim.structure.name,
                                      data_dim.structure.path)
                type_, value_, traceback_ = sys.exc_info()
                raise MFDataException(
                    self.data_dimensions.structure.get_model(),
                    self.data_dimensions.structure.get_package(),
                    self.data_dimensions.structure.path,
                    'processing open/close line', data_dim.structure.name,
                    inspect.stack()[0][3], type_, value_, traceback_, message,
                    self._simulation_data.debug)
            # scan keyword/value modifier pairs following the file name
            while index < len(arr_line):
                if isinstance(arr_line[index], str):
                    if arr_line[index].lower() == 'factor' and \
                            index + 1 < len(arr_line):
                        try:
                            multiplier = self.convert_data(arr_line[index+1],
                                                           self._data_type)
                        except Exception as ex:
                            message = 'Data array {} contains an OPEN/CLOSE ' \
                                      'with an invalid multiplier following ' \
                                      'the "factor" keyword.' \
                                      '.'.format(data_dim.structure.name)
                            type_, value_, traceback_ = sys.exc_info()
                            raise MFDataException(
                                self.data_dimensions.structure.get_model(),
                                self.data_dimensions.structure.get_package(),
                                self.data_dimensions.structure.path,
                                'processing open/close line',
                                data_dim.structure.name, inspect.stack()[0][3],
                                type_, value_, traceback_, message,
                                self._simulation_data.debug, ex)
                        index += 2
                    elif arr_line[index].lower() == 'iprn' and \
                            index + 1 < len(arr_line):
                        print_format = arr_line[index+1]
                        index += 2
                    elif arr_line[index].lower() == 'data' and \
                            index + 1 < len(arr_line):
                        data = arr_line[index+1]
                        index += 2
                    elif arr_line[index].lower() == 'binary':
                        binary = True
                        index += 1
                    else:
                        break
                else:
                    break
            # save comments
            if index < len(arr_line):
                self.layer_storage[layer].comments = MFComment(
                    ' '.join(arr_line[index:]),
                    self.data_dimensions.structure.path,
                    self._simulation_data, layer)
            if arr_line[0].lower() == 'open/close':
                data_file = arr_line[1]
            else:
                # keyword omitted; the first token is the file name
                data_file = arr_line[0]
        elif isinstance(arr_line, dict):
            for key, value in arr_line.items():
                if key.lower() == 'factor':
                    try:
                        multiplier = self.convert_data(value, self._data_type)
                    except Exception as ex:
                        message = 'Data array {} contains an OPEN/CLOSE ' \
                                  'with an invalid multiplier following the ' \
                                  '"factor" keyword.' \
                                  '.'.format(data_dim.structure.name)
                        type_, value_, traceback_ = sys.exc_info()
                        raise MFDataException(
                            self.data_dimensions.structure.get_model(),
                            self.data_dimensions.structure.get_package(),
                            self.data_dimensions.structure.path,
                            'processing open/close line',
                            data_dim.structure.name, inspect.stack()[0][3],
                            type_, value_, traceback_, message,
                            self._simulation_data.debug, ex)
                if key.lower() == 'iprn':
                    print_format = value
                if key.lower() == 'binary':
                    binary = bool(value)
                if key.lower() == 'data':
                    data = value
            if 'filename' in arr_line:
                data_file = arr_line['filename']
        if data_file is None:
            message = 'Data array {} contains an OPEN/CLOSE without a ' \
                      'fname (file name) specified' \
                      '.'.format(data_dim.structure.name)
            type_, value_, traceback_ = sys.exc_info()
            raise MFDataException(self.data_dimensions.structure.get_model(),
                                  self.data_dimensions.structure.get_package(),
                                  self.data_dimensions.structure.path,
                                  'processing open/close line',
                                  data_dim.structure.name,
                                  inspect.stack()[0][3], type_, value_,
                                  traceback_, message,
                                  self._simulation_data.debug)
        if store:
            # store external info
            self.store_external(data_file, layer, [multiplier], print_format,
                                binary=binary, data=data)
            # add to active list of external files
            model_name = data_dim.package_dim.model_dim[0].model_name
            self._simulation_data.mfpath.add_ext_file(data_file, model_name)
        return multiplier, print_format, binary
def _tupleize_data(self, data):
for index, data_line in enumerate(data):
if type(data_line) != tuple:
if type(data_line) == list:
data[index] = tuple(data_line)
else:
data[index] = (data_line,)
    def _verify_list(self, data):
        """Verify list data rows against the recarray type list.

        Currently only validates 'cellid' fields: each non-None cellid must
        contain the number of integers required by the model grid type.
        Raises MFDataException on a mismatch; `data` of None is accepted.
        """
        if data is not None:
            for data_line in data:
                data_line_len = len(data_line)
                for index in range(0, min(data_line_len,
                                          len(self._recarray_type_list))):
                    if self._recarray_type_list[index][0] == 'cellid' and \
                            self.data_dimensions.get_model_dim(None).model_name\
                            is not None and data_line[index] is not None:
                        # this is a cell id. verify that it contains the
                        # correct number of integers
                        model_grid = self.data_dimensions.get_model_grid()
                        cellid_size = model_grid.get_num_spatial_coordinates()
                        if len(data_line[index]) != cellid_size:
                            message = 'Cellid "{}" contains {} integer(s). ' \
                                      'Expected a cellid containing {} ' \
                                      'integer(s) for grid type' \
                                      ' {}.'.format(data_line[index],
                                                    len(data_line[index]),
                                                    cellid_size,
                                                    str(
                                                        model_grid.grid_type()))
                            type_, value_, traceback_ = sys.exc_info()
                            raise MFDataException(
                                self.data_dimensions.structure.get_model(),
                                self.data_dimensions.structure.get_package(),
                                self.data_dimensions.structure.path,
                                'verifying cellid',
                                self.data_dimensions.structure.name,
                                inspect.stack()[0][3],
                                type_, value_, traceback_, message,
                                self._simulation_data.debug)
def _add_placeholders(self, data):
idx = 0
for data_line in data:
data_line_len = len(data_line)
if data_line_len < len(self._recarray_type_list):
for index in range(data_line_len,
len(self._recarray_type_list)):
if self._recarray_type_list[index][1] == int:
self._recarray_type_list[index] = \
(self._recarray_type_list[index][0], object)
data_line += (None,)
elif self._recarray_type_list[index][1] == float:
data_line += (np.nan,)
else:
data_line += (None,)
data[idx] = data_line
idx += 1
def _duplicate_last_item(self):
last_item = self._recarray_type_list[-1]
arr_item_name = last_item[0].split('_')
if DatumUtil.is_int(arr_item_name[-1]):
new_item_num = int(arr_item_name[-1]) + 1
new_item_name = '_'.join(arr_item_name[0:-1])
new_item_name = '{}_{}'.format(new_item_name, new_item_num)
else:
new_item_name = '{}_1'.format(last_item[0])
self._recarray_type_list.append((new_item_name, last_item[1]))
    def _build_full_data(self, apply_multiplier=False):
        """Assemble and return the complete data array across all layers.

        Parameters
        ----------
        apply_multiplier : bool
            When True, each layer's stored factor is applied to its values.

        Returns
        -------
        numpy array of the full data, the raw internal data for scalar
        datasets, or None when the shape cannot be resolved or a layer has
        no data.
        """
        if self.data_structure_type == DataStructureType.scalar:
            return self.layer_storage.first_item().internal_data
        dimensions = self.get_data_dimensions(None)
        if dimensions[0] < 0:
            # shape could not be resolved
            return None
        full_data = np.full(dimensions, np.nan,
                            self.data_dimensions.structure.get_datum_type(True)
                            )
        if not self.layered:
            layers_to_process = [0]
        else:
            layers_to_process = self.layer_storage.indexes()
        for layer in layers_to_process:
            # pick the multiplier: explicit factor, or identity for the type
            if self.layer_storage[layer].factor is not None and \
                    apply_multiplier:
                mult = self.layer_storage[layer].factor
            elif self._data_type == DatumType.integer:
                mult = 1
            else:
                mult = 1.0
            if self.layer_storage[layer].data_storage_type == \
                    DataStorageType.internal_array:
                # data held directly in memory
                if len(self.layer_storage[layer].internal_data) > 0 and \
                        self.layer_storage[layer].internal_data[0] is None:
                    return None
                if self.layer_storage.get_total_size() == 1 or \
                        not self.layered:
                    full_data = self.layer_storage[layer].internal_data * mult
                else:
                    full_data[layer] = \
                        self.layer_storage[layer].internal_data * mult
            elif self.layer_storage[layer].data_storage_type == \
                    DataStorageType.internal_constant:
                # constant value expanded to the layer's shape
                if self.layer_storage.get_total_size() == 1 or \
                        not self.layered:
                    full_data = self._fill_const_layer(layer) * mult
                else:
                    full_data[layer] = self._fill_const_layer(layer) * mult
            else:
                # data stored in an external file
                if self.layer_storage.get_total_size() == 1 or \
                        not self.layered:
                    full_data = self.read_data_from_file(layer)[0] * mult
                else:
                    full_data[layer] = self.read_data_from_file(layer)[0]*mult
        return full_data
def _resolve_layer(self, layer):
if layer is None:
return self.layer_storage.first_index()
else:
return layer
    def _verify_data(self, data_iter, layer):
        """Best-effort check that the items produced by ``data_iter`` have
        types and a count consistent with the expected shape for ``layer``.

        Returns
        -------
        bool
            For recarray/record data: True when at least one full row was
            seen and the row count fits the expected first dimension.
            Otherwise: True as soon as enough type-matching items are seen
            to fill the expected size, False if the iterator is exhausted
            first.
        """
        # get expected size
        data_dimensions = self.get_data_dimensions(layer)
        # get expected data types
        if self.data_dimensions.structure.type == DatumType.recarray or \
                self.data_dimensions.structure.type == DatumType.record:
            data_types = self.data_dimensions.structure.\
                get_data_item_types(return_enum_type=True)
            # check to see if data contains the correct types and is a possibly
            # correct size
            record_loc = 0
            actual_data_size = 0
            rows_of_data = 0
            for data_item in data_iter:
                if self._is_type(data_item, data_types[2][record_loc]):
                    actual_data_size += 1
                # advance the per-record column cursor, counting full rows
                if record_loc == len(data_types[0]) - 1:
                    record_loc = 0
                    rows_of_data += 1
                else:
                    record_loc += 1
            return rows_of_data > 0 and (rows_of_data < data_dimensions[0] or
                                         data_dimensions[0] == -1)
        else:
            # scalar/array data: expected size is the product of the
            # resolved (positive) dimensions
            expected_data_size = 1
            for dimension in data_dimensions:
                if dimension > 0:
                    expected_data_size = expected_data_size * dimension
            data_type = self.data_dimensions.structure.\
                get_datum_type(return_enum_type=True)
            # check to see if data can fit dimensions
            actual_data_size = 0
            for data_item in data_iter:
                if self._is_type(data_item, data_type):
                    actual_data_size += 1
                if actual_data_size >= expected_data_size:
                    return True
            return False
def _fill_const_layer(self, layer):
data_dimensions = self.get_data_dimensions(layer)
if data_dimensions[0] < 0:
return self.layer_storage[layer].data_const_value
else:
data_iter = ConstIter(self.layer_storage[layer].data_const_value)
return self._fill_dimensions(data_iter, data_dimensions)
def _is_type(self, data_item, data_type):
if data_type == DatumType.string or data_type == DatumType.keyword:
return True
elif data_type == DatumType.integer:
return DatumUtil.is_int(data_item)
elif data_type == DatumType.double_precision:
return DatumUtil.is_float(data_item)
elif data_type == DatumType.keystring:
# TODO: support keystring type
if self._simulation_data.verbosity_level.value >= \
VerbosityLevel.normal.value:
print('Keystring type currently not supported.')
return True
else:
if self._simulation_data.verbosity_level.value >= \
VerbosityLevel.normal.value:
print('{} type checking currently not supported'.format(data_type))
return True
def _fill_dimensions(self, data_iter, dimensions):
if self.data_structure_type == DataStructureType.ndarray:
# initialize array
data_array = np.ndarray(shape=dimensions, dtype=float)
# fill array
for index in ArrayIndexIter(dimensions):
data_array.itemset(index, data_iter.__next__()[0])
return data_array
elif self.data_structure_type == DataStructureType.scalar:
return data_iter.__next__()
else:
data_array = None
data_line = ()
# fill array
array_index_iter = ArrayIndexIter(dimensions)
current_col = 0
for index in array_index_iter:
data_line += (index,)
if current_col == dimensions[1] - 1:
try:
if data_array is None:
data_array = np.rec.array(data_line,
self._recarray_type_list)
else:
rec_array = np.rec.array(data_line,
self._recarray_type_list)
data_array = np.hstack((data_array,
rec_array))
except:
message = 'An error occurred when storing data ' \
'"{}" in a recarray. Data line being ' \
'stored: {}'.format(
self.data_dimensions.structure.name,
data_line)
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(
self.data_dimensions.structure.get_model(),
self.data_dimensions.structure.get_package(),
self.data_dimensions.structure.path,
'processing open/close line',
dimensions.structure.name, inspect.stack()[0][3],
type_, value_, traceback_, message,
self._simulation_data.debug)
current_col = 0
data_line = ()
data_array[index] = data_iter.next()
return data_array
    def resolve_data_size(self, index):
        """Return how many recarray columns belong to the data element at
        ``index``.

        Repeating elements are assumed to be named ``<name>_0``,
        ``<name>_1``, ... so the size is the run length of columns sharing
        the same base name.  Only valid for recarray storage.

        Raises
        ------
        MFDataException
            If the data structure type is not recarray.
        """
        # Resolves the size of a given data element based on the names in the
        # existing rec_array. Assumes repeating data element names follow the
        # format <data_element_name>_X
        if self.data_structure_type != DataStructureType.recarray:
            message = 'Data structure type is {}. Data structure type must ' \
                      'be recarray.'.format(self.data_structure_type)
            type_, value_, traceback_ = sys.exc_info()
            raise MFDataException(
                self.data_dimensions.structure.get_model(),
                self.data_dimensions.structure.get_package(),
                self.data_dimensions.structure.path,
                'resolving data size',
                self.data_dimensions.structure.name,
                inspect.stack()[0][3],
                type_, value_, traceback_, message,
                self._simulation_data.debug)
        if len(self.layer_storage.first_item().internal_data[0]) <= index:
            # index beyond the stored row width
            return 0
        label = self.layer_storage.first_item().\
            internal_data.dtype.names[index]
        label_list = label.split('_')
        if len(label_list) == 1:
            # un-numbered name: a single, non-repeating element
            return 1
        internal_data = self.layer_storage.first_item().internal_data
        # count forward while subsequent columns share the same base name
        for forward_index in range(index+1, len(internal_data.dtype.names)):
            forward_label = internal_data.dtype.names[forward_index]
            forward_label_list = forward_label.split('_')
            if forward_label_list[0] != label_list[0]:
                return forward_index - index
        return len(internal_data.dtype.names) - index
    def build_type_list(self, data_set=None, data=None,
                        resolve_data_shape=True, key=None,
                        nseg=None):
        """Build (and return) ``self._recarray_type_list``, the list of
        (name, numpy type) pairs describing the recarray columns for
        ``data_set``.

        Parameters
        ----------
        data_set : MFDataStructure or MFDataItemStructure
            structure to build the type list for; defaults to this
            dataset's structure (which also resets the list)
        data : list
            existing data, used to size indeterminate/keystring columns
        resolve_data_shape : bool
            when True, resolve each item's shape from the data dimensions
        key : object
            key passed through to shape resolution
        nseg : int
            explicit value for 'nseg'-based shapes

        Returns
        -------
        list of (name, type) tuples
        """
        if data_set is None:
            self._recarray_type_list = []
            data_set = self.data_dimensions.structure
        initial_keyword = True
        package_dim = self.data_dimensions.package_dim
        for data_item, index in zip(data_set.data_item_structures,
                                    range(0,
                                          len(data_set.data_item_structures))):
            # handle optional mnames
            if not data_item.optional or len(data_item.name) < 5 or \
                    data_item.name.lower()[0:5] != 'mname' \
                    or not self.in_model:
                # explicit per-column overrides win over the declared type
                overrides = self._data_type_overrides
                if len(self._recarray_type_list) in overrides:
                    data_type = overrides[len(self._recarray_type_list)]
                elif isinstance(data_item, MFDataItemStructure):
                    data_type = data_item.get_rec_type()
                else:
                    data_type = None
                if data_item.name.lower() == 'aux' and resolve_data_shape:
                    # expand 'aux' into one column per auxiliary variable
                    aux_var_names = package_dim.get_aux_variables()
                    if aux_var_names is not None:
                        for aux_var_name in aux_var_names[0]:
                            if aux_var_name.lower() != 'auxiliary':
                                self._recarray_type_list.append((aux_var_name,
                                                                 data_type))
                elif data_item.type == DatumType.record:
                    # record within a record, recurse
                    # NOTE(review): positionally this binds data=True and
                    # resolve_data_shape=data, which looks transposed
                    # relative to the signature -- confirm intended order
                    self.build_type_list(data_item, True, data)
                elif data_item.type == DatumType.keystring:
                    self._recarray_type_list.append((data_item.name,
                                                     data_type))
                    # add potential data after keystring to type list
                    ks_data_item = deepcopy(data_item)
                    ks_data_item.type = DatumType.string
                    ks_data_item.name = '{}_data'.format(ks_data_item.name)
                    ks_rec_type = ks_data_item.get_rec_type()
                    self._recarray_type_list.append((ks_data_item.name,
                                                     ks_rec_type))
                    if index == len(data_set.data_item_structures) - 1:
                        idx = 1
                        data_line_max_size = self._get_max_data_line_size(data)
                        while data is not None and \
                                len(self._recarray_type_list) < \
                                data_line_max_size:
                            # keystrings at the end of a line can contain items
                            # of variable length. assume everything at the
                            # end of the data line is related to the last
                            # keystring
                            self._recarray_type_list.append(
                                ('{}_{}'.format(ks_data_item.name, idx),
                                 ks_rec_type))
                            idx += 1
                elif data_item.name != 'boundname' or \
                        self.data_dimensions.package_dim.boundnames():
                    # don't include initial keywords
                    if data_item.type != DatumType.keyword or \
                            initial_keyword == \
                            False or data_set.block_variable == True:
                        initial_keyword = False
                        shape_rule = None
                        if data_item.tagged:
                            # tagged non-string items get a label column
                            if data_item.type != DatumType.string and \
                                    data_item.type != DatumType.keyword:
                                self._recarray_type_list.append(
                                    ('{}_label'.format(data_item.name),
                                     object))
                        if nseg is not None and len(data_item.shape) > 0 and \
                                isinstance(data_item.shape[0], str) and \
                                data_item.shape[0][0:4] == 'nseg':
                            # nseg explicitly specified. resolve any formula
                            # nseg is in
                            model_dim = \
                                self.data_dimensions.get_model_dim(None)
                            expression_array = \
                                model_dim.build_shape_expression(data_item.
                                                                 shape)
                            if isinstance(expression_array, list) and \
                                    len(expression_array) == 1:
                                exp = expression_array[0]
                                resolved_shape = \
                                    [model_dim.resolve_exp(exp, nseg)]
                            else:
                                resolved_shape = [1]
                        else:
                            if resolve_data_shape:
                                data_dim = self.data_dimensions
                                resolved_shape, shape_rule = \
                                    data_dim.get_data_shape(data_item,
                                                            data_set,
                                                            data, key)
                            else:
                                resolved_shape = [1]
                        if not resolved_shape or len(resolved_shape) == 0 or \
                                resolved_shape[0] == -1:
                            # could not resolve shape
                            resolved_shape = [1]
                        elif resolved_shape[0] == -9999 or \
                                shape_rule is not None:
                            if data is not None:
                                # shape is an indeterminate 1-d array and
                                # should consume the remainder of the data
                                max_s = ArrayUtil.max_multi_dim_list_size(data)
                                resolved_shape[0] = \
                                    max_s - len(self._recarray_type_list)
                            else:
                                # shape is indeterminate 1-d array and no data
                                # provided to resolve
                                resolved_shape[0] = 1
                        if data_item.is_cellid:
                            if data_item.shape is not None and \
                                    len(data_item.shape) > 0 and \
                                    data_item.shape[0] == 'ncelldim':
                                # A cellid is a single entry (tuple) in the
                                # recarray. Adjust dimensions accordingly.
                                data_dim = self.data_dimensions
                                model_grid = data_dim.get_model_grid()
                                size = model_grid.get_num_spatial_coordinates()
                                data_item.remove_cellid(resolved_shape,
                                                        size)
                        for index in range(0, resolved_shape[0]):
                            if resolved_shape[0] > 1:
                                # type list fields must have unique names
                                self._recarray_type_list.append(
                                    ('{}_{}'.format(data_item.name,
                                                    index), data_type))
                            else:
                                self._recarray_type_list.append(
                                    (data_item.name, data_type))
        return self._recarray_type_list
@staticmethod
def _get_max_data_line_size(data):
max_size = 0
if data is not None:
for index in range(0, len(data)):
if len(data[index]) > max_size:
max_size = len(data[index])
return max_size
def get_data_dimensions(self, layer):
data_dimensions, shape_rule = self.data_dimensions.get_data_shape()
if layer is not None and self.layer_storage.get_total_size() > 1:
# remove all "layer" dimensions from the list
layer_dims = self.data_dimensions.structure.\
data_item_structures[0].layer_dims
data_dimensions = data_dimensions[len(layer_dims):]
return data_dimensions
    def _store_prep(self, layer, multiplier):
        """Normalize ``layer`` and ``multiplier`` before storing data.

        Returns
        -------
        (layer, multiplier) tuple where layer is a valid index tuple
        (``(0,)`` for non-layered data) and multiplier is a single value
        (defaulting to 1.0 when not supplied).

        Raises
        ------
        MFDataException
            If ``layer`` is outside the stored layer shape.
        """
        if not (layer is None or self.layer_storage.in_shape(layer)):
            message = 'Layer {} is not a valid layer.'.format(layer)
            type_, value_, traceback_ = sys.exc_info()
            raise MFDataException(
                self.data_dimensions.structure.get_model(),
                self.data_dimensions.structure.get_package(),
                self.data_dimensions.structure.path,
                'storing data',
                self.data_dimensions.structure.name,
                inspect.stack()[0][3],
                type_, value_, traceback_, message,
                self._simulation_data.debug)
        if layer is None:
            # layer is none means the data provided is for all layers or this
            # is not layered data
            layer = (0,)
            self.layer_storage.list_shape = (1,)
            self.layer_storage.multi_dim_list = [
                self.layer_storage.first_item()]
        # pull a single multiplier value out of the (possibly nested) input
        mult_ml = MultiList(multiplier)
        if not mult_ml.in_shape(layer):
            if multiplier[0] is None:
                multiplier = 1.0
            else:
                multiplier = multiplier[0]
        else:
            if mult_ml.first_item() is None:
                multiplier = 1.0
            else:
                multiplier = mult_ml.first_item()
        return layer, multiplier
def _get_data_size(self, layer):
dimensions = self.get_data_dimensions(layer)
data_size = 1
for dimension in dimensions:
data_size = data_size * dimension
return data_size
    def convert_data(self, data, type, data_item=None):
        """Convert ``data`` to the Python type matching the MF6 datum type.

        Parameters
        ----------
        data : object
            value to convert
        type : DatumType
            target datum type (parameter name shadows the builtin but is
            kept for backward compatibility with existing callers)
        data_item : MFDataItemStructure, optional
            structure with conversion flags (support_negative_index,
            numeric_index, preserve_case)

        Returns
        -------
        Converted value; unrecognized types are returned unchanged.

        Raises
        ------
        MFDataException
            If the value cannot be converted to the requested type.
        """
        if type == DatumType.double_precision:
            if data_item is not None and data_item.support_negative_index:
                # negative-index floats encode sign via +/-0.0 and shift
                # magnitudes by one toward zero
                val = int(ArrayUtil.clean_numeric(data))
                if val == -1:
                    return -0.0
                elif val == 1:
                    return 0.0
                elif val < 0:
                    val += 1
                else:
                    val -= 1
                try:
                    return float(val)
                except (ValueError, TypeError):
                    message = 'Data "{}" with value "{}" can ' \
                              'not be converted to float' \
                              '.'.format(self.data_dimensions.structure.name,
                                         data)
                    type_, value_, traceback_ = sys.exc_info()
                    raise MFDataException(
                        self.data_dimensions.structure.get_model(),
                        self.data_dimensions.structure.get_package(),
                        self.data_dimensions.structure.path, 'converting data',
                        self.data_dimensions.structure.name,
                        inspect.stack()[0][3], type_, value_, traceback_,
                        message, self._simulation_data.debug)
            else:
                try:
                    if isinstance(data, str):
                        # fix any scientific formatting that python can't handle
                        data = data.replace('d', 'e')
                    return float(data)
                except (ValueError, TypeError):
                    try:
                        # second chance: strip non-numeric characters first
                        return float(ArrayUtil.clean_numeric(data))
                    except (ValueError, TypeError):
                        message = 'Data "{}" with value "{}" can ' \
                                  'not be converted to float' \
                                  '.'.format(self.data_dimensions.structure.
                                             name,
                                             data)
                        type_, value_, traceback_ = sys.exc_info()
                        raise MFDataException(
                            self.data_dimensions.structure.get_model(),
                            self.data_dimensions.structure.get_package(),
                            self.data_dimensions.structure.path,
                            'converting data',
                            self.data_dimensions.structure.name,
                            inspect.stack()[0][3], type_, value_,
                            traceback_, message, self._simulation_data.debug)
        elif type == DatumType.integer:
            if data_item is not None and data_item.numeric_index:
                # convert from one-based (file) to zero-based (internal)
                return int(ArrayUtil.clean_numeric(data)) - 1
            try:
                return int(data)
            except (ValueError, TypeError):
                try:
                    # second chance: strip non-numeric characters first
                    return int(ArrayUtil.clean_numeric(data))
                except (ValueError, TypeError):
                    message = 'Data "{}" with value "{}" can not be ' \
                              'converted to int' \
                              '.'.format(self.data_dimensions.structure.name,
                                         data)
                    type_, value_, traceback_ = sys.exc_info()
                    raise MFDataException(
                        self.data_dimensions.structure.get_model(),
                        self.data_dimensions.structure.get_package(),
                        self.data_dimensions.structure.path, 'converting data',
                        self.data_dimensions.structure.name,
                        inspect.stack()[0][3], type_, value_, traceback_,
                        message, self._simulation_data.debug)
        elif type == DatumType.string and data is not None:
            if data_item is None or not data_item.preserve_case:
                # keep strings lower case
                return data.lower()
        return data
class MFTransient(object):
    """
    Parent class for transient data. This class contains internal objects and
    methods that most end users will not need to access directly.
    Parameters
    ----------
    *args, **kwargs
        Parameters present to support multiple child class interfaces
    Attributes
    ----------
    _current_key : str
        current key defining specific transient dataset to be accessed
    _data_storage : dict
        dictionary of DataStorage objects
    Methods
    -------
    add_transient_key(transient_key)
        verifies the validity of the transient key about to be added
    get_data_prep(transient_key)
        called prior to the child class getting data. ensures that the data
        retrieved will come from the dataset of a specific transient_key
    _set_data_prep(transient_key)
        called prior to the child class setting data. ensures that the data
        set will go to the dataset of a specific transient_key
    _get_file_entry_prep(transient_key)
        called prior to the child class getting the file entry. ensures that
        the file entry only reflects the data from a specific transient_key
    _load_prep(first_line, file_handle, block_header, pre_data_comments)
        called prior to the child class loading data from a file. figures out
        what transient_key to store the data under
    _append_list_as_record_prep(record, transient_key)
        called prior to the child class appending a list to a record. ensures
        that the list gets appended to the record associated with the key
        transient_key
    _update_record_prep(transient_key)
        called prior to the child class updating a record. ensures that the
        record being updated is the one associated with the key transient_key
    get_active_key_list() : list
        returns a list of the active transient keys
    _verify_sp(sp_num) : bool
        returns true of the stress period sp_num is within the expected range
        of stress periods for this model
    See Also
    --------
    Notes
    -----
    This is a mixin: it relies on attributes such as ``_path`` and
    ``_simulation_data`` that the concrete data class provides.
    Examples
    --------
    """
    def __init__(self, *args, **kwargs):
        self._current_key = None
        self._data_storage = None
    def add_transient_key(self, transient_key):
        """Validate ``transient_key`` (integer keys are stress periods)."""
        if isinstance(transient_key, int):
            self._verify_sp(transient_key)
    def update_transient_key(self, old_transient_key, new_transient_key):
        """Rename a transient dataset key, keeping its stored data."""
        if old_transient_key in self._data_storage:
            # replace dictionary key
            self._data_storage[new_transient_key] = \
                self._data_storage[old_transient_key]
            del self._data_storage[old_transient_key]
            if self._current_key == old_transient_key:
                # update current key
                self._current_key = new_transient_key
    def _transient_setup(self, data_storage):
        # attach the child-provided storage dictionary
        self._data_storage = data_storage
    def get_data_prep(self, transient_key=0):
        """Point the current key at ``transient_key`` before a data read."""
        if isinstance(transient_key, int):
            self._verify_sp(transient_key)
        self._current_key = transient_key
        if transient_key not in self._data_storage:
            self.add_transient_key(transient_key)
    def _set_data_prep(self, data, transient_key=0):
        """Point the current key at ``transient_key`` before a data write."""
        if isinstance(transient_key, int):
            self._verify_sp(transient_key)
        # tuple keys carry the stress period in their first element
        if isinstance(transient_key, tuple):
            self._current_key = transient_key[0]
        else:
            self._current_key = transient_key
        if self._current_key not in self._data_storage:
            self.add_transient_key(self._current_key)
    def _get_file_entry_prep(self, transient_key=0):
        """Point the current key at ``transient_key`` before file output."""
        if isinstance(transient_key, int):
            self._verify_sp(transient_key)
        self._current_key = transient_key
    def _load_prep(self, block_header):
        """Resolve and activate the transient key for data being loaded."""
        # transient key is first non-keyword block variable
        transient_key = block_header.get_transient_key()
        if isinstance(transient_key, int):
            if not self._verify_sp(transient_key):
                message = 'Invalid transient key "{}" in block' \
                          ' "{}"'.format(transient_key, block_header.name)
                raise MFInvalidTransientBlockHeaderException(message)
        if transient_key not in self._data_storage:
            self.add_transient_key(transient_key)
        self._current_key = transient_key
    def _append_list_as_record_prep(self, record, transient_key=0):
        """Activate ``transient_key`` before appending to its record."""
        if isinstance(transient_key, int):
            self._verify_sp(transient_key)
        self._current_key = transient_key
        if transient_key not in self._data_storage:
            self.add_transient_key(transient_key)
    def _update_record_prep(self, transient_key=0):
        """Activate ``transient_key`` before updating its record."""
        if isinstance(transient_key, int):
            self._verify_sp(transient_key)
        self._current_key = transient_key
    def get_active_key_list(self):
        """Return the stored (key, data) pairs sorted by key."""
        return sorted(self._data_storage.items(), key=itemgetter(0))
    def _verify_sp(self, sp_num):
        """Check that stress period ``sp_num`` does not exceed nper.

        Raises
        ------
        FlopyException
            If nper is undefined or ``sp_num`` exceeds it.
        """
        if self._path[0].lower() == 'nam':
            # name-file data is not tied to stress periods
            return True
        if not ('tdis', 'dimensions', 'nper') in self._simulation_data.mfdata:
            raise FlopyException('Could not find number of stress periods ('
                                 'nper).')
        nper = self._simulation_data.mfdata[('tdis', 'dimensions', 'nper')]
        if not (sp_num <= nper.get_data()):
            raise FlopyException('Stress period value sp_num ({}) is greater '
                                 'than the number of stress periods defined '
                                 'in nper.'.format(sp_num))
        return True
class MFData(object):
    """
    Base class for all data. This class contains internal objects and methods
    that most end users will not need to access directly.
    Parameters
    ----------
    sim_data : MFSimulationData
        container class for all data for a MF6 simulation
    structure : MFDataStructure
        defines the structure of the data
    enable : bool
        whether this data is currently being used
    path : tuple
        tuple describing path to the data generally in the format (<model>,
        <package>, <block>, <data>)
    dimensions : DataDimensions
        object used to retrieve dimension information about data
    *args, **kwargs : exists to support different child class parameter sets
        with extra init parameters
    Attributes
    ----------
    _current_key : str
        current key defining specific transient dataset to be accessed
    Methods
    -------
    new_simulation(sim_data)
        points data object to a new simulation
    layer_shape() : tuple
        returns the shape of the layered dimensions
    See Also
    --------
    Notes
    -----
    Examples
    --------
    """
    def __init__(self, sim_data, structure, enable=True, path=None,
                 dimensions=None, *args, **kwargs):
        # initialize
        self._current_key = None
        self._simulation_data = sim_data
        self.structure = structure
        self.enabled = enable
        self.repeating = False
        if path is None:
            self._path = structure.path
        else:
            self._path = path
        self._data_name = structure.name
        self._data_storage = None
        self._data_type = structure.type
        self._keyword = ''
        if self._simulation_data is not None:
            self._data_dimensions = DataDimensions(dimensions, structure)
            # build a unique path in the simulation dictionary
            self._org_path = self._path
            index = 0
            while self._path in self._simulation_data.mfdata:
                self._path = self._org_path[:-1] + \
                             ('{}_{}'.format(self._org_path[-1], index),)
                index += 1
            self._structure_init()
            # tie this to the simulation dictionary
            sim_data.mfdata[self._path] = self
    def __repr__(self):
        return repr(self._get_storage_obj())
    def __str__(self):
        return str(self._get_storage_obj())
    @property
    def array(self):
        """Data as an array, with multipliers applied."""
        kwargs = {'array': True}
        return self.get_data(apply_mult=True, **kwargs)
    def new_simulation(self, sim_data):
        """Point this data object at a new simulation, dropping storage."""
        self._simulation_data = sim_data
        self._data_storage = None
    def find_dimension_size(self, dimension_name):
        """Look up ``dimension_name`` near this data's path.

        Returns a one-element list with the dimension's value, or an empty
        list when not found.
        """
        parent_path = self._path[:-1]
        result = self._simulation_data.mfdata.find_in_path(parent_path,
                                                           dimension_name)
        if result[0] is not None:
            return [result[0].get_data()]
        else:
            return []
    def aux_var_names(self):
        """Return the auxiliary variable names defined for this data."""
        return self.find_dimension_size('auxnames')
    def layer_shape(self):
        """Return the shape of the layered dimensions as a tuple.

        Raises
        ------
        MFDataException
            If the model grid is unavailable or a named layer dimension
            cannot be resolved.
        """
        layers = []
        layer_dims = self.structure.data_item_structures[0] \
            .layer_dims
        if len(layer_dims) == 1:
            layers.append(self._data_dimensions.get_model_grid(). \
                          num_layers())
        else:
            for layer in layer_dims:
                if layer == 'nlay':
                    # get the layer size from the model grid
                    try:
                        model_grid = self._data_dimensions.get_model_grid()
                    except Exception as ex:
                        type_, value_, traceback_ = sys.exc_info()
                        # fixed: originally referenced self.path and
                        # self.sim_data, which MFData does not define
                        # (only _path and _simulation_data exist), so this
                        # error path raised AttributeError instead
                        raise MFDataException(self.structure.get_model(),
                                              self.structure.get_package(),
                                              self._path,
                                              'getting model grid',
                                              self.structure.name,
                                              inspect.stack()[0][3],
                                              type_, value_, traceback_, None,
                                              self._simulation_data.debug, ex)
                    if model_grid.grid_type() == DiscretizationType.DISU:
                        # unstructured grids have a single "layer"
                        layers.append(1)
                    else:
                        num_layers = model_grid.num_layers()
                        if num_layers is not None:
                            layers.append(num_layers)
                        else:
                            layers.append(1)
                else:
                    # search data dictionary for layer size
                    layer_size = self.find_dimension_size(layer)
                    if len(layer_size) == 1:
                        layers.append(layer_size[0])
                    else:
                        message = 'Unable to find the size of expected layer ' \
                                  'dimension {} '.format(layer)
                        type_, value_, traceback_ = sys.exc_info()
                        raise MFDataException(
                            self.structure.get_model(),
                            self.structure.get_package(),
                            self.structure.path, 'resolving layer dimensions',
                            self.structure.name, inspect.stack()[0][3],
                            type_, value_, traceback_, message,
                            self._simulation_data.debug)
        return tuple(layers)
    def get_description(self, description=None, data_set=None):
        """Return the concatenated descriptions of all data items,
        recursing into nested records."""
        if data_set is None:
            data_set = self.structure
        for index, data_item in data_set.data_items.items():
            if data_item.type == DatumType.record:
                # record within a record, recurse
                description = self.get_description(description, data_item)
            else:
                if data_item.description:
                    if description:
                        description = '{}\n{}'.format(description,
                                                      data_item.description)
                    else:
                        description = data_item.description
        return description
    def load(self, first_line, file_handle, block_header,
             pre_data_comments=None):
        """Base load hook; subclasses do the actual file parsing."""
        self.enabled = True
    def is_valid(self):
        # TODO: Implement for each data type
        return True
    def _structure_init(self, data_set=None):
        # determine whether this data is identified by a single keyword,
        # recursing through nested records
        if data_set is None:
            # Initialize variables
            data_set = self.structure
        for data_item_struct in data_set.data_item_structures:
            if data_item_struct.type == DatumType.record:
                # this is a record within a record, recurse
                self._structure_init(data_item_struct)
            else:
                if len(self.structure.data_item_structures) == 1:
                    # data item name is a keyword to look for
                    self._keyword = data_item_struct.name
    def _get_constant_formatting_string(self, const_val, layer, data_type,
                                        suffix='\n'):
        # build a CONSTANT control-record line for the given value
        sim_data = self._simulation_data
        const_format = list(sim_data.constant_formatting)
        const_format[1] = self._get_storage_obj().to_string(const_val,
                                                            data_type)
        return '{}{}'.format(sim_data.indent_string.join(const_format), suffix)
    def _get_aux_var_index(self, aux_name):
        aux_var_index = None
        # confirm whether the keyword found is an auxiliary variable name
        aux_var_names = self._data_dimensions.package_dim.get_aux_variables()
        if aux_var_names:
            for aux_var_name, index in zip(aux_var_names[0],
                                           range(0,len(aux_var_names[0]))):
                if aux_name.lower() == aux_var_name.lower():
                    # NOTE(review): index - 1 presumably skips the leading
                    # 'AUXILIARY' keyword entry -- confirm
                    aux_var_index = index - 1
        return aux_var_index
    def _get_aux_var_name(self, aux_var_index):
        aux_var_names = self._data_dimensions.package_dim.get_aux_variables()
        # TODO: Verify that this works for multi-dimensional layering
        return aux_var_names[0][aux_var_index[0]+1]
    def _load_keyword(self, arr_line, index_num):
        """Verify the expected keyword (or an aux variable name) at
        ``arr_line[index_num]``.

        Returns
        -------
        (next_index, aux_var_index) tuple; next_index is advanced past the
        keyword when one was expected.

        Raises
        ------
        MFDataException
            If neither the keyword nor an aux variable name was found.
        """
        aux_var_index = None
        if self._keyword != '':
            # verify keyword
            keyword_found = arr_line[index_num].lower()
            keyword_match = self._keyword.lower() == keyword_found
            aux_var_names = None
            if not keyword_match:
                aux_var_index = self._get_aux_var_index(keyword_found)
            if not keyword_match and aux_var_index is None:
                aux_text = ''
                if aux_var_names is not None:
                    aux_text = ' or auxiliary variables ' \
                               '{}'.format(aux_var_names[0])
                message = 'Error reading variable "{}". Expected ' \
                          'variable keyword "{}"{} not found ' \
                          'at line "{}". {}'.format(self._data_name,
                                                    self._keyword,
                                                    aux_text,
                                                    ' '.join(arr_line),
                                                    self._path)
                type_, value_, traceback_ = sys.exc_info()
                raise MFDataException(
                    self.structure.get_model(),
                    self.structure.get_package(),
                    self.structure.path, 'loading keyword',
                    self.structure.name, inspect.stack()[0][3],
                    type_, value_, traceback_, message,
                    self._simulation_data.debug)
            return (index_num + 1, aux_var_index)
        return (index_num, aux_var_index)
    def _read_pre_data_comments(self, line, file_handle, pre_data_comments):
        """Consume comment/empty lines before the data, storing them as
        pre-data comments, and return the first real data line."""
        line_num = 0
        storage = self._get_storage_obj()
        if pre_data_comments:
            storage.pre_data_comments = MFComment(pre_data_comments.text,
                                                  self._path,
                                                  self._simulation_data,
                                                  line_num)
        else:
            storage.pre_data_comments = None
        # read through any fully commented or empty lines
        arr_line = ArrayUtil.split_data_line(line)
        while MFComment.is_comment(arr_line, True) and line != '':
            if storage.pre_data_comments:
                storage.pre_data_comments.add_text('\n')
                storage.pre_data_comments.add_text(' '.join(arr_line))
            else:
                storage.pre_data_comments = MFComment(arr_line, self._path,
                                                      self._simulation_data,
                                                      line_num)
            self._add_data_line_comment(arr_line, line_num)
            line = file_handle.readline()
            arr_line = ArrayUtil.split_data_line(line)
        return line
    def _add_data_line_comment(self, comment, line_num):
        # append (or create) the comment recorded for this line number
        storage = self._get_storage_obj()
        if line_num in storage.comments:
            storage.comments[line_num].add_text('\n')
            storage.comments[line_num].add_text(' '.join(comment))
        else:
            storage.comments[line_num] = MFComment(' '.join(comment),
                                                   self._path,
                                                   self._simulation_data,
                                                   line_num)
    def _get_storage_obj(self):
        return self._data_storage
class MFMultiDimVar(MFData):
    """MFData subclass for multi-dimensional (array) variables.

    Provides helpers that build the MODFLOW 6 control-record lines
    ('INTERNAL ...' and 'OPEN/CLOSE ...') describing how a layer of array
    data is stored.
    """
    def __init__(self, sim_data, structure, enable=True, path=None,
                 dimensions=None):
        super(MFMultiDimVar, self).__init__(sim_data, structure, enable, path,
                                            dimensions)
    def _get_internal_formatting_string(self, layer):
        """Return the INTERNAL control record for ``layer`` (no newline)."""
        storage = self._get_storage_obj()
        if layer is None:
            layer_storage = storage.layer_storage.first_item()
        else:
            layer_storage = storage.layer_storage[layer]
        datum_type = self.structure.get_datum_type(return_enum_type=True)
        tokens = ['INTERNAL', 'FACTOR']
        if layer_storage.factor is None:
            # no explicit factor: identity multiplier for the datum type
            tokens.append(
                '1.0' if datum_type == DatumType.double_precision else '1')
        else:
            tokens.append(str(layer_storage.factor))
        if layer_storage.iprn is not None:
            tokens.append('IPRN')
            tokens.append(str(layer_storage.iprn))
        return self._simulation_data.indent_string.join(tokens)
    def _get_external_formatting_string(self, layer, ext_file_action):
        """Return the OPEN/CLOSE control record for ``layer``,
        newline-terminated.  Also updates the layer's stored file name to
        the resolved path."""
        storage = self._get_storage_obj()
        if layer is None:
            layer_storage = storage.layer_storage.first_item()
        else:
            layer_storage = storage.layer_storage[layer]
        # resolve external file path relative to the model
        file_mgmt = self._simulation_data.mfpath
        model_name = self._data_dimensions.package_dim.model_dim[0].model_name
        ext_file_path = file_mgmt.get_updated_path(layer_storage.fname,
                                                   model_name,
                                                   ext_file_action)
        layer_storage.fname = ext_file_path
        tokens = ['OPEN/CLOSE', "'{}'".format(ext_file_path), 'FACTOR']
        if layer_storage.factor is None:
            datum_type = self.structure.get_datum_type(return_enum_type=True)
            tokens.append(
                '1.0' if datum_type == DatumType.double_precision else '1')
        else:
            tokens.append(str(layer_storage.factor))
        if layer_storage.binary:
            tokens.append('(BINARY)')
        if layer_storage.iprn is not None:
            tokens.append('IPRN')
            tokens.append(str(layer_storage.iprn))
        return '{}\n'.format(
            self._simulation_data.indent_string.join(tokens))
| 47.615159 | 86 | 0.511639 |
6a430b8687bd533c2fc1e516f0cc66374b817736 | 2,400 | py | Python | python/gui.py | caioseda/audio-reactive-led-strip | 0264ad64b5fed4ed8fe0090ea8ea102a7c9afdda | [
"MIT"
] | null | null | null | python/gui.py | caioseda/audio-reactive-led-strip | 0264ad64b5fed4ed8fe0090ea8ea102a7c9afdda | [
"MIT"
] | null | null | null | python/gui.py | caioseda/audio-reactive-led-strip | 0264ad64b5fed4ed8fe0090ea8ea102a7c9afdda | [
"MIT"
] | null | null | null | from __future__ import print_function
from __future__ import division
import time
import numpy as np
from pyqtgraph.Qt import QtGui
import pyqtgraph as pg
# from GradientEditorItem.dockarea import *
import config
class GUI:
plot = []
curve = []
def __init__(self, width=800, height=450, title=''):
# Create GUI window
self.app = QtGui.QApplication([])
self.win = pg.GraphicsWindow(title)
self.win.resize(width, height)
self.win.setWindowTitle(title)
# Create GUI layout
self.layout = QtGui.QVBoxLayout()
self.win.setLayout(self.layout)
def add_plot(self, title):
new_plot = pg.PlotWidget()
self.layout.addWidget(new_plot)
self.plot.append(new_plot)
self.curve.append([])
def add_curve(self, plot_index, pen=(255, 255, 255)):
self.curve[plot_index].append(self.plot[plot_index].plot(pen=pen))
if __name__ == '__main__':
    # Example test gui
    N = 48
    gui = GUI(title='Test')
    # Sin plot
    gui.add_plot(title='Sin Plot')
    gui.add_curve(plot_index=0)
    gui.win.nextRow()
    # Cos plot
    gui.add_plot(title='Cos Plot')
    gui.add_curve(plot_index=1)
    #
    # Frequency-range slider: two ticks on a sqrt scale spanning
    # [0, MIC_RATE / 2] (the Nyquist frequency)
    freq_label = pg.LabelItem('')
    def freq_slider_change(tick):
        # map tick positions back to Hz (square undoes the sqrt placement)
        minf = freq_slider.tickValue(0)**2.0 * (config.MIC_RATE / 2.0)
        maxf = freq_slider.tickValue(1)**2.0 * (config.MIC_RATE / 2.0)
        t = 'Frequency range: {:.0f} - {:.0f} Hz'.format(minf, maxf)
        freq_label.setText(t)
        # config.MIN_FREQUENCY = minf
        # config.MAX_FREQUENCY = maxf
        # dsp.create_mel_bank()
    freq_slider = pg.TickSliderItem(orientation='bottom', allowAdd=False)
    freq_slider.addTick((config.MIN_FREQUENCY / (config.MIC_RATE / 2.0))**0.5)
    freq_slider.addTick((config.MAX_FREQUENCY / (config.MIC_RATE / 2.0))**0.5)
    freq_slider.tickMoveFinished = freq_slider_change
    freq_label.setText('Frequency range: {} - {} Hz'.format(
        config.MIN_FREQUENCY,
        config.MAX_FREQUENCY))
    gui.win.nextRow()
    gui.win.nextRow()
    gui.win.addItem(freq_slider,colspan=3)
    # Animate sin/cos curves at ~30 FPS; loops forever (Ctrl-C to stop)
    while True:
        t = time.time()
        x = np.linspace(t, 2 * np.pi + t, N)
        gui.curve[0][0].setData(x=x, y=np.sin(x))
        gui.curve[1][0].setData(x=x, y=np.cos(x))
        # manually pump the Qt event loop since app.exec_() is never called
        gui.app.processEvents()
        time.sleep(1.0 / 30.0)
7dbb758cffcac675eb83d32aee5bfa10a5665003 | 5,765 | py | Python | saleor/plugins/webhook/plugin.py | enesustundag/saleor | 95ce4b577ca06110f4702e61f554e9d165ef5fd4 | [
"CC-BY-4.0"
] | 1 | 2021-01-13T15:55:33.000Z | 2021-01-13T15:55:33.000Z | saleor/plugins/webhook/plugin.py | enesustundag/saleor | 95ce4b577ca06110f4702e61f554e9d165ef5fd4 | [
"CC-BY-4.0"
] | 5 | 2021-06-10T20:57:04.000Z | 2022-03-12T01:04:33.000Z | saleor/plugins/webhook/plugin.py | enesustundag/saleor | 95ce4b577ca06110f4702e61f554e9d165ef5fd4 | [
"CC-BY-4.0"
] | 1 | 2021-02-03T09:34:04.000Z | 2021-02-03T09:34:04.000Z | from typing import TYPE_CHECKING, Any, Optional
from ...webhook.event_types import WebhookEventType
from ...webhook.payloads import (
generate_checkout_payload,
generate_customer_payload,
generate_fulfillment_payload,
generate_invoice_payload,
generate_order_payload,
generate_product_payload,
)
from ..base_plugin import BasePlugin
from .tasks import trigger_webhooks_for_event
if TYPE_CHECKING:
from ...account.models import User
from ...checkout.models import Checkout
from ...invoice.models import Invoice
from ...order.models import Fulfillment, Order
from ...product.models import Product
class WebhookPlugin(BasePlugin):
    """Forward Saleor domain events to registered webhooks.

    Each hook serializes the affected object to a JSON payload and schedules
    asynchronous delivery via ``trigger_webhooks_for_event.delay``.  When the
    plugin is inactive, every hook is a no-op that returns ``previous_value``
    unchanged.
    """

    PLUGIN_ID = "mirumee.webhooks"
    PLUGIN_NAME = "Webhooks"
    DEFAULT_ACTIVE = True

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.active = True

    def order_created(self, order: "Order", previous_value: Any) -> Any:
        if not self.active:
            return previous_value
        order_data = generate_order_payload(order)
        trigger_webhooks_for_event.delay(WebhookEventType.ORDER_CREATED, order_data)

    def order_confirmed(self, order: "Order", previous_value: Any) -> Any:
        if not self.active:
            return previous_value
        order_data = generate_order_payload(order)
        trigger_webhooks_for_event.delay(WebhookEventType.ORDER_CONFIRMED, order_data)

    def order_fully_paid(self, order: "Order", previous_value: Any) -> Any:
        if not self.active:
            return previous_value
        order_data = generate_order_payload(order)
        trigger_webhooks_for_event.delay(WebhookEventType.ORDER_FULLY_PAID, order_data)

    def order_updated(self, order: "Order", previous_value: Any) -> Any:
        if not self.active:
            return previous_value
        order_data = generate_order_payload(order)
        trigger_webhooks_for_event.delay(WebhookEventType.ORDER_UPDATED, order_data)

    def invoice_request(
        self,
        order: "Order",
        invoice: "Invoice",
        number: Optional[str],
        previous_value: Any,
    ) -> Any:
        # `number` is part of the plugin hook signature; the payload is built
        # from the invoice object itself.
        if not self.active:
            return previous_value
        invoice_data = generate_invoice_payload(invoice)
        trigger_webhooks_for_event.delay(
            WebhookEventType.INVOICE_REQUESTED, invoice_data
        )

    def invoice_delete(self, invoice: "Invoice", previous_value: Any):
        if not self.active:
            return previous_value
        invoice_data = generate_invoice_payload(invoice)
        trigger_webhooks_for_event.delay(WebhookEventType.INVOICE_DELETED, invoice_data)

    def invoice_sent(self, invoice: "Invoice", email: str, previous_value: Any) -> Any:
        if not self.active:
            return previous_value
        invoice_data = generate_invoice_payload(invoice)
        trigger_webhooks_for_event.delay(WebhookEventType.INVOICE_SENT, invoice_data)

    def order_cancelled(self, order: "Order", previous_value: Any) -> Any:
        if not self.active:
            return previous_value
        order_data = generate_order_payload(order)
        trigger_webhooks_for_event.delay(WebhookEventType.ORDER_CANCELLED, order_data)

    def order_fulfilled(self, order: "Order", previous_value: Any) -> Any:
        if not self.active:
            return previous_value
        order_data = generate_order_payload(order)
        trigger_webhooks_for_event.delay(WebhookEventType.ORDER_FULFILLED, order_data)

    def fulfillment_created(self, fulfillment: "Fulfillment", previous_value):
        if not self.active:
            return previous_value
        fulfillment_data = generate_fulfillment_payload(fulfillment)
        trigger_webhooks_for_event.delay(
            WebhookEventType.FULFILLMENT_CREATED, fulfillment_data
        )

    def customer_created(self, customer: "User", previous_value: Any) -> Any:
        if not self.active:
            return previous_value
        customer_data = generate_customer_payload(customer)
        trigger_webhooks_for_event.delay(
            WebhookEventType.CUSTOMER_CREATED, customer_data
        )

    def product_created(self, product: "Product", previous_value: Any) -> Any:
        if not self.active:
            return previous_value
        product_data = generate_product_payload(product)
        trigger_webhooks_for_event.delay(WebhookEventType.PRODUCT_CREATED, product_data)

    def product_updated(self, product: "Product", previous_value: Any) -> Any:
        if not self.active:
            return previous_value
        product_data = generate_product_payload(product)
        trigger_webhooks_for_event.delay(WebhookEventType.PRODUCT_UPDATED, product_data)

    # Deprecated. This method will be removed in Saleor 3.0
    def checkout_quantity_changed(
        self, checkout: "Checkout", previous_value: Any
    ) -> Any:
        if not self.active:
            return previous_value
        checkout_data = generate_checkout_payload(checkout)
        trigger_webhooks_for_event.delay(
            WebhookEventType.CHECKOUT_QUANTITY_CHANGED, checkout_data
        )

    def checkout_created(self, checkout: "Checkout", previous_value: Any) -> Any:
        if not self.active:
            return previous_value
        checkout_data = generate_checkout_payload(checkout)
        trigger_webhooks_for_event.delay(
            WebhookEventType.CHECKOUT_CREATED, checkout_data
        )

    def checkout_updated(self, checkout: "Checkout", previous_value: Any) -> Any:
        if not self.active:
            return previous_value
        checkout_data = generate_checkout_payload(checkout)
        # NOTE(review): "UPADTED" looks misspelled, but this name must mirror
        # the attribute declared on WebhookEventType -- confirm there before
        # renaming either side.
        trigger_webhooks_for_event.delay(
            WebhookEventType.CHECKOUT_UPADTED, checkout_data
        )
| 38.952703 | 88 | 0.701301 |
f27b861216f6bfec8581aa1e710ef4723e38e6dc | 4,545 | py | Python | src/transformers/models/reformer/tokenization_reformer_fast.py | MarcelGM/transformers | aad1d9b6d5c58fd974618ac0aead1c5bd1119467 | [
"Apache-2.0"
] | 101 | 2021-12-22T00:03:51.000Z | 2022-03-30T07:39:09.000Z | src/transformers/models/reformer/tokenization_reformer_fast.py | MarcelGM/transformers | aad1d9b6d5c58fd974618ac0aead1c5bd1119467 | [
"Apache-2.0"
] | 7 | 2021-07-16T21:47:19.000Z | 2022-03-18T20:26:53.000Z | src/transformers/models/reformer/tokenization_reformer_fast.py | MarcelGM/transformers | aad1d9b6d5c58fd974618ac0aead1c5bd1119467 | [
"Apache-2.0"
] | 30 | 2021-04-30T07:11:22.000Z | 2022-03-15T19:34:58.000Z | # coding=utf-8
# Copyright 2020 The Trax Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization class for model Reformer."""
import os
from shutil import copyfile
from typing import Optional, Tuple
from ...file_utils import is_sentencepiece_available
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
# The slow (SentencePiece-backed) tokenizer can only be imported when the
# `sentencepiece` package is installed; fall back to None so the fast
# tokenizer module still imports without it.
if is_sentencepiece_available():
    from .tokenization_reformer import ReformerTokenizer
else:
    ReformerTokenizer = None
logger = logging.get_logger(__name__)

# Character SentencePiece uses to mark word boundaries.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

# Canonical download locations of the vocab files for each pretrained checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
    },
    "tokenizer_file": {
        "google/reformer-crime-and-punishment": "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/tokenizer.json"
    },
}

# Maximum input length (number of positions) supported per pretrained model.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}
class ReformerTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" Reformer tokenizer (backed by HuggingFace's `tokenizers` library). Based on `Unigram
    <https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models>`__.

    This tokenizer inherits from :class:`~transformers.PreTrainedTokenizerFast` which contains most of the main
    methods. Users should refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (:obj:`str`):
            `SentencePiece <https://github.com/google/sentencepiece>`__ file (generally has a `.spm` extension) that
            contains the vocabulary necessary to instantiate a tokenizer.
        eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
            The end of sequence token.

            .. note::

                When building a sequence using special tokens, this is not the token that is used for the end of
                sequence. The token used is the :obj:`sep_token`.
        unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        additional_special_tokens (:obj:`List[str]`, `optional`):
            Additional special tokens used by the tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = ReformerTokenizer

    def __init__(
        self,
        vocab_file,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=None,
        **kwargs
    ):
        # Fix: the previous default for `additional_special_tokens` was a
        # mutable `[]` shared across every call of this constructor; use a
        # None sentinel and create a fresh list per instantiation instead.
        # Passing nothing still behaves exactly as before.
        if additional_special_tokens is None:
            additional_special_tokens = []
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the SentencePiece vocab file into ``save_directory``.

        Returns a 1-tuple with the written path, or None (after logging an
        error) when ``save_directory`` is not a directory.
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        # Only copy when source and destination differ -- saving into the
        # directory the vocab already lives in is a no-op.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 37.875 | 137 | 0.693949 |
ccbb513831b4e5a7f157b66f852b6deecb86f747 | 555 | py | Python | src/restaurants/migrations/0001_initial.py | shihlinlu/django-web-app | 26c2d2f6f0c7cf0e0eb7aa3d298cb25d84955496 | [
"Apache-2.0"
] | null | null | null | src/restaurants/migrations/0001_initial.py | shihlinlu/django-web-app | 26c2d2f6f0c7cf0e0eb7aa3d298cb25d84955496 | [
"Apache-2.0"
] | null | null | null | src/restaurants/migrations/0001_initial.py | shihlinlu/django-web-app | 26c2d2f6f0c7cf0e0eb7aa3d298cb25d84955496 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-31 19:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the ``Restaurant`` model (auto ``id`` + ``name``)."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Restaurant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=120)),
            ],
        ),
    ]
| 23.125 | 114 | 0.594595 |
ec97e793f1dc00773c99ee16651084e3e36c84f0 | 18,623 | py | Python | dataprep/clean/clean_lat_long.py | devinllu/dataprep | d56861e5bed3c608cace74983f797dc729072d0a | [
"MIT"
] | 1 | 2022-02-14T07:18:00.000Z | 2022-02-14T07:18:00.000Z | dataprep/clean/clean_lat_long.py | devinllu/dataprep | d56861e5bed3c608cace74983f797dc729072d0a | [
"MIT"
] | null | null | null | dataprep/clean/clean_lat_long.py | devinllu/dataprep | d56861e5bed3c608cace74983f797dc729072d0a | [
"MIT"
] | null | null | null | """
Clean and validate a DataFrame column containing geographic coordinates.
"""
import re
from operator import itemgetter
from typing import Any, Optional, Tuple, Union
import dask
import dask.dataframe as dd
import numpy as np
import pandas as pd
from ..progress_bar import ProgressBar
from .utils import NULL_VALUES, create_report_new, to_dask
LAT_LONG_PATTERN = re.compile(
r"""
[^/-]*?[(]?
(?P<dir_front>[NS])?[ ]*
(?P<deg>-?%(FLOAT)s)(?:[%(DEGREE)sD\*\u00B0\s][ ]*
(?:(?P<min>%(FLOAT)s)[%(PRIME)s'm]?[ ]*)?
(?:(?P<sec>%(FLOAT)s)[%(DOUBLE_PRIME)s"s][ ]*)?
)?(?P<dir_back>[NS])?
\s*[,;/\s]\s*
(?P<dir_front2>[EW])?[ ]*
(?P<deg2>-?%(FLOAT)s)(?:[%(DEGREE)sD\*\u00B0\s][ ]*
(?:(?P<min2>%(FLOAT)s)[%(PRIME)s'm]?[ ]*)?
(?:(?P<sec2>%(FLOAT)s)[%(DOUBLE_PRIME)s"s][ ]*)?
)?(?P<dir_back2>[EW])?
[)]?\s*$
"""
% {
"FLOAT": r"\d+(?:\.\d+)?",
"DEGREE": chr(176),
"PRIME": chr(8242),
"DOUBLE_PRIME": chr(8243),
},
re.VERBOSE | re.UNICODE,
)
LAT_PATTERN = re.compile(
r"""
[^/-]*?
(?P<dir_front>[NS])?[ ]*
(?P<deg>-?%(FLOAT)s)(?:[%(DEGREE)sD\*\u00B0\s][ ]*
(?:(?P<min>%(FLOAT)s)[%(PRIME)s'm]?[ ]*)?
(?:(?P<sec>%(FLOAT)s)[%(DOUBLE_PRIME)s"s][ ]*)?
)?(?P<dir_back>[NS])?
\s*$
"""
% {
"FLOAT": r"\d+(?:\.\d+)?",
"DEGREE": chr(176),
"PRIME": chr(8242),
"DOUBLE_PRIME": chr(8243),
},
re.VERBOSE | re.UNICODE,
)
LONG_PATTERN = re.compile(
r"""
[^/-]*?
(?P<dir_front>[EW])?[ ]*
(?P<deg>-?%(FLOAT)s)(?:[%(DEGREE)sD\*\u00B0\s][ ]*
(?:(?P<min>%(FLOAT)s)[%(PRIME)s'm]?[ ]*)?
(?:(?P<sec>%(FLOAT)s)[%(DOUBLE_PRIME)s"s][ ]*)?
)?(?P<dir_back>[EW])?
\s*$
"""
% {
"FLOAT": r"\d+(?:\.\d+)?",
"DEGREE": chr(176),
"PRIME": chr(8242),
"DOUBLE_PRIME": chr(8243),
},
re.VERBOSE | re.UNICODE,
)
def clean_lat_long(
    df: Union[pd.DataFrame, dd.DataFrame],
    lat_long: Optional[str] = None,
    *,
    lat_col: Optional[str] = None,
    long_col: Optional[str] = None,
    output_format: str = "dd",
    split: bool = False,
    inplace: bool = False,
    errors: str = "coerce",
    report: bool = True,
    progress: bool = True,
) -> pd.DataFrame:
    """
    Clean and standardize latitude and longitude coordinates.

    Read more in the :ref:`User Guide <clean_lat_long_user_guide>`.

    Parameters
    ----------
    df
        A pandas or Dask DataFrame containing the data to be cleaned.
    lat_long
        The name of the column containing latitude and longitude coordinates.
    lat_col
        The name of the column containing latitude coordinates.

        If specified, the parameter lat_long must be None.
    long_col
        The name of the column containing longitude coordinates.

        If specified, the parameter lat_long must be None.
    output_format
        The desired format of the coordinates.
            - 'dd': decimal degrees (51.4934, 0.0098)
            - 'ddh': decimal degrees with hemisphere ('51.4934° N, 0.0098° E')
            - 'dm': degrees minutes ('51° 29.604′ N, 0° 0.588′ E')
            - 'dms': degrees minutes seconds ('51° 29′ 36.24″ N, 0° 0′ 35.28″ E')

        (default: 'dd')
    split
        If True, split the latitude and longitude coordinates into one column
        for latitude and a separate column for longitude. Otherwise, merge
        the latitude and longitude coordinates into one column.

        (default: False)
    inplace
        If True, delete the column(s) containing the data that was cleaned. Otherwise,
        keep the original column(s).

        (default: False)
    errors
        How to handle parsing errors.
            - ‘coerce’: invalid parsing will be set to NaN.
            - ‘ignore’: invalid parsing will return the input.
            - ‘raise’: invalid parsing will raise an exception.

        (default: 'coerce')
    report
        If True, output the summary report. Otherwise, no report is outputted.

        (default: True)
    progress
        If True, display a progress bar.

        (default: True)

    Examples
    --------
    Split a column containing latitude and longitude strings into separate
    columns in decimal degrees format.

    >>> df = pd.DataFrame({'coord': ['51° 29′ 36.24″ N, 0° 0′ 35.28″ E', '51.4934° N, 0.0098° E']})
    >>> clean_lat_long(df, 'coord', split=True)
    Latitude and Longitude Cleaning Report:
        2 values cleaned (100.0%)
    Result contains 2 (100.0%) values in the correct format and 0 null values (0.0%)
                                  coord  latitude  longitude
    0  51° 29′ 36.24″ N, 0° 0′ 35.28″ E   51.4934     0.0098
    1             51.4934° N, 0.0098° E   51.4934     0.0098
    """
    # pylint: disable=too-many-branches
    if lat_long and (lat_col or long_col):
        raise ValueError("lat_long must be None if either lat_col or long_col is not None")

    if output_format not in {"dd", "ddh", "dm", "dms"}:
        raise ValueError(
            f'output_format {output_format} is invalid, it must be "dd", "ddh", "dm", or "dms"'
        )

    # convert to dask
    df = to_dask(df)

    # To clean, create a new column "clean_code_tup" which contains
    # the cleaned values and code indicating how the initial value was
    # changed in a tuple. Then split the column of tuples and count the
    # amount of different codes to produce the report.
    # Codes (see _format_lat_long): 0 null, 1 unparsable,
    # 2 cleaned & changed, 3 already in the target format.
    def clean_lat_long_helper(df, col, col_name):
        # A helper to clean a latitude and longitude column
        df["clean_code_tup"] = df[col].map_partitions(
            lambda srs: [_format_lat_or_long(x, output_format, errors, col_name) for x in srs],
            meta=object,
        )
        df = df.assign(
            _temp_=df["clean_code_tup"].map(itemgetter(0)),
            _code_=df["clean_code_tup"].map(itemgetter(1)),
        )
        df = df.rename(columns={"_temp_": f"{col}_clean"})
        if inplace:
            df = df.drop(columns=col)
        return df

    if lat_long:
        # clean a latitude and longitude column
        df["clean_code_tup"] = df[lat_long].map_partitions(
            lambda srs: [_format_lat_long(x, output_format, split, errors) for x in srs],
            meta=object,
        )
        if split:
            df = df.assign(
                latitude=df["clean_code_tup"].map(itemgetter(0)),
                longitude=df["clean_code_tup"].map(itemgetter(1)),
                _code_=df["clean_code_tup"].map(itemgetter(2)),
            )
        else:
            df = df.assign(
                _temp_=df["clean_code_tup"].map(itemgetter(0)),
                _code_=df["clean_code_tup"].map(itemgetter(1)),
            )
            df = df.rename(columns={"_temp_": f"{lat_long}_clean"})
        if inplace:
            df = df.drop(columns=lat_long)
    else:
        # clean a latitude column
        if lat_col:
            df = clean_lat_long_helper(df, lat_col, "lat")
        # clean a longitude column
        if long_col:
            df = clean_lat_long_helper(df, long_col, "long")
        # merge the cleaned latitude and longitude
        if lat_col and long_col and not split:
            if output_format == "dd":
                df["latitude_longitude"] = df[[f"{lat_col}_clean", f"{long_col}_clean"]].apply(
                    tuple, axis=1, meta=object
                )
            else:
                df["latitude_longitude"] = df[f"{lat_col}_clean"] + ", " + df[f"{long_col}_clean"]
            # if separate lat and long columns are merged, then all values are "cleaned"
            df["_code_"] = 2
            df = df.drop(columns=[f"{lat_col}_clean", f"{long_col}_clean"])

    # counts of codes indicating how values were changed
    stats = df["_code_"].value_counts(sort=False)
    df = df.drop(columns=["clean_code_tup", "_code_"])

    with ProgressBar(minimum=1, disable=not progress):
        df, stats = dask.compute(df, stats)

    # output a report describing the result of clean_lat_long
    if report:
        create_report_new("Latitude and Longitude", stats, errors)

    return df
def validate_lat_long(
    x: Union[pd.Series, str, float, Tuple[float, float]],
    *,
    lat_long: bool = True,
    lat: bool = False,
    lon: bool = False,
) -> Union[bool, pd.Series]:
    """
    Validate latitude and longitude coordinates.

    Read more in the :ref:`User Guide <clean_lat_long_user_guide>`.

    Parameters
    ----------
    x
        A pandas Series, string, float, or tuple of floats, containing the latitude
        and/or longitude coordinates to be validated.
    lat_long
        If True, valid values contain latitude and longitude coordinates. Parameters
        lat and lon must be False if lat_long is True.

        (default: True)
    lat
        If True, valid values contain only latitude coordinates. Parameters
        lat_long and lon must be False if lat is True.

        (default: False)
    lon
        If True, valid values contain only longitude coordinates. Parameters
        lat_long and lat must be False if lon is True.

        (default: False)

    Examples
    --------
    Validate a coordinate string or series of coordinates.

    >>> validate_lat_long('51° 29′ 36.24″ N, 0° 0′ 35.28″ E')
    True
    >>> df = pd.DataFrame({'coordinates': ['51° 29′ 36.24″ N, 0° 0′ 35.28″ E', 'NaN']})
    >>> validate_lat_long(df['coordinates'])
    0     True
    1    False
    Name: coordinates, dtype: bool
    """
    # lat/lon take precedence over the lat_long default when set.
    if lat or lon:
        hor_dir = "lat" if lat else "long"
        if isinstance(x, pd.Series):
            return x.apply(_check_lat_or_long, args=(False, hor_dir))
        return _check_lat_or_long(x, False, hor_dir)
    elif lat_long:
        if isinstance(x, pd.Series):
            return x.apply(_check_lat_long, args=(False,))
        return _check_lat_long(x, False)
    # Reached only when lat_long, lat and lon are all False.
    return None
def _format_lat_long(val: Any, output_format: str, split: bool, errors: str) -> Any:
    """
    Function to transform a coordinate instance into the desired format

    The last component of the returned tuple contains a code indicating how the
    input value was changed:
        0 := the value is null
        1 := the value could not be parsed
        2 := the value is cleaned and the cleaned value is DIFFERENT than the input value
        3 := the value is cleaned and is THE SAME as the input value (no transformation)
    """
    # pylint: disable=too-many-locals
    # _check_lat_long parses the value val, and will return the components
    # if the parse is successful. The returned value "status" can be either 0 ie
    # "null" (which means val is a null value), 1 ie "unknown" (in which case
    # val could not be parsed) or 2 ie "success" (a successful parse of val).
    # dds, mins, secs, hem are the latitude components and dds2, mins2, secs2,
    # hem2 are the longitude components
    dds, mins, secs, hem, dds2, mins2, secs2, hem2, status = _check_lat_long(val, True)

    if status == 0:  # val is a null value
        return (np.nan, np.nan, 0) if split else (np.nan, 0)

    if status == 1:  # val contains an unknown value
        if errors == "raise":
            raise ValueError(f"unable to parse value {val}")
        result = val if errors == "ignore" else np.nan
        return (result, np.nan, 1) if split else (result, 1)

    # derive the hemisphere if not given in the initial coordinate
    if not hem:
        hem = "N" if dds >= 0 else "S"
    if not hem2:
        hem2 = "E" if dds2 >= 0 else "W"
    dds, dds2 = abs(dds), abs(dds2)

    # the following code if/elif blocks converts the
    # coordinate components to the desired output
    # https://en.wikipedia.org/wiki/Geographic_coordinate_conversion#Change_of_units_and_format
    if output_format == "dd":
        # sign encodes the hemisphere in decimal-degree output
        fctr = -1 if hem == "S" else 1
        fctr2 = -1 if hem2 == "W" else 1
        lat, lon = round(fctr * dds, 4), round(fctr2 * dds2, 4)
    elif output_format == "ddh":
        lat = f"{round(dds, 4)}{chr(176)} {hem}"
        lon = f"{round(dds2, 4)}{chr(176)} {hem2}"
    elif output_format == "dm":
        # fractional degrees -> minutes; drop a trailing ".0" when integral
        mins = round(60 * (dds - int(dds)), 4)
        mins = int(mins) if mins.is_integer() else mins
        mins2 = round(60 * (dds2 - int(dds2)), 4)
        mins2 = int(mins2) if mins2.is_integer() else mins2
        lat = f"{int(dds)}{chr(176)} {mins}{chr(8242)} {hem}"
        lon = f"{int(dds2)}{chr(176)} {mins2}{chr(8242)} {hem2}"
    elif output_format == "dms":
        mins = int(60 * (dds - int(dds)))
        secs = round(3600 * (dds - int(dds)) - 60 * mins, 4)
        secs = int(secs) if secs.is_integer() else secs
        mins2 = int(60 * (dds2 - int(dds2)))
        secs2 = round(3600 * (dds2 - int(dds2)) - 60 * mins2, 4)
        secs2 = int(secs2) if secs2.is_integer() else secs2
        lat = f"{int(dds)}{chr(176)} {mins}{chr(8242)} {secs}{chr(8243)} {hem}"
        lon = f"{int(dds2)}{chr(176)} {mins2}{chr(8242)} {secs2}{chr(8243)} {hem2}"

    if split:
        return lat, lon, 2

    result = (lat, lon) if output_format == "dd" else f"{lat}, {lon}"
    return result, 2 if val != result else 3
def _check_lat_long(val: Any, clean: bool) -> Any:
    """
    Function to check if a coordinate instance is valid

    When ``clean`` is True, returns the parsed components
    (dds, mins, secs, hem, dds2, mins2, secs2, hem2, status) with status
    0=null, 1=unparsable, 2=success; otherwise returns a boolean.
    """
    # pylint: disable=too-many-boolean-expressions
    # if the value is null, return empty strings for the components
    # and the code 0 to indicate a null status
    if val in NULL_VALUES:
        return (None,) * 8 + (0,) if clean else False

    # normalize a doubled apostrophe '' into the double-quote seconds mark
    mch = re.match(LAT_LONG_PATTERN, re.sub(r"''", r'"', str(val)))

    # check if the value was able to be parsed
    if not mch:
        return (None,) * 8 + (1,) if clean else False
    if not mch.group("deg") or not mch.group("deg2"):
        return (None,) * 8 + (1,) if clean else False

    # coordinates for latitude
    mins = float(mch.group("min")) if mch.group("min") else 0
    secs = float(mch.group("sec")) if mch.group("sec") else 0
    dds = float(mch.group("deg")) + mins / 60 + secs / 3600
    hem = mch.group("dir_back") or mch.group("dir_front")

    # coordinates for longitude
    mins2 = float(mch.group("min2")) if mch.group("min2") else 0
    secs2 = float(mch.group("sec2")) if mch.group("sec2") else 0
    dds2 = float(mch.group("deg2")) + mins2 / 60 + secs2 / 3600
    hem2 = mch.group("dir_back2") or mch.group("dir_front2")

    # minutes and seconds need to be in the interval [0, 60)
    # for degrees:
    #   if hemisphere is given, then 0<=lat<=90 and 0<=long<=180
    #   if hemisphere is not given, then -90<=lat<=90 and -180<=long<=180
    # decimal degrees must be -90<=lat<=90 and -180<=long<=180
    # the first given hemisphere and last hemisphere cannot both be set
    if (
        not 0 <= mins < 60
        or not 0 <= mins2 < 60
        or not 0 <= secs < 60
        or not 0 <= secs2 < 60
        or hem
        and not 0 <= float(mch.group("deg")) <= 90
        or hem2
        and not 0 <= float(mch.group("deg2")) <= 180
        or not hem
        and abs(float(mch.group("deg"))) > 90
        or not hem2
        and abs(float(mch.group("deg2"))) > 180
        or abs(dds) > 90
        or abs(dds2) > 180
        or sum([mch.group("dir_back") is not None, mch.group("dir_front") is not None]) > 1
        or sum([mch.group("dir_back2") is not None, mch.group("dir_front2") is not None]) > 1
    ):
        return (None,) * 8 + (1,) if clean else False

    return (dds, mins, secs, hem, dds2, mins2, secs2, hem2, 2) if clean else True
def _format_lat_or_long(val: Any, output_format: str, errors: str, hor_dir: str) -> Any:
    """
    Function to transform a coordinate instance into the desired format

    ``hor_dir`` is "lat" or "long"; status codes mirror _format_lat_long
    (0 null, 1 unparsable, 2 changed, 3 unchanged).
    """
    dds, mins, secs, hem, status = _check_lat_or_long(val, True, hor_dir)

    if status == 0:  # val contains a null value
        return np.nan, 0
    if status == 1:  # val contains an unknown value
        if errors == "raise":
            raise ValueError(f"unable to parse value {val}")
        return val if errors == "ignore" else np.nan, 1

    # derive the hemisphere when the input did not carry one
    if not hem:
        if hor_dir == "lat":
            hem = "N" if dds >= 0 else "S"
        else:
            hem = "E" if dds >= 0 else "W"
    dds = abs(dds)

    # NOTE(review): the first two branches are separate "if"s rather than an
    # if/elif chain, so "res" would be unbound for an unrecognized
    # output_format; clean_lat_long validates output_format before calling.
    if output_format == "dd":
        fctr = 1 if hem in {"N", "E"} else -1
        res = round(fctr * dds, 4)
    if output_format == "ddh":
        res = f"{round(dds, 4)}{chr(176)} {hem}"
    elif output_format == "dm":
        mins = round(60 * (dds - int(dds)), 4)
        mins = int(mins) if mins.is_integer() else mins
        res = f"{int(dds)}{chr(176)} {mins}{chr(8242)} {hem}"
    elif output_format == "dms":
        mins = int(60 * (dds - int(dds)))
        secs = round(3600 * (dds - int(dds)) - 60 * mins, 4)
        secs = int(secs) if secs.is_integer() else secs
        res = f"{int(dds)}{chr(176)} {mins}{chr(8242)} {secs}{chr(8243)} {hem}"

    return res, 2 if val != res else 3
def _check_lat_or_long(val: Any, clean: bool, hor_dir: str) -> Any:
    """
    Function to check if a single latitude or longitude instance is valid

    Parameters
    ----------
    val
        The raw value to validate / parse.
    clean
        If True, return the parsed components
        (dds, mins, secs, hem, status) with status 0=null, 1=unparsable,
        2=success; otherwise return a boolean validity flag.
    hor_dir
        Either "lat" or "long"; selects the regex and the degree bound.
    """
    # pylint: disable=too-many-boolean-expressions
    if val in NULL_VALUES:
        return (None,) * 4 + (0,) if clean else False

    # normalize a doubled apostrophe '' into the double-quote seconds mark
    pat = LAT_PATTERN if hor_dir == "lat" else LONG_PATTERN
    mch = re.match(pat, re.sub(r"''", r'"', str(val)))
    if not mch:
        return (None,) * 4 + (1,) if clean else False
    if not mch.group("deg"):
        return (None,) * 4 + (1,) if clean else False

    # coordinates
    mins = float(mch.group("min")) if mch.group("min") else 0
    secs = float(mch.group("sec")) if mch.group("sec") else 0
    dds = float(mch.group("deg")) + mins / 60 + secs / 3600
    hem = mch.group("dir_back") or mch.group("dir_front")

    # range is [-90, 90] for latitude and [-180, 180] for longitude
    bound = 90 if hor_dir == "lat" else 180

    # minutes and seconds need to be in the interval [0, 60)
    # (fixed: previously this accepted exactly 60, which is not a valid
    # sexagesimal minute/second value and disagreed with _check_lat_long)
    # for degrees:
    #   if hemisphere is given, then 0<=deg<=bound
    #   if hemisphere is not given, then -bound<=deg<=bound
    # decimal degrees must be -bound<=lat<=bound
    # the first given hemisphere and last hemisphere cannot both be set
    if (
        not 0 <= mins < 60
        or not 0 <= secs < 60
        or hem
        and not 0 <= float(mch.group("deg")) <= bound
        or not hem
        and abs(float(mch.group("deg"))) > bound
        or abs(dds) > bound
        or sum([mch.group("dir_back") is not None, mch.group("dir_front") is not None]) > 1
    ):
        return (None,) * 4 + (1,) if clean else False

    return (dds, mins, secs, hem, 2) if clean else True
| 36.091085 | 99 | 0.582183 |
a03fcb0fdce18745ba74e2ece6d79eea4ab23a72 | 341 | py | Python | python-algorithm/leetcode/problem_476.py | isudox/leetcode-solution | 60085e64deaf396a171367affc94b18114565c43 | [
"MIT"
] | 5 | 2017-06-11T09:19:34.000Z | 2019-01-16T16:58:31.000Z | python-algorithm/leetcode/problem_476.py | isudox/leetcode-solution | 60085e64deaf396a171367affc94b18114565c43 | [
"MIT"
] | null | null | null | python-algorithm/leetcode/problem_476.py | isudox/leetcode-solution | 60085e64deaf396a171367affc94b18114565c43 | [
"MIT"
] | 1 | 2019-03-02T15:50:43.000Z | 2019-03-02T15:50:43.000Z | """476. Number Complement
https://leetcode.com/problems/number-complement/
"""
class Solution:
    def findComplement(self, num: int) -> int:
        """Flip every bit of *num*'s binary representation (no leading zeros).

        A mask of ``bit_length`` ones XORed with *num* flips exactly the
        significant bits, e.g. 5 (0b101) -> 2 (0b010).
        """
        mask = (1 << num.bit_length()) - 1
        return num ^ mask
| 20.058824 | 48 | 0.451613 |
bb60e863958241d3ebac5ce4084bd3f4d6c47b27 | 522 | py | Python | tests/int_tests/test_le.py | lycantropos/rithm | 61ae1614411ab0ce7feb403fdf93b71f49231ec1 | [
"MIT"
] | null | null | null | tests/int_tests/test_le.py | lycantropos/rithm | 61ae1614411ab0ce7feb403fdf93b71f49231ec1 | [
"MIT"
] | null | null | null | tests/int_tests/test_le.py | lycantropos/rithm | 61ae1614411ab0ce7feb403fdf93b71f49231ec1 | [
"MIT"
] | null | null | null | from hypothesis import given
from tests.utils import (IntWithBuiltin,
equivalence)
from . import strategies
@given(strategies.ints_with_builtins, strategies.ints_with_builtins)
def test_connection_with_builtin(first_with_builtin: IntWithBuiltin,
                                 second_with_builtin: IntWithBuiltin) -> None:
    """`<=` on the custom Int type must agree with `<=` on the wrapped builtins."""
    # each argument is a (custom Int, builtin int) pair built from one value
    first, first_builtin = first_with_builtin
    second, second_builtin = second_with_builtin

    assert equivalence(first <= second, first_builtin <= second_builtin)
| 34.8 | 78 | 0.729885 |
851f5b1fdfc73cb79296c6dd7f59f484caddfd5b | 2,446 | py | Python | localflavor/za/forms.py | ifanrx/django-localflavor | 38328bbb127a33cb06eaea82288cd70821b2bad6 | [
"BSD-3-Clause"
] | null | null | null | localflavor/za/forms.py | ifanrx/django-localflavor | 38328bbb127a33cb06eaea82288cd70821b2bad6 | [
"BSD-3-Clause"
] | null | null | null | localflavor/za/forms.py | ifanrx/django-localflavor | 38328bbb127a33cb06eaea82288cd70821b2bad6 | [
"BSD-3-Clause"
] | null | null | null | """
South Africa-specific Form helpers
"""
from __future__ import unicode_literals
import re
from datetime import date
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import CharField, RegexField, Select
from django.utils.checksums import luhn
from django.utils.translation import gettext as _
# SA ID number layout: YYMMDD birth date (yy/mm/dd groups), a 4-digit middle
# sequence (mid) and a 3-digit tail (end) whose last digit is the Luhn check.
id_re = re.compile(r'^(?P<yy>\d\d)(?P<mm>\d\d)(?P<dd>\d\d)(?P<mid>\d{4})(?P<end>\d{3})')
class ZAIDField(CharField):
    """
    A form field for South African ID numbers -- the checksum is validated
    using the Luhn checksum, and uses a simplistic (read: not entirely accurate)
    check for the birthdate
    """
    default_error_messages = {
        'invalid': _('Enter a valid South African ID number'),
    }

    def clean(self, value):
        """Return the normalized ID string, or '' for empty input.

        Raises ValidationError when the format, embedded birth date, or Luhn
        checksum is invalid.
        """
        super(ZAIDField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''
        # strip spaces and dashes
        value = value.strip().replace(' ', '').replace('-', '')

        match = re.match(id_re, value)

        if not match:
            raise ValidationError(self.error_messages['invalid'])

        g = match.groupdict()

        try:
            # The year 2000 is conveniently a leapyear.
            # This algorithm will break in xx00 years which aren't leap years
            # There is no way to guess the century of a ZA ID number
            date(int(g['yy']) + 2000, int(g['mm']), int(g['dd']))
        except ValueError:
            raise ValidationError(self.error_messages['invalid'])

        if not luhn(value):
            raise ValidationError(self.error_messages['invalid'])

        return value
class ZAPostCodeField(RegexField):
    """
    A form field that validates input as a South African postcode. Valid
    postcodes must have four digits.
    """
    default_error_messages = {
        'invalid': _('Enter a valid South African postal code'),
    }

    def __init__(self, max_length=None, min_length=None, *args, **kwargs):
        # RegexField signature: (regex, max_length, min_length, ...)
        super(ZAPostCodeField, self).__init__(r'^\d{4}$',
                                              max_length, min_length, *args, **kwargs)
class ZAProvinceSelect(Select):
    """
    A Select widget that uses a list of South African Provinces as its choices.
    """
    def __init__(self, attrs=None):
        # imported lazily to avoid loading the choices list at module import
        from .za_provinces import PROVINCE_CHOICES
        super(ZAProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
| 30.962025 | 88 | 0.641864 |
9869ef549ccefe2949b690ed81856e4209a0ddf1 | 9,835 | py | Python | neighborhood_development_18/migrations/0006_bikecount_bikedailyestimate_bikegreenway_bikelane_blockgroup_busstop_campreport_campsweep_communityg.py | AraOshin/civic-sandbox-backend | 7f864cd829927e5cbe99f252ba54c488ed4dedb6 | [
"MIT"
] | 1 | 2018-11-16T21:57:25.000Z | 2018-11-16T21:57:25.000Z | neighborhood_development_18/migrations/0006_bikecount_bikedailyestimate_bikegreenway_bikelane_blockgroup_busstop_campreport_campsweep_communityg.py | nam20485/civic-sandbox-backend | 7f864cd829927e5cbe99f252ba54c488ed4dedb6 | [
"MIT"
] | null | null | null | neighborhood_development_18/migrations/0006_bikecount_bikedailyestimate_bikegreenway_bikelane_blockgroup_busstop_campreport_campsweep_communityg.py | nam20485/civic-sandbox-backend | 7f864cd829927e5cbe99f252ba54c488ed4dedb6 | [
"MIT"
] | 1 | 2019-03-07T17:24:26.000Z | 2019-03-07T17:24:26.000Z | # Generated by Django 2.0.1 on 2018-08-15 18:18
import django.contrib.gis.db.models.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('neighborhood_development_18', '0005_bikeparking_demolition'),
]
operations = [
migrations.CreateModel(
name='BikeCount',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('count_time', models.CharField(max_length=5)),
('year_2017', models.IntegerField(blank=True, db_column='2017', null=True)),
('geom', django.contrib.gis.db.models.fields.PointField(blank=True, null=True, srid=4326)),
],
options={
'db_table': 'bike_counts',
'managed': False,
},
),
migrations.CreateModel(
name='BikeDailyEstimate',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('year_2016', models.IntegerField(blank=True, db_column='2016', null=True)),
('geom', django.contrib.gis.db.models.fields.PointField(blank=True, null=True, srid=4326)),
],
options={
'db_table': 'bike_daily_estimates',
'managed': False,
},
),
migrations.CreateModel(
name='BikeGreenway',
fields=[
('objectid', models.IntegerField(primary_key=True, serialize=False)),
('geom', django.contrib.gis.db.models.fields.GeometryField(srid=4326)),
],
options={
'db_table': 'bike_greenways',
'managed': False,
},
),
migrations.CreateModel(
name='BikeLane',
fields=[
('objectid', models.IntegerField(primary_key=True, serialize=False)),
('geom', django.contrib.gis.db.models.fields.LineStringField(srid=4326)),
],
options={
'db_table': 'bike_lanes',
'managed': False,
},
),
migrations.CreateModel(
name='BlockGroup',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('year', models.CharField(max_length=50)),
('median_household_income', models.IntegerField()),
('Median_gross_rent', models.IntegerField()),
('evictions', models.IntegerField()),
('eviction_rate', models.FloatField()),
('renter_occupied_households', models.IntegerField()),
('rent_burden', models.FloatField()),
('poverty_rate', models.FloatField()),
('pctrenter_occupied', models.FloatField()),
('geom', django.contrib.gis.db.models.fields.PolygonField(srid=4326)),
],
options={
'db_table': 'evictions_blockgroups_scope',
'managed': False,
},
),
migrations.CreateModel(
name='BusStop',
fields=[
('keyitem', models.CharField(max_length=50, primary_key=True, serialize=False)),
('geom', django.contrib.gis.db.models.fields.PointField(srid=4326)),
],
options={
'db_table': 'bus_stops',
'managed': False,
},
),
migrations.CreateModel(
name='CampReport',
fields=[
('id', models.IntegerField(db_column='ItemID', primary_key=True, serialize=False)),
('date', models.DateTimeField()),
('geom', django.contrib.gis.db.models.fields.PointField(srid=4326)),
],
options={
'db_table': 'campsite_reports',
'managed': False,
},
),
migrations.CreateModel(
name='CampSweep',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('formatted_date', models.CharField(max_length=50)),
('geom', django.contrib.gis.db.models.fields.PointField(srid=4326)),
],
options={
'db_table': 'camp_sweeps_view',
'managed': False,
},
),
migrations.CreateModel(
name='CommunityGarden',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('sitename', models.CharField(max_length=50)),
('geom', django.contrib.gis.db.models.fields.GeometryField(srid=4326)),
],
options={
'db_table': 'community_gardens',
'managed': False,
},
),
migrations.CreateModel(
name='IMSNeighborhood',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('year', models.CharField(max_length=50)),
('total_population', models.IntegerField()),
('pc_household_with_children_under_18', models.FloatField()),
('pc_household_with_individuals_65_ovr', models.FloatField()),
('pc_owner_occupied_housing_units', models.FloatField()),
('pc_householders_living_alone', models.FloatField()),
('geom', django.contrib.gis.db.models.fields.GeometryField(srid=4326)),
],
options={
'db_table': 'ims_neighborhood_demographics',
'managed': False,
},
),
migrations.CreateModel(
name='MultiuseTrail',
fields=[
('ogc_fid', models.IntegerField(primary_key=True, serialize=False)),
('geom', django.contrib.gis.db.models.fields.LineStringField(srid=4326)),
],
options={
'db_table': 'active_multiuse_trail',
'managed': False,
},
),
migrations.CreateModel(
name='NeighborhoodVoterRegistrationByAgeGroup',
fields=[
('neighborhood', models.TextField()),
('id', models.IntegerField(primary_key=True, serialize=False)),
('year', models.IntegerField()),
('pct_18_25', models.FloatField()),
('pct_26_32', models.FloatField()),
('pct_33_39', models.FloatField()),
('pct_40_49', models.FloatField()),
('pct_50_plus', models.FloatField()),
('geom', django.contrib.gis.db.models.fields.GeometryField(srid=4326)),
],
options={
'db_table': 'neighborhood_voters_ages_over_time_geom',
'managed': False,
},
),
migrations.CreateModel(
name='Park',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=50)),
('acres', models.FloatField()),
('geom', django.contrib.gis.db.models.fields.GeometryField(srid=4326)),
],
options={
'db_table': 'parks',
'managed': False,
},
),
migrations.CreateModel(
name='ParksTrail',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('geom', django.contrib.gis.db.models.fields.GeometryField(srid=4326)),
],
options={
'db_table': 'parks_trails',
'managed': False,
},
),
migrations.CreateModel(
name='RailStop',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('geom', django.contrib.gis.db.models.fields.PointField(srid=4326)),
],
options={
'db_table': 'rail_stops',
'managed': False,
},
),
migrations.CreateModel(
name='ReportsByMonth',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=80)),
('formatted_date', models.CharField(max_length=50)),
('count', models.IntegerField()),
('geom', django.contrib.gis.db.models.fields.GeometryField(srid=4326)),
],
options={
'db_table': 'campsite_reports_by_month_neigh',
'managed': False,
},
),
migrations.CreateModel(
name='RetailGrocer',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('company_na', models.CharField(max_length=50)),
('geom', django.contrib.gis.db.models.fields.GeometryField(srid=4326)),
],
options={
'db_table': 'retail_grocers',
'managed': False,
},
),
migrations.CreateModel(
name='Tree',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('date_inventoried', models.DateTimeField(blank=True, null=True)),
('common', models.CharField(max_length=50)),
('geom', django.contrib.gis.db.models.fields.PointField(srid=4326)),
],
options={
'db_table': 'trees',
'managed': False,
},
),
]
| 39.497992 | 107 | 0.504525 |
d7fb5ce0cbfbbacd38f2d144c7e1e2553d89f032 | 5,787 | py | Python | controller/response.py | pondchamp/among_bots | d57eea79d0672aa596fcde6c58a0e636d04511ed | [
"Apache-2.0"
] | 4 | 2020-12-15T05:21:25.000Z | 2021-04-30T04:25:55.000Z | controller/response.py | pondchamp/among_bots | d57eea79d0672aa596fcde6c58a0e636d04511ed | [
"Apache-2.0"
] | null | null | null | controller/response.py | pondchamp/among_bots | d57eea79d0672aa596fcde6c58a0e636d04511ed | [
"Apache-2.0"
] | null | null | null | from typing import List, Optional, Dict
import random
import re
from controller.substitute import SubstituteHelper
from data import consts, dialogs, enums
from data.dialogs import Dialog
from data.enums import KeyCommand, AUMap, ResponseFlags, Substitution
from state.game import GameState
from state.context import context
from data.trust import SusScore
def generate_response(mode: KeyCommand, curr_map: AUMap, me: Optional[str],
                      flags: List[ResponseFlags]) -> Optional[str]:
    """Pick a chat line for the given strategy and substitute its placeholders.

    Side effects: appends the chosen line to the shared chat log (clearing the
    log first when every candidate was already used) and, for attacks, lowers
    the speaker's trust score towards the attacked player.
    Returns None for an unknown mode.
    """
    # Map the strategy to its dialog pool and (optionally) the sus-score
    # bucket preferred when choosing a target player.
    if mode == KeyCommand.ATTACK_:
        mode_arr, score_target = dialogs.attack, SusScore.SUS
    elif mode == KeyCommand.DEFENCE_:
        mode_arr, score_target = dialogs.defense, None
    elif mode == KeyCommand.PROBE_:
        mode_arr, score_target = dialogs.probe, None
    elif mode == KeyCommand.STATEMENT_:
        mode_arr, score_target = dialogs.statement, SusScore.SAFE
    else:
        return None
    players = context.trust_map_score_get(me)
    player_select = [p for p in players]
    if score_target is not None:
        # Prefer players in the target score bucket, but keep everyone when
        # the filter leaves nobody to pick from.
        filtered = list(filter(lambda p: players[p] == score_target.value, player_select))
        player_select = filtered if len(filtered) > 0 else player_select
    chat_log = context.chat_log
    chat_turns = context.chat_turns
    # Candidate texts by priority tier:
    #   [0] flag-matching dialogs whose turn window contains the current turn;
    #   [1] flag-matching or flag-less dialogs without any turn restriction.
    pri_arr = [[x.text for x in mode_arr if _dialog_flags_match(x, flags) and _dialog_turns_valid(x, chat_turns)],
               [x.text for x in mode_arr if _dialog_flags_match(x, flags)
                and x.min_turns is None and x.max_turns is None] +
               [x.text for x in mode_arr if x.flags is None and x.min_turns is None and x.max_turns is None]]
    # Drop lines already said this conversation.
    pri_arr_filtered = [[x for x in pri_arr[i] if x not in chat_log] for i in range(len(pri_arr))]
    if consts.debug_chat:
        print("Scores:", players)
        print("Past messages:", chat_log)
        print("Flags:", [x.name for x in flags])
        print("Dialogs:", pri_arr)
    # Pick the highest-priority tier that still has unused lines.
    select_arr = -1
    for i in range(len(pri_arr)):
        if len(pri_arr_filtered[i]) > 0:
            select_arr = i
            break
    if select_arr == -1:
        # Every candidate was already used: reset the log and allow repeats.
        # NOTE(review): select_arr stays -1 here, so indexing below picks the
        # *last* tier — confirm that fallback is intentional.
        context.chat_log_clear()
        pri_arr_filtered = pri_arr
    m = pri_arr_filtered[select_arr]
    r = m[random.randint(0, len(m) - 1)]
    context.chat_log_append(r)
    resp_sub, sub_dict = _sub_placeholders(r, curr_map, player_select)
    if mode == KeyCommand.ATTACK_:
        # Attacking someone costs the speaker trust towards that player.
        p = None
        if Substitution.PLAYER in sub_dict:
            p = sub_dict[Substitution.PLAYER]
        elif Substitution.PLAYER_NEAREST in sub_dict:
            p = sub_dict[Substitution.PLAYER_NEAREST]
        if me is not None and p is not None:
            context.trust_map_score_offset(me, p, -1.0)
    return resp_sub
def get_strategy(game_state: GameState) -> Optional[enums.KeyCommand]:
    """Choose the next chat strategy (probe/statement/attack/defence).

    Returns None when the bot's own player is known to be dead; falls back to
    a uniformly random strategy when no rule below applies.
    """
    valid = [enums.KeyCommand.PROBE_, enums.KeyCommand.STATEMENT_, enums.KeyCommand.ATTACK_, enums.KeyCommand.DEFENCE_]
    me = game_state.me
    if me is not None and not me.alive:
        return None  # dead players don't speak
    trust_scores = context.trust_map_score_get()
    # The bot's own aggregated score, when both scores and colour are known.
    me_score = trust_scores[game_state.me_colour] \
        if len(trust_scores) > 0 and game_state.me_colour else None
    # Total amount of trust information gathered so far (sum of absolute
    # pairwise scores); stays None when no trust map exists yet.
    score_sum = None
    trust_map = context.trust_map
    if trust_map is not None:
        score_sum = 0.0
        for p in trust_map:
            score_sum += sum([abs(x) for x in trust_map[p].values()])
    players = game_state.get_players()
    flags = game_state.get_response_flags()
    chat_turns = context.chat_turns
    if players is not None and len(players) == 2:  # three players left
        return enums.KeyCommand.ATTACK_
    elif me_score is not None and me_score < 0 and me_score == min(trust_scores.values()):  # counter sus
        return enums.KeyCommand.DEFENCE_
    elif score_sum is not None and score_sum < consts.PROBE_SCORE_THRESH * len(players):  # not enough info
        # NOTE(review): `players` may be None in this branch while score_sum
        # is not — confirm len(players) cannot raise here.
        if chat_turns == 0:  # opener
            if random.random() < 0.1:  # random attack opener
                return enums.KeyCommand.ATTACK_
            if _flags_match(flags, [ResponseFlags.EMERGENCY_MEET_ME, ResponseFlags.BODY_FOUND_ME]):
                return enums.KeyCommand.STATEMENT_
            elif _flags_match(flags, [ResponseFlags.EMERGENCY_MEET_OTHER, ResponseFlags.BODY_FOUND_OTHER]):
                return enums.KeyCommand.PROBE_
        return enums.KeyCommand.PROBE_ if random.randint(0, 1) == 0 else enums.KeyCommand.STATEMENT_  # 50-50
    # enough info at this point
    elif len([p for p in trust_scores if trust_scores[p] == SusScore.SUS.value]) > 0:
        return enums.KeyCommand.ATTACK_
    elif len([p for p in trust_scores if trust_scores[p] == SusScore.SAFE.value]) > 0:
        return enums.KeyCommand.DEFENCE_
    else:
        print('Unable to determine a strategy - picking at random.')
        return valid[random.randint(0, len(valid) - 1)]
def _sub_placeholders(resp: str, curr_map: AUMap, players: List[str]) -> (str, Dict):
    """Replace every known ``[PLACEHOLDER]`` token in *resp*.

    For each substitution supported by SubstituteHelper, one option is drawn
    at random and substituted (all occurrences at once). The returned dict
    records which option was used for each placeholder that actually appeared
    in the text.
    """
    applied = {}
    helper = SubstituteHelper(players)
    for placeholder in helper.substitutions:
        options = helper.get(curr_map, placeholder)
        # One RNG draw per placeholder, whether or not it occurs in the text.
        pick = random.randint(0, len(options) - 1)
        substituted = re.sub(fr"\[{placeholder.value}]", options[pick], resp)
        if substituted != resp:
            applied[placeholder] = options[pick]
            resp = substituted
    return resp, applied
def _dialog_turns_valid(dialog: Dialog, chat_turns: int) -> bool:
    """Return True when the current turn falls inside the dialog's window.

    A bound of None means that side of the window is open.
    """
    if dialog.max_turns is not None and dialog.max_turns < chat_turns:
        return False
    if dialog.min_turns is not None and dialog.min_turns > chat_turns:
        return False
    return True
def _dialog_flags_match(dialog: Dialog, flags: List[ResponseFlags]) -> bool:
    """True when the dialog is flagged PRIORITY or shares a flag with *flags*.

    Dialogs with no flags at all never match.
    """
    if dialog.flags is None:
        return False
    return ResponseFlags.PRIORITY in dialog.flags or _flags_match(dialog.flags, flags)
def _flags_match(a: List[ResponseFlags], b: List[ResponseFlags]) -> bool:
    """Return True when the two flag lists share at least one flag."""
    return not set(a).isdisjoint(b)
| 43.840909 | 119 | 0.676689 |
3dafaef0b992ecdc93495da228f43e1098ccb3e5 | 582 | py | Python | asteroids/__main__.py | rohinivsenthil/Asteroids | 75baf2c65515002c72088695eda54c66bc963eb9 | [
"MIT"
] | 8 | 2019-05-25T19:11:46.000Z | 2019-07-04T14:59:20.000Z | asteroids/__main__.py | rohinivsenthil/Asteroids | 75baf2c65515002c72088695eda54c66bc963eb9 | [
"MIT"
] | null | null | null | asteroids/__main__.py | rohinivsenthil/Asteroids | 75baf2c65515002c72088695eda54c66bc963eb9 | [
"MIT"
] | null | null | null | import json
import random
import pygame
from pygame.locals import *
from .pages import game
# Load user-editable settings once at import time; the file must sit in the
# process working directory.
with open("config.json") as configfile:
    config = json.load(configfile)

SCREEN_SIZE = config["screenSize"]  # passed to pygame.display.set_mode in main()
FRAMERATE = config["framerate"]  # NOTE(review): unused in this module — confirm the game pages read it
def main():
    """Initialise pygame and run game rounds until the player quits."""
    # A smaller mixer buffer lowers sound latency.
    pygame.mixer.pre_init(buffer=1024)
    pygame.init()
    pygame.display.set_caption("Asteroids")
    screen = pygame.display.set_mode(SCREEN_SIZE)
    # Renamed from `exit`, which shadowed the builtin of the same name.
    finished = False
    while not finished:
        # One round per iteration; the third value reports whether the
        # player asked to quit.
        score, time_played, finished = game(screen)
    pygame.quit()


if __name__ == "__main__":
    main()
| 17.636364 | 50 | 0.652921 |
7a4c65b2f7ebeabd0b80fde0c87914edc8dae337 | 1,303 | py | Python | tasks/agent/observation/cv/panorama_encoder.py | NCTUMLlab/Je-Wei-Jang-AVAST_Attentive_Variational_State_Tracker_for_Vision-and-Language-Navigation | 3c3cf1ce4ce0935516abf880f0a06210923eb081 | [
"MIT"
] | null | null | null | tasks/agent/observation/cv/panorama_encoder.py | NCTUMLlab/Je-Wei-Jang-AVAST_Attentive_Variational_State_Tracker_for_Vision-and-Language-Navigation | 3c3cf1ce4ce0935516abf880f0a06210923eb081 | [
"MIT"
] | null | null | null | tasks/agent/observation/cv/panorama_encoder.py | NCTUMLlab/Je-Wei-Jang-AVAST_Attentive_Variational_State_Tracker_for_Vision-and-Language-Navigation | 3c3cf1ce4ce0935516abf880f0a06210923eb081 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
class PanoramaEncoder(nn.Module):
    """Soft-attention pooling over panoramic per-view vision features.

    The tracker hidden state is projected into vision-feature space and used
    as an attention query over the view feature vectors; the encoder returns
    the attention-weighted sum of those vectors.
    """

    def __init__(
        self,
        config: dict,
        vision_feature_size: int
    ) -> None:
        super(PanoramaEncoder, self).__init__()
        tracker_cfg = config['state_tracker']
        query_dim = tracker_cfg['obs']['vision']['attn']['query_dim']
        # Two bias-free linear maps project the query from tracker space into
        # vision-feature space; dropout regularises the projected query.
        self.query_layer = nn.Sequential(
            nn.Linear(query_dim, vision_feature_size, bias=False),
            nn.Linear(vision_feature_size, vision_feature_size, bias=False),
            nn.Dropout(p=tracker_cfg['dropout_ratio']),
        )
        self.softmax = nn.Softmax(dim=1)
        # Kept as an alias so callers may use either `encode` or `forward`.
        self.encode = self.forward

    def forward(
        self,
        visions: torch.Tensor,
        h_t: torch.Tensor
    ) -> torch.Tensor:
        # Drop the leading singleton axis: batch x v_num x v_dim.
        panorama = visions.squeeze(0)
        # Project the hidden state into a column query: batch x v_dim x 1.
        query = self.query_layer(h_t).unsqueeze(2)
        # Dot-product attention scores over the views: batch x v_num.
        scores = torch.bmm(panorama, query).squeeze(2)
        weights = self.softmax(scores)
        # Attention-weighted sum of view features: batch x v_dim.
        pooled = torch.bmm(weights.unsqueeze(1), panorama).squeeze(1)
        return pooled
def main():
    """No-op entry point; this module is normally imported as a library."""
    return None


if __name__ == '__main__':
    main()
| 28.955556 | 99 | 0.568688 |
63f51649592962fe141c943cd3aac219080dd44e | 564 | py | Python | wsgi/iportalen_django/events/migrations/0044_auto_20160306_1546.py | I-sektionen/i-portalen | 1713e5814d40c0da1bf3278d60a561e7d3df3550 | [
"MIT"
] | 4 | 2016-09-21T17:06:01.000Z | 2018-02-06T16:36:44.000Z | wsgi/iportalen_django/events/migrations/0044_auto_20160306_1546.py | I-sektionen/i-portalen | 1713e5814d40c0da1bf3278d60a561e7d3df3550 | [
"MIT"
] | 149 | 2016-03-07T23:50:47.000Z | 2022-03-11T23:16:33.000Z | wsgi/iportalen_django/events/migrations/0044_auto_20160306_1546.py | I-sektionen/i-portalen | 1713e5814d40c0da1bf3278d60a561e7d3df3550 | [
"MIT"
] | 1 | 2016-03-07T23:02:06.000Z | 2016-03-07T23:02:06.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefines the choice set of `Event.status`."""

    # Must be applied after 0043 in the events app.
    dependencies = [
        ('events', '0043_auto_20160306_1419'),
    ]

    operations = [
        migrations.AlterField(
            model_name='event',
            name='status',
            # Single-letter stored codes with Swedish display labels:
            # d=draft, b=awaiting approval, r=rejected, a=approved,
            # c=cancelled, e=awaiting cancellation.
            field=models.CharField(default='d', max_length=1, choices=[('d', 'utkast'), ('b', 'väntar på godkännande'), ('r', 'Avslaget'), ('a', 'Godkänt'), ('c', 'Inställt'), ('e', 'väntar på att bli inställd')]),
        ),
    ]
| 28.2 | 214 | 0.586879 |
727d87df93072276c71ba65d7f6ff70e5eb75aa8 | 402 | py | Python | setup.py | tveebot/communication | 33f92f6027cb812c887d8a9d7b9ce8b1543adfa8 | [
"MIT"
] | null | null | null | setup.py | tveebot/communication | 33f92f6027cb812c887d8a9d7b9ce8b1543adfa8 | [
"MIT"
] | null | null | null | setup.py | tveebot/communication | 33f92f6027cb812c887d8a9d7b9ce8b1543adfa8 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
# Package metadata; find_packages() discovers every package under the repo
# root so new sub-packages are picked up automatically.
setup(
    name='tveebot-communication',
    version='0.1',
    # Adjacent string literals are concatenated into a single description.
    description='Set of packages and modules to enable communication '
                'between clients and daemons',
    url='https://github.com/tveebot/communication',
    license='MIT',
    author='David Fialho',
    author_email='fialho.david@protonmail.com',
    packages=find_packages(),
)
| 26.8 | 70 | 0.686567 |
0a02a561c18189b59e1f27fd9281796b1675466a | 1,902 | py | Python | docs/core/examples/udpbroadcast.py | hawkowl/twisted | c413aac3888dea2202c0dc26f978d7f88b4b837a | [
"Unlicense",
"MIT"
] | 2 | 2015-11-08T12:59:22.000Z | 2018-10-19T01:06:40.000Z | docs/core/examples/udpbroadcast.py | hawkowl/twisted | c413aac3888dea2202c0dc26f978d7f88b4b837a | [
"Unlicense",
"MIT"
] | 5 | 2020-06-05T18:16:39.000Z | 2022-01-13T00:45:49.000Z | docs/core/examples/udpbroadcast.py | hawkowl/twisted | c413aac3888dea2202c0dc26f978d7f88b4b837a | [
"Unlicense",
"MIT"
] | 1 | 2020-12-18T11:13:15.000Z | 2020-12-18T11:13:15.000Z | #!/usr/bin/env python
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An example demonstrating how to send and receive UDP broadcast messages.
Every second, this application will send out a PING message with a unique ID.
It will respond to all PING messages with a PONG (including ones sent by
itself). You can tell how many copies of this script are running on the local
network by the number of "RECV PONG".
Run using twistd:
$ twistd -ny udpbroadcast.py
"""
from uuid import uuid4
from twisted.application import internet, service
from twisted.internet.protocol import DatagramProtocol
from twisted.python import log
class PingPongProtocol(DatagramProtocol):
    """Broadcast PING datagrams and answer every PING with a matching PONG.

    Payloads are handled as bytes: Twisted's UDP transport requires byte
    strings on Python 3, and the received datagram is bytes there, so the
    original str comparisons (``datagram[:4] == "PING"``) could never match.
    """
    noisy = False

    def __init__(self, controller, port):
        # `controller` is accepted for interface compatibility with the
        # Broadcaster that constructs this protocol, but is unused here.
        self.port = port

    def startProtocol(self):
        # Broadcasting must be explicitly enabled on the UDP socket.
        self.transport.setBroadcastAllowed(True)

    def sendPing(self):
        """Broadcast a PING carrying a unique hex identifier."""
        pingMsg = "PING {0}".format(uuid4().hex).encode("ascii")
        self.transport.write(pingMsg, ('<broadcast>', self.port))
        log.msg("SEND " + pingMsg.decode("ascii"))

    def datagramReceived(self, datagram, addr):
        """Log every PING/PONG seen; reply to each PING with a PONG."""
        if datagram[:4] == b"PING":
            uuid = datagram[5:]
            pongMsg = b"PONG " + uuid
            self.transport.write(pongMsg, ('<broadcast>', self.port))
            log.msg("RECV " + datagram.decode("ascii"))
        elif datagram[:4] == b"PONG":
            log.msg("RECV " + datagram.decode("ascii"))
class Broadcaster(object):
    """Builds the twistd service tree: a UDP listener plus a 1-second pinger."""

    def ping(self, proto):
        """TimerService callback: emit one broadcast PING."""
        proto.sendPing()

    def makeService(self):
        """Assemble and return the twistd Application object."""
        app = service.Application('Broadcaster')
        root = service.MultiService()
        root.setServiceParent(app)
        protocol = PingPongProtocol(controller=self, port=8555)
        udp_service = internet.UDPServer(8555, protocol)
        timer_service = internet.TimerService(1, self.ping, protocol)
        root.addService(udp_service)
        root.addService(timer_service)
        return app


# twistd discovers and runs this module-level `application` object.
application = Broadcaster().makeService()
| 25.36 | 77 | 0.673502 |
befc722db06b69c0b2485ae3dc98487c305e4901 | 8,899 | py | Python | second/protos/box_coder_pb2.py | yukke42/SECOND | e4d52f590844c4c53c25ec1688fdc6a045ebbf13 | [
"MIT"
] | null | null | null | second/protos/box_coder_pb2.py | yukke42/SECOND | e4d52f590844c4c53c25ec1688fdc6a045ebbf13 | [
"MIT"
] | null | null | null | second/protos/box_coder_pb2.py | yukke42/SECOND | e4d52f590844c4c53c25ec1688fdc6a045ebbf13 | [
"MIT"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: second/protos/box_coder.proto
import sys
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_b = sys.version_info[0] < 3 and (lambda x: x) or (
lambda x: x.encode('latin1'))
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='second/protos/box_coder.proto',
package='second.protos',
syntax='proto3',
serialized_options=None,
serialized_pb=_b(
'\n\x1dsecond/protos/box_coder.proto\x12\rsecond.protos\"\x8b\x01\n\x08\x42oxCoder\x12=\n\x12ground_box3d_coder\x18\x01 \x01(\x0b\x32\x1f.second.protos.GroundBox3dCoderH\x00\x12\x33\n\rbev_box_coder\x18\x02 \x01(\x0b\x32\x1a.second.protos.BevBoxCoderH\x00\x42\x0b\n\tbox_coder\"C\n\x10GroundBox3dCoder\x12\x12\n\nlinear_dim\x18\x01 \x01(\x08\x12\x1b\n\x13\x65ncode_angle_vector\x18\x02 \x01(\x08\"`\n\x0b\x42\x65vBoxCoder\x12\x12\n\nlinear_dim\x18\x01 \x01(\x08\x12\x1b\n\x13\x65ncode_angle_vector\x18\x02 \x01(\x08\x12\x0f\n\x07z_fixed\x18\x03 \x01(\x02\x12\x0f\n\x07h_fixed\x18\x04 \x01(\x02\x62\x06proto3'
))
_BOXCODER = _descriptor.Descriptor(
name='BoxCoder',
full_name='second.protos.BoxCoder',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ground_box3d_coder',
full_name='second.protos.BoxCoder.ground_box3d_coder',
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bev_box_coder',
full_name='second.protos.BoxCoder.bev_box_coder',
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='box_coder',
full_name='second.protos.BoxCoder.box_coder',
index=0,
containing_type=None,
fields=[]),
],
serialized_start=49,
serialized_end=188,
)
_GROUNDBOX3DCODER = _descriptor.Descriptor(
name='GroundBox3dCoder',
full_name='second.protos.GroundBox3dCoder',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='linear_dim',
full_name='second.protos.GroundBox3dCoder.linear_dim',
index=0,
number=1,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='encode_angle_vector',
full_name='second.protos.GroundBox3dCoder.encode_angle_vector',
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[],
serialized_start=190,
serialized_end=257,
)
_BEVBOXCODER = _descriptor.Descriptor(
name='BevBoxCoder',
full_name='second.protos.BevBoxCoder',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='linear_dim',
full_name='second.protos.BevBoxCoder.linear_dim',
index=0,
number=1,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='encode_angle_vector',
full_name='second.protos.BevBoxCoder.encode_angle_vector',
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='z_fixed',
full_name='second.protos.BevBoxCoder.z_fixed',
index=2,
number=3,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='h_fixed',
full_name='second.protos.BevBoxCoder.h_fixed',
index=3,
number=4,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[],
serialized_start=259,
serialized_end=355,
)
_BOXCODER.fields_by_name['ground_box3d_coder'].message_type = _GROUNDBOX3DCODER
_BOXCODER.fields_by_name['bev_box_coder'].message_type = _BEVBOXCODER
_BOXCODER.oneofs_by_name['box_coder'].fields.append(
_BOXCODER.fields_by_name['ground_box3d_coder'])
_BOXCODER.fields_by_name[
'ground_box3d_coder'].containing_oneof = _BOXCODER.oneofs_by_name[
'box_coder']
_BOXCODER.oneofs_by_name['box_coder'].fields.append(
_BOXCODER.fields_by_name['bev_box_coder'])
_BOXCODER.fields_by_name[
'bev_box_coder'].containing_oneof = _BOXCODER.oneofs_by_name['box_coder']
DESCRIPTOR.message_types_by_name['BoxCoder'] = _BOXCODER
DESCRIPTOR.message_types_by_name['GroundBox3dCoder'] = _GROUNDBOX3DCODER
DESCRIPTOR.message_types_by_name['BevBoxCoder'] = _BEVBOXCODER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
BoxCoder = _reflection.GeneratedProtocolMessageType(
'BoxCoder',
(_message.Message, ),
dict(DESCRIPTOR=_BOXCODER,
__module__='second.protos.box_coder_pb2'
# @@protoc_insertion_point(class_scope:second.protos.BoxCoder)
))
_sym_db.RegisterMessage(BoxCoder)
GroundBox3dCoder = _reflection.GeneratedProtocolMessageType(
'GroundBox3dCoder',
(_message.Message, ),
dict(DESCRIPTOR=_GROUNDBOX3DCODER,
__module__='second.protos.box_coder_pb2'
# @@protoc_insertion_point(class_scope:second.protos.GroundBox3dCoder)
))
_sym_db.RegisterMessage(GroundBox3dCoder)
BevBoxCoder = _reflection.GeneratedProtocolMessageType(
'BevBoxCoder',
(_message.Message, ),
dict(DESCRIPTOR=_BEVBOXCODER,
__module__='second.protos.box_coder_pb2'
# @@protoc_insertion_point(class_scope:second.protos.BevBoxCoder)
))
_sym_db.RegisterMessage(BevBoxCoder)
# @@protoc_insertion_point(module_scope)
| 32.59707 | 616 | 0.627374 |
5adb869006e29bda6ad1ff4cc0663ce6e5c63286 | 2,870 | py | Python | percolation/analysis/percolating_cluster_strength.py | cerisola/fiscomp | 28c2b4cf5e356c67df983ad393011ad6a1e4a654 | [
"MIT"
] | null | null | null | percolation/analysis/percolating_cluster_strength.py | cerisola/fiscomp | 28c2b4cf5e356c67df983ad393011ad6a1e4a654 | [
"MIT"
] | null | null | null | percolation/analysis/percolating_cluster_strength.py | cerisola/fiscomp | 28c2b4cf5e356c67df983ad393011ad6a1e4a654 | [
"MIT"
] | null | null | null | import importlib
import numpy as np
from scipy.stats import linregress
import matplotlib.pyplot as plt
import load_data
import common
import clusters
# Reload the project modules so an interactive session (e.g. IPython %run)
# picks up source edits without restarting the interpreter.
importlib.reload(load_data)
importlib.reload(common)
importlib.reload(clusters)
def fit_beta_percolating_cluster_strength(size, count, percolated, L, p_occupation,
                                          idx_min=0, idx_max=30):
    """Fit the critical exponent from the percolating-cluster strength curve.

    A straight line is fitted to log(strength) versus log(p_occupation),
    restricted to samples with positive strength and to the
    [idx_min, idx_max) window of those samples (the defaults reproduce the
    previously hard-coded window).

    Returns (slope, intercept, std_err, chi2): the fitted exponent, the
    log-space intercept, the slope's standard error, and the reduced
    chi-square of the residuals in log space.
    """
    strength = clusters.percolating_cluster_strength_list(size, percolated, L)
    # Only positive strengths can be log-transformed.
    idx_positive = np.where(strength > 0)[0]
    # Compute each log-log sample set once instead of re-slicing four times.
    log_p = np.log(p_occupation[idx_positive][idx_min:idx_max])
    log_strength = np.log(strength[idx_positive][idx_min:idx_max])
    slope, intercept, _, _, std_err = linregress(log_p, log_strength)
    expected = slope * log_p + intercept
    ddof = log_strength.size - 2  # two fitted parameters
    chi2 = np.sum((log_strength - expected)**2) / ddof
    return slope, intercept, std_err, chi2
def plot_percolating_cluster_strength(size, count, percolated, L, p_occupation):
    """Plot P_inf versus p on linear axes, then positive samples on log-log axes.

    `L` is indexed as L[0] for the titles, so all samples are assumed to
    share a single lattice size.
    """
    strength = clusters.percolating_cluster_strength_list(size, percolated, L)
    plt.figure()
    plt.title('Percolating cluster strength for L = {}'.format(L[0]))
    plt.plot(p_occupation, strength, 'o', markersize=2.0)
    plt.grid()
    plt.xlabel('$p$')
    plt.ylabel('$P_{\infty}$')
    plt.show()
    # Zero strengths cannot appear on log axes; keep only positive samples.
    idx_positive = np.where(strength > 0)[0]
    plt.figure()
    plt.title('Percolating cluster strength for L = {} (log scale)'.format(L[0]))
    plt.loglog(p_occupation[idx_positive], strength[idx_positive], 'o', markersize=2.0)
    plt.grid()
    plt.xlabel('$p$')
    plt.ylabel('$P_{\infty}$')
    plt.show()
def plot_beta_fit(size, count, percolated, L, p_occupation):
    """Overlay the fitted power law on the measured strength (log-log axes)."""
    strength = clusters.percolating_cluster_strength_list(size, percolated, L)
    slope, intercept, _, _ = fit_beta_percolating_cluster_strength(size, count, percolated, L, p_occupation)
    # np.where without [0] yields a 1-tuple of index arrays; it still works
    # as a fancy index below.
    idx_positive = np.where(strength > 0)
    plt.figure()
    plt.title('$\\beta$ fit for L = {} [$beta$ = {:.2f}]'.format(L[0], slope))
    plt.loglog(p_occupation[idx_positive], strength[idx_positive], 'o', markersize=2.0, label='observations')
    plt.loglog(p_occupation[idx_positive], np.exp(intercept)*p_occupation[idx_positive]**slope, '-', label='fit')
    plt.grid()
    plt.xlabel('$p$')
    plt.ylabel('$P_{\infty}$')
    # NOTE(review): a lower limit of 0 is not representable on a log axis —
    # confirm matplotlib's clamping gives the intended view here.
    plt.ylim((0, 1.1*np.max(strength[idx_positive])))
    plt.show()
# Interactive figure windows unless figures are meant to be saved instead.
save_figures = False
if not save_figures:
    plt.ion()

# Lattice size used to select the input files.
L = 512
files_root_prefix = 'print/data/probability_sweep/v5/'
files = load_data.get_cluster_statistics_file_list(files_root_prefix, L=L)
# NOTE(review): this unpack rebinds `L` from the scalar above to whatever
# load_cluster_statistics_file_list returns per sample — confirm intended.
size, count, percolated, L, p_occupation, _ = load_data.load_cluster_statistics_file_list(files)

plot_percolating_cluster_strength(size, count, percolated, L, p_occupation)
plot_beta_fit(size, count, percolated, L, p_occupation)

if not save_figures:
    plt.ioff()
| 38.266667 | 113 | 0.710801 |
ade4a623d4ffedc744e8771e52233908cd8bf41b | 501 | py | Python | desafios/iniciante/diferenca.py | monikode/ipc-python | 34570d45658e6943c78815fc71072f7fee0e7a02 | [
"MIT"
] | 9 | 2021-08-31T04:25:51.000Z | 2021-09-16T06:40:30.000Z | desafios/iniciante/diferenca.py | monikode/ipc-python | 34570d45658e6943c78815fc71072f7fee0e7a02 | [
"MIT"
] | 2 | 2021-09-05T20:49:01.000Z | 2021-09-06T23:34:37.000Z | desafios/iniciante/diferenca.py | monikode/ipc-python | 34570d45658e6943c78815fc71072f7fee0e7a02 | [
"MIT"
] | 8 | 2021-08-31T23:55:18.000Z | 2021-09-29T23:33:49.000Z | '''
Leia quatro valores inteiros A, B, C e D. A seguir, calcule e mostre a diferença do produto de A e B pelo produto de C e D segundo a fórmula: DIFERENCA = (A * B - C * D).
Entrada
O arquivo de entrada contém 4 valores inteiros.
Saída
Imprima a mensagem DIFERENCA com todas as letras maiúsculas, conforme exemplo abaixo, com um espaço em branco antes e depois da igualdade.
'''
# One integer per input line, read in the order A, B, C, D.
a, b, c, d = (int(input()) for _ in range(4))
# Required output format: a single space on each side of '='.
print("DIFERENCA = " + str(a * b - c * d))
fad8d014c1c58132a8d74a6bc09c6b22f969874d | 4,088 | py | Python | rapid7vmconsole/models/resources_policy_override.py | kiblik/vm-console-client-python | 038f6d33e8b2654a558326c6eb87f09ee23e0e22 | [
"MIT"
] | 61 | 2018-05-17T05:57:09.000Z | 2022-03-08T13:59:21.000Z | rapid7vmconsole/models/resources_policy_override.py | kiblik/vm-console-client-python | 038f6d33e8b2654a558326c6eb87f09ee23e0e22 | [
"MIT"
] | 33 | 2018-06-26T16:21:14.000Z | 2022-03-03T20:55:47.000Z | rapid7vmconsole/models/resources_policy_override.py | kiblik/vm-console-client-python | 038f6d33e8b2654a558326c6eb87f09ee23e0e22 | [
"MIT"
] | 43 | 2018-02-24T05:45:53.000Z | 2022-03-31T22:15:16.000Z | # coding: utf-8
"""
Python InsightVM API Client
OpenAPI spec version: 3
Contact: support@rapid7.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ResourcesPolicyOverride(object):
    """Paged collection of ``PolicyOverride`` resources returned by the API.

    NOTE: This class is auto generated by the swagger code generator program.
    """

    # Maps each attribute name to its declared (documentation-only) type.
    swagger_types = {
        'links': 'list[Link]',
        'resources': 'list[PolicyOverride]'
    }

    # Maps each attribute name to its JSON key in the API definition.
    attribute_map = {
        'links': 'links',
        'resources': 'resources'
    }

    def __init__(self, links=None, resources=None):  # noqa: E501
        """ResourcesPolicyOverride - a model defined in Swagger

        :param links: Hypermedia links to corresponding or related resources.
        :param resources: The resources returned.
        """
        self._links = None
        self._resources = None
        self.discriminator = None
        if links is not None:
            self.links = links
        if resources is not None:
            self.resources = resources

    @property
    def links(self):
        """Gets the links of this ResourcesPolicyOverride.  # noqa: E501

        Hypermedia links to corresponding or related resources.  # noqa: E501

        :return: The links of this ResourcesPolicyOverride.  # noqa: E501
        :rtype: list[Link]
        """
        return self._links

    @links.setter
    def links(self, links):
        """Sets the links of this ResourcesPolicyOverride.

        Hypermedia links to corresponding or related resources.  # noqa: E501

        :param links: The links of this ResourcesPolicyOverride.  # noqa: E501
        :type: list[Link]
        """
        self._links = links

    @property
    def resources(self):
        """Gets the resources of this ResourcesPolicyOverride.  # noqa: E501

        The resources returned.  # noqa: E501

        :return: The resources of this ResourcesPolicyOverride.  # noqa: E501
        :rtype: list[PolicyOverride]
        """
        return self._resources

    @resources.setter
    def resources(self, resources):
        """Sets the resources of this ResourcesPolicyOverride.

        The resources returned.  # noqa: E501

        :param resources: The resources of this ResourcesPolicyOverride.  # noqa: E501
        :type: list[PolicyOverride]
        """
        self._resources = resources

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # ``dict.items()`` iterates correctly on both Python 2 and 3, so the
        # ``six.iteritems`` compatibility shim is unnecessary here.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize model elements; pass primitives through.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(ResourcesPolicyOverride, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ResourcesPolicyOverride):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 28.388889 | 86 | 0.578767 |
8c63c5eae509b42d89dc38e55e3f3387c44a3631 | 30,570 | py | Python | cookbook/deployment/cluster/auth_setup.py | scarf-sh/flytesnacks | d873d891869133ad60685fa205937448a2b7178e | [
"Apache-2.0"
] | 1 | 2021-08-20T17:28:42.000Z | 2021-08-20T17:28:42.000Z | cookbook/deployment/cluster/auth_setup.py | jeevb/flytesnacks | f32f32482088d717b399864c5470ae546ebcba7d | [
"Apache-2.0"
] | null | null | null | cookbook/deployment/cluster/auth_setup.py | jeevb/flytesnacks | f32f32482088d717b399864c5470ae546ebcba7d | [
"Apache-2.0"
] | null | null | null | """
Authentication in Flyte
-----------------------
Flyte ships with a canonical implementation of OpenIDConnect client and OAuth2 Server, integrating seamlessly into an organization's existing identity provider.
This section includes:
- :ref:`Overview <auth-overview>`
- :ref:`Authentication Setup <auth-setup>`
- :ref:`Migrating Your Authentication Config <migrating-auth-config>`
- :ref:`References <auth-references>`
.. _auth-overview:
########
Overview
########
Flyte system consists of multiple components. For the purposes of this document, let's categorize them into server-side and client-side components:
- **Admin**: A server-side control plane component accessible from Console, cli and other backends.
- **Catalog**: A server-side control plane component accessible from Console, cli and other backends.
- **Console**: A client-side single page react app.
- **flyte-cli**: A python-based client-side command line interface that interacts with Admin and Catalog.
- **flytectl**: A go-based client-side command line interface that interacts with Admin and Catalog.
- **Propeller**: A server-side data plane component that interacts with both Admin and Catalog services.
**************
OpenID Connect
**************
Flyte supports OpenID Connect. A defacto standard for user authentication. After configuring OpenID Connect, users accessing flyte console or flytectl
(or other 3rd party apps) will be prompted to authenticate using the configured provider.
.. image:: https://mermaid.ink/img/eyJjb2RlIjoic2VxdWVuY2VEaWFncmFtXG4lJXtjb25maWc6IHsgJ2ZvbnRGYW1pbHknOiAnTWVubG8nLCAnZm9udFNpemUnOiAxMCwgJ2ZvbnRXZWlnaHQnOiAxMDB9IH0lJVxuICAgIGF1dG9udW1iZXJcbiAgICBVc2VyLT4-K0Jyb3dzZXI6IC9ob21lXG4gICAgQnJvd3Nlci0-PitDb25zb2xlOiAvaG9tZVxuICAgIENvbnNvbGUtPj4tQnJvd3NlcjogMzAyIC9sb2dpblxuICAgIEJyb3dzZXItPj4rQWRtaW46IC9sb2dpblxuICAgIEFkbWluLT4-LUJyb3dzZXI6IElkcC5jb20vb2lkY1xuICAgIEJyb3dzZXItPj4rSWRwOiBJZHAuY29tL29pZGNcbiAgICBJZHAtPj4tQnJvd3NlcjogMzAyIC9sb2dpblxuICAgIEJyb3dzZXItPj4tVXNlcjogRW50ZXIgdXNlci9wYXNzXG4gICAgVXNlci0-PitCcm93c2VyOiBsb2dpblxuICAgIEJyb3dzZXItPj4rSWRwOiBTdWJtaXQgdXNlcm5hbWUvcGFzc1xuICAgIElkcC0-Pi1Ccm93c2VyOiBhZG1pbi8_YXV0aENvZGU9PGFiYz5cbiAgICBCcm93c2VyLT4-K0FkbWluOiBhZG1pbi9hdXRoQ29kZT08YWJjPlxuICAgIEFkbWluLT4-K0lkcDogRXhjaGFuZ2UgVG9rZW5zXG4gICAgSWRwLT4-LUFkbWluOiBpZHQsIGF0LCBydFxuICAgIEFkbWluLT4-K0Jyb3dzZXI6IFdyaXRlIENvb2tpZXMgJiBSZWRpcmVjdCB0byAvY29uc29sZVxuICAgIEJyb3dzZXItPj4rQ29uc29sZTogL2hvbWVcbiAgICBCcm93c2VyLT4-LVVzZXI6IFJlbmRlciAvaG9tZVxuIiwibWVybWFpZCI6eyJ0aGVtZSI6Im5ldXRyYWwifSwidXBkYXRlRWRpdG9yIjpmYWxzZX0
:target: https://mermaid-js.github.io/mermaid-live-editor/#/edit/eyJjb2RlIjoic2VxdWVuY2VEaWFncmFtXG4lJXtjb25maWc6IHsgJ2ZvbnRGYW1pbHknOiAnTWVubG8nLCAnZm9udFNpemUnOiAxMCwgJ2ZvbnRXZWlnaHQnOiAxMDB9IH0lJVxuICAgIGF1dG9udW1iZXJcbiAgICBVc2VyLT4-K0Jyb3dzZXI6IC9ob21lXG4gICAgQnJvd3Nlci0-PitDb25zb2xlOiAvaG9tZVxuICAgIENvbnNvbGUtPj4tQnJvd3NlcjogMzAyIC9sb2dpblxuICAgIEJyb3dzZXItPj4rQWRtaW46IC9sb2dpblxuICAgIEFkbWluLT4-LUJyb3dzZXI6IElkcC5jb20vb2lkY1xuICAgIEJyb3dzZXItPj4rSWRwOiBJZHAuY29tL29pZGNcbiAgICBJZHAtPj4tQnJvd3NlcjogMzAyIC9sb2dpblxuICAgIEJyb3dzZXItPj4tVXNlcjogRW50ZXIgdXNlci9wYXNzXG4gICAgVXNlci0-PitCcm93c2VyOiBsb2dpblxuICAgIEJyb3dzZXItPj4rSWRwOiBTdWJtaXQgdXNlcm5hbWUvcGFzc1xuICAgIElkcC0-Pi1Ccm93c2VyOiBhZG1pbi8_YXV0aENvZGU9PGFiYz5cbiAgICBCcm93c2VyLT4-K0FkbWluOiBhZG1pbi9hdXRoQ29kZT08YWJjPlxuICAgIEFkbWluLT4-K0lkcDogRXhjaGFuZ2UgVG9rZW5zXG4gICAgSWRwLT4-LUFkbWluOiBpZHQsIGF0LCBydFxuICAgIEFkbWluLT4-K0Jyb3dzZXI6IFdyaXRlIENvb2tpZXMgJiBSZWRpcmVjdCB0byAvY29uc29sZVxuICAgIEJyb3dzZXItPj4rQ29uc29sZTogL2hvbWVcbiAgICBCcm93c2VyLT4-LVVzZXI6IFJlbmRlciAvaG9tZVxuIiwibWVybWFpZCI6eyJ0aGVtZSI6Im5ldXRyYWwifSwidXBkYXRlRWRpdG9yIjpmYWxzZX0
:width: 600
:alt: Flyte UI Swimlane
******
OAuth2
******
Flyte supports OAuth2 to control access to 3rd party and native apps. FlyteAdmin comes with a built in Authorization Server that can perform 3-legged
and 2-legged OAuth2 flows. It also supports delegating these responsibilities to an external Authorization Server.
Service Authentication using OAuth2
===================================
Propeller (and potentially other non-user facing services) can also authenticate using client_credentials to the Idp and be granted an
access_token valid to be used with admin and other backend services.
Using FlyteAdmin's builtin Authorization Server:
.. image:: https://mermaid.ink/img/eyJjb2RlIjoic2VxdWVuY2VEaWFncmFtXG4gICAgUHJvcGVsbGVyLT4-K0FkbWluOiAvdG9rZW4_Y2xpZW50X2NyZWRzJnNjb3BlPWh0dHBzOi8vYWRtaW4vXG4gICAgQWRtaW4tPj4tUHJvcGVsbGVyOiBhY2Nlc3NfdG9rZW5cbiAgICBQcm9wZWxsZXItPj4rQWRtaW46IC9saXN0X3Byb2plY3RzP3Rva2VuPWFjY2Vzc190b2tlbiIsIm1lcm1haWQiOnsidGhlbWUiOiJuZXV0cmFsIn0sInVwZGF0ZUVkaXRvciI6ZmFsc2V9
:target: https://mermaid-js.github.io/mermaid-live-editor/#/edit/eyJjb2RlIjoic2VxdWVuY2VEaWFncmFtXG4gICAgUHJvcGVsbGVyLT4-K0FkbWluOiAvdG9rZW4_Y2xpZW50X2NyZWRzJnNjb3BlPWh0dHBzOi8vYWRtaW4vXG4gICAgQWRtaW4tPj4tUHJvcGVsbGVyOiBhY2Nlc3NfdG9rZW5cbiAgICBQcm9wZWxsZXItPj4rQWRtaW46IC9saXN0X3Byb2plY3RzP3Rva2VuPWFjY2Vzc190b2tlbiIsIm1lcm1haWQiOnsidGhlbWUiOiJuZXV0cmFsIn0sInVwZGF0ZUVkaXRvciI6ZmFsc2V9
:width: 600
:alt: Service Authentication Swimlane
Using an External Authorization Server:
.. image:: https://mermaid.ink/img/eyJjb2RlIjoic2VxdWVuY2VEaWFncmFtXG4gICAgUHJvcGVsbGVyLT4-K0V4dGVybmFsIEF1dGhvcml6YXRpb24gU2VydmVyOiAvdG9rZW4_Y2xpZW50X2NyZWRzJnNjb3BlPWh0dHBzOi8vYWRtaW4vXG4gICAgRXh0ZXJuYWwgQXV0aG9yaXphdGlvbiBTZXJ2ZXItPj4tUHJvcGVsbGVyOiBhY2Nlc3NfdG9rZW5cbiAgICBQcm9wZWxsZXItPj4rQWRtaW46IC9saXN0X3Byb2plY3RzP3Rva2VuPWFjY2Vzc190b2tlbiIsIm1lcm1haWQiOnsidGhlbWUiOiJuZXV0cmFsIn0sInVwZGF0ZUVkaXRvciI6ZmFsc2V9
:target: https://mermaid-js.github.io/mermaid-live-editor/#/edit/eyJjb2RlIjoic2VxdWVuY2VEaWFncmFtXG4gICAgUHJvcGVsbGVyLT4-K0V4dGVybmFsIEF1dGhvcml6YXRpb24gU2VydmVyOiAvdG9rZW4_Y2xpZW50X2NyZWRzJnNjb3BlPWh0dHBzOi8vYWRtaW4vXG4gICAgRXh0ZXJuYWwgQXV0aG9yaXphdGlvbiBTZXJ2ZXItPj4tUHJvcGVsbGVyOiBhY2Nlc3NfdG9rZW5cbiAgICBQcm9wZWxsZXItPj4rQWRtaW46IC9saXN0X3Byb2plY3RzP3Rva2VuPWFjY2Vzc190b2tlbiIsIm1lcm1haWQiOnsidGhlbWUiOiJuZXV0cmFsIn0sInVwZGF0ZUVkaXRvciI6ZmFsc2V9
:width: 600
:alt: Service Authentication Swimlane
User Authentication in other clients (e.g. Cli) using OAuth2-Pkce
==================================================================
Users accessing backend services through Cli should be able to use OAuth2-Pkce flow to authenticate (in a browser) to the Idp and be issued
an access_token valid to communicate with the intended backend service on behalf of the user.
Using FlyteAdmin's builtin Authorization Server:
.. image:: https://mermaid.ink/img/eyJjb2RlIjoic2VxdWVuY2VEaWFncmFtXG4lJXtjb25maWc6IHsgJ2ZvbnRGYW1pbHknOiAnTWVubG8nLCAnZm9udFNpemUnOiAxMCwgJ2ZvbnRXZWlnaHQnOiAxMDB9IH0lJVxuICAgIGF1dG9udW1iZXJcbiAgICBVc2VyLT4-K0NsaTogZmx5dGVjdGwgbGlzdC1wcm9qZWN0c1xuICAgIENsaS0-PitBZG1pbjogYWRtaW4vY2xpZW50LWNvbmZpZ1xuICAgIEFkbWluLT4-LUNsaTogQ2xpZW50X2lkPTxhYmM-LCAuLi5cbiAgICBDbGktPj4rQnJvd3NlcjogL29hdXRoMi9hdXRob3JpemU_cGtjZSZjb2RlX2NoYWxsZW5nZSxjbGllbnRfaWQsc2NvcGVcbiAgICBCcm93c2VyLT4-K0FkbWluOiAvb2F1dGgyL2F1dGhvcml6ZT9wa2NlLi4uXG4gICAgQWRtaW4tPj4tQnJvd3NlcjogMzAyIGlkcC5jb20vbG9naW5cbiAgICBOb3RlIG92ZXIgQnJvd3NlcixBZG1pbjogVGhlIHByaW9yIE9wZW5JRCBDb25uZWN0IGZsb3dcbiAgICBCcm93c2VyLT4-K0FkbWluOiBhZG1pbi9sb2dnZWRfaW5cbiAgICBOb3RlIG92ZXIgQnJvd3NlcixBZG1pbjogUG90ZW50aWFsbHkgc2hvdyBjdXN0b20gY29uc2VudCBzY3JlZW5cbiAgICBBZG1pbi0-Pi1Ccm93c2VyOiBsb2NhbGhvc3QvP2F1dGhDb2RlPTxhYmM-XG4gICAgQnJvd3Nlci0-PitDbGk6IGxvY2FsaG9zdC9hdXRoQ29kZT08YWJjPlxuICAgIENsaS0-PitBZG1pbjogL3Rva2VuP2NvZGUsY29kZV92ZXJpZmllclxuICAgIEFkbWluLT4-LUNsaTogYWNjZXNzX3Rva2VuXG4gICAgQ2xpLT4-K0FkbWluOiAvcHJvamVjdHMvICsgYWNjZXNzX3Rva2VuXG4gICAgQWRtaW4tPj4tQ2xpOiBwcm9qZWN0MSwgcHJvamVjdDJcbiIsIm1lcm1haWQiOnsidGhlbWUiOiJuZXV0cmFsIn0sInVwZGF0ZUVkaXRvciI6ZmFsc2V9
:target: https://mermaid-js.github.io/mermaid-live-editor/#/edit/eyJjb2RlIjoic2VxdWVuY2VEaWFncmFtXG4lJXtjb25maWc6IHsgJ2ZvbnRGYW1pbHknOiAnTWVubG8nLCAnZm9udFNpemUnOiAxMCwgJ2ZvbnRXZWlnaHQnOiAxMDB9IH0lJVxuICAgIGF1dG9udW1iZXJcbiAgICBVc2VyLT4-K0NsaTogZmx5dGVjdGwgbGlzdC1wcm9qZWN0c1xuICAgIENsaS0-PitBZG1pbjogYWRtaW4vY2xpZW50LWNvbmZpZ1xuICAgIEFkbWluLT4-LUNsaTogQ2xpZW50X2lkPTxhYmM-LCAuLi5cbiAgICBDbGktPj4rQnJvd3NlcjogL29hdXRoMi9hdXRob3JpemU_cGtjZSZjb2RlX2NoYWxsZW5nZSxjbGllbnRfaWQsc2NvcGVcbiAgICBCcm93c2VyLT4-K0FkbWluOiAvb2F1dGgyL2F1dGhvcml6ZT9wa2NlLi4uXG4gICAgQWRtaW4tPj4tQnJvd3NlcjogMzAyIGlkcC5jb20vbG9naW5cbiAgICBOb3RlIG92ZXIgQnJvd3NlcixBZG1pbjogVGhlIHByaW9yIE9wZW5JRCBDb25uZWN0IGZsb3dcbiAgICBCcm93c2VyLT4-K0FkbWluOiBhZG1pbi9sb2dnZWRfaW5cbiAgICBOb3RlIG92ZXIgQnJvd3NlcixBZG1pbjogUG90ZW50aWFsbHkgc2hvdyBjdXN0b20gY29uc2VudCBzY3JlZW5cbiAgICBBZG1pbi0-Pi1Ccm93c2VyOiBsb2NhbGhvc3QvP2F1dGhDb2RlPTxhYmM-XG4gICAgQnJvd3Nlci0-PitDbGk6IGxvY2FsaG9zdC9hdXRoQ29kZT08YWJjPlxuICAgIENsaS0-PitBZG1pbjogL3Rva2VuP2NvZGUsY29kZV92ZXJpZmllclxuICAgIEFkbWluLT4-LUNsaTogYWNjZXNzX3Rva2VuXG4gICAgQ2xpLT4-K0FkbWluOiAvcHJvamVjdHMvICsgYWNjZXNzX3Rva2VuXG4gICAgQWRtaW4tPj4tQ2xpOiBwcm9qZWN0MSwgcHJvamVjdDJcbiIsIm1lcm1haWQiOnsidGhlbWUiOiJuZXV0cmFsIn0sInVwZGF0ZUVkaXRvciI6ZmFsc2V9
:width: 600
:alt: CLI Authentication with Admin's own Authorization Server
Using an External Authorization Server:
.. image:: https://mermaid.ink/img/eyJjb2RlIjoic2VxdWVuY2VEaWFncmFtXG4lJXtjb25maWc6IHsgJ2ZvbnRGYW1pbHknOiAnTWVubG8nLCAnZm9udFNpemUnOiAxMCwgJ2ZvbnRXZWlnaHQnOiAxMDB9IH0lJVxuICAgIGF1dG9udW1iZXJcbiAgICBVc2VyLT4-K0NsaTogZmx5dGVjdGwgbGlzdC1wcm9qZWN0c1xuICAgIENsaS0-PitBZG1pbjogYWRtaW4vY2xpZW50LWNvbmZpZ1xuICAgIEFkbWluLT4-LUNsaTogQ2xpZW50X2lkPTxhYmM-LCAuLi5cbiAgICBDbGktPj4rQnJvd3NlcjogL29hdXRoMi9hdXRob3JpemU_cGtjZSZjb2RlX2NoYWxsZW5nZSxjbGllbnRfaWQsc2NvcGVcbiAgICBCcm93c2VyLT4-K0V4dGVybmFsSWRwOiAvb2F1dGgyL2F1dGhvcml6ZT9wa2NlLi4uXG4gICAgRXh0ZXJuYWxJZHAtPj4tQnJvd3NlcjogMzAyIGlkcC5jb20vbG9naW5cbiAgICBOb3RlIG92ZXIgQnJvd3NlcixFeHRlcm5hbElkcDogVGhlIHByaW9yIE9wZW5JRCBDb25uZWN0IGZsb3dcbiAgICBCcm93c2VyLT4-K0V4dGVybmFsSWRwOiAvbG9nZ2VkX2luXG4gICAgTm90ZSBvdmVyIEJyb3dzZXIsRXh0ZXJuYWxJZHA6IFBvdGVudGlhbGx5IHNob3cgY3VzdG9tIGNvbnNlbnQgc2NyZWVuXG4gICAgRXh0ZXJuYWxJZHAtPj4tQnJvd3NlcjogbG9jYWxob3N0Lz9hdXRoQ29kZT08YWJjPlxuICAgIEJyb3dzZXItPj4rQ2xpOiBsb2NhbGhvc3QvYXV0aENvZGU9PGFiYz5cbiAgICBDbGktPj4rRXh0ZXJuYWxJZHA6IC90b2tlbj9jb2RlLGNvZGVfdmVyaWZpZXJcbiAgICBFeHRlcm5hbElkcC0-Pi1DbGk6IGFjY2Vzc190b2tlblxuICAgIENsaS0-PitBZG1pbjogL3Byb2plY3RzLyArIGFjY2Vzc190b2tlblxuICAgIEFkbWluLT4-LUNsaTogcHJvamVjdDEsIHByb2plY3QyXG4iLCJtZXJtYWlkIjp7InRoZW1lIjoibmV1dHJhbCJ9LCJ1cGRhdGVFZGl0b3IiOmZhbHNlfQ
:target: https://mermaid-js.github.io/mermaid-live-editor/#/edit/eyJjb2RlIjoic2VxdWVuY2VEaWFncmFtXG4lJXtjb25maWc6IHsgJ2ZvbnRGYW1pbHknOiAnTWVubG8nLCAnZm9udFNpemUnOiAxMCwgJ2ZvbnRXZWlnaHQnOiAxMDB9IH0lJVxuICAgIGF1dG9udW1iZXJcbiAgICBVc2VyLT4-K0NsaTogZmx5dGVjdGwgbGlzdC1wcm9qZWN0c1xuICAgIENsaS0-PitBZG1pbjogYWRtaW4vY2xpZW50LWNvbmZpZ1xuICAgIEFkbWluLT4-LUNsaTogQ2xpZW50X2lkPTxhYmM-LCAuLi5cbiAgICBDbGktPj4rQnJvd3NlcjogL29hdXRoMi9hdXRob3JpemU_cGtjZSZjb2RlX2NoYWxsZW5nZSxjbGllbnRfaWQsc2NvcGVcbiAgICBCcm93c2VyLT4-K0V4dGVybmFsSWRwOiAvb2F1dGgyL2F1dGhvcml6ZT9wa2NlLi4uXG4gICAgRXh0ZXJuYWxJZHAtPj4tQnJvd3NlcjogMzAyIGlkcC5jb20vbG9naW5cbiAgICBOb3RlIG92ZXIgQnJvd3NlcixFeHRlcm5hbElkcDogVGhlIHByaW9yIE9wZW5JRCBDb25uZWN0IGZsb3dcbiAgICBCcm93c2VyLT4-K0V4dGVybmFsSWRwOiAvbG9nZ2VkX2luXG4gICAgTm90ZSBvdmVyIEJyb3dzZXIsRXh0ZXJuYWxJZHA6IFBvdGVudGlhbGx5IHNob3cgY3VzdG9tIGNvbnNlbnQgc2NyZWVuXG4gICAgRXh0ZXJuYWxJZHAtPj4tQnJvd3NlcjogbG9jYWxob3N0Lz9hdXRoQ29kZT08YWJjPlxuICAgIEJyb3dzZXItPj4rQ2xpOiBsb2NhbGhvc3QvYXV0aENvZGU9PGFiYz5cbiAgICBDbGktPj4rRXh0ZXJuYWxJZHA6IC90b2tlbj9jb2RlLGNvZGVfdmVyaWZpZXJcbiAgICBFeHRlcm5hbElkcC0-Pi1DbGk6IGFjY2Vzc190b2tlblxuICAgIENsaS0-PitBZG1pbjogL3Byb2plY3RzLyArIGFjY2Vzc190b2tlblxuICAgIEFkbWluLT4-LUNsaTogcHJvamVjdDEsIHByb2plY3QyXG4iLCJtZXJtYWlkIjp7InRoZW1lIjoibmV1dHJhbCJ9LCJ1cGRhdGVFZGl0b3IiOmZhbHNlfQ
:width: 600
:alt: CLI Authentication with an external Authorization Server
Identity Providers Support
==========================
+-----------------+--------+-------------+---------------------+----------+-------+----------+--------+
| Feature | Okta | Google free | GC Identity Service | Azure AD | Auth0 | KeyCloak | Github |
+=================+========+=============+=====================+==========+=======+==========+========+
| OpenIdConnect | Yes | Yes | Yes | Yes | Yes | Yes | No |
+-----------------+--------+-------------+---------------------+----------+-------+----------+--------+
| Custom RP | Yes | No | Yes | Yes | ? | Yes | No |
+-----------------+--------+-------------+---------------------+----------+-------+----------+--------+
.. _auth-setup:
####################
Authentication Setup
####################
*****************
IdP Configuration
*****************
Flyte Admin requires that the application in your identity provider be configured as a web client (i.e. with a client secret). We recommend allowing the application to be issued a refresh token to avoid interrupting the user's flow by frequently redirecting to the IdP.
*************************
Flyte Admin Configuration
*************************
Please refer to the `inline documentation <https://github.com/flyteorg/flyteadmin/blob/eaca2fb0e6018a2e261e9e2da8998906477cadb5/pkg/auth/config/config.go>`_ on the ``Config`` object in the ``auth`` package for a discussion on the settings required.
**********************
Example Configurations
**********************
Below are some canonical examples of how to set up some of the common IdPs to secure your Flyte services. OpenID Connect enables users to authenticate, in the
browser, with an existing IdP. Flyte also allows connecting to an external OAuth2 Authorization Server to allow centrally managed third party app access.
OpenID Connect
===============
OpenID Connect allows users to authenticate to Flyte in their browser using a familiar authentication provider (perhaps an organization-wide configured IdP).
Flyte supports connecting with external OIdC providers. Here are some examples for how to set these up:
Google OpenID Connect
=====================
Follow `Google Docs <https://developers.google.com/identity/protocols/oauth2/openid-connect>`__ on how to configure the IdP for OpenIDConnect.
.. note::
Make sure to create an OAuth2 Client Credential. The `client_id` and `client_secret` will be needed in the following
steps.
Okta OpenID Connect
===================
Okta supports OpenID Connect protocol and the creation of custom OAuth2 Authorization Servers, allowing it to act as both the user and apps IdP.
It offers more detailed control on access policies, user consent, and app management.
1. If you don't already have an Okta account, sign up for one `here <https://developer.okta.com/signup/>`__.
2. Create an app (choose Web for the platform) and OpenID Connect for the sign-on method.
3. Add Login redirect URIs (e.g. http://localhost:30081/callback for sandbox or ``https://<your deployment url>/callback``)
4. *Optional*: Add logout redirect URIs (e.g. http://localhost:30081/logout for sandbox)
5. Write down the Client ID and Client Secret
KeyCloak OpenID Connect
=======================
`KeyCloak <https://www.keycloak.org/>`__ is an open source solution for authentication, it supports both OpenID Connect and OAuth2 protocols (among others).
KeyCloak can be configured to be both the OpenID Connect and OAuth2 Authorization Server provider for Flyte.
1. Store the `client_secret` in a k8s secret as follows:
.. prompt:: bash
kubectl edit secret -n flyte flyte-admin-auth
Add a new key under `stringData`:
.. code-block:: yaml
stringData:
oidc_client_secret: <client_secret> from the previous step
data:
...
Save and close your editor.
2. Edit FlyteAdmin config to add `client_id` and configure auth as follows:
.. prompt:: bash
kubectl get deploy -n flyte flyteadmin -o yaml | grep "name: flyte-admin-config"
This will output the name of the config map where the `client_id` needs to go.
.. prompt:: bash
kubectl edit configmap -n flyte <the name of the config map from previous command>
Follow the inline comments to make the necessary changes:
.. code-block:: yaml
server:
...
security:
secure: false
# 1. Enable Auth by turning useAuth to true
useAuth: true
...
auth:
userAuth:
openId:
# 2. Put the URL of the OpenID Connect provider.
# baseUrl: https://accounts.google.com # Uncomment for Google
baseUrl: https://dev-14186422.okta.com/oauth2/default # Okta with a custom Authorization Server
scopes:
- profile
- openid
# - offline_access # Uncomment if OIdC supports issuing refresh tokens.
# 3. Replace with the client ID created for Flyte.
clientId: 0oakkheteNjCMERst5d6
Save and exit your editor.
3. Restart `flyteadmin` for the changes to take effect:
.. prompt:: bash
kubectl rollout restart deployment/flyteadmin -n flyte
***************************
OAuth2 Authorization Server
***************************
An OAuth2 Authorization Server allows external clients to request to authenticate and act on behalf of users (or as their own identities). Having
an OAuth2 Authorization Server enables Flyte administrators control over which apps can be installed and what scopes they are allowed to request or be granted (i.e. what privileges can they assume).
Flyte comes with a built-in authorization server that can be statically configured with a set of clients to request and act on behalf of the user.
The default clients are defined `here <https://github.com/flyteorg/flyteadmin/pull/168/files#diff-1267ff8bd9146e1c0ff22a9e9d53cfc56d71c1d47fed9905f95ed4bddf930f8eR74-R100>`__
and the corresponding section can be modified through configs.
To set up an external OAuth2 Authorization Server, please follow the instructions below:
Okta IdP
========
1. Under security -> API, click `Add Authorization Server`. Set the audience to the public URL of flyte admin (e.g. https://flyte.mycompany.io/).
2. Under `Access Policies`, click `Add New Access Policy` and walk through the wizard to allow access to the authorization server.
3. Under `Scopes`, click `Add Scope`. Set the name to `all` (required) and check `Require user consent for this scope` (recommended).
4. Create 2 apps (for flytectl and flytepropeller) to enable these clients to communicate with the service.
Flytectl should be created as a `native client`.
FlytePropeller should be created as an `OAuth Service` and note the client ID and client Secrets provided.
KeyCloak IdP
============
`KeyCloak <https://www.keycloak.org/>`__ is an open source solution for authentication, it supports both OpenID Connect and OAuth2 protocols (among others).
KeyCloak can be configured to be both the OpenID Connect and OAuth2 Authorization Server provider for flyte.
Apply Configuration
===================
1. It is possible to direct Flyte admin to use an external authorization server. To do so, edit the same config map once more and follow these changes:
.. code-block:: yaml
auth:
appAuth:
# 1. Choose External if you will use an external Authorization Server (e.g. a Custom Authorization server in Okta)
# Choose Self (or omit the value) to use Flyte Admin's internal (albeit limited) Authorization Server.
authServerType: External
# 2. Optional: Set external auth server baseUrl if different from OpenId baseUrl.
externalAuthServer:
baseUrl: https://dev-14186422.okta.com/oauth2/auskngnn7uBViQq6b5d6
thirdPartyConfig:
flyteClient:
# 3. Replace with a new Native Client ID provisioned in the custom authorization server
clientId: flytectl
redirectUri: https://localhost:53593/callback
# 4. "all" is a required scope and must be configured in the custom authorization server
scopes:
- offline
- all
userAuth:
openId:
baseUrl: https://dev-14186422.okta.com/oauth2/auskngnn7uBViQq6b5d6 # Okta with a custom Authorization Server
scopes:
- profile
- openid
# - offline_access # Uncomment if OIdC supports issuing refresh tokens.
clientId: 0oakkheteNjCMERst5d6
1. Store flyte propeller's `client_secret` in a k8s secret as follows:
.. prompt:: bash
kubectl edit secret -n flyte flyte-propeller-auth
Add a new key under `stringData`:
.. code-block:: yaml
stringData:
client_secret: <client_secret> from the previous step
data:
...
Save and close your editor.
2. Edit FlytePropeller config to add `client_id` and configure auth as follows:
.. prompt:: bash
kubectl get deploy -n flyte flytepropeller -o yaml | grep "name: flyte-propeller-config"
This will output the name of the config map where the `client_id` needs to go.
.. prompt:: bash
kubectl edit configmap -n flyte <the name of the config map from previous command>
Follow the inline comments to make the necessary changes:
.. code-block:: yaml
admin:
# 1. Replace with the client_id provided by the OAuth2 Authorization Server above.
clientId: flytepropeller
Close the editor
3. Restart `flytepropeller` for the changes to take effect:
.. prompt:: bash
kubectl rollout restart deployment/flytepropeller -n flyte
***************************
Continuous Integration - CI
***************************
If your organization does any automated registration, then you'll need to authenticate with the `basic authentication <https://tools.ietf.org/html/rfc2617>`_ flow (username and password effectively). After retrieving an access token from the IDP, you can send it along to Flyte Admin as usual.
Flytekit configuration variables are designed to automatically look up values from relevant environment variables. However, to aid with continuous integration use-cases, Flytekit configuration can also reference other environment variables.
For instance, if your CI system is not capable of setting custom environment variables like ``FLYTE_CREDENTIALS_CLIENT_SECRET`` but does set the necessary settings under a different variable, you may use ``export FLYTE_CREDENTIALS_CLIENT_SECRET_FROM_ENV_VAR=OTHER_ENV_VARIABLE`` to redirect the lookup. A ``FLYTE_CREDENTIALS_CLIENT_SECRET_FROM_FILE`` redirect is available as well, where the value should be the full path to the file containing the value for the configuration setting, in this case, the client secret. We found this redirect behavior necessary when setting up registration within our own CI pipelines.
The following is a listing of the Flytekit configuration values we set in CI, along with a brief explanation.
* ``FLYTE_CREDENTIALS_CLIENT_ID`` and ``FLYTE_CREDENTIALS_CLIENT_SECRET``
When using basic authentication, this is the username and password.
* ``export FLYTE_CREDENTIALS_AUTH_MODE=basic``
This tells the SDK to use basic authentication. If not set, Flytekit will assume you want to use the standard OAuth based three-legged flow.
* ``export FLYTE_CREDENTIALS_AUTHORIZATION_METADATA_KEY=text``
At Lyft, the value is set to conform to this `header config <https://github.com/flyteorg/flyteadmin/blob/eaca2fb0e6018a2e261e9e2da8998906477cadb5/pkg/auth/config/config.go#L53>`_ on the Admin side.
* ``export FLYTE_CREDENTIALS_SCOPE=text``
When using basic authentication, you'll need to specify a scope to the IDP (instead of ``openid``, which is only for OAuth). Set that here.
* ``export FLYTE_PLATFORM_AUTH=True``
Set this to force Flytekit to use authentication, even if not required by Admin. This is useful as you're rolling out the requirement.
.. _migrating-auth-config:
####################################
Migrating Your Authentication Config
####################################
Using Okta as an example, you would have previously seen something like the following:
On the Okta side:
=================
* An Application (OpenID Connect Web) for Flyte Admin itself (e.g. **0oal5rch46pVhCGF45d6**).
* An Application (OpenID Native app) for Flyte-cli/flytectl (e.g. **0oal62nxuD6OSFSRq5d6**).
These two applications would be assigned to the relevant users.
* An Application (Web) for Flyte Propeller (e.g. **0abc5rch46pVhCGF9876**).
This application would either use the default Authorization server, or you would create a new one.
On the Admin side:
==================
.. code-block:: yaml
server:
# ... other settings
security:
secure: false
useAuth: true
allowCors: true
allowedOrigins:
- "*"
allowedHeaders:
- "Content-Type"
oauth:
baseUrl: https://dev-62129345.okta.com/oauth2/default/
scopes:
- profile
- openid
- email
claims:
iss: https://dev-62129345.okta.com/oauth2/default
aud: 0oal5rch46pVhCGF45d6
clientId: 0oal5rch46pVhCGF45d6
clientSecretFile: "/Users/ytong/etc/secrets/oauth/secret"
authorizeUrl: "https://dev-62129345.okta.com/oauth2/default/v1/authorize"
tokenUrl: "https://dev-62129345.okta.com/oauth2/default/v1/token"
callbackUrl: "http://localhost:8088/callback"
cookieHashKeyFile: "/Users/ytong/etc/secrets/hashkey/hashkey"
cookieBlockKeyFile: "/Users/ytong/etc/secrets/blockkey/blockkey"
redirectUrl: "/api/v1/projects"
thirdPartyConfig:
flyteClient:
clientId: 0oal62nxuD6OSFSRq5d6
redirectUri: http://localhost:12345/callback
From the Flyte-cli side, these two settings were needed:
.. code-block:: bash
FLYTE_PLATFORM_HTTP_URL=http://localhost:8088 FLYTE_CREDENTIALS_CLIENT_ID=0oal62nxuD6OSFSRq5d6 flyte-cli ...
**FLYTE_PLATFORM_HTTP_URL** is used because **flyte-cli** uses only gRPC to communicate with Admin. It needs to know the HTTP port (which Admin hosts on a different port because of limitations of the
grpc-gateway library). **flyte-cli** uses this setting to talk to **/.well-known/oauth-authorization-server** to retrieve information regarding the auth endpoints. Previously this redirected to the
Okta Authorization Server's metadata endpoint. With this change, Admin now hosts its own (even if still using the external Authorization Service).
After version `0.13.0 <https://github.com/flyteorg/flyte/tree/v0.13.0>`__ of the platform, you can still use the IdP as the Authorization Server if you so choose. That configuration would now become:
.. code-block:: yaml
server:
# ... other settings
security:
secure: false
useAuth: true
allowCors: true
allowedOrigins:
- "*"
allowedHeaders:
- "Content-Type"
auth:
authorizedUris:
# This should point at your public http Uri.
- https://flyte.mycompany.com
# This will be used by internal services in the same namespace as flyteadmin
- http://flyteadmin:80
# This will be used by internal services in the same cluster but different namespaces
- http://flyteadmin.flyte.svc.cluster.local:80
userAuth:
openId:
# Put the URL of the OpenID Connect provider.
baseUrl: https://dev-62129345.okta.com/oauth2/default # Okta with a custom Authorization Server
scopes:
- profile
- openid
- offline_access # Uncomment if OIdC supports issuing refresh tokens.
# Replace with the client id created for Flyte.
clientId: 0oal5rch46pVhCGF45d6
appAuth:
# External delegates app auth responsibilities to an external authorization server, Internal means Flyte Admin does it itself
authServerType: External
thirdPartyConfig:
flyteClient:
clientId: 0oal62nxuD6OSFSRq5d6
redirectUri: http://localhost:12345/callback
scopes:
- all
- offline
Specifically,
* The original **oauth** section has been moved two levels higher into its own section and renamed **auth** but enabling/disabling of authentication remains in the old location.
* Secrets by default will now be looked up in **/etc/secrets**. Use the following command to generate them:
.. code-block:: bash
flyteadmin secrets init -p /etc/secrets
This will generate the new cookie hash/block keys, as well as other secrets Admin needs to run the Authorization server.
* The **clientSecretFile** has been moved to **/etc/secrets/oidc_client_secret** so move that there.
* **claims** has been removed, just delete that.
* **authorizeUrl** and **tokenUrl** are no longer necessary.
* The **baseUrl** for the external Authorization Server is now in the **appAuth** section.
* The **thirdPartyConfig** has been moved to **appAuth** as well.
* **redirectUrl** has been defaulted to **/console**. If that's the value you want, then you no longer need this setting.
From Propeller side, you might have a configuration section that looks like this:
.. code-block:: yaml
admin:
endpoint: dns:///mycompany.domain.com
useAuth: true
clientId: flytepropeller
clientSecretLocation: /etc/secrets/client_secret
tokenUrl: https://demo.nuclyde.io/oauth2/token
scopes:
- all
This can now be simplified to:
.. code-block:: yaml
admin:
endpoint: dns:///mycompany.domain.com
# If you are using the built-in authorization server, you can delete the following two lines:
clientId: flytepropeller
clientSecretLocation: /etc/secrets/client_secret
Specifically,
* **useAuth** is deprecated and will be removed in a future version. Auth requirement will be discovered through an anonymous admin discovery call.
* **tokenUrl** and **scopes** will also be discovered through a metadata call.
* **clientId** and **clientSecretLocation** have defaults that work out of the box with the built-in authorization server (e.g. if you setup Google OpenID Connect).
.. _auth-references:
##########
References
##########
This collection of RFCs may be helpful to those who wish to investigate the implementation in more depth.
* `OAuth2 RFC 6749 <https://tools.ietf.org/html/rfc6749>`_
* `OAuth Discovery RFC 8414 <https://tools.ietf.org/html/rfc8414>`_
* `PKCE RFC 7636 <https://tools.ietf.org/html/rfc7636>`_
* `JWT RFC 7519 <https://tools.ietf.org/html/rfc7519>`_
"""
| 58.901734 | 1,298 | 0.770396 |
832ce2a6fdd9b1253e118982e0407dbebb7d9657 | 2,926 | py | Python | stellar_sdk/xdr/stellar_value_ext.py | MartinThoma/py-stellar-base | 07ab28cde7a7040f2262b224f9af8a3416c0e5ab | [
"Apache-2.0"
] | 1 | 2021-07-06T01:34:08.000Z | 2021-07-06T01:34:08.000Z | stellar_sdk/xdr/stellar_value_ext.py | MartinThoma/py-stellar-base | 07ab28cde7a7040f2262b224f9af8a3416c0e5ab | [
"Apache-2.0"
] | 36 | 2021-08-23T17:31:52.000Z | 2022-03-28T01:39:00.000Z | stellar_sdk/xdr/stellar_value_ext.py | MartinThoma/py-stellar-base | 07ab28cde7a7040f2262b224f9af8a3416c0e5ab | [
"Apache-2.0"
] | 1 | 2021-07-06T01:33:40.000Z | 2021-07-06T01:33:40.000Z | # This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from xdrlib import Packer, Unpacker
from .ledger_close_value_signature import LedgerCloseValueSignature
from .stellar_value_type import StellarValueType
from ..exceptions import ValueError
__all__ = ["StellarValueExt"]
class StellarValueExt:
    """Union wrapper for the ``ext`` field of the XDR ``StellarValue`` struct.

    The discriminant ``v`` selects the arm: ``STELLAR_VALUE_BASIC`` carries no
    payload, ``STELLAR_VALUE_SIGNED`` carries a :class:`LedgerCloseValueSignature`.

    XDR Source Code
    ----------------------------------------------------------------
    union switch (StellarValueType v)
    {
    case STELLAR_VALUE_BASIC:
        void;
    case STELLAR_VALUE_SIGNED:
        LedgerCloseValueSignature lcValueSignature;
    }
    ----------------------------------------------------------------
    """

    def __init__(
        self,
        v: StellarValueType,
        lc_value_signature: LedgerCloseValueSignature = None,
    ) -> None:
        # ``lc_value_signature`` is only meaningful (and then mandatory, see
        # ``pack``) when v == STELLAR_VALUE_SIGNED; otherwise it stays None.
        self.v = v
        self.lc_value_signature = lc_value_signature

    def pack(self, packer: Packer) -> None:
        """Serialize this union into *packer*.

        Raises:
            ValueError: if the SIGNED arm is selected but
                ``lc_value_signature`` is None.
        """
        self.v.pack(packer)
        if self.v == StellarValueType.STELLAR_VALUE_BASIC:
            return  # void arm: only the discriminant is written
        if self.v == StellarValueType.STELLAR_VALUE_SIGNED:
            if self.lc_value_signature is None:
                raise ValueError("lc_value_signature should not be None.")
            self.lc_value_signature.pack(packer)
            return

    @classmethod
    def unpack(cls, unpacker: Unpacker) -> "StellarValueExt":
        """Deserialize a :class:`StellarValueExt` from *unpacker*."""
        v = StellarValueType.unpack(unpacker)
        if v == StellarValueType.STELLAR_VALUE_BASIC:
            return cls(v)
        if v == StellarValueType.STELLAR_VALUE_SIGNED:
            lc_value_signature = LedgerCloseValueSignature.unpack(unpacker)
            if lc_value_signature is None:
                raise ValueError("lc_value_signature should not be None.")
            return cls(v, lc_value_signature=lc_value_signature)
        # Unknown discriminant: keep the raw value with no payload.
        return cls(v)

    def to_xdr_bytes(self) -> bytes:
        """Return the raw XDR byte serialization of this union."""
        packer = Packer()
        self.pack(packer)
        return packer.get_buffer()

    @classmethod
    def from_xdr_bytes(cls, xdr: bytes) -> "StellarValueExt":
        """Build an instance from raw XDR bytes."""
        unpacker = Unpacker(xdr)
        return cls.unpack(unpacker)

    def to_xdr(self) -> str:
        """Return the base64-encoded XDR representation."""
        xdr_bytes = self.to_xdr_bytes()
        return base64.b64encode(xdr_bytes).decode()

    @classmethod
    def from_xdr(cls, xdr: str) -> "StellarValueExt":
        """Build an instance from a base64-encoded XDR string."""
        xdr_bytes = base64.b64decode(xdr.encode())
        return cls.from_xdr_bytes(xdr_bytes)

    def __eq__(self, other: object):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.v == other.v and self.lc_value_signature == other.lc_value_signature

    def __str__(self):
        out = []
        out.append(f"v={self.v}")
        if self.lc_value_signature is not None:
            out.append(f"lc_value_signature={self.lc_value_signature}")
        # BUG FIX: the original interpolated ``[', '.join(out)]`` — a
        # one-element *list* — so the repr showed a Python list literal with
        # quotes, e.g. <StellarValueExt ["v=..."]>. Join directly inside the
        # intended literal brackets instead.
        return f"<StellarValueExt [{', '.join(out)}]>"
| 33.25 | 88 | 0.619959 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.