id: 181226
from unittest import mock
import pytest
from django.contrib.auth import authenticate
from core import cms_slugs
from directory_sso_api_client import sso_api_client
from sso import models
from tests.helpers import reload_urlconf
@pytest.fixture
def sso_request(rf, settings, client):
request = rf.get('/')
request.COOKIES[settings.SSO_SESSION_COOKIE] = '123'
request.session = client.session
return request
@mock.patch.object(sso_api_client.user, 'get_session_user', wraps=sso_api_client.user.get_session_user)
def test_auth_ok(mock_get_session_user, sso_request, requests_mock, settings):
settings.AUTHENTICATION_BACKENDS = ['sso.backends.BusinessSSOUserBackend']
requests_mock.get(
'http://sso.trade.great:8003/api/v1/session-user/',
json={
'id': 1,
'email': '<EMAIL>',
'hashed_uuid': 'thing',
'user_profile': {
'first_name': 'Jim',
'last_name': 'Bloggs',
'job_title': 'Dev',
'mobile_phone_number': '555',
                'profile_image': 'https://image.com/image.png',
},
},
)
user = authenticate(sso_request)
assert isinstance(user, models.BusinessSSOUser)
assert user.pk == 1
assert user.id == 1
assert user.email == '<EMAIL>'
assert user.hashed_uuid == 'thing'
assert user.has_user_profile is True
assert user.first_name == 'Jim'
assert user.last_name == 'Bloggs'
assert user.job_title == 'Dev'
assert user.mobile_phone_number == '555'
    assert user.profile_image == 'https://image.com/image.png'
@pytest.mark.django_db
@mock.patch('authbroker_client.backends.AuthbrokerBackend.authenticate')
@mock.patch('directory_sso_api_client.backends.SSOUserBackend.authenticate')
@pytest.mark.parametrize(
'url,expected_staff_call_count,expected_business_call_count',
(
('/django-admin/', 1, 0),
('/admin/', 1, 0),
(cms_slugs.DASHBOARD_URL, 0, 1),
),
)
def test_sso_backends_admin_url_handling(
mock_business_auth, mock_staff_auth, url, expected_staff_call_count, expected_business_call_count, settings, rf
):
settings.FEATURE_ENFORCE_STAFF_SSO_ENABLED = True
settings.AUTHENTICATION_BACKENDS = settings.AUTHENTICATION_BACKENDS + ['sso.backends.StaffSSOUserBackend']
reload_urlconf()
request = rf.get(url)
authenticate(request)
assert mock_business_auth.call_count == expected_business_call_count
assert mock_staff_auth.call_count == expected_staff_call_count
id: 181242
import numpy as np
from typing import Union
def cleanup_delete(coords_list_in: np.ndarray,
eps_grid: float = 1e-4,
cyclic_points: bool = True,
check_inline: bool = True,
) -> np.ndarray:
"""
From the passed coordinate list, returns a numpy array of bools of the same length where each value indicates
    whether that point should be deleted from coords_list_in.
Points that should be removed are either adjacent points that are the same, or points that are in a line.
Parameters
----------
coords_list_in : np.ndarray
The list of x-y coordinates composing a polygon shape
    eps_grid : float
        grid resolution below which points are considered to be the same
cyclic_points : bool
True if the coords_list forms a closed polygon. If True, the start/end points might be removed.
        False if the coords_list is not a closed polygon (i.e., a path). If False, the start and end points will never be
removed.
check_inline : bool
True [default] to check for and remove center points that are in a line with their two adjacent neighbors.
False to skip this check
Returns
-------
delete_array : np.ndarray
Numpy array of bools telling whether to delete the coordinate or not
"""
coord_set_next = np.roll(coords_list_in, -1, axis=0)
coord_set_prev = np.roll(coords_list_in, 1, axis=0)
vec_to_next = coord_set_next - coords_list_in
vec_from_prev = coords_list_in - coord_set_prev
dx_next = vec_to_next[:, 0]
dy_next = vec_to_next[:, 1]
dx_prev = vec_from_prev[:, 0]
dy_prev = vec_from_prev[:, 1]
dx_next_abs = np.abs(dx_next)
dy_next_abs = np.abs(dy_next)
dx_prev_abs = np.abs(dx_prev)
dy_prev_abs = np.abs(dy_prev)
same_as_next = np.logical_and(dx_next_abs < eps_grid, dy_next_abs < eps_grid)
if check_inline:
same_as_prev = np.logical_and(dx_prev_abs < eps_grid, dy_prev_abs < eps_grid)
diff_from_lr = np.logical_not(np.logical_or(same_as_next, same_as_prev))
"""
        If the x/y coords were exact, we would have dy2_acc/dx2_acc = dy1_acc/dx1_acc,
        equivalent to dx1_acc * dy2_acc = dx2_acc * dy1_acc.
        Because of floating-point inaccuracy, we instead have
|dx1 * dy2 - dx2 * dy1| = |(dx1_acc + err1) * (dy2_acc + err2) - (dx2_acc + err3) * (dy1_acc + err4)|
~ |dx1 * err2 + dy2 * err1 - dx2 * err4 - dy1 * err3|
< sum(|dx1|, |dx2|, |dy1|, |dy2|) * |err_max|
# error_abs = np.abs(dx_l * dy_r - dx_r * dy_l)
# in_line = error_abs < eps_grid * (dx_l_abs + dy_l_abs + dx_r_abs + dy_r_abs)
"""
in_line = np.logical_or(np.logical_and(dx_next_abs < eps_grid, dx_prev_abs < eps_grid),
np.logical_and(dy_next_abs < eps_grid, dy_prev_abs < eps_grid))
in_line_and_diff_from_lr = np.logical_and(in_line, diff_from_lr)
else:
# If not checking for inline points, default all values of inline check to false
in_line_and_diff_from_lr = np.full_like(same_as_next, False)
# situation 1: the point is the same with its left neighbor
# situation 2: the point is not the same with its neighbors, but it is in a line with them
delete_array = np.logical_or(same_as_next, in_line_and_diff_from_lr)
# If cleaning a path rather than a polygon, never delete the first or last point
if not cyclic_points:
delete_array[0] = False
delete_array[-1] = False
return delete_array
def coords_cleanup(coords_list: np.ndarray,
eps_grid: float = 1e-4,
cyclic_points: bool = True,
check_inline: bool = True,
) -> np.ndarray:
"""
    Clean up coordinates in the list that are redundant or harmful to subsequent geometry-manipulation functions.
Points that are cleaned are:
- Adjacent coincident points
- Collinear points (middle points removed)
Parameters
----------
coords_list : np.ndarray
list of coordinates that enclose a polygon
    eps_grid : float
        A size smaller than the resolution grid size. If the difference between the
        x/y coordinates of two points is smaller than this, the two points are
        treated as sharing the same x/y coordinate.
cyclic_points : bool
True [default] if the coords_list forms a closed polygon. If True, the start/end points might be removed.
        False if the coords_list is not a closed polygon (i.e., a path). If False, the start and end points will never be
removed.
check_inline : bool
True [default] to check for and remove center points that are in a line with their two adjacent neighbors.
False to skip this check
Returns
    -------
coords_set_out : np.ndarray
The cleaned coordinate set
"""
delete_array = cleanup_delete(coords_list, eps_grid=eps_grid,
cyclic_points=cyclic_points, check_inline=check_inline)
not_cleaned = np.sum(delete_array) > 0
    # in some cases a coordinate becomes collinear once a neighboring coord is deleted,
    # so loop until no coordinate is deleted during a pass
while not_cleaned:
select_array = np.logical_not(delete_array)
coords_list = coords_list[select_array]
delete_array = cleanup_delete(coords_list, eps_grid=eps_grid,
cyclic_points=cyclic_points, check_inline=check_inline)
not_cleaned = np.sum(delete_array) > 0
return coords_list
def create_polygon_from_path_and_width(points_list: np.ndarray,
width: Union[float, int],
eps: float = 1e-4
) -> np.ndarray:
"""
Given a path (a numpy array of 2-D points) and a width (constant along the path), return the set of points forming
the polygon.
Checks to see if the radius of curvature is smaller than half the width. If so, the polygon will be self
intersecting, so raise an error.
Does not perform any rounding/snapping of points to a grid.
Parameters
----------
points_list : np.ndarray
A numpy array of points (n x 2) representing the center of the path.
width : Union[float, int]
The width of the path
eps : float
The tolerance for determining whether two points are coincident.
Returns
-------
polygon_points : np.ndarray
The polygon formed by the center path and width.
"""
tangent_vec = np.gradient(points_list, axis=0)
tangent_normalized_vec = \
tangent_vec / np.tile(np.linalg.norm(tangent_vec, axis=1, keepdims=True), (1, 2)) * width/2
# Find the points using the perpendicular to tangent line
pts0 = points_list + np.column_stack([-1 * tangent_normalized_vec[:, 1], tangent_normalized_vec[:, 0]])
pts1 = points_list + np.column_stack([tangent_normalized_vec[:, 1], -1 * tangent_normalized_vec[:, 0]])
# Concatenate into a polygon
points_out = np.concatenate((pts0, np.flipud(pts1)), axis=0)
# Clean up the polygon
polygon_points = coords_cleanup(points_out, eps_grid=eps, cyclic_points=True)
return polygon_points
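if __name__ == "__main__":
    # Minimal self-check (made-up data, not part of the library): a unit square
    # with a redundant collinear midpoint on its bottom edge; coords_cleanup
    # should drop the midpoint and keep only the four corners.
    square = np.array([[0.0, 0.0], [0.5, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
    print(coords_cleanup(square))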
id: 181311
import logging
from typing import Dict
from threading import Lock
from prometheus_network_exporter.devices.basedevice import Device
__version__ = "1.1.2"
GLOBAL_GUARD: Lock = Lock()
CONNECTION_POOL: Dict[str, Device] = {}
COUNTER_DIR = ".tmp"
MAX_WAIT_SECONDS_BEFORE_SHUTDOWN = 60
MAX_WORKERS = 90
APP_LOGGER = logging.getLogger("network_exporter")
APP_LOGGER.setLevel(logging.INFO)
id: 181316
import sublime, sublime_plugin
compJS = [
("req.params\treq", "req.params"),
("req.query\treq", "req.query"),
("req.body\treq", "req.body"),
("req.route\treq", "req.route"),
("req.cookies\treq", "req.cookies"),
("req.singnedCookies\treq", "req.singnedCookies"),
("req.accepts\treq", "req.accepts($1)"),
("req.ip\treq", "req.ip"),
("req.path\treq", "req.path"),
("req.host\treq", "req.host"),
("req.xhr\treq", "req.xhr"),
("req.protocol\treq", "req.protocol"),
("req.secure\treq", "req.secure"),
("req.url\treq", "req.url"),
("req.originalUrl\treq", "req.originalUrl"),
("req.acceptedLanguages\treq", "req.acceptedLanguages"),
("formdata\tContent-Type", "application/x-www-form-urlendcoded"),
("json\tContent-Type", "application/json"),
("res.status\tres", "res.status(${1:code})"),
("res.redirect\tres", "res.redirect(${1:url})"),
("res.send\tres", "res.send(${1:body})"),
("res.jsonp\tres", "res.jsonp(${1:json})"),
("res.json\tres", "res.json(${1:json})"),
("res.type\tres", "res.type(${1:type})"),
("res.sendFile\tres", "res.sendFile(${1:path,[option],[callback]})"),
("res.format\tres", "res.format(${1:object})"),
("res.render\tres", "res.render(${1:view,callback})"),
]
compAll = list(compJS) # could use different lists
class AndyJSCompletions(sublime_plugin.EventListener):
def on_query_completions(self, view, prefix, locations):
global compAll
if not (view.match_selector(locations[0],
'source.js -string -comment -constant') or
view.match_selector(locations[0],
'source.ts -string -comment -constant')):
return []
completions = []
pt = locations[0] - len(prefix) - 1
# get the character before the trigger
ch = view.substr(sublime.Region(pt, pt + 1)) if pt >= 0 else None
if ch == '.': pass
else: pass
word = view.word(pt - 1) if pt >= 0 else None
word = view.substr(word) if word is not None else None
if word is not None and len(word) > 1:
pass # could check for window or document
completions = compAll
compDefault = [view.extract_completions(prefix)]
compDefault = [(item + "\tDefault", item) for sublist in compDefault
for item in sublist if len(item) > 3] # flatten
compDefault = list(set(compDefault)) # make unique
compFull = list(completions)
compFull.extend(compDefault)
compFull.sort()
return (compFull, sublime.INHIBIT_WORD_COMPLETIONS |
sublime.INHIBIT_EXPLICIT_COMPLETIONS)
id: 181326
import pytest
import regex
import context
from paroxython.flatten_ast import ast, flatten_ast, pseudo_hash
sources = r"""
<<< Examples of _type, _length, _pos and _hash (full output)
42
---
/_type=Module # Any node has a type, stored under _type
/body/_length=1 # Any sequence has a length, stored under _length
/body/1/_type=Expr # The first line of a body is numbered 1
/body/1/_pos=1:1- # _pos consists of the line number, followed by the path from the AST root
/body/1/value/_type=Num
/body/1/value/_hash=0x0001 # Each expression has a unique identifier, stored in _hash
/body/1/value/_pos=1:1-0-
/body/1/value/n=42
/type_ignores/_length=0
>>>
<<< Example of function
def foo():
bar()
buz()
---
/body/1/_type=FunctionDef
/body/1/_pos=1:1-
/body/1/name=foo
/body/1/args/_type=arguments
/body/1/args/args/_length=0
/body/1/args/vararg=None
/body/1/args/kwonlyargs/_length=0
/body/1/args/kw_defaults/_length=0
/body/1/args/kwarg=None
/body/1/args/defaults/_length=0
/body/1/decorator_list/_length=0 # moved before the body of the function
/body/1/returns=None # moved before the body of the function
/body/1/type_comment=None
/body/1/body/_length=2 # start of the body of the function
>>>
<<< Example of conditional else
if c:
pass
else:
pass
---
/body/1/_type=If
/body/1/_pos=1:1-
/body/1/test/_type=Name
/body/1/test/_hash=0x0001
/body/1/test/_pos=1:1-0-
/body/1/test/id=c
/body/1/test/ctx/_type=Load
/body/1/body/_length=1
/body/1/body/1/_type=Pass
/body/1/body/1/_pos=2:1-1-1-
/body/1/orelse/_length=1
/body/1/orelse/1/_type=Pass
/body/1/orelse/1/_pos=4:1-2-1-
>>>
<<< Example of loop else
while c:
pass
else:
pass
---
/body/1/_type=While
/body/1/_pos=1:1-
/body/1/test/_type=Name
/body/1/test/_hash=0x0001
/body/1/test/_pos=1:1-0-
/body/1/test/id=c
/body/1/test/ctx/_type=Load
/body/1/body/_length=1
/body/1/body/1/_type=Pass
/body/1/body/1/_pos=2:1-1-1-
/body/1/loopelse/_length=1 # renamed from "orelse" to "loopelse"
/body/1/loopelse/1/_type=Pass
/body/1/loopelse/1/_pos=4:1-2-1-
>>>
<<< Example of assignment
a = 1
---
/body/1/_type=Assign
/body/1/_pos=1:1-
/body/1/assigntargets/_length=1 # renamed from "targets" to "assigntargets"
/body/1/assigntargets/1/_type=Name
/body/1/assigntargets/1/_hash=0x0001
/body/1/assigntargets/1/_pos=1:1-0-1-
/body/1/assigntargets/1/id=a
/body/1/assigntargets/1/ctx/_type=Store
/body/1/assignvalue/_type=Num # renamed from "value" to "assignvalue"
/body/1/assignvalue/_hash=0x0002
/body/1/assignvalue/_pos=1:1-1-
/body/1/assignvalue/n=1
>>>
<<< Example of augmented assignment
a += 1
---
/body/1/_type=AugAssign
/body/1/_pos=1:1-
/body/1/assigntarget/_type=Name # renamed from "target" to "assigntarget"
/body/1/assigntarget/_hash=0x0001
/body/1/assigntarget/_pos=1:1-0-
/body/1/assigntarget/id=a
/body/1/assigntarget/ctx/_type=Store
/body/1/op/_type=Add
/body/1/assignvalue/_type=Num # renamed from "value" to "assignvalue"
/body/1/assignvalue/_hash=0x0002
/body/1/assignvalue/_pos=1:1-2-
/body/1/assignvalue/n=1
>>>
<<< Example of deletion
del a
---
/body/1/_type=Delete
/body/1/_pos=1:1-
/body/1/targets/_length=1 # note the use of "targets"
/body/1/targets/1/_type=Name
/body/1/targets/1/_hash=0x0001
/body/1/targets/1/_pos=1:1-0-1-
/body/1/targets/1/id=a
/body/1/targets/1/ctx/_type=Del
>>>
<<< Example of comprehension
[x for x in s]
---
/body/1/value/_type=ListComp # note the use of "value"
/body/1/value/_hash=0x0001
/body/1/value/_pos=1:1-0-
/body/1/value/elt/_type=Name
/body/1/value/elt/_hash=0x0002
/body/1/value/elt/_pos=1:1-0-0-
/body/1/value/elt/id=x
/body/1/value/elt/ctx/_type=Load
/body/1/value/generators/_length=1
/body/1/value/generators/1/_type=comprehension
/body/1/value/generators/1/target/_type=Name # note the use of "target"
/body/1/value/generators/1/target/_hash=0x0002
/body/1/value/generators/1/target/_pos=1:1-0-1-1-0-
/body/1/value/generators/1/target/id=x
/body/1/value/generators/1/target/ctx/_type=Store
>>>
<<< Examples of importation
from m import f1
from . import f2
---
/body/1/_type=ImportFrom
/body/1/_pos=1:1-
/body/1/module=m # no quotes
/body/1/names/_length=1
/body/1/names/1/_type=alias
/body/1/names/1/name=f1
/body/1/names/1/asname=None
/body/1/level=0
/body/2/_type=ImportFrom
/body/2/_pos=2:2-
/body/2/module=None # no problem, since "from None import f" would raise a syntax error
/body/2/names/_length=1
/body/2/names/1/_type=alias
/body/2/names/1/name=f2
/body/2/names/1/asname=None
/body/2/level=1
>>>
<<< Examples of strings
"hello world"
""
"\n"
---
/body/1/_type=Expr
/body/1/_pos=1:1-
/body/1/value/_type=Str
/body/1/value/_hash=0x0001
/body/1/value/_pos=1:1-0-
/body/1/value/s=hello world # stripped of its quote delimiters
/body/2/_type=Expr
/body/2/_pos=2:2-
/body/2/value/_type=Str
/body/2/value/_hash=0x0002
/body/2/value/_pos=2:2-0-
/body/2/value/s= # an empty string is followed by an \n
/body/3/_type=Expr
/body/3/_pos=3:3-
/body/3/value/_type=Str
/body/3/value/_hash=0x0003
/body/3/value/_pos=3:3-0-
/body/3/value/s=\n # which is distinct from the string "\n"
>>>
<<< Examples of function call (full output)
print("hello, world")
---
/_type=Module
/body/_length=1
/body/1/_type=Expr
/body/1/_pos=1:1-
/body/1/value/_type=Call
/body/1/value/_hash=0x0001
/body/1/value/_pos=1:1-0-
/body/1/value/func/_type=Name
/body/1/value/func/_hash=0x0002
/body/1/value/func/_pos=1:1-0-0-
/body/1/value/func/id=print
/body/1/value/func/ctx/_type=Load
/body/1/value/args/_length=1
/body/1/value/args/1/_type=Str
/body/1/value/args/1/_hash=0x0003
/body/1/value/args/1/_pos=1:1-0-1-1-
/body/1/value/args/1/s=hello, world
/body/1/value/keywords/_length=0
/type_ignores/_length=0
>>>
<<< Example of negative literal
-42
---
/body/1/_type=Expr
/body/1/_pos=1:1-
/body/1/value/_type=Num
/body/1/value/_hash=0x0001
/body/1/value/_pos=1:1-0-
/body/1/value/n=-42
>>>
"""
source_rex = regex.compile(r"(?ms)^<<< ([^\n]+)\n(.+?)\n---\n(.*?)\n>>>")
examples = [m for m in source_rex.findall(sources)]
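# source_rex splits the string above into (title, source, expected) triples;
# each triple becomes one parametrized test case below.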
@pytest.mark.parametrize("title, source, expected", examples)
def test_flatten_ast(title, source, expected):
expected = regex.sub(r" +# .+", "", expected)
pseudo_hash.reset()
print(title)
print("-" * len(title))
result = flatten_ast(ast.parse(source)).strip()
print(result)
assert expected in result
if __name__ == "__main__":
pytest.main(["-qq", __import__("sys").argv[0]])
id: 181346
import curses
import os
import re
import time
from collections import namedtuple
from operator import itemgetter
from pg_view import flags
from pg_view.meta import __appname__, __version__, __license__
from pg_view.utils import enum
COLSTATUS = enum(cs_ok=0, cs_warning=1, cs_critical=2)
COLALIGN = enum(ca_none=0, ca_left=1, ca_center=2, ca_right=3)
COLTYPES = enum(ct_string=0, ct_number=1)
COLHEADER = enum(ch_default=0, ch_prepend=1, ch_append=2)
class ColumnType(namedtuple('ColumnType', 'value header header_position')):
__slots__ = ()
@property
def length(self):
return len(self.value) + (0 if not self.header_position else len(self.header) + 1)
class CommonOutput(object):
""" just a normal console output """
def __init__(self):
        super(CommonOutput, self).__init__()
@staticmethod
def display(data):
print(data)
@staticmethod
def refresh():
os.system('clear')
class CursesOutput(object):
""" Show ncurses output """
CLOCK_FORMAT = '%H:%M:%S'
MIN_ELLIPSIS_FIELD_LENGTH = 10
MIN_TRUNCATE_FIELD_LENGTH = 50 # do not try to truncate fields lower than this size
MIN_TRUNCATED_LEAVE = 10 # do not leave the truncated field if it's less than this size
def __init__(self, screen):
        super(CursesOutput, self).__init__()
self.screen = screen
self.data = {}
self.output_order = []
self.show_help = False
self.is_color_supported = True
self._init_display()
def _init_display(self):
""" Various ncurses initialization calls """
if hasattr(curses, 'curs_set'):
try:
curses.curs_set(0) # make the cursor invisible
except Exception:
pass
self.screen.nodelay(1) # disable delay when waiting for keyboard input
# initialize colors
if hasattr(curses, 'use_default_colors'):
curses.use_default_colors()
curses.init_pair(1, -1, -1)
curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_BLUE)
curses.init_pair(3, curses.COLOR_WHITE, curses.COLOR_RED)
curses.init_pair(4, curses.COLOR_WHITE, -1)
curses.init_pair(5, curses.COLOR_GREEN, -1)
curses.init_pair(6, curses.COLOR_BLACK, curses.COLOR_WHITE)
self.COLOR_NORMAL = curses.color_pair(1)
self.COLOR_WARNING = curses.color_pair(2)
self.COLOR_CRITICAL = curses.color_pair(3)
self.COLOR_HIGHLIGHT = curses.color_pair(4)
self.COLOR_INVERSE_HIGHLIGHT = curses.color_pair(5)
self.COLOR_MENU = curses.color_pair(2)
self.COLOR_MENU_SELECTED = curses.color_pair(6)
else:
self.is_color_supported = False
def display(self, data):
""" just collect the data """
collector_name = list(data.keys())[0]
self.data[collector_name] = list(data.values())[0]
self.output_order.append(collector_name)
def toggle_help(self):
        self.show_help = not self.show_help
def refresh(self):
""" actual data output goes here """
self.next_y = 0
# ncurses doesn't erase the old contents when the screen is refreshed,
# hence, we need to do it manually here.
# update screen coordinates
self.screen.erase()
self.update_screen_metrics()
if not self.show_help:
for collector in self.output_order:
if self.next_y < self.screen_y - 2:
self.show_collector_data(collector)
else:
break
else:
self.help()
# show clock if possible
self.show_clock()
self.show_help_bar()
self.screen.refresh()
self.output_order = []
def screen_erase(self):
self.screen.erase()
self.screen.refresh()
def update_screen_metrics(self):
self.screen_y, self.screen_x = self.screen.getmaxyx()
def print_text(self, starty, startx, text, attr=None, trim_middle=False):
""" output string, truncate it if it doesn't fit, return the new X position"""
if attr is None:
attr = self.COLOR_NORMAL
# bail out if we have hit the screen vertical limit
if starty > self.screen_y - 1:
return
remaining_len = min(self.screen_x - (startx + 1), len(text))
if remaining_len > 0:
self.screen.addnstr(starty, startx, text, remaining_len, attr)
return startx + remaining_len
else:
return startx
def show_help_bar_item(self, key, description, selected, x):
x = self.print_text(self.screen_y - 1, x, '{0}:'.format(key),
(self.COLOR_MENU_SELECTED if selected else self.COLOR_MENU) | curses.A_BOLD)
x = self.print_text(self.screen_y - 1, x, '{0} '.format(description),
self.COLOR_MENU_SELECTED if selected else self.COLOR_MENU)
return x
def show_help_bar(self):
        # only show the help bar if we have enough screen real estate
        if self.next_y > self.screen_y - 1:
            return
menu_items = (
('s', 'system', not flags.filter_aux),
('f', 'freeze', flags.freeze),
('u', 'units', flags.display_units),
('a', 'autohide', flags.autohide_fields),
('t', 'trimming', flags.notrim),
('r', 'realtime', flags.realtime),
('h', 'help', self.show_help),
)
next_x = 0
for item in menu_items:
next_x = self.show_help_bar_item(x=next_x, *item)
self.print_text(self.screen_y - 1, next_x, 'v{0}'.format(__version__).rjust(self.screen_x - next_x - 1),
self.COLOR_MENU | curses.A_BOLD)
def show_clock(self):
clock_str_len = len(self.CLOCK_FORMAT)
clean = True
for pos in range(0, clock_str_len):
x = self.screen.inch(0, self.screen_x - clock_str_len - 1 + pos) & 255
if x != ord(' '):
clean = False
break
if clean:
clock_str = time.strftime(self.CLOCK_FORMAT, time.localtime())
self.screen.addnstr(0, self.screen_x - clock_str_len, clock_str, clock_str_len)
def _status_to_color(self, status, highlight):
if status == COLSTATUS.cs_critical:
return self.COLOR_CRITICAL
if status == COLSTATUS.cs_warning:
return self.COLOR_WARNING
if highlight:
return self.COLOR_HIGHLIGHT | curses.A_BOLD
return self.COLOR_NORMAL
def color_text(self, status_map, highlight, text, header, header_position):
""" for a given header and text - decide on the position and output color """
result = []
xcol = 0
# header_position is either put the header before the value, or after
# if header_position is empty, no header is present
if header_position == COLHEADER.ch_prepend:
xcol = self.color_header(header, xcol, result)
# the text might be empty, if it was truncated by truncate_column_value
if text:
self.color_value(text, xcol, status_map, highlight, result)
elif header_position == COLHEADER.ch_append:
xcol = self.color_value(text, xcol, status_map, highlight, result)
# ditto for the header
if header:
self.color_header(header, xcol, result)
else:
self.color_value(text, 0, status_map, highlight, result)
return result
def color_header(self, header, xcol, result):
""" add a header outout information"""
result.append({
'start': xcol,
'width': len(header),
'word': header,
'color': self.COLOR_NORMAL,
})
return xcol + len(header) + 1
def color_value(self, val, xcol, status_map, highlight, result):
""" add a text optut information """
# status format: field_no -> color
# if the status field contain a single value of -1 - just
# highlight everything without splitting the text into words
# get all words from the text and their relative positions
if len(status_map) == 1 and -1 in status_map:
color = self._status_to_color(status_map[-1], highlight)
result.append({
'start': xcol,
'word': val,
'width': len(val),
'color': color,
})
xcol += len(val) + 1
else:
            # XXX: we are calculating the word boundaries again here
# (first one in calculate_output_status) and using a different method to do so.
words = list(re.finditer(r'(\S+)', val))
last_position = xcol
for no, word in enumerate(words):
if no in status_map:
status = status_map[no]
color = self._status_to_color(status, highlight)
elif -1 in status_map:
# -1 is catchall for all fields (i.e for queries)
status = status_map[-1]
color = self._status_to_color(status, highlight)
else:
color = self.COLOR_NORMAL
word_len = word.end(0) - word.start(0)
# convert the relative start to the absolute one
result.append({
'start': xcol + word.start(0),
'word': word.group(0),
'width': word_len,
'color': color,
})
last_position = xcol + word.end(0)
xcol += last_position + 1
return xcol
def help(self):
y = 0
self.print_text(y, 0, '{0} {1} - a monitor for PostgreSQL related system statistics'.format(__appname__,
__version__),
self.COLOR_NORMAL | curses.A_BOLD)
y += 1
self.print_text(y, 0, 'Distributed under the terms of {0} license'.format(__license__))
y += 2
self.print_text(y, 0, 'The following hotkeys are supported:')
y += 1
x = self.print_text(y, 5, 's: ', self.COLOR_NORMAL | curses.A_BOLD)
self.print_text(y, x, 'toggle system processes display')
y += 1
x = self.print_text(y, 5, 'f: ', self.COLOR_NORMAL | curses.A_BOLD)
self.print_text(y, x, 'freeze/unfreeze output')
y += 1
x = self.print_text(y, 5, 'u: ', self.COLOR_NORMAL | curses.A_BOLD)
self.print_text(y, x, 'toggle measurement units display (MB, s)')
y += 1
x = self.print_text(y, 5, 'a: ', self.COLOR_NORMAL | curses.A_BOLD)
self.print_text(y, x, 'toggle auto-hiding of non-essential attributes')
y += 1
x = self.print_text(y, 5, 't: ', self.COLOR_NORMAL | curses.A_BOLD)
self.print_text(y, x, 'toggle trimming of attributes in the middle (user and database names)')
y += 1
x = self.print_text(y, 5, 'r: ', self.COLOR_NORMAL | curses.A_BOLD)
self.print_text(y, x, 'update information in real time (may cause additional load)')
y += 1
x = self.print_text(y, 5, 'q: ', self.COLOR_NORMAL | curses.A_BOLD)
self.print_text(y, x, 'exit program')
y += 2
self.print_text(y, 0, "Press 'h' to exit this screen")
def show_collector_data(self, collector, clock=False):
if collector not in self.data or len(self.data[collector]) <= 0 or \
len(self.data[collector].get('rows', ())) <= 0 and not self.data[collector]['prefix']:
return
rows = self.data[collector]['rows']
statuses = self.data[collector]['statuses']
align = self.data[collector]['align']
header = self.data[collector].get('header', False) or False
prepend_column_headers = self.data[collector].get('prepend_column_headers', False)
highlights = self.data[collector]['highlights']
types = self.data[collector]['types']
start_x = 1
prefix_mod = self.display_prefix(collector, header)
if prefix_mod < 0:
self.next_y += 1
else:
start_x += prefix_mod
# if the block doesn't fit to screen - just return
if self.next_y + header + 1 > self.screen_y - 1 or len(rows) == 0:
return
# calculate X layout
layout = self.calculate_fields_position(collector, start_x)
if header:
self.display_header(layout, align, types)
self.next_y += 1
for i, (row, status) in enumerate(zip(rows, statuses)):
# if no more rows fit the screen - show '...' instead of the last row that fits
if self.next_y > self.screen_y - 3 and i != len(rows) - 1:
for field in layout:
self.print_text(self.screen_y - 2, layout[field]['start'], '.' * layout[field]['width'])
self.next_y += 1
break
self.show_status_of_invisible_fields(layout, status, 0)
for field in layout:
# calculate colors and alignment for the data value
column_alignment = (align.get(field,
COLALIGN.ca_none) if not prepend_column_headers else COLALIGN.ca_left)
w = layout[field]['width']
# now check if we need to add ellipsis to indicate that the value has been truncated.
# we don't do this if the value is less than a certain length or when the column is marked as
# containing truncated values, but the actual value is not truncated.
if layout[field].get('truncate', False):
# XXX: why do we truncate even when truncate for the column is set to False?
header, text = self.truncate_column_value(row[field], w, w > self.MIN_ELLIPSIS_FIELD_LENGTH)
else:
header, text = row[field].header, row[field].value
text = self._align_field(text, header, w, column_alignment, types.get(field, COLTYPES.ct_string))
color_fields = self.color_text(status[field], highlights[field],
text, header, row[field].header_position)
for f in color_fields:
self.screen.addnstr(self.next_y, layout[field]['start'] + f['start'], f['word'], f['width'],
f['color'])
self.next_y += 1
@staticmethod
def truncate_column_value(cv, maxlen, ellipsis=True):
""" make sure that a pair of header and value fits into the allocated field length """
value = cv.value
header = cv.header
header_position = cv.header_position
h_len = len(header)
v_len = len(value)
maxlen = (maxlen - 3) if ellipsis else maxlen
if header_position:
if header_position == COLHEADER.ch_prepend:
if h_len + 1 >= maxlen:
# prepend the header, consider if we have to truncate the header and omit the value altogether
header = header[:maxlen] + (' ' if maxlen == h_len + 1 else '') + ('...' if ellipsis else '')
value = ''
else:
value = value[:maxlen - h_len - 1] + ('...' if ellipsis else '')
elif header_position == COLHEADER.ch_append:
if v_len + 1 >= maxlen:
# prepend the value, consider if we have to truncate it and omit the header altogether
value = value[:maxlen] + (' ' if maxlen == v_len + 1 else '') + ('...' if ellipsis else '')
header = ''
else:
header = header[:maxlen - v_len - 1] + ('...' if ellipsis else '')
else:
# header is set to '' by the collector
value = value[:maxlen] + ('...' if ellipsis else '')
return header, value
def display_prefix(self, collector, header):
prefix = self.data[collector]['prefix']
if prefix:
prefix_len = len(prefix)
prefix_newline = prefix[-1] == '\n'
# truncate the prefix if it doesn't fit the screen
if prefix_len >= self.screen_x and prefix_newline:
prefix = prefix[:max(self.screen_x - 1, 0)]
elif prefix_len >= self.screen_x / 5 and not prefix_newline:
return 0
color = self.COLOR_INVERSE_HIGHLIGHT if prefix_newline else self.COLOR_NORMAL
self.screen.addnstr(self.next_y, 1, str(prefix), len(str(prefix)), color)
if prefix_newline:
return -1
else:
return prefix_len
else:
return 0
def display_header(self, layout, align, types):
for field in layout:
text = self._align_field(field, '', layout[field]['width'], align.get(field, COLALIGN.ca_none),
types.get(field, COLTYPES.ct_string))
self.screen.addnstr(self.next_y, layout[field]['start'], text, layout[field]['width'], self.COLOR_NORMAL |
curses.A_BOLD)
def calculate_fields_position(self, collector, xstart):
width = self.data[collector]['w']
fields = self._get_fields_sorted_by_position(collector)
to_hide = self.data[collector]['hide']
noautohide = self.data[collector]['noautohide']
candrop = [name for name in fields if name not in to_hide and not noautohide.get(name, False)]
return self.layout_x(xstart, width, fields, to_hide, candrop)
def show_status_of_invisible_fields(self, layout, status, xstart):
"""
Show red/blue bar to the left of the screen representing the most critical
status of the fields that are now shown.
"""
status_rest = self._invisible_fields_status(layout, status)
if status_rest != COLSTATUS.cs_ok:
color_rest = self._status_to_color(status_rest, False)
self.screen.addch(self.next_y, 0, ' ', color_rest)
@staticmethod
def _align_field(text, header, width, align, typ):
if align == COLALIGN.ca_none:
if typ == COLTYPES.ct_number:
align = COLALIGN.ca_right
else:
align = COLALIGN.ca_left
textlen = len(text) + len(header) + (1 if header and text else 0)
width_left = width - textlen
if align == COLALIGN.ca_right:
return '{0}{1}'.format(' ' * width_left, text)
if align == COLALIGN.ca_center:
            left_space = width_left // 2  # integer division: ' ' * float raises TypeError in Python 3
right_space = width_left - left_space
return '{0}{1}{2}'.format(' ' * left_space, text, ' ' * right_space)
return str(text)
def _get_fields_sorted_by_position(self, collector):
pos = self.data[collector]['pos']
sorted_by_pos = sorted(((x, pos[x]) for x in pos if pos[x] != -1), key=itemgetter(1))
return [f[0] for f in sorted_by_pos]
@staticmethod
def _invisible_fields_status(layout, statuses):
highest_status = COLSTATUS.cs_ok
invisible = [col for col in statuses if col not in layout]
for col in invisible:
for no in statuses[col]:
if statuses[col][no] > highest_status:
highest_status = statuses[col][no]
if highest_status == COLSTATUS.cs_critical:
return COLSTATUS.cs_critical
return highest_status
def layout_x(self, xstart, colwidth, colnames, colhidden, colcandrop):
""" Figure out width and X start position for each column. Some of the columns
        can be hidden, if they are not important (determined at column definition) and
if we don't have enough space for them.
"""
layout = {}
# get only the columns that are not hidden
col_remaining = [name for name in colnames if name not in colhidden]
# calculate the available screen X dimensions and the width required by all columns
width_available = self.screen_x - (xstart + 1)
# we add width of all N fields + N-1 spaces between fields
width_required = sum(colwidth[name] for name in col_remaining) + len(col_remaining) - 1
if width_available < width_required and colcandrop and len(colcandrop) > 0:
for name in colcandrop:
if name in col_remaining:
# remove a column, re-calculate width
col_remaining.remove(name)
width_required -= colwidth[name] + 1
# drop non-essential columns
if width_required <= width_available:
break
# we dropped what we can, now show the rest. Track the accumulated width to
# figure out which columns won't fit.
x = xstart
total_remaining = len(col_remaining)
for idx, name in enumerate(col_remaining):
w = colwidth[name]
layout[name] = {'start': x, 'width': w}
x += w
if idx != total_remaining - 1:
x += 1
# the last possible X position is screen_x - 1, the position of the last character
# of the current word is layout[name]['start'] + w - 1. The comparison below checks
# that the field width doesn't exceed the screen boundaries.
if layout[name]['start'] + w > self.screen_x:
# if we can't fit even one character - just bail out and don't show the field
if layout[name]['start'] > self.screen_x - 1:
del layout[name]
else:
# truncate it to the length that fits the screen
layout[name]['truncate'] = True
layout[name]['width'] = self.screen_x - layout[name]['start']
# oops, we ran across the screen boundary
# all the columns after this one should be dropped
break
return layout
id: 181393
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from flask_restful import Resource
from marshmallow import fields
from webargs.flaskparser import use_args
from redisolar.api.base import DaoResource
from redisolar.models import MeterReading
from redisolar.schema import MeterReadingsSchema
MAX_RECENT_FEEDS = 1000
DEFAULT_RECENT_FEEDS = 100
def get_feed_count(count: Optional[int]) -> int:
"""Decide a safe number of feeds to return."""
if count is None or count < 0:
return DEFAULT_RECENT_FEEDS
if count > MAX_RECENT_FEEDS:
return MAX_RECENT_FEEDS
return count
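# Clamping behaviour at a glance:
#   get_feed_count(None) -> 100 (DEFAULT_RECENT_FEEDS)
#   get_feed_count(-5)   -> 100
#   get_feed_count(5000) -> 1000 (MAX_RECENT_FEEDS)
#   get_feed_count(42)   -> 42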
class GlobalMeterReadingResource(Resource):
"""A RESTful resource representing meter readings for all sites."""
def __init__(self, meter_reading_dao: Any, feed_dao: Any):
self.meter_reading_dao = meter_reading_dao
self.feed_dao = feed_dao
@use_args(MeterReadingsSchema)
def post(self, meter_readings: Dict[str, List[MeterReading]]) -> Tuple[str, int]:
"""Create a new meter reading."""
for reading in meter_readings['readings']:
self.meter_reading_dao.add(reading)
return "Accepted", 202
@use_args({"count": fields.Int()}, location="query")
def get(self, args: Dict[str, int]) -> Dict[str, Dict]:
"""Get a list of meter readings."""
count = args.get('count')
readings = self.feed_dao.get_recent_global(get_feed_count(count))
return MeterReadingsSchema().dump({"readings": readings})
class SiteMeterReadingResource(DaoResource):
"""A RESTful resource representing meter readings for specific sites."""
@use_args({"count": fields.Int()}, location="query")
def get(self, args, site_id):
"""Get recent meter readings for a specific site."""
count = args.get('count')
readings = self.dao.get_recent_for_site(site_id, get_feed_count(count))
return MeterReadingsSchema().dump({"readings": readings})
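# Wiring sketch (illustrative; the Flask app, DAO instances, and URL paths below
# are assumptions, not defined in this module):
# from flask import Flask
# from flask_restful import Api
#
# app = Flask(__name__)
# api = Api(app)
# api.add_resource(GlobalMeterReadingResource, "/meter-readings",
#                  resource_class_args=(meter_reading_dao, feed_dao))
# api.add_resource(SiteMeterReadingResource, "/sites/<site_id>/meter-readings",
#                  resource_class_args=(site_feed_dao,))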
id: 181398
class Solution:
def setZeroes(self, matrix):
rows, cols = set(), set()
for i, r in enumerate(matrix):
for j, c in enumerate(r):
if c == 0:
rows.add(i)
cols.add(j)
l = len(matrix[0])
for r in rows:
matrix[r] = [0] * l
for c in cols:
for r in matrix:
r[c] = 0
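# Quick sanity check (made-up matrix):
#   m = [[1, 1, 1], [1, 0, 1], [1, 1, 1]]
#   Solution().setZeroes(m)
#   m == [[1, 0, 1], [0, 0, 0], [1, 0, 1]]  # row 1 and column 1 zeroed in place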
id: 181433
import pytest
from statsmodels.tsa.statespace.sarimax import SARIMAX
from etna.models import SARIMAXModel
from etna.pipeline import Pipeline
def test_sarimax_forecaster_run(example_tsds):
"""
Given: I have dataframe with 2 segments
When:
Then: I get 7 periods per dataset as a forecast
"""
horizon = 7
model = SARIMAXModel()
model.fit(example_tsds)
future_ts = example_tsds.make_future(future_steps=horizon)
res = model.forecast(future_ts)
res = res.to_pandas(flatten=True)
assert not res.isnull().values.any()
assert len(res) == 14
def test_sarimax_save_regressors_on_fit(example_reg_tsds):
model = SARIMAXModel()
model.fit(ts=example_reg_tsds)
for segment_model in model._models.values():
assert sorted(segment_model.regressor_columns) == example_reg_tsds.regressors
def test_sarimax_select_regressors_correctly(example_reg_tsds):
model = SARIMAXModel()
model.fit(ts=example_reg_tsds)
for segment, segment_model in model._models.items():
segment_features = example_reg_tsds[:, segment, :].droplevel("segment", axis=1)
segment_regressors_expected = segment_features[example_reg_tsds.regressors]
segment_regressors = segment_model._select_regressors(df=segment_features.reset_index())
assert (segment_regressors == segment_regressors_expected).all().all()
def test_sarimax_forecaster_run_with_reg(example_reg_tsds):
"""
Given: I have dataframe with 2 segments
When:
Then: I get 7 periods per dataset as a forecast
"""
horizon = 7
model = SARIMAXModel()
model.fit(example_reg_tsds)
future_ts = example_reg_tsds.make_future(future_steps=horizon)
res = model.forecast(future_ts)
res = res.to_pandas(flatten=True)
assert not res.isnull().values.any()
assert len(res) == 14
def test_sarimax_forecaster_run_with_reg_custom_order(example_reg_tsds):
    """
    Given: I have a dataframe with 2 segments and regressors
    When: SARIMAX has a non-standard `order` param
    Then: I get 7 forecast periods per segment (14 rows after flattening)
    """
horizon = 7
model = SARIMAXModel(order=(3, 1, 0))
model.fit(example_reg_tsds)
future_ts = example_reg_tsds.make_future(future_steps=horizon)
res = model.forecast(future_ts)
res = res.to_pandas(flatten=True)
assert not res.isnull().values.any()
assert len(res) == 14
def test_prediction_interval_run_insample(example_tsds):
model = SARIMAXModel()
model.fit(example_tsds)
forecast = model.forecast(example_tsds, prediction_interval=True, quantiles=[0.025, 0.975])
for segment in forecast.segments:
segment_slice = forecast[:, segment, :][segment]
assert {"target_0.025", "target_0.975", "target"}.issubset(segment_slice.columns)
        # N.B. in-sample forecast will not change target values, because `combine_first` in `SARIMAXModel.forecast` only fills NaN values
# assert (segment_slice["target_0.975"] - segment_slice["target"] >= 0).all()
# assert (segment_slice["target"] - segment_slice["target_0.025"] >= 0).all()
assert (segment_slice["target_0.975"] - segment_slice["target_0.025"] >= 0).all()
def test_prediction_interval_run_infuture(example_tsds):
model = SARIMAXModel()
model.fit(example_tsds)
future = example_tsds.make_future(10)
forecast = model.forecast(future, prediction_interval=True, quantiles=[0.025, 0.975])
for segment in forecast.segments:
segment_slice = forecast[:, segment, :][segment]
assert {"target_0.025", "target_0.975", "target"}.issubset(segment_slice.columns)
assert (segment_slice["target_0.975"] - segment_slice["target"] >= 0).all()
assert (segment_slice["target"] - segment_slice["target_0.025"] >= 0).all()
assert (segment_slice["target_0.975"] - segment_slice["target_0.025"] >= 0).all()
def test_forecast_raise_error_if_not_fitted(example_tsds):
"""Test that SARIMAX raise error when calling forecast without being fit."""
model = SARIMAXModel()
with pytest.raises(ValueError, match="model is not fitted!"):
_ = model.forecast(ts=example_tsds)
def test_get_model_before_training():
"""Check that get_model method throws an error if per-segment model is not fitted yet."""
etna_model = SARIMAXModel()
with pytest.raises(ValueError, match="Can not get the dict with base models, the model is not fitted!"):
_ = etna_model.get_model()
def test_get_model_after_training(example_tsds):
"""Check that get_model method returns dict of objects of SARIMAX class."""
pipeline = Pipeline(model=SARIMAXModel())
pipeline.fit(ts=example_tsds)
models_dict = pipeline.model.get_model()
assert isinstance(models_dict, dict)
for segment in example_tsds.segments:
assert isinstance(models_dict[segment], SARIMAX)
def test_sarimax_forecast_1_point(example_tsds):
"""Check that SARIMAX work with 1 point forecast."""
horizon = 1
model = SARIMAXModel()
model.fit(example_tsds)
future_ts = example_tsds.make_future(future_steps=horizon)
pred = model.forecast(future_ts)
assert len(pred.df) == horizon
pred_quantiles = model.forecast(future_ts, prediction_interval=True, quantiles=[0.025, 0.8])
assert len(pred_quantiles.df) == horizon
id: 181443
from channels import route
from channels_snake_server.consumers import ws_add, ws_disconnect, ws_message
channel_routing = [
route("websocket.connect", ws_add),
route("websocket.receive", ws_message),
route("websocket.disconnect", ws_disconnect),
]
id: 181469
from django.utils.translation import ugettext_lazy as _
from mayan.apps.navigation.classes import Link
from .icons import (
icon_download_file_delete, icon_download_file_download,
icon_download_file_list
)
link_download_file_delete = Link(
args='resolved_object.pk', icon=icon_download_file_delete,
tags='dangerous', text=_('Delete'), view='storage:download_file_delete'
)
link_download_file_download = Link(
args='resolved_object.pk', icon=icon_download_file_download,
text=_('Download'), view='storage:download_file_download'
)
link_download_file_list = Link(
icon=icon_download_file_list, text=_('Download files'),
view='storage:download_file_list'
)
id: 181470
from moviepy.editor import VideoFileClip
import os
import base64
import hmac
import hashlib
import time
import requests
# https://docs.acrcloud.com/reference/identification-api
def login(acr_credentials):
access_key = acr_credentials["access_key"]
access_secret = acr_credentials["secret_key"]
requrl = f'https://{acr_credentials["host"]}/v1/identify'
timestamp = time.time()
string_to_sign = f'POST\n/v1/identify\n{access_key}\naudio\n1\n{str(timestamp)}'
sign = base64.b64encode(hmac.new(bytes(access_secret, encoding="utf8"), bytes(string_to_sign, encoding="utf8"),
digestmod=hashlib.sha1).digest())
data = {'access_key': access_key,
'sample_bytes': 0,
'timestamp': str(timestamp),
'signature': sign,
'data_type': "audio",
"signature_version": "1"}
response = requests.post(requrl, files=None, data=data)
if response.status_code != 200:
        raise Exception(f'An error occurred while authenticating ACRCloud: {response.json()}')
if response.json()["status"]["code"] != 3006:
raise Exception("An error occured while authenticating ACRCloud: invalid credentials",
response.json()["status"]["msg"])
return acr_credentials
def is_copyright(file_path, acr_credentials):
access_key = acr_credentials["access_key"]
access_secret = acr_credentials["secret_key"]
requrl = f'https://{acr_credentials["host"]}/v1/identify'
temp_dir = os.path.dirname(os.path.realpath(file_path))
audio_file_path = os.path.join(temp_dir, 'temp.mp3')
video = VideoFileClip(file_path)
audio = video.audio
audio.write_audiofile(audio_file_path, logger=None)
audio.close()
video.close()
timestamp = time.time()
string_to_sign = f'POST\n/v1/identify\n{access_key}\naudio\n1\n{str(timestamp)}'
sign = base64.b64encode(hmac.new(bytes(access_secret, encoding="utf8"), bytes(string_to_sign, encoding="utf8"),
digestmod=hashlib.sha1).digest())
f = open(audio_file_path, "rb")
sample_bytes = os.path.getsize(audio_file_path)
files = [
('sample', ('temp.mp3', f, 'audio/mpeg'))
]
data = {'access_key': access_key,
'sample_bytes': sample_bytes,
'timestamp': str(timestamp),
'signature': sign,
'data_type': "audio",
"signature_version": "1"}
response = requests.post(requrl, files=files, data=data)
response.encoding = "utf-8"
f.close()
os.remove(audio_file_path)
if response.status_code != 200:
raise Exception(response.json())
error_code = response.json()["status"]["code"]
if error_code != 0 and error_code != 1001:
raise Exception(response.json()["status"]["msg"])
return error_code == 0
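# Usage sketch (hypothetical credentials; the host below is only an example of an
# ACRCloud identification endpoint, not something this module defines):
# creds = login({"host": "identify-eu-west-1.acrcloud.com",
#                "access_key": "...", "secret_key": "..."})
# flagged = is_copyright("video.mp4", creds)  # True if the audio is recognized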
id: 181487
from urllib.request import urlopen
import json
from i3pystatus import IntervalModule
class LastFM(IntervalModule):
"""
Displays currently playing song as reported by last.fm. Get your API key
from http://www.last.fm/api.
"""
settings = (
("apikey", "API key used to make calls to last.fm."),
("user", "Name of last.fm user to track."),
("playing_format", "Output format when a song is playing"),
("stopped_format", "Output format when nothing is playing"),
"playing_color",
"stopped_color",
"interval",
)
required = ("apikey", "user")
playing_color = 'FFFFFF'
stopped_color = '000000'
interval = 5
playing_format = "{artist} - {track}"
stopped_format = ""
def run(self):
apiurl = 'http://ws.audioscrobbler.com/2.0/'
uri = '?method=user.getrecenttracks'\
'&user=%s&api_key=%s' \
'&format=json&'\
'limit=1' % (self.user, self.apikey)
content = urlopen(apiurl + uri).read()
responsestr = content.decode('utf-8')
response = json.loads(responsestr)
try:
track = response['recenttracks']['track'][0]
if track['@attr']['nowplaying'] == 'true':
cdict = {
"artist": track['artist']['#text'],
"track": track['name'],
"album": track['album']['#text'],
}
self.data = cdict
self.output = {
"full_text": self.playing_format.format(**cdict),
"color": self.playing_color
}
except KeyError:
self.output = {
"full_text": self.stopped_format,
"color": self.stopped_color
}
id: 181498
from dataclasses import dataclass
from data import GetAllTypeGamesRepository
from .usecase import UseCase
@dataclass
class Params:
user_id: str
class GetTypeGamesByUser(UseCase):
def __init__(self, type_games_repository: GetAllTypeGamesRepository):
self.type_games_repository = type_games_repository
    def run(self, params: Params):
return self.type_games_repository.get_type_games_by_user(params.user_id)
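# Usage sketch (hypothetical repository instance):
#   usecase = GetTypeGamesByUser(type_games_repository=repository)
#   type_games = usecase.run(Params(user_id="user-123"))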
id: 181550
import time
import json
import requests
from bs4 import BeautifulSoup
URL = 'https://lenta.ru/rubrics/{}/'
categories = ('russia', 'world', 'ussr', 'economics', 'forces', 'science', 'culture', 'sport', 'media', 'style', 'travel', 'life', 'realty')
for category in categories:
print('-'*100)
print(category)
print('-'*100)
with open('data/lenta/{}.json'.format(category), 'a') as file:
        # Fetch the HTML
html = requests.get(URL.format(category)).text
        # Parse the page
soup = BeautifulSoup(html, 'html.parser') # lxml
section1 = soup.select('div.js-rubric__content')[0]
section2 = section1.find('div', {'class': 'js-content'})
divs = section2.findAll('div', {'class': 'item'})
for div in divs:
            try:
                a = div.h3.a
            except AttributeError:
                continue
url = 'https://lenta.ru' + a.attrs['href']
name = a.text.strip()
row = url + ' ' + name
if len(row) > 125:
row = row[:122] + '...'
print('{:125}'.format(row), end=' ')
            # Fetch the HTML
            html2 = requests.get(url).text
            # Parse the page
soup2 = BeautifulSoup(html2, 'html.parser')
# title = soup2.h1.text
# subtitle = soup2.h2.text
body = soup2.find('div', {'itemprop': 'articleBody'})
if not body:
body = soup2.find('article', {'class': 'b-topic'})
if not body:
print('❌')
continue
cont = body.text
print('✔', len(cont.split()))
req = {
'url': url,
'name': name,
'cont': cont,
}
data = json.dumps(req, ensure_ascii=False)
print(data, file=file)
time.sleep(1)
id: 181557
from typing import Tuple
import numpy as np
import torch
import torch.nn as nn
from rlcycle.common.abstract.action_selector import ActionSelector
from rlcycle.common.utils.common_utils import np2tensor
class SACActionSelector(ActionSelector):
"""Action selector for (vanilla) DDPG policy
Attributes:
action_dim (int): size of action space dimension
action_min (np.ndarray): lower bound for continuous actions
action_max (np.ndarray): upper bound for continuous actions
"""
def __init__(self, action_dim: int, action_range: list, use_cuda: bool):
ActionSelector.__init__(self, use_cuda)
self.action_dim = action_dim
self.action_min = np.array(action_range[0])
self.action_max = np.array(action_range[1])
def __call__(
self, policy: nn.Module, state: np.ndarray
    ) -> np.ndarray:
"""Generate action via policy"""
if state.ndim == 1:
state = state.reshape(1, -1)
mu, sigma, z, log_pi = policy.sample(np2tensor(state, self.use_cuda))
action = torch.tanh(z)
action_np = action.cpu().detach().view(-1).numpy()
return action_np
def rescale_action(self, action: np.ndarray) -> np.ndarray:
"""Rescale actions to fit continuous action spaces"""
action_rescaled = (
action * (self.action_max - self.action_min) / 2.0
+ (self.action_max + self.action_min) / 2.0
)
return action_rescaled
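# Rescaling sketch (hypothetical 1-D action space on [-2, 2]):
#   selector = SACActionSelector(action_dim=1, action_range=[[-2.0], [2.0]], use_cuda=False)
#   selector.rescale_action(np.array([0.5]))  # -> array([1.0]): 0.5 * 2 + 0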
id: 181566
from argparse import ArgumentParser
import json
from user_agent import generate_navigator_js
def script_ua():
parser = ArgumentParser()
parser.add_argument('-e', '--extended', action='store_true',
default=False)
parser.add_argument('-o', '--os')
parser.add_argument('-n', '--navigator')
parser.add_argument('-d', '--device-type')
opts = parser.parse_args()
nav = generate_navigator_js(os=opts.os,
navigator=opts.navigator,
device_type=opts.device_type)
if opts.extended:
print(json.dumps(nav, indent=2))
else:
print(nav['userAgent'])
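# Invocation sketch (assuming script_ua is registered as a console entry point;
# the command name "ua" below is an assumption, not defined in this file):
#   ua -o linux -n chrome    # print a matching User-Agent string
#   ua --extended            # print the full navigator dict as JSON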
id: 181595
from rich.columns import Columns
from textual.widget import Widget
from kaskade.renderables.cluster_info import ClusterInfo
from kaskade.renderables.kaskade_name import KaskadeName
from kaskade.renderables.shortcuts_header import ShortcutsHeader
class Header(Widget):
def on_mount(self) -> None:
self.layout_size = 6
def render(self) -> Columns:
cluster_info = ClusterInfo(self.app.cluster)
kaskade_name = KaskadeName()
shortcuts = ShortcutsHeader()
return Columns([kaskade_name, cluster_info, shortcuts], padding=3)
id: 181631
import os.path
import urllib.parse
import urllib.request
from typing import List
from . import msgparts as mp
from .lib.log import warning
from .paths import TMP_PATH
from .version import SERVER_COMPATIBILITY
# old value used by some features (stats, ...)
METASERVER_URL = "http://jlpo.free.fr/soundrts/metaserver/"
MAIN_METASERVER_URL = open("cfg/metaserver.txt").read().strip()
DEFAULT_SERVERS_PATH = "cfg/default_servers.txt"
RECENT_SERVERS_PATH = os.path.join(TMP_PATH, "recent_servers.txt")
def _add_time_and_version(line):
words = line.split()
words = ["0"] + words[:1] + [SERVER_COMPATIBILITY] + words[1:]
return " ".join(words)
def _default_servers():
lines = open(DEFAULT_SERVERS_PATH).readlines()
return [
_add_time_and_version(line)
for line in lines
if line.strip() and not line.startswith(";")
]
def servers_list(voice) -> List[str]:
# The header is an arbitrary string that the metaserver will include
# in the reply to make sure that the PHP script is executed.
header = "SERVERS"
query = "header=%s&include_ports=1" % header
servers_url = MAIN_METASERVER_URL + "servers.php?" + query
try:
f = urllib.request.urlopen(servers_url)
if f.read(len(header)).decode() == header:
s = f.read().decode()
servers = s.split("\n")
try:
with open(RECENT_SERVERS_PATH, "w") as t:
t.write(s)
except OSError:
warning("couldn't save the list of servers")
else:
raise OSError(f"wrong header")
except OSError as e:
voice.alert(mp.BEEP + [str(e)]) # type: ignore
warning(str(e))
warning("couldn't get the servers list from the metaserver")
try:
with open(RECENT_SERVERS_PATH) as t:
servers = t.read().split("\n")
warning(f"using {RECENT_SERVERS_PATH} instead")
except OSError:
warning(f"couldn't read {RECENT_SERVERS_PATH}")
warning(f"using {DEFAULT_SERVERS_PATH} instead")
servers = _default_servers()
return servers
id: 181713
with (a, c,):
pass
with (a as b, c):
pass
async with (a, c,):
pass
async with (a as b, c):
pass
id: 181751
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
import cv2
from utils.misc import get_center
Rectangle = collections.namedtuple('Rectangle', ['x', 'y', 'width', 'height'])
def get_gauss_filter_weight(width, height, mu_x, mu_y, sigma=7):
xy = np.indices((height,width))
x = xy[1,:,:]
y = xy[0,:,:]
    psf = np.exp(-(((x-mu_x)**2+(y-mu_y)**2)/(2*sigma**2))) # not multiplied by 2
return psf
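# Spot check (illustrative): the response is an unnormalized Gaussian that peaks
# at (mu_x, mu_y), e.g. get_gauss_filter_weight(5, 5, 2, 2)[2, 2] == 1.0.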
def get_template_correlation_response(im_size=225, out_size=None):
# out_size = [width, height]
# output = [H,W]
gauss_response = get_gauss_filter_weight(im_size, im_size, im_size//2, im_size//2)
if out_size is not None:
gauss_response = cv2.resize(gauss_response, tuple(out_size))
return gauss_response
def batch_fft2d(inputs, transpose=True):
# inputs: [B,H,W,C]
if inputs.dtype != tf.complex64:
inputs = tf.cast(inputs, tf.complex64)
if transpose:
inputs = tf.transpose(inputs, [0,3,1,2])
outputs = tf.fft2d(inputs) # [B,C,H,W]
if transpose:
outputs = tf.transpose(outputs, [0,2,3,1]) # [B,H,W,C]
return outputs
def batch_ifft2d(inputs, transpose=True):
# inputs: [B,H,W,C]
if transpose:
inputs = tf.transpose(inputs, [0,3,1,2])
outputs = tf.ifft2d(inputs)
if transpose:
outputs = tf.transpose(outputs, [0,2,3,1]) # [B,H,W,C]
return outputs
def get_cx(rect):
return (rect[0]+rect[2])*0.5
def get_cy(rect):
return (rect[1]+rect[3])*0.5
def get_width(rect):
return (rect[2]-rect[0])
def get_height(rect):
return (rect[3]-rect[1])
def get_area(rect):
return (rect[2]-rect[0]) * (rect[3]-rect[1])
def get_intersection(rect1, rect2):
x1 = max(rect1[0], rect2[0])
y1 = max(rect1[1], rect2[1])
x2 = min(rect1[2], rect2[2])
y2 = min(rect1[3], rect2[3])
return np.array([x1,y1,x2,y2], dtype=rect1.dtype)
def get_IoU(rect1, rect2):
inter = get_intersection(rect1, rect2)
area1 = get_area(rect1)
area2 = get_area(rect2)
area_I = get_area(inter)
IoU = float(area_I) / float(area1 + area2 - area_I)
return IoU
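# Quick check (made-up [x1, y1, x2, y2] boxes):
#   get_IoU(np.array([0, 0, 2, 2]), np.array([1, 1, 3, 3])) == 1.0 / 7.0
#   (intersection area 1, union 4 + 4 - 1 = 7)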
def im2rgb(im):
if len(im.shape) != 3:
im = np.stack([im, im, im], -1)
return im
def convert_bbox_format(bbox, to):
x, y, target_width, target_height = bbox.x, bbox.y, bbox.width, bbox.height
if to == 'top-left-based':
x -= get_center(target_width)
y -= get_center(target_height)
elif to == 'center-based':
y += get_center(target_height)
x += get_center(target_width)
else:
raise ValueError("Bbox format: {} was not recognized".format(to))
return Rectangle(x, y, target_width, target_height)
def get_exemplar_images(images, exemplar_size, targets_pos=None):
"""Crop exemplar image from input images"""
with tf.name_scope('get_exemplar_image'):
batch_size, x_height, x_width = images.get_shape().as_list()[:3]
z_height, z_width = exemplar_size
if targets_pos is None:
# crop from the center
target_pos_single = [[get_center(x_height), get_center(x_width)]]
targets_pos_ = tf.tile(target_pos_single, [batch_size, 1])
else:
targets_pos_ = targets_pos
# convert to top-left corner based coordinates
top = tf.to_int32(tf.round(targets_pos_[:, 0] - get_center(z_height)))
bottom = tf.to_int32(top + z_height)
left = tf.to_int32(tf.round(targets_pos_[:, 1] - get_center(z_width)))
right = tf.to_int32(left + z_width)
def _slice(x):
f, t, l, b, r = x
c = f[t:b, l:r]
return c
exemplar_img = tf.map_fn(_slice, (images, top, left, bottom, right), dtype=images.dtype)
exemplar_img.set_shape([batch_size, z_height, z_width, 3])
return exemplar_img
def get_crops(im, bbox, size_z, size_x, context_amount):
"""Obtain image sub-window, padding with avg channel if area goes outside of border
Adapted from https://github.com/bertinetto/siamese-fc/blob/master/ILSVRC15-curation/save_crops.m#L46
Args:
im: Image ndarray
bbox: Named tuple (x, y, width, height) x, y corresponds to the crops center
size_z: Target + context size
size_x: The resultant crop size
context_amount: The amount of context
Returns:
image crop: Image ndarray
"""
cy, cx, h, w = bbox.y, bbox.x, bbox.height, bbox.width
wc_z = w + context_amount * (w + h)
hc_z = h + context_amount * (w + h)
s_z = np.sqrt(wc_z * hc_z)
scale_z = size_z / s_z
d_search = (size_x - size_z) / 2
pad = d_search / scale_z
s_x = s_z + 2 * pad
scale_x = size_x / s_x
image_crop_x, _, _, _, _ = get_subwindow_avg(im, [cy, cx],
[size_x, size_x],
[np.round(s_x), np.round(s_x)])
return image_crop_x, scale_x
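# Worked example of the context arithmetic above (hypothetical numbers): for a 100x100 target
# with context_amount=0.5, wc_z = hc_z = 100 + 0.5 * 200 = 200, so s_z = 200. With size_z=127
# and size_x=255, scale_z = 127/200 = 0.635, d_search = 64, pad = 64/0.635 ~= 100.8, and the
# final crop side is s_x ~= 401.6 pixels before resizing to size_x.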
def get_subwindow_avg(im, pos, model_sz, original_sz):
# avg_chans = np.mean(im, axis=(0, 1)) # This version is 3x slower
avg_chans = [np.mean(im[:, :, 0]), np.mean(im[:, :, 1]), np.mean(im[:, :, 2])]
if not original_sz:
original_sz = model_sz
sz = original_sz
im_sz = im.shape
# make sure the size is not too small
assert im_sz[0] > 2 and im_sz[1] > 2
c = [get_center(s) for s in sz]
# check out-of-bounds coordinates, and set them to avg_chans
    context_xmin = int(np.round(pos[1] - c[1]))
    context_xmax = int(context_xmin + sz[1] - 1)
    context_ymin = int(np.round(pos[0] - c[0]))
    context_ymax = int(context_ymin + sz[0] - 1)
    left_pad = int(np.maximum(0, -context_xmin))
    top_pad = int(np.maximum(0, -context_ymin))
    right_pad = int(np.maximum(0, context_xmax - im_sz[1] + 1))
    bottom_pad = int(np.maximum(0, context_ymax - im_sz[0] + 1))
context_xmin = context_xmin + left_pad
context_xmax = context_xmax + left_pad
context_ymin = context_ymin + top_pad
context_ymax = context_ymax + top_pad
if top_pad > 0 or bottom_pad > 0 or left_pad > 0 or right_pad > 0:
R = np.pad(im[:, :, 0], ((top_pad, bottom_pad), (left_pad, right_pad)),
'constant', constant_values=(avg_chans[0]))
G = np.pad(im[:, :, 1], ((top_pad, bottom_pad), (left_pad, right_pad)),
'constant', constant_values=(avg_chans[1]))
B = np.pad(im[:, :, 2], ((top_pad, bottom_pad), (left_pad, right_pad)),
'constant', constant_values=(avg_chans[2]))
im = np.stack((R, G, B), axis=2)
im_patch_original = im[context_ymin:context_ymax + 1,
context_xmin:context_xmax + 1, :]
if not (model_sz[0] == original_sz[0] and model_sz[1] == original_sz[1]):
im_patch = cv2.resize(im_patch_original, tuple(model_sz))
else:
im_patch = im_patch_original
return im_patch, left_pad, top_pad, right_pad, bottom_pad
def normalize_01(inputs):
# inputs: [B,H,W,C], tf.float32
mins = tf.reduce_min(inputs, axis=[1,2,3], keep_dims=True)
maxs = tf.reduce_max(inputs, axis=[1,2,3], keep_dims=True)
outputs = (inputs - mins) / (maxs-mins+1e-6)
return outputs
def spatial_softmax(logits):
shape = tf.shape(logits)
flatten = tf.layers.flatten(logits)
softmax = tf.nn.softmax(flatten)
softmax = tf.reshape(softmax, shape)
return softmax
def detect_hard_peak_position(inputs):
# inputs: [B,H,W,1] filter responses
# This function is non-differentiable
# Return: peak positions ([B,2] x,y coordinates, tf.int32)
batch_size, height, width, channels = tf.unstack(tf.shape(inputs))
inputs_flat = tf.layers.flatten(inputs) # [B, H*W]
argmax_inds = tf.argmax(inputs_flat, axis=1, output_type=tf.int32)
argmax_x = tf.cast(tf.mod(argmax_inds, width), tf.int32)
argmax_y = tf.cast(tf.divide(argmax_inds, width), tf.int32)
peak_pos = tf.concat([argmax_x[:,None], argmax_y[:,None]], axis=1) # [B,2]
return peak_pos
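# Sanity check of the index arithmetic above: a flattened argmax index decomposes as
# index = y * width + x, e.g. with width=5, flat index 7 gives x = 7 % 5 = 2 and y = 7 // 5 = 1.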
|
181781
|
import pickle
import torch
import trimesh
from .util import set_module, create_quads
@set_module('deep_surfel')
def export_mesh(file, deep_surfel_scene, only_filled=False, features_as_colors=False, surfel_transformation=None):
inside_inds = ~torch.isinf(deep_surfel_scene.locations).any(-1)
if only_filled:
inside_inds = inside_inds & (deep_surfel_scene.counts > 0)
surfel_loc = deep_surfel_scene.locations[inside_inds]
if features_as_colors:
s_colors = deep_surfel_scene.features[inside_inds][..., :3]
if surfel_transformation is not None:
s_colors = surfel_transformation(s_colors)
else:
s_colors = torch.ones_like(surfel_loc) * 127
surfel_orientations = deep_surfel_scene.orientations[inside_inds]
s_vertices, s_faces = create_quads(surfel_loc, surfel_orientations, deep_surfel_scene.surfel_size)
mesh = trimesh.Trimesh(
vertices=s_vertices.cpu().numpy(),
faces=s_faces.cpu().numpy(),
vertex_normals=s_vertices.repeat_interleave(4, dim=0).cpu().numpy(),
vertex_colors=s_colors.repeat_interleave(4, dim=0).cpu().numpy()
)
mesh.export(file)
@set_module('deep_surfel')
def save(file, scene):
if not file.endswith('.dsurf'):
file = f'{file}.dsurf'
with open(file, 'wb') as f:
pickle.dump(scene, f, protocol=pickle.HIGHEST_PROTOCOL)
@set_module('deep_surfel')
def load(file):
if not file.endswith('.dsurf'):
file = f'{file}.dsurf'
with open(file, 'rb') as f:
scene = pickle.load(f)
return scene
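# A minimal round-trip sketch (assumes a DeepSurfel scene object; names are illustrative):
# scene = ...                    # some deep_surfel scene
# save('my_scene', scene)        # writes my_scene.dsurf
# restored = load('my_scene')    # the '.dsurf' suffix is appended automatically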
@set_module('deep_surfel')
def save_sdf(dst_file, sdf, scale, translation):
if not dst_file.endswith('.sdf'):
dst_file = f'{dst_file}.sdf'
with open(dst_file, 'wb') as f:
pickle.dump((sdf, scale, translation), f, protocol=pickle.HIGHEST_PROTOCOL)
@set_module('deep_surfel')
def load_sdf(src_file):
if not src_file.endswith('.sdf'):
src_file = f'{src_file}.sdf'
with open(src_file, 'rb') as f:
sdf, scale, translation = pickle.load(f)
return sdf, scale, translation
|
181823
|
def KadaneAlgo(alist, start, end):
#Returns (l, r, m) such that alist[l:r] is the maximum subarray in
#A[start:end] with sum m. Here A[start:end] means all A[x] for start <= x <
#end.
max_ending_at_i = max_seen_so_far = alist[start]
max_left_at_i = max_left_so_far = start
# max_right_at_i is always i + 1
max_right_so_far = start + 1
for i in range(start + 1, end):
if max_ending_at_i > 0:
max_ending_at_i += alist[i]
else:
max_ending_at_i = alist[i]
max_left_at_i = i
if max_ending_at_i > max_seen_so_far:
max_seen_so_far = max_ending_at_i
max_left_so_far = max_left_at_i
max_right_so_far = i + 1
return max_left_so_far, max_right_so_far, max_seen_so_far
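# Illustrative run on a classic example (values chosen for demonstration):
# >>> KadaneAlgo([-2, 1, -3, 4, -1, 2, 1, -5, 4], 0, 9)
# (3, 7, 6)   # alist[3:7] == [4, -1, 2, 1], which sums to 6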
alist = input('Enter the elements: ')
alist = alist.split()
alist = [int(x) for x in alist]
start, end, maximum = KadaneAlgo(alist, 0, len(alist))
print('The maximum subarray starts at index {}, ends at index {}'
' and has sum {}.'.format(start, end - 1, maximum))
|
181840
|
import mimetypes
from typing import List
from requests import RequestException
from dothttp import DotHttpException, Config, HttpDef, Allhttp, BaseModelProcessor, UndefinedHttpToExtend
from dothttp.request_base import CurlCompiler, RequestCompiler, HttpFileFormatter, dothttp_model
from . import logger
from ..models import Command, Result, BaseHandler
class ContextConfig(Config):
contexts: List[str] = None
class RunHttpFileHandler(BaseHandler):
name = "/file/execute"
def get_method(self):
return RunHttpFileHandler.name
def run(self, command: Command) -> Result:
config = self.get_config(command)
try:
if config.curl:
req = self.get_curl_comp(config)
result = req.get_curl_output()
result = Result(id=command.id, result={
"body": result,
"headers": {
"Content-Type": mimetypes.types_map['.sh'],
}
})
else:
comp = self.get_request_comp(config)
result = self.get_request_result(command, comp)
except DotHttpException as exc:
logger.error(f'dothttp exception happened {exc}', exc_info=True)
result = Result(id=command.id,
result={
"error_message": exc.message, "error": True})
except RequestException as exc:
logger.error(f'exception from requests {exc}', exc_info=True)
result = Result(id=command.id,
result={
"error_message": str(exc), "error": True})
except Exception as exc:
logger.error(f'unknown error happened {exc}', exc_info=True)
result = Result(id=command.id,
result={
"error_message": str(exc), "error": True})
return result
def get_curl_comp(self, config):
return CurlCompiler(config)
def get_config(self, command):
params = command.params
filename = params.get("file")
envs = params.get("env", [])
target = params.get("target", '1')
nocookie = params.get("nocookie", False)
curl = params.get("curl", False)
properties = [f"{i}={j}" for i, j in params.get('properties', {}).items()]
content = params.get("content", None)
contexts = params.get("contexts")
if contexts is None:
contexts = []
if content:
try:
content = "\n".join(content.splitlines())
            except Exception:
content = None
config = ContextConfig(file=filename, env=envs, properties=properties, curl=curl, property_file=None,
debug=True,
no_cookie=nocookie, format=False, info=False, target=target, content=content)
config.contexts = contexts
return config
def get_request_result(self, command, comp: RequestCompiler):
resp = comp.get_response()
if output := comp.httpdef.output:
# body = f"Output stored in {output}"
try:
comp.write_to_output(resp)
except Exception as e:
output = f"Not!. unhandled error happened : {e}"
logger.warning("unable to write because", exc_info=True)
script_result = comp.execute_script(resp).as_json()
body = resp.text
response_data = {
"response": {
"headers":
{key: value for key, value in resp.headers.items()},
"body": body, # for binary out, it will fail, check for alternatives
"status": resp.status_code,
"method": resp.request.method,
"output_file": output or "",
"url": resp.url},
"script_result": script_result,
}
# will be used for response
data = {}
data.update(response_data['response']) # deprecated
data.update(response_data)
try:
data.update({"http": self.get_http_from_req(comp.httpdef)})
except Exception as e:
logger.error("ran into error regenerating http def from parsed object")
data.update({"http": f"ran into error \n Exception: `{e}` message:{e.args}"})
result = Result(id=command.id,
result=data)
return result
def get_request_comp(self, config):
return RequestCompiler(config)
@staticmethod
def get_http_from_req(request: HttpDef):
http_def = Allhttp([request.get_http_from_req()])
return HttpFileFormatter.format(http_def)
CONTEXT_SEP = """
# include contexts from context, to resolve properties
"""
class ContentBase(BaseModelProcessor):
def __init__(self, config: ContextConfig):
super().__init__(config)
self.args = config
def load_content(self):
        # joining contexts to content is not the correct approach,
        # as an error in any one context could bring down the main use case
self.original_content = self.content = self.args.content
def load_model(self):
# reqcomp will try to resolve properties right after model is generated
super(ContentBase, self).load_model()
        ##
        ## context has variables defined;
        ## for resolving purposes, include them in the content
self.content = self.content + CONTEXT_SEP + CONTEXT_SEP.join(
self.args.contexts)
def select_target(self):
try:
# first try to resolve target from current context
super().select_target()
except UndefinedHttpToExtend as ex:
            # if the target couldn't be resolved from the current context, try the extra contexts
for context in self.args.contexts:
try:
# if model is generated, try to figure out target
model: Allhttp = dothttp_model.model_from_str(context)
                    # by including their targets into the model
self.model.allhttps = self.model.allhttps + model.allhttps
                    self.content += "\n\n" + context
return super(ContentBase, self).select_target()
except Exception as e:
                    # contexts may not always have correct syntax;
                    # in such scenarios, don't complain, try the next context
                    logger.info("ignoring exception, context is not looking good")
raise ex
class ContentRequestCompiler(ContentBase, RequestCompiler):
pass
class ContentCurlCompiler(ContentBase, CurlCompiler):
pass
class ContentExecuteHandler(RunHttpFileHandler):
name = "/content/execute"
def get_config(self, command):
config = super().get_config(command)
# config.file = command.params.get('content')
return config
def get_method(self):
return ContentExecuteHandler.name
def get_request_comp(self, config):
return ContentRequestCompiler(config)
def get_curl_comp(self, config):
return ContentCurlCompiler(config)
class FormatHttpFileHandler(BaseHandler):
method = "/file/format"
def get_method(self):
return FormatHttpFileHandler.method
def run(self, command: Command) -> Result:
result = Result(id=command.id, result=command.params)
return result
class GetNameReferencesHandler(BaseHandler):
name = "/file/names"
def get_method(self):
return GetNameReferencesHandler.name
def run(self, command: Command) -> Result:
filename = command.params.get("file")
try:
result = self.execute(command, filename)
except DotHttpException as ex:
result = Result(id=command.id,
result={
"error_message": ex.message, "error": True})
except Exception as e:
result = Result(id=command.id,
result={
"error_message": str(e), "error": True})
return result
def execute(self, command: Command, filename):
with open(filename) as f:
http_data = f.read()
all_names, all_urls = self.parse_n_get(http_data)
result = Result(id=command.id, result={"names": all_names, "urls": all_urls})
return result
def parse_n_get(self, http_data):
model = dothttp_model.model_from_str(http_data)
all_names = []
all_urls = []
for index, http in enumerate(model.allhttps):
            if http.namewrap:
                name = http.namewrap.name
                start = http.namewrap._tx_position
                end = http._tx_position_end
else:
start = http.urlwrap._tx_position
end = http._tx_position_end
name = str(index + 1)
name = {
'name': name,
'method': http.urlwrap.method,
'start': start,
'end': end
}
url = {
'url': http.urlwrap.url,
'method': http.urlwrap.method or 'GET',
'start': http.urlwrap._tx_position,
'end': http.urlwrap._tx_position_end,
}
all_names.append(name)
all_urls.append(url)
return all_names, all_urls
class ContentNameReferencesHandler(GetNameReferencesHandler):
name = "/content/names"
def get_method(self):
return ContentNameReferencesHandler.name
def execute(self, command, filename):
http_data = command.params.get("content", "")
all_names, all_urls = self.parse_n_get(http_data)
result = Result(id=command.id, result={"names": all_names, "urls": all_urls})
return result
|
181860
|
from jsonmodels import models, fields, errors, validators
class Email(models.Base):
"""EmailModel - model class for email formats"""
Message_ID = fields.StringField(required=True)
Email_Folder = fields.StringField()
Original_Filename = fields.StringField()
Date = fields.StringField()
From = fields.StringField()
To = fields.StringField()
Cc = fields.StringField()
Bcc = fields.StringField()
Subject = fields.StringField()
Content_Type = fields.StringField()
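# A minimal usage sketch (field values are made up for illustration):
# email = Email(Message_ID='msg-001', Subject='Quarterly report')
# email.validate()  # raises errors.ValidationError if the required Message_ID were missing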
|
181865
|
class Solution:
def findMedianSortedArrays(self, nums1, nums2) -> float:
x = len(nums1)
y = len(nums2)
if y < x:
# Making sure nums1 is the smaller length array
return self.findMedianSortedArrays(nums2, nums1)
maxV = float('inf')
minV = float('-inf')
start, end, median = 0, x, 0
# We know
# partitionx + partitiony = (x+y+1)//2
while start <= end:
# px -> partitionx and py -> partitiony
px = start + (end - start) // 2
py = (x + y + 1) // 2 - px
# leftx, rightx -> edge elements on nums1
# lefty, righty -> edge elements on nums2
leftx, rightx, lefty, righty = 0, 0, 0, 0
leftx = minV if px == 0 else nums1[px - 1]
rightx = maxV if px == x else nums1[px]
lefty = minV if py == 0 else nums2[py - 1]
righty = maxV if py == y else nums2[py]
if leftx <= righty and lefty <= rightx:
# We found the spot for median
if (x + y) % 2 == 0:
median = (max(leftx, lefty) + min(rightx, righty)) / 2
return median
else:
median = max(leftx, lefty)
return median
elif leftx > righty:
# We are too much in the right, move towards left
end = px - 1
else:
# We are too much in the left, move towards right
start = px + 1
return -1
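# Illustrative runs (inputs chosen for demonstration):
# >>> Solution().findMedianSortedArrays([1, 3], [2])
# 2
# >>> Solution().findMedianSortedArrays([1, 2], [3, 4])
# 2.5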
|
181871
|
from django.utils.translation import ugettext_lazy as _
from mayan.apps.permissions import PermissionNamespace
namespace = PermissionNamespace(label=_('Common'), name='common')
permission_object_copy = namespace.add_permission(
label=_('Copy object'), name='object_copy'
)
|
181886
|
import asyncio
import random
import time
from fennel import App
app = App(name="example", results_ttl=120, interface="sync")
@app.task
def square(n: int) -> int:
return n**2
@app.task
def sync_sleep(seconds: int) -> None:
time.sleep(seconds)
@app.task
async def async_sleep(seconds: int) -> None:
await asyncio.sleep(seconds)
@app.task
def sometimes_fail(*args, **kwargs) -> None:
if random.random() < 0.1:
raise ValueError("sometimes_fail")
@app.task(retries=1)
def always_fail(*args, **kwargs) -> None:
raise ValueError("always_fail")
|
181936
|
from typing import List
from pydantic import parse_obj_as
from sqlalchemy import select
from app.domain.access_levels import dto
from app.domain.access_levels.interfaces.persistence import IAccessLevelReader
from app.domain.access_levels.models.access_level import AccessLevel
from app.domain.user.exceptions.user import UserNotExists
from app.domain.user.models.user import TelegramUser
from app.infrastructure.database.exception_mapper import exception_mapper
from app.infrastructure.database.repositories.repo import SQLAlchemyRepo
class AccessLevelReader(SQLAlchemyRepo, IAccessLevelReader):
@exception_mapper
async def all_access_levels(self) -> List[dto.AccessLevel]:
query = select(AccessLevel)
result = await self.session.execute(query)
access_levels = result.scalars().all()
return parse_obj_as(List[dto.AccessLevel], access_levels)
@exception_mapper
async def user_access_levels(self, user_id: int) -> List[dto.AccessLevel]:
user = await self.session.get(TelegramUser, user_id)
if not user:
raise UserNotExists
return parse_obj_as(List[dto.AccessLevel], user.access_levels)
|
181990
|
from WatchDbg.common import EventHandler
NEXT_ID = 0
def get_next_id():
global NEXT_ID
ret = NEXT_ID
NEXT_ID += 1
return ret
class WatchListItem():
nextid = 0
def __init__(self, address, wtype):
self.address = address
self.type = wtype
self.name = ""
self._id = get_next_id()
@property
def id(self):
return self._id
class WatchList:
def __init__(self):
self._watches = []
self.on_change = EventHandler()
def __iter__(self):
return iter(self._watches)
def __len__(self):
return len(self._watches)
def add(self, address, name, type):
item = WatchListItem(address, type)
item.name = name
self._watches.append(item)
self.on_change.notify()
return item.id
def clear(self):
self._watches = []
self.on_change.notify()
    def exists(self, address):
        i = self._indexByAddress(address)
        return i is not None
    def delete(self, address):
        i = self._indexByAddress(address)
        if i is not None:
            item = self._watches.pop(i)
            self.on_change.notify()
            return item
        return None
    def get(self, address):
        idx = self._indexByAddress(address)
        if idx is not None:
            return self._watches[idx]
        return None
    def _indexByAddress(self, address):
        for idx, watch in enumerate(self._watches):
            if watch.address == address:
                return idx
        return None
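# A minimal usage sketch (address/type values are made up for illustration):
# wl = WatchList()
# wid = wl.add(0x1000, "counter", "int32")   # notifies on_change subscribers
# wl.exists(0x1000)                          # True
# wl.delete(0x1000)                          # returns the removed WatchListItem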
|
182009
|
a = [int(x) for x in input().split()]
a.sort() #this command sorts the list in ascending order
if a[-2]==a[-1]:
print(a[-3]+a[1])
else:
print(a[-2] + a[1])
|
182014
|
from collections import defaultdict, Counter
import numpy as np
import tensorflow as tf
from capreolus import ConfigOption, Dependency, constants, get_logger
from capreolus.utils.common import padlist
from capreolus.utils.exceptions import MissingDocError
from . import Extractor
from .common import load_pretrained_embeddings
logger = get_logger(__name__)
@Extractor.register
class EmbedText(Extractor):
module_name = "embedtext"
requires_random_seed = True
dependencies = [
Dependency(key="benchmark", module="benchmark", name=None),
Dependency(
key="index", module="index", name="anserini", default_config_overrides={"indexstops": True, "stemmer": "none"}
),
Dependency(key="tokenizer", module="tokenizer", name="anserini"),
]
config_spec = [
ConfigOption("embeddings", "glove6b", "embeddings to use: fasttext, glove6b, glove6b.50d, or w2vnews"),
ConfigOption("calcidf", True),
ConfigOption("maxqlen", 4, "maximum query length (shorter will be truncated)"),
ConfigOption("maxdoclen", 800, "maximum doc length (shorter will be truncated)"),
]
pad_tok = "<pad>"
def build(self):
self._embedding_cache = constants["CACHE_BASE_PATH"] / "embeddings"
self._numpy_cache = self._embedding_cache / (self.config["embeddings"] + ".npy")
self._vocab_cache = self._embedding_cache / (self.config["embeddings"] + ".vocab.txt")
self.embeddings, self.stoi, self.itos = None, None, None
self._next_oov_index = -1
def _load_pretrained_embeddings(self):
if self.embeddings is not None:
return
self.embeddings, self.itos, self.stoi = load_pretrained_embeddings(self.config["embeddings"])
def get_tf_feature_description(self):
feature_description = {
"query": tf.io.FixedLenFeature([self.config["maxqlen"]], tf.int64),
"query_idf": tf.io.FixedLenFeature([self.config["maxqlen"]], tf.float32),
"posdoc": tf.io.FixedLenFeature([self.config["maxdoclen"]], tf.int64),
"negdoc": tf.io.FixedLenFeature([self.config["maxdoclen"]], tf.int64),
"label": tf.io.FixedLenFeature([2], tf.float32, default_value=tf.convert_to_tensor([1, 0], dtype=tf.float32)),
}
return feature_description
def create_tf_feature(self, sample):
"""
sample - output from self.id2vec()
return - a tensorflow feature
"""
query, query_idf, posdoc, negdoc = (sample["query"], sample["query_idf"], sample["posdoc"], sample["negdoc"])
feature = {
"query": tf.train.Feature(int64_list=tf.train.Int64List(value=query)),
"query_idf": tf.train.Feature(float_list=tf.train.FloatList(value=query_idf)),
"posdoc": tf.train.Feature(int64_list=tf.train.Int64List(value=posdoc)),
"negdoc": tf.train.Feature(int64_list=tf.train.Int64List(value=negdoc)),
}
return feature
def parse_tf_example(self, example_proto):
feature_description = self.get_tf_feature_description()
parsed_example = tf.io.parse_example(example_proto, feature_description)
posdoc = parsed_example["posdoc"]
negdoc = parsed_example["negdoc"]
query = parsed_example["query"]
query_idf = parsed_example["query_idf"]
label = parsed_example["label"]
return (posdoc, negdoc, query, query_idf), label
def _get_idf(self, toks):
return [self.idf.get(tok, 0) for tok in toks]
def preprocess(self, qids, docids, topics):
self._load_pretrained_embeddings()
self.index.create_index()
self.qid2toks = {}
self.docid2toks = {}
self.idf = defaultdict(lambda: 0)
for qid in qids:
if qid not in self.qid2toks:
self.qid2toks[qid] = self.tokenizer.tokenize(topics[qid])
self._add_oov_to_vocab(self.qid2toks[qid])
query_lengths = Counter(len(toks) for toks in self.qid2toks.values())
if any(qlen > self.config["maxqlen"] for qlen in query_lengths):
logger.warning(
"Some queries are longer than maxqlen; longest: %s; counter: %s",
max(query_lengths),
sorted(query_lengths.items()),
)
def get_doc_tokens(self, docid):
if docid not in self.docid2toks:
self.docid2toks[docid] = self.tokenizer.tokenize(self.index.get_doc(docid))
self._add_oov_to_vocab(self.docid2toks[docid])
return self.docid2toks[docid]
def _add_oov_to_vocab(self, tokens):
for tok in tokens:
if tok not in self.stoi:
self.stoi[tok] = self._next_oov_index
self.itos[self._next_oov_index] = tok
self._next_oov_index -= 1
def _tok2vec(self, toks):
return [self.stoi[tok] for tok in toks]
def id2vec(self, qid, posid, negid=None, **kwargs):
query = self.qid2toks[qid]
# TODO find a way to calculate qlen/doclen stats earlier, so we can log them and check sanity of our values
qlen, doclen = self.config["maxqlen"], self.config["maxdoclen"]
posdoc = self.get_doc_tokens(posid)
if not posdoc:
raise MissingDocError(qid, posid)
idfs = padlist(self._get_idf(query), qlen, 0)
query = self._tok2vec(padlist(query, qlen, self.pad_tok))
posdoc = self._tok2vec(padlist(posdoc, doclen, self.pad_tok))
# TODO determine whether pin_memory is happening. may not be because we don't place the strings in a np or torch object
        data = {
            "qid": qid,
            "posdocid": posid,
            "idfs": np.array(idfs, dtype=np.float32),
            "query": np.array(query, dtype=np.int64),
            "posdoc": np.array(posdoc, dtype=np.int64),
            "query_idf": np.array(idfs, dtype=np.float32),
            "negdocid": "",
            "negdoc": np.zeros(self.config["maxdoclen"], dtype=np.int64),
        }
if negid:
negdoc = self.get_doc_tokens(negid)
if not negdoc:
raise MissingDocError(qid, negid)
negdoc = self._tok2vec(padlist(negdoc, doclen, self.pad_tok))
data["negdocid"] = negid
data["negdoc"] = np.array(negdoc, dtype=np.long)
return data
|
182027
|
import contextlib
import os
import site
import sys
from pathlib import Path
import pytest
from editables import EditableException, EditableProject
def build_project(target, structure):
target.mkdir(exist_ok=True, parents=True)
for name, content in structure.items():
path = target / name
if isinstance(content, str):
path.write_text(content, encoding="utf-8")
else:
build_project(path, content)
# to test in-process:
# Put stuff in somedir
# sys.path.append("somedir")
# site.addsitedir("somedir")
# Check stuff is visible
@contextlib.contextmanager
def import_state(extra_site=None):
    orig_modules = set(sys.modules.keys())
    orig_path = list(sys.path)
    orig_meta_path = list(sys.meta_path)
    orig_path_hooks = list(sys.path_hooks)
    orig_path_importer_cache = sys.path_importer_cache
    if extra_site:
        # only convert when a site dir is given; os.fspath(None) would raise TypeError
        extra_site = os.fspath(extra_site)
        sys.path.append(extra_site)
        site.addsitedir(extra_site)
try:
yield
finally:
remove = [key for key in sys.modules if key not in orig_modules]
for key in remove:
del sys.modules[key]
sys.path[:] = orig_path
sys.meta_path[:] = orig_meta_path
sys.path_hooks[:] = orig_path_hooks
sys.path_importer_cache.clear()
sys.path_importer_cache.update(orig_path_importer_cache)
@pytest.fixture
def project(tmp_path):
project = tmp_path / "project"
structure = {
"foo": {
"__init__.py": "print('foo')",
"bar": {"__init__.py": "print('foo.bar')"},
"baz": {"__init__.py": "print('foo.baz')"},
}
}
build_project(project, structure)
yield project
def test_nonexistent_module(project):
p = EditableProject("myproject", project)
with pytest.raises(EditableException):
p.map("foo", "xxx")
def test_not_toplevel(project):
p = EditableProject("myproject", project)
with pytest.raises(EditableException):
p.map("foo.bar", "foo/bar")
def test_dependencies(project):
p = EditableProject("myproject", project)
assert len(p.dependencies()) == 0
p.map("foo", "foo")
assert len(p.dependencies()) == 1
def test_simple_pth(tmp_path, project):
p = EditableProject("myproject", project)
p.add_to_path(".")
structure = {name: content for name, content in p.files()}
site_packages = tmp_path / "site-packages"
build_project(site_packages, structure)
with import_state(extra_site=site_packages):
import foo
assert Path(foo.__file__) == project / "foo/__init__.py"
def test_make_project(project, tmp_path):
p = EditableProject("myproject", project)
p.map("foo", "foo")
structure = {name: content for name, content in p.files()}
site_packages = tmp_path / "site-packages"
build_project(site_packages, structure)
with import_state(extra_site=site_packages):
import foo
assert Path(foo.__file__) == project / "foo/__init__.py"
|
182052
|
import os
import cv2
import json
import pandas as pd
import numpy as np
from glob import glob
from tqdm import tqdm
from IPython import embed
import base64
from labelme import utils
image_path = "./images/"
csv_file = "./train_labels.csv"
annotations = pd.read_csv(csv_file,header=None).values
total_csv_annotations = {}
for annotation in annotations:
key = annotation[0].split(os.sep)[-1]
value = np.array([annotation[1:]])
if key in total_csv_annotations.keys():
total_csv_annotations[key] = np.concatenate((total_csv_annotations[key],value),axis=0)
else:
total_csv_annotations[key] = value
for key,value in total_csv_annotations.items():
height,width = cv2.imread(image_path+key).shape[:2]
labelme_format = {
"version":"3.6.16",
"flags":{},
"lineColor":[0,255,0,128],
"fillColor":[255,0,0,128],
"imagePath":key,
"imageHeight":height,
"imageWidth":width
}
with open(image_path+key,"rb") as f:
imageData = f.read()
imageData = base64.b64encode(imageData).decode('utf-8')
#img = utils.img_b64_to_arr(imageData)
labelme_format["imageData"] = imageData
shapes = []
for shape in value:
label = shape[-1]
s = {"label":label,"line_color":None,"fill_color":None,"shape_type":"rectangle"}
points = [
[shape[0],shape[1]],
[shape[2],shape[3]]
]
s["points"] = points
shapes.append(s)
labelme_format["shapes"] = shapes
    json_path = os.path.join(image_path, key.replace(".jpg", ".json"))
    with open(json_path, "w") as f:
        json.dump(labelme_format, f, ensure_ascii=False, indent=2)
|
182073
|
import torch
import torch.nn as nn
from diffuse.models.components.conv_glu import ConvGLU
class DownsampleBlock(nn.Module):
def __init__(self, hidden, main_op=ConvGLU):
"""
Halve the spatial dimensions and double the channels
:param hidden:
"""
super(DownsampleBlock, self).__init__()
self.down = nn.Conv2d(hidden, hidden * 2, kernel_size=2, stride=2)
self.conv = main_op(hidden * 2)
def forward(self, x, c=None):
down = self.down(x)
return self.conv(down, c)
class UpsampleBlock(nn.Module):
def __init__(self, hidden, main_op=ConvGLU):
super(UpsampleBlock, self).__init__()
self.up = nn.ConvTranspose2d(hidden, hidden // 2, kernel_size=2, stride=2)
self.conv1 = nn.Conv2d(hidden, hidden // 2, 1)
self.main = main_op(hidden // 2)
def forward(self, x1, x2, c=None):
x1 = self.up(x1)
feats = torch.cat((x1, x2), dim=1)
feats = self.conv1(feats)
return self.main(feats, c)
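# Shape sketch for the two blocks above (batch size and spatial sizes are illustrative,
# and ConvGLU is assumed to preserve spatial shape):
# DownsampleBlock(64): x [B, 64, 32, 32] -> down [B, 128, 16, 16] -> conv -> [B, 128, 16, 16]
# UpsampleBlock(64):   x1 [B, 64, 16, 16] -> up [B, 32, 32, 32]; cat with x2 [B, 32, 32, 32]
#                      gives [B, 64, 32, 32] -> conv1 -> [B, 32, 32, 32] -> main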
|
182110
|
from __future__ import division
from skimage import img_as_float, io
from skimage.filters import threshold_otsu
import numpy as np
def quantize(image, L=1, N=4):
"""Quantize an image.
Parameters
----------
image : array_like
Input image.
L : float
Maximum input value.
N : int
Number of quantization levels.
"""
T = np.linspace(0, L, N, endpoint=False)[1:]
return np.digitize(image.flat, T).reshape(image.shape)
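# Illustrative run (values chosen for demonstration): with L=1 and N=4 the thresholds are
# T = [0.25, 0.5, 0.75], so
# >>> quantize(np.array([0.1, 0.4, 0.6, 0.9]), L=1, N=4)
# array([0, 1, 2, 3])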
def dither(image, N=4, positions=None, weights=None):
"""Quantize an image, using dithering.
Parameters
----------
image : ndarray
Input image.
N : int
Number of quantization levels.
positions : list of (i, j) offsets
Position offset to which the quantization error is distributed.
By default, implement Sierra's "Filter Lite".
weights : list of ints
Weights for propagated error.
By default, implement Sierra's "Filter Lite".
References
----------
http://www.efg2.com/Lab/Library/ImageProcessing/DHALF.TXT
"""
image = img_as_float(image.copy())
if positions is None or weights is None:
positions = [(0, 1), (1, -1), (1, 0)]
weights = [2, 1, 1]
weights = weights / np.sum(weights)
T = np.linspace(0, 1, N, endpoint=False)[1:]
rows, cols = image.shape
out = np.zeros_like(image, dtype=float)
for i in range(rows):
for j in range(cols):
# Quantize
out[i, j], = np.digitize([image[i, j]], T)
# Propagate quantization noise
d = (image[i, j] - out[i, j] / (N - 1))
for (ii, jj), w in zip(positions, weights):
ii = i + ii
jj = j + jj
if ii < rows and jj < cols:
image[ii, jj] += d * w
return out
def floyd_steinberg(image, N):
offsets = [(0, 1), (1, -1), (1, 0), (1, 1)]
weights = [ 7,
3, 5, 1]
return dither(image, N, offsets, weights)
def stucki(image, N):
offsets = [(0, 1), (0, 2), (1, -2), (1, -1),
(1, 0), (1, 1), (1, 2),
(2, -2), (2, -1), (2, 0), (2, 1), (2, 2)]
weights = [ 8, 4,
2, 4, 8, 4, 2,
1, 2, 4, 2, 1]
return dither(image, N, offsets, weights)
# Image with 255 color levels
img = img_as_float(io.imread('data/david.png'))
# Quantize to N levels
N = 2
img_quant = quantize(img, N=N)
img_dither_random = img + np.abs(np.random.normal(size=img.shape,
scale=1./(3 * N)))
img_dither_random = quantize(img_dither_random, L=1, N=N)
img_dither_fs = floyd_steinberg(img, N=N)
img_dither_stucki = stucki(img, N=N)
import matplotlib.pyplot as plt
f, ax = plt.subplots(2, 3, subplot_kw={'xticks': [], 'yticks': []})
ax[0, 0].imshow(img, cmap=plt.cm.gray, interpolation='nearest')
ax[0, 1].imshow(img_quant, cmap=plt.cm.gray, interpolation='nearest')
ax[0, 2].imshow(img > threshold_otsu(img), cmap=plt.cm.gray, interpolation='nearest')
#ax[0, 2].set_visible(False)
ax[1, 0].imshow(img_dither_random, cmap=plt.cm.gray, interpolation='nearest')
ax[1, 1].imshow(img_dither_fs, cmap=plt.cm.gray, interpolation='nearest')
ax[1, 2].imshow(img_dither_stucki, cmap=plt.cm.gray, interpolation='nearest')
ax[0, 0].set_title('Input')
ax[0, 1].set_title('Quantization (N=%d)' % N)
ax[0, 2].set_title('Otsu threshold')
ax[1, 0].set_title('Dithering: Image + Noise')
ax[1, 1].set_title('Floyd-Steinberg')
ax[1, 2].set_title('Stucki')
plt.show()
|
182116
|
import numpy as np
import dolfin as df
import matplotlib.pyplot as plt
def column_chart(results, solvers, preconditioners, offset=None, ymax=10):
slowest = results.max(0)
fastest = results.min(0)
default = results[0]
no_prec = results[1]
fig = plt.figure(figsize=(8, 4))
ax = fig.add_subplot(111)
width = 0.2
ind = np.arange(len(solvers))
ax.axhline(results[0, 0], color=(0.3, 0.3, 0.3), ls=":", zorder=0)
rects_fastest = ax.bar(ind, fastest, width, color="green", label="fastest prec.")
ax.bar(width + ind, default, width, color=(0.3, 0.3, 0.3), label="default prec.")
ax.bar(2 * width + ind, no_prec, width, color=(0.8, 0.8, 0.8), label="no prec.")
ax.bar(3 * width + ind, slowest, width, color="red", label="slowest prec.")
# annotate fastest runs with name of preconditioner
fastest_ind = results.argmin(0)
for i, rect in enumerate(rects_fastest):
height = rect.get_height()
offset = offset if offset is not None else 1.05 * height
ax.text(rect.get_x() + rect.get_width() / 2.0, height + offset,
preconditioners[fastest_ind[i]],
ha='center', va='bottom', rotation=90)
ax.set_xlabel("method")
ax.set_ylabel("time (ms)")
ax.set_ylim((0, ymax))
ax.legend()
ax.set_xticks(ind + 2 * width)
xtickNames = plt.setp(ax, xticklabels=solvers)
plt.setp(xtickNames, rotation=0)
return fig
if __name__ == "__main__":
ms = 1e3
solvers = [s[0] for s in df.krylov_solver_methods()]
preconditioners = [p[0] for p in df.krylov_solver_preconditioners()]
ymax = [[6, 6], [6, 10]]
for i, system in enumerate(["ball", "film"]):
for j, potential in enumerate(["1", "2"]):
results = ms * np.ma.load(system + "_" + potential + ".pickle")
with open(system + "_" + potential + ".txt", "w") as f:
f.write("& {} \\\\\n".format(" & ".join(solvers)))
f.write("\\hline\n")
for pi, p in enumerate(preconditioners):
numbers = ["{:.3}".format(r) for r in results[pi]]
f.write("{} & {} \\\\\n".format(p, " & ".join(numbers)))
fig = column_chart(results, solvers, preconditioners, offset=0.2, ymax=ymax[i][j])
plt.savefig(system + "_" + potential + ".png")
plt.close()
|
182122
|
import unittest
from testutils import getZserioApi
class SetTopLevelPackageTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = getZserioApi(__file__, "set_top_level_package.zs", extraArgs=[
"-setTopLevelPackage", "company.appl"
], topLevelPackage="company")
def testEmptyConstructor(self):
simpleStructure = self.api.appl.SimpleStructure()
simpleStructure.simple_choice = self.api.appl.SimpleChoice(simpleStructure.value)
simpleStructure.simple_template = self.api.appl.SimpleTemplate_Enumeration(True, 0)
self.assertEqual(32, simpleStructure.bitsizeof())
|
182144
|
import sagemaker
import shortuuid
from yaspin import yaspin
from yaspin.spinners import Spinners
import time
import datetime
import tarfile
import re
import boto3
import glob
import os
import shutil
import pkg_resources
import subprocess
from sagemaker.multidatamodel import MultiDataModel
from sagemaker.model import Model
import ast
import csv
import json
import pickle
# from .model_handler import * # this works but make sure you have all the packages mentioned in the model_handler import; i.e., make this generic
class Deploy(object):
def __init__(
self,
model,
script,
framework=None,
requirements=None,
name=None,
autoscale=False,
autoscaletarget=1000,
wait=True,
bucket=None,
prefix='',
session=None,
image=None,
dockerfilepath=None,
instance_type=None,
instance_count=1,
budget=100,
ei=None,
monitor=False,
):
self.frameworklist = ["tensorflow", "pytorch", "mxnet", "sklearn"]
self.frameworkinstalls = {
"tensorflow": ["tensorflow"],
"pytorch": ["torch"],
"mxnet": ["mxnet", "gluon"],
"sklearn": ["sklearn"],
}
self.wait = wait
self.budget = budget
self.instance_count = instance_count
self.instance_type = instance_type
self.image = image
self.dockerfilepath = dockerfilepath
self.ei = ei
self.prefix = prefix
self.monitor = monitor
self.deployed = False
self.autoscaletarget = autoscaletarget
# ------ load cost types dict ---------
costpath = pkg_resources.resource_filename("ezsmdeploy", "data/cost.csv")
self.costdict = {}
with open(costpath, mode="r") as infile:
reader = csv.reader(infile)
for rows in reader:
# cost for each instance
self.costdict[rows[0]] = float(rows[1])
# ------- basic instance type check --------
if (
self.instance_type == None
        ):  # since we will not select a GPU instance in automatic instance selection
self.gpu = False
self.multimodel = True
else:
if (
(self.instance_type in list(self.costdict.keys()))
or "local" in self.instance_type
) and self.instance_type != None:
if "local" in self.instance_type:
if (
self.instance_type == "local_gpu"
): # useful if you intend to do local testing. No change vs. local
self.gpu = True
self.multimodel = False
                        self.instance_type = "local"
else:
self.gpu = False
self.multimodel = True
else:
if self.instance_type.split(".")[1][0] in [
"p",
"g",
]: # if gpu instance
self.gpu = True
self.multimodel = False
else:
self.gpu = False
self.multimodel = (
True # multi model works well with local endpoints ....
)
else: # throw wrong instance error
raise ValueError(
"Please choose an instance type in",
list(self.costdict.keys()),
", or choose local for local testing. Don't pass in any instance or pass in None if you want to automatically choose an instance type.",
)
# ------- Model checks --------
if type(model) == str:
self.model = [model]
self.multimodel = False
elif type(model) == list:
self.model = model
self.multimodel = True
elif model == None: # assume you are loading from a hub or from a dockerfile
with open("tmpmodel", "w") as fp:
pass
self.model = ["tmpmodel"]
self.multimodel = False
else:
raise ValueError(
"model must be a single serialized file (like 'model.pkl') or a \
list of files ([model.pkl, model2.pkl]). If you are downloading a model in the script \
or packaging with the container, pass in model = None"
)
# ------- Script checks ---------
        if not script.endswith(".py"):
raise ValueError(
"please provide a valid python script with .py extension. "
+ script
+ " is invalid"
)
else:
self.script = script
filename = self.script
with open(filename) as file:
node = ast.parse(file.read())
functions = [n.name for n in node.body if isinstance(n, ast.FunctionDef)]
if ("load_model" not in functions) and ("predict" not in functions):
raise ValueError(
"please implement a load_model(modelpath) that \
returns a loaded model, and predict(inputdata) function that returns a prediction in your"
+ script
)
# ------- session checks --------
if session == None:
self.session = sagemaker.session.Session()
else:
self.session = session # leave session as none since users may want to do local testing.
# ------- name checks --------
if name == None:
self.name = shortuuid.uuid().lower()
elif type(name) == str:
self.name = name
if name.islower() == False:
raise ValueError(
"please enter a name with lower case letters; we will be using this name for s3 bucket prefixes, model names, ECR repository names etc. that have various restrictions"
)
else:
raise ValueError(
"enter string for a name or don't pass in a name; type of name passed in is "
+ str(type(name))
)
# ------- bucket checks --------
if bucket == None:
self.bucket = self.session.default_bucket()
else:
self.bucket = bucket
self.requirements = requirements
# ------- framework --------
if requirements == None and framework in self.frameworklist:
self.framework = framework
self.requirements = self.frameworkinstalls[framework]
elif requirements == None and framework not in self.frameworklist:
raise ValueError(
"If requirements=None, please provide a value for framework; \
choice should be one of 'tensorflow','pytorch','mxnet','sklearn'"
)
self.autoscale = autoscale
self.wait = wait
self.deploy()
def process_instance_type(self):
# ------ instance checks --------
self.instancedict = {}
if self.instance_type == None:
# ------ load instance types dict ---------
instancetypepath = pkg_resources.resource_filename(
"ezsmdeploy", "data/instancetypes.csv"
)
with open(instancetypepath, mode="r") as infile:
reader = csv.reader(infile)
for rows in reader: # memGb / vcpu, cost, cost/memGb-per-vcpu
self.instancedict[rows[0]] = (
float(rows[2]) / (2 * float(rows[1])),
self.costdict[rows[0]],
self.costdict[rows[0]] / float(rows[2]) / (2 * float(rows[1])),
)
# ------ auto instance selection ---------
self.choose_instance_type()
else:
if (self.instance_type in list(self.costdict.keys())) or (
self.instance_type in ["local", "local_gpu"]
):
if self.instance_type not in ["local", "local_gpu"]:
self.costperhour = self.costdict[self.instance_type]
if self.ei != None:
eicosts = {
"ml.eia2.medium": 0.12,
"ml.eia2.large": 0.24,
"ml.eia2.xlarge": 0.34,
"ml.eia.medium": 0.13,
"ml.eia.large": 0.26,
"ml.eia.xlarge": 0.52,
}
self.costperhour = self.costperhour + eicosts[self.ei]
else:
self.costperhour = 0
else:
raise ValueError(
"Please choose an instance type in",
list(self.costdict.keys()),
", or choose local for local testing.",
)
def choose_instance_type(self):
# TO DO : add heuristic for auto selection of instance size
        if self.prefix == '':
            tmppath = "ezsmdeploy/model-" + self.name + "/"
        else:
            tmppath = self.prefix + "/ezsmdeploy/model-" + self.name + "/"
        size = self.get_size(self.bucket, tmppath)
self.instancetypespath = pkg_resources.resource_filename(
"ezsmdeploy", "data/instancetypes.csv"
)
        # Assume you need at least 4 workers; each model is deployed redundantly to every vcpu,
        # so we base this decision on memory available per vcpu. If a model is being downloaded
        # from a hub, one should ideally pass in an instance since we don't know the model size.
        # The list includes some extremely large CPU instances and all GPU instances. For instances
        # with the same memory per vcpu, ties are broken by min(cost / total vcpus). Also, 'd'
        # instances are preferred at the same cost since NVMe gives faster load times. If a budget
        # is supplied, we try to satisfy it.
choseninstance = None
mincost = 1000
for instance in list(self.instancedict.keys()):
# cost and memory per worker
memperworker = self.instancedict[instance][0]
cost = self.instancedict[instance][1]
costpermem = self.instancedict[instance][2]
#
if self.budget == 100:
# even though budget is unlimited, minimize cost
if memperworker > size and cost < mincost:
mincost = cost
choseninstance = instance
# print("instance ={}, size={}, memperworker={}, choseninstance = {}, mincost = {}".format(instance, size, memperworker, choseninstance,mincost))
else:
if memperworker > size and cost <= self.budget:
choseninstance = instance
break
if choseninstance == None and self.budget != 100:
raise ValueError(
"Could not find an instance that satisfies your budget of "
+ str(self.budget)
+ " per hour and can host your models with a total size of "
+ str(size)
+ " Gb. Please choose a higher budget per hour."
)
elif choseninstance == None and self.budget == 100:
raise ValueError(
"You may be using large models with a total size of "
+ str(size)
+ " Gb. Please choose a high memory GPU instance and launch without multiple models (if applicable)"
)
self.instance_type = choseninstance
self.costperhour = self.costdict[self.instance_type]
def add_model(self, s3path, relativepath):
self.sagemakermodel.add_model(s3path, relativepath)
def create_model(self):
if not self.multimodel:
self.sagemakermodel = Model(
name="model-" + self.name,
model_data=self.modelpath[0],
image_uri=self.image,
role=sagemaker.get_execution_role(),
# sagemaker_session=self.session,
predictor_cls=sagemaker.predictor.Predictor,
)
else:
self.sagemakermodel = MultiDataModel(
name="model-" + self.name,
model_data_prefix="/".join(self.modelpath[0].split("/")[:-1]) + "/",
image_uri=self.image,
role=sagemaker.get_execution_role(),
# sagemaker_session=self.session,
predictor_cls=sagemaker.predictor.Predictor,
)
for path in self.modelpath:
self.add_model(path, "serving/")
self.ei = False
def deploy_model(self):
if self.monitor:
from sagemaker.model_monitor import DataCaptureConfig
            if self.prefix == '':
tmps3uri = "s3://{}/ezsmdeploy/model-{}/datacapture".format(
self.bucket, self.name
)
else:
tmps3uri = "s3://{}/{}/ezsmdeploy/model-{}/datacapture".format(
self.bucket, self.prefix, self.name
)
data_capture_config = DataCaptureConfig(
enable_capture=True,
sampling_percentage=100,
destination_s3_uri=tmps3uri
)
else:
data_capture_config = None
self.predictor = self.sagemakermodel.deploy(
initial_instance_count=self.instance_count,
instance_type=self.instance_type,
accelerator_type=self.ei,
endpoint_name="ezsmdeploy-endpoint-" + self.name,
update_endpoint=False,
wait=self.wait,
data_capture_config=data_capture_config,
)
self.endpoint_name = "ezsmdeploy-endpoint-" + self.name
def get_size(self, bucket, path):
s3 = boto3.resource("s3")
my_bucket = s3.Bucket(bucket)
total_size = 0.0
for obj in my_bucket.objects.filter(Prefix=path):
total_size = total_size + obj.size
return total_size / ((1024.0) ** 3)
def upload_model(self):
i = 1
if self.prefix == '':
tmppath = "ezsmdeploy/model-"
else:
tmppath = self.prefix + "/ezsmdeploy/model-"
self.modelpath = []
for name in self.model:
self.modelpath.append(
self.session.upload_data(
path="model{}.tar.gz".format(i),
bucket=self.bucket,
key_prefix=tmppath + self.name,
)
)
i += 1
def tar_model(self):
i = 1
for name in self.model:
if "tar.gz" in name and 's3' in name:
# download and uncompress
self.session.download_data(
path="./downloads/{}".format(i),
bucket=name.split("/")[2],
key_prefix="/".join(name.split("/")[3:]),
)
with tarfile.open(
glob.glob("./downloads/{}/*.tar.gz".format(i))[0]
) as tar:
tar.extractall("./extractedmodel/{}/".format(i))
name = "extractedmodel/{}/".format(i)
elif 'tar.gz' in name and 's3' not in name:
self.makedir_safe("./downloads/{}/".format(i))
shutil.copy(name, "./downloads/{}/".format(i))
with tarfile.open(
glob.glob("./downloads/{}/*.tar.gz".format(i))[0]
) as tar:
tar.extractall("./extractedmodel/{}/".format(i))
name = "extractedmodel/{}/".format(i)
tar = tarfile.open("model{}.tar.gz".format(i), "w:gz")
if "/" in name:
tar.add(name, arcname=".")
else:
tar.add(name)
tar.close()
i += 1
def makedir_safe(self, directory):
        try:
            shutil.rmtree(directory)
        except OSError:
            pass
try:
if not os.path.exists(directory):
os.makedirs(directory)
except OSError as err:
if err.errno != 17:
print(err.errno)
raise
def handle_requirements(self):
# ------- requirements checks -------
self.makedir_safe("src")
if type(self.requirements) == str:
if os.path.exists(self.requirements):
# move file to src
shutil.copy(self.requirements, "src/requirements.txt")
else:
raise (self.requirements + " does not exist!")
elif type(self.requirements) == list:
f = open("src/requirements.txt", "w")
l1 = map(lambda x: x + "\n", self.requirements)
f.writelines(l1)
f.close()
else:
raise ValueError(
"pass in a path/to/requirements.txt or a list of requirements ['scikit-learn',...,...]"
)
def build_docker(self):
cmd = "chmod +x src/build-docker.sh & sudo ./src/build-docker.sh {}"
with open('src/dockeroutput.txt', 'w') as f:
#print("Start process")
p = subprocess.Popen(cmd.format(self.name), stdout=f, shell=True)
#print("process running in background")
acct = (
os.popen("aws sts get-caller-identity --query Account --output text")
.read()
.split("\n")[0]
)
region = os.popen("aws configure get region").read().split("\n")[0]
self.image = "{}.dkr.ecr.{}.amazonaws.com/ezsmdeploy-image-{}".format(
acct, region, self.name
)
while not os.path.exists("src/done.txt"):
time.sleep(3)
self.dockeroutput = "Please see src/dockeroutput.txt"
def autoscale_endpoint(self):
response = boto3.client("sagemaker").describe_endpoint(
EndpointName=self.endpoint_name
)
in1 = response["EndpointName"]
in2 = response["ProductionVariants"][0]["VariantName"]
client = boto3.client("application-autoscaling")
response = client.register_scalable_target(
ServiceNamespace="sagemaker",
ResourceId="endpoint/{}/variant/{}".format(in1, in2),
ScalableDimension="sagemaker:variant:DesiredInstanceCount",
MinCapacity=1,
MaxCapacity=10,
)
response = client.put_scaling_policy(
PolicyName="scaling-policy-{}".format(self.name),
ServiceNamespace="sagemaker",
ResourceId="endpoint/{}/variant/{}".format(in1, in2),
ScalableDimension="sagemaker:variant:DesiredInstanceCount",
PolicyType="TargetTrackingScaling",
TargetTrackingScalingPolicyConfiguration={
"TargetValue": self.autoscaletarget,
"PredefinedMetricSpecification": {
"PredefinedMetricType": "SageMakerVariantInvocationsPerInstance",
},
"ScaleOutCooldown": 600,
"ScaleInCooldown": 600,
"DisableScaleIn": False,
},
)
self.scalingresponse = response
def test(
self, input_data, target_model=None, usercount=10, hatchrate=5, timeoutsecs=5
):
if self.multimodel and target_model == None:
raise ValueError(
"since this is a multimodel endpoint, please pass in a target model that you wish to test"
)
if self.deployed:
path1 = pkg_resources.resource_filename("ezsmdeploy", "data/smlocust.py")
shutil.copy(path1, "src/smlocust.py")
start = datetime.datetime.now()
with yaspin(Spinners.point, color="green", text="") as sp:
sp.hide()
sp.write(
str(datetime.datetime.now() - start)
+ " | Starting test with Locust"
)
sp.show()
if self.multimodel:
with open("src/locustdata.txt", "w") as outfile:
json.dump(
{
"endpoint_name": self.endpoint_name,
"target_model": "model1.tar.gz",
},
outfile,
)
else:
with open("src/locustdata.txt", "w") as outfile:
json.dump(
{"endpoint_name": self.endpoint_name, "target_model": ""},
outfile,
)
pickle.dump(input_data, open("src/testdata.p", "wb"))
cmd = "locust -f src/smlocust.py --no-web -c {} -r {} --run-time {}s --csv=src/locuststats; touch src/testdone.txt".format(
usercount, hatchrate, timeoutsecs
)
p = os.system(cmd)
while not os.path.exists("src/testdone.txt"):
time.sleep(3)
os.remove("src/testdone.txt")
sp.hide()
sp.write(
str(datetime.datetime.now() - start)
+ " | Done! Please see the src folder for locuststats* files"
)
sp.show()
else:
raise ValueError("Deploy model to endpoint first before testing")
def deploy(self):
# print(self.__dict__)
start = datetime.datetime.now()
with yaspin(Spinners.point, color="green", text="") as sp:
            try:
                shutil.rmtree("src/")
            except OSError:
                pass
# compress model files
self.tar_model()
sp.hide()
if self.model == ["tmpmodel"]:
sp.write(
str(datetime.datetime.now() - start)
+ " | No model was passed. Assuming you are downloading a model in the script or in the container"
)
else:
sp.write(
str(datetime.datetime.now() - start) + " | compressed model(s)"
)
sp.show()
# upload model file(s)
self.upload_model()
# Process instance type
self.process_instance_type()
sp.hide()
sp.write(
str(datetime.datetime.now() - start)
+ " | uploaded model tarball(s) ; check returned modelpath"
)
sp.show()
# if self.gpu and self.image == None:
# raise ValueError("The default container image used here is based on the multi-model server which does not support GPU instances. Please provide a docker image (ECR repository link) to proceed with model build and deployment.")
# else:
# handle requirements
if self.requirements == None:
rtext = (
str(datetime.datetime.now() - start)
+ " | no additional requirements found"
)
self.makedir_safe("src")
else:
self.handle_requirements()
rtext = (
str(datetime.datetime.now() - start) + " | added requirements file"
)
sp.hide()
sp.write(rtext)
sp.show()
# move script to src
shutil.copy(self.script, "src/transformscript.py")
sp.hide()
sp.write(str(datetime.datetime.now() - start) + " | added source file")
sp.show()
# ------ Dockerfile checks -------
if self.dockerfilepath == None and self.multimodel == True:
self.dockerfilepath = pkg_resources.resource_filename(
"ezsmdeploy", "data/Dockerfile"
)
elif self.dockerfilepath == None and self.multimodel == False:
self.dockerfilepath = pkg_resources.resource_filename(
"ezsmdeploy", "data/Dockerfile_flask"
)
# move Dockerfile to src
shutil.copy(self.dockerfilepath, "src/Dockerfile")
sp.hide()
sp.write(str(datetime.datetime.now() - start) + " | added Dockerfile")
sp.show()
# move model_handler and build scripts to src
if self.multimodel:
# Use multi model
path1 = pkg_resources.resource_filename(
"ezsmdeploy", "data/model_handler.py"
)
path2 = pkg_resources.resource_filename(
"ezsmdeploy", "data/dockerd-entrypoint.py"
)
path3 = pkg_resources.resource_filename(
"ezsmdeploy", "data/build-docker.sh"
)
shutil.copy(path1, "src/model_handler.py")
shutil.copy(path2, "src/dockerd-entrypoint.py")
shutil.copy(path3, "src/build-docker.sh")
self.ei = None
else:
# Use Flask stack
path1 = pkg_resources.resource_filename("ezsmdeploy", "data/nginx.conf")
path2 = pkg_resources.resource_filename(
"ezsmdeploy", "data/predictor.py"
)
path3 = pkg_resources.resource_filename("ezsmdeploy", "data/serve")
path4 = pkg_resources.resource_filename("ezsmdeploy", "data/train")
path5 = pkg_resources.resource_filename("ezsmdeploy", "data/wsgi.py")
path6 = pkg_resources.resource_filename(
"ezsmdeploy", "data/build-docker.sh"
)
shutil.copy(path1, "src/nginx.conf")
shutil.copy(path2, "src/predictor.py")
shutil.copy(path3, "src/serve")
shutil.copy(path4, "src/train")
shutil.copy(path5, "src/wsgi.py")
shutil.copy(path6, "src/build-docker.sh")
if self.gpu and self.ei != None:
self.ei = None
sp.hide()
sp.write(
str(datetime.datetime.now() - start)
+ " | Setting Elastic Inference \
to None since you selected a GPU instance"
)
sp.show()
sp.hide()
sp.write(
str(datetime.datetime.now() - start)
+ " | added model_handler and docker utils"
)
sp.show()
# build docker container
if self.image == None:
sp.write(
str(datetime.datetime.now() - start)
+ " | building docker container"
)
self.build_docker()
sp.hide()
sp.write(
str(datetime.datetime.now() - start) + " | built docker container"
)
sp.show()
# create sagemaker model
self.create_model()
sp.hide()
sp.write(
str(datetime.datetime.now() - start)
+ " | created model(s). Now deploying on "
+ self.instance_type
)
sp.show()
# deploy model
self.deploy_model()
sp.hide()
sp.write(str(datetime.datetime.now() - start) + " | deployed model")
sp.show()
if self.autoscale and self.instance_type not in ["local", "local_gpu"]:
self.autoscale_endpoint()
sp.hide()
sp.write(str(datetime.datetime.now() - start) + " | set up autoscaling")
sp.show()
elif self.autoscale and self.instance_type in ["local", "local_gpu"]:
sp.hide()
sp.write(
str(datetime.datetime.now() - start)
+ " | not setting up autoscaling; deploying locally"
)
sp.show()
if self.instance_type not in ["local", "local_gpu"]:
sp.hide()
sp.write(
str(datetime.datetime.now() - start)
+ " | estimated cost is $"
+ str(self.costperhour)
+ " per hour"
)
sp.show()
if self.monitor:
sp.hide()
sp.write(
str(datetime.datetime.now() - start)
+ " | model monitor data capture location is "
+ "s3://{}/ezsmdeploy/model-{}/datacapture".format(
self.bucket, self.name
)
)
sp.show()
# finalize
sp.green.ok(str(datetime.datetime.now() - start) + " | " "Done! ✔")
self.deployed = True
            # Cleanup; directories need shutil.rmtree, and missing files are ignored
            for f in ("src/done.txt", "tmpmodel"):
                try:
                    os.remove(f)
                except OSError:
                    pass
            for d in ("src", "downloads", "extractedmodel"):
                shutil.rmtree(d, ignore_errors=True)
return self.predictor
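# A minimal usage sketch (file names and framework are illustrative; deployment runs in
# __init__, and the script must define load_model() and predict() as validated above):
# ez = Deploy(model="model.pkl", script="modelscript.py", framework="sklearn")
# ez.predictor.predict(payload)         # invoke the deployed endpoint
# ez.test(input_data, usercount=10)     # optional load test via Locust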
|
182171
|
import sys
import unittest
from itertools import product
import numpy as np
import torch
from metal.label_model.class_balance import ClassBalanceModel
sys.path.append("../synthetic")
class ClassBalanceModelTest(unittest.TestCase):
def _set_seed(self, seed):
torch.manual_seed(seed)
np.random.seed(seed)
def _generate_class_balance(self, k):
"""Generate class balance"""
p_Y = np.random.random(k)
p_Y /= p_Y.sum()
return p_Y
def _generate_cond_probs(self, k, m, bias_diag=True, abstains=False):
"""Generate conditional probability tables for the m conditionally ind.
LFs, such that:
cpts[i, y1, y2] = P(\lambda_i = y1 | Y = y2)
Args:
k: (int) Number of classes
m: (int) Number of LFs
bias_diag: (bool) If True, adds a bias (proportional to (k-1)) to
the diagonal of the randomly generated conditional probability
tables, to enforce assumption that LFs are better than random
abstains: (bool) Incorporate abstains
Outputs:
C: (np.array) An (m, k, k) tensor, if abstains=False; or, if
abstains=True, (m, k+1, k)
"""
cpts = []
k_lf = k + 1 if abstains else k
for i in range(m):
a = np.random.random((k_lf, k))
if bias_diag:
if abstains:
a[1:, :] += (k - 1) * np.eye(k)
else:
a += (k - 1) * np.eye(k)
cpts.append(a @ np.diag(1 / a.sum(axis=0)))
return np.array(cpts)
def _generate_L(self, p_Y, C, n, abstains=False):
"""Generate a label matrix L, with entries in {0,1,...,k} if
abstains=True, else in {1,...,k}, given the true class balance, p_Y, and
a conditional probabilities table C of m cond. ind. LFs"""
k = len(p_Y)
m = C.shape[0]
# Generate true data labels for n data points
Y = np.random.choice(range(1, k + 1), n, p=p_Y)
# Generate label matrix L with entries in {0,1,...,k} if abstains=True,
# else in {1,...,k}
lf_0 = 0 if abstains else 1
L = np.zeros((n, m))
for i, y in enumerate(Y):
for j in range(m):
L[i, j] = np.random.choice(range(lf_0, k + 1), p=C[j, :, y - 1])
return L
def _test_model(self, model, p_Y, C, O=None, L=None, tol=1e-3, verbose=True):
model.train_model(O=O, L=L)
if verbose:
print(f"True class balance: {p_Y}")
print(f"Estimated class balance: {model.class_balance}")
self.assertLess(np.mean(np.abs(p_Y - model.class_balance)), tol)
self.assertLess(np.mean(np.abs(C - model.cond_probs)), tol)
def _test_class_balance_estimation(self, k, m, abstains=False, verbose=True):
model = ClassBalanceModel(k, abstains=abstains)
p_Y = self._generate_class_balance(k)
C = self._generate_cond_probs(k, m, bias_diag=True, abstains=abstains)
# Compute O; mask out diagonal entries
mask = model.get_mask(m)
O = np.einsum("aby,cdy,efy,y->acebdf", C, C, C, p_Y)
O = torch.from_numpy(O).float()
O[1 - mask] = 0
# Test recovery of the class balance
self._test_model(model, p_Y, C, O=O)
def _test_class_balance_estimation_noisy(
self, k, m, n, abstains=False, verbose=True
):
model = ClassBalanceModel(k, abstains=abstains)
p_Y = self._generate_class_balance(k)
C = self._generate_cond_probs(k, m, bias_diag=True, abstains=abstains)
# Generate label matrix L
L = self._generate_L(p_Y, C, n, abstains=abstains)
# Test recovery of the class balance
self._test_model(model, p_Y, C, L=L, tol=1e-2)
def test_class_balance_estimation_2(self):
self._set_seed(123)
self._test_class_balance_estimation(2, 25)
def test_class_balance_estimation_3(self):
self._set_seed(123)
self._test_class_balance_estimation(3, 25)
# Note: This should pass! However, commented out because too slow...
# def test_class_balance_estimation_5(self):
# self._set_seed(123)
# self._test_class_balance_estimation(5, 25)
def test_class_balance_estimation_2_abstains(self):
self._set_seed(123)
self._test_class_balance_estimation(2, 25, abstains=True)
def test_class_balance_estimation_2_noisy(self):
self._set_seed(123)
self._test_class_balance_estimation_noisy(2, 25, 10000, abstains=True)
if __name__ == "__main__":
unittest.main()
|
182186
|
from typing import Tuple, List
from tracardi.domain.event import Event
from tracardi.domain.payload.tracker_payload import TrackerPayload
from tracardi.domain.profile import Profile
from tracardi.domain.session import Session
from .debug_info import DebugInfo
from .flow import Flow
from .flow_history import FlowHistory
from ..utils.dag_error import DagGraphError
from ..utils.dag_processor import DagProcessor
from ..utils.flow_graph_converter import FlowGraphConverter
from tracardi.service.plugin.domain.console import Log
class WorkFlow:
def __init__(self, flow_history: FlowHistory, tracker_payload: TrackerPayload = None):
self.tracker_payload = tracker_payload
self.flow_history = flow_history
async def invoke(self, flow: Flow, event: Event, profile, session, ux: list, debug=False) -> Tuple[DebugInfo, List[Log], 'Event', 'Profile', 'Session']:
"""
Invokes workflow and returns DebugInfo and list of saved Logs.
"""
if event is None:
raise DagGraphError(
"Flow `{}` has no context event defined.".format(
flow.id))
if not flow.flowGraph:
raise DagGraphError("Flow {} is empty".format(flow.id))
if self.flow_history.is_acyclic(flow.id):
# Convert Editor graph to exec graph
converter = FlowGraphConverter(flow.flowGraph.dict())
dag_graph = converter.convert_to_dag_graph()
dag = DagProcessor(dag_graph)
try:
exec_dag = dag.make_execution_dag(debug=debug)
except DagGraphError as e:
raise DagGraphError("Flow `{}` returned the following error: `{}`".format(flow.id, str(e)))
# Init and run with event
await exec_dag.init(flow,
self.flow_history,
event,
session,
profile,
self.tracker_payload,
ux)
debug_info, log_list, profile, session = await exec_dag.run(
payload={},
flow=flow,
event=event,
profile=profile,
session=session
)
await exec_dag.close()
return debug_info, log_list, event, profile, session
raise RuntimeError("Workflow has circular reference.")
|
182216
|
from setuptools import setup
setup(
name='foucluster',
description='Clustering of songs using Fourier Transform',
long_description='Similarities among songs are computed using Fast Fourier '
'Transform. With this information, unsupervised machine learning'
' is applied.',
url='https://github.com/cperales/foucluster',
version='2.0',
author='<NAME>',
author_email='<EMAIL>',
keywords=['cluster', 'Fourier', 'music', 'song',
'machine learning', 'kmeans'],
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Operating System :: POSIX :: Linux',
'Topic :: Artistic Software',
'Topic :: Scientific/Engineering',
'Topic :: Multimedia :: Sound/Audio :: Analysis'
],
packages=['foucluster'
],
zip_safe=False,
    install_requires=['numpy',
                      'scipy',
                      'pandas',
                      'scikit-learn',
                      'seaborn'
                      ],
include_package_data=True,
setup_requires=[],
tests_require=['pytest',
'pytest-cov'],
extras_require={
'docs': [
'sphinx'
]
},
)
|
182247
|
from servicenow import Utils
ttl_cache = 0
class Base(object):
__table__ = None
def __init__(self, Connection):
self.Connection = Connection
@Utils.cached(ttl=ttl_cache)
def list_by_query(self, query, **kwargs):
return self.format(self.Connection._list_by_query(self.__table__, query, **kwargs))
@Utils.cached(ttl=ttl_cache)
def list(self, meta, **kwargs):
return self.format(self.Connection._list(self.__table__, meta, **kwargs))
@Utils.cached(ttl=ttl_cache)
def fetch_all(self, meta, **kwargs):
return self.format(self.Connection._get(self.__table__, meta, **kwargs))
@Utils.cached(ttl=ttl_cache)
def fetch_all_by_query(self, query, **kwargs):
return self.format(self.Connection._get_by_query(self.__table__, query, **kwargs))
@Utils.cached(ttl=ttl_cache)
def fetch_one(self, meta, **kwargs):
response = self.fetch_all(meta, **kwargs)
if 'records' in response:
if len(response['records']) > 0:
return response['records'][0]
else:
if len(response) > 0:
return response[0]
return {}
def create(self, data, **kwargs):
return self.format(self.Connection._post(
self.__table__, data, **kwargs))
def create_multiple(self, data, **kwargs):
return self.format(self.Connection._post_multiple(
self.__table__, data, **kwargs))
def update(self, where, data, **kwargs):
return self.format(self.Connection._update(
self.__table__, where, data, **kwargs))
def delete(self, id, **kwargs):
return self.format(self.Connection._delete(
self.__table__, id, **kwargs))
def delete_multiple(self, query, **kwargs):
return self.format(self.Connection._delete_multiple(
self.__table__, query, **kwargs))
def format(self, response):
return self.Connection._format(response)
    def last_updated(self, minutes, meta=None, **kwargs):
        # avoid a mutable default argument for meta
        meta = meta if meta is not None else {}
        metaon = {'sys_updated_on':
                  'Last {0} minutes@javascript:gs.minutesAgoStart({1})@'
                  'javascript:gs.minutesAgoEnd(0)'.format(minutes, minutes)}
        return self.format(self.Connection._get(
            self.__table__, meta, metaon=metaon, **kwargs))
class Call(Base):
__table__ = 'u_new_call.do'
class Change(Base):
__table__ = 'change_request.do'
class Group(Base):
__table__ = 'sys_user_group.do'
class Incident(Base):
__table__ = 'incident.do'
class Journal(Base):
__table__ = 'sys_journal_field.do'
class Problem(Base):
__table__ = 'problem.do'
class Request(Base):
__table__ = 'sc_request.do'
class Rtask(Base):
__table__ = 'sc_task.do'
class Server(Base):
__table__ = 'cmdb_ci_server.do'
class Ticket(Base):
__table__ = 'u_service_desk.do'
class Task(Base):
__table__ = 'task_ci_list.do'
class User(Base):
__table__ = 'sys_user.do'
class Customer(Base):
__table__ = 'core_company.do'
class Router(Base):
__table__ = 'cmdb_ci_ip_router.do'
class Switch(Base):
__table__ = 'cmdb_ci_ip_switch.do'
class Cluster(Base):
__table__ = 'cmdb_ci_cluster.do'
class Firewall(Base):
__table__ = 'u_firewall.do'
class VPN(Base):
__table__ = 'cmdb_ci_vpn.do'
class ConfigurationItem(Base):
__table__ = 'cmdb_ci.do'
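# Usage sketch (hypothetical; `conn` stands for any connection object from this
# package that exposes the _get/_post/... methods used above):
#
#   incidents = Incident(conn)
#   record = incidents.fetch_one({'number': 'INC0010001'})
#   incidents.update({'sys_id': record['sys_id']}, {'state': '6'})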
|
182267
|
from transformers import GPT2TokenizerFast
import tokenizers
import click
from pathlib import Path
@click.command()
@click.option("--text_path", type=str, help="Path to input .txt file.")
@click.option("--out_directory", type=str, help="Path to tokenizer output directory.")
def main(text_path, out_directory):
Path(out_directory).mkdir(exist_ok=True, parents=True)
english_tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
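    # Train the German tokenizer with the same vocabulary size as the English
    # GPT-2 tokenizer, presumably so embedding matrices keep a compatible shape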
german_tokenizer = tokenizers.ByteLevelBPETokenizer()
german_tokenizer.train(
[text_path],
vocab_size=english_tokenizer.vocab_size,
special_tokens=["<|endoftext|>"],
show_progress=True,
)
german_tokenizer.save_model(out_directory)
if __name__ == "__main__":
main()
|
182283
|
from botocore.exceptions import ClientError
from unittest.mock import MagicMock
from senza.aws import (get_security_group, resolve_security_groups,
get_account_id, get_account_alias, list_kms_keys,
encrypt, get_vpc_attribute, resolve_referenced_resource,
parse_time, get_required_capabilities, StackReference,
resolve_topic_arn, matches_any, get_tag)
def test_get_security_group(monkeypatch):
ec2 = MagicMock()
monkeypatch.setattr('boto3.resource', MagicMock(return_value=ec2))
    # an unknown group name should resolve to None
    assert get_security_group('myregion', 'group_inexistant') is None
def test_get_security_group_by_tag_name(monkeypatch):
def mock_filter(Filters):
if Filters[0]['Name'] == 'tag:Name' and Filters[0]['Values'] == ['my-sg']:
sg = MagicMock()
sg.id = 'sg-123'
return [sg]
ec2 = MagicMock()
ec2.security_groups.filter = mock_filter
monkeypatch.setattr('boto3.resource', MagicMock(return_value=ec2))
assert get_security_group('myregion', 'my-sg').id == 'sg-123'
def test_resolve_security_groups(monkeypatch):
ec2 = MagicMock()
ec2.security_groups.filter = MagicMock(side_effect=[
[MagicMock(name='app-test', id='sg-test')],
[MagicMock(name='physical-resource-id', id='sg-resource')]])
def my_resource(rtype, *args):
if rtype == 'ec2':
return ec2
else:
return MagicMock()
def my_client(rtype, *args):
if rtype == 'cloudformation':
cf = MagicMock()
resource = {
'StackResourceDetail': {'ResourceStatus': 'CREATE_COMPLETE',
'ResourceType': 'AWS::EC2::SecurityGroup',
'PhysicalResourceId': 'physical-resource-id'}}
cf.describe_stack_resource.return_value = resource
return cf
else:
return MagicMock()
monkeypatch.setattr('boto3.resource', my_resource)
monkeypatch.setattr('boto3.client', my_client)
security_groups = []
security_groups.append({'Fn::GetAtt': ['RefSecGroup', 'GroupId']})
security_groups.append('sg-007')
security_groups.append('app-test')
security_groups.append({'Stack': 'stack', 'LogicalId': 'id'})
result = []
result.append({'Fn::GetAtt': ['RefSecGroup', 'GroupId']})
result.append('sg-007')
result.append('sg-test')
result.append('sg-resource')
assert result == resolve_security_groups(security_groups, 'myregion')
def test_resolve_topic_arn_by_name(monkeypatch):
sns = MagicMock()
topic = MagicMock(arn='arn:123:mytopic')
sns.topics.all.return_value = [topic]
monkeypatch.setattr('boto3.resource', MagicMock(return_value=sns))
assert 'arn:123:mytopic' == resolve_topic_arn('myregion', 'mytopic')
def test_encrypt(monkeypatch):
boto3 = MagicMock()
boto3.encrypt.return_value = {'CiphertextBlob': b'Hello World'}
monkeypatch.setattr('boto3.client', MagicMock(return_value=boto3))
assert b'Hello World' == encrypt(region=None, key_id='key_a',
plaintext='Hello World', b64encode=False)
assert 'SGVsbG8gV29ybGQ=' == encrypt(region=None, key_id='key_a',
plaintext='Hello World',
b64encode=True)
def test_list_kms_keys(monkeypatch):
boto3 = MagicMock()
boto3.list_keys.return_value = {
'Keys': [{'KeyId': 'key_a'}, {'KeyId': 'key_b'}]}
boto3.list_aliases.return_value = {
'Aliases': [{'AliasName': 'a', 'TargetKeyId': 'key_a'}]}
boto3.describe_key.return_value = {
'KeyMetadata': {'Description': 'This is key a'}}
monkeypatch.setattr('boto3.client', MagicMock(return_value=boto3))
assert len(list_kms_keys(region=None, details=True)) == 2
def test_get_vpc_attribute(monkeypatch):
from collections import namedtuple
ec2 = MagicMock()
ec2.Vpc.return_value = namedtuple('a', 'VpcId')('dummy')
monkeypatch.setattr('boto3.resource', MagicMock(return_value=ec2))
assert get_vpc_attribute('r', 'a', 'VpcId') == 'dummy'
assert get_vpc_attribute('r', 'a', 'nonexistent') is None
def test_get_account_id(monkeypatch):
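    # get_account_id falls back through get_user -> list_roles -> list_users
    # -> list_saml_providers, taking the account id from the first ARN found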
boto3 = MagicMock()
boto3.get_user.return_value = {
'User': {'Arn': 'arn:aws:iam::0123456789:user/admin'}}
monkeypatch.setattr('boto3.client', MagicMock(return_value=boto3))
assert '0123456789' == get_account_id()
boto3 = MagicMock()
boto3.get_user.side_effect = ClientError({'Error': {}}, 'test')
boto3.list_roles.return_value = {
'Roles': [{'Arn': 'arn:aws:iam::0123456789:role/role-test'}]}
monkeypatch.setattr('boto3.client', MagicMock(return_value=boto3))
assert '0123456789' == get_account_id()
boto3 = MagicMock()
boto3.get_user.side_effect = ClientError({'Error': {}}, 'test')
boto3.list_roles.return_value = {'Roles': []}
boto3.list_users.return_value = {
'Users': [{'Arn': 'arn:aws:iam::0123456789:user/user-test'}]}
monkeypatch.setattr('boto3.client', MagicMock(return_value=boto3))
assert '0123456789' == get_account_id()
boto3 = MagicMock()
boto3.get_user.side_effect = ClientError({'Error': {}}, 'test')
boto3.list_roles.return_value = {'Roles': []}
boto3.list_users.return_value = {'Users': []}
boto3.list_saml_providers.return_value = {'SAMLProviderList': [
{'Arn': 'arn:aws:iam::0123456789:saml-provider/saml-test'}]}
monkeypatch.setattr('boto3.client', MagicMock(return_value=boto3))
assert '0123456789' == get_account_id()
boto3 = MagicMock()
boto3.get_user.side_effect = ClientError({'Error': {}}, 'test')
boto3.list_roles.return_value = {'Roles': []}
boto3.list_users.return_value = {'Users': []}
boto3.list_saml_providers.return_value = {'SAMLProviderList': []}
monkeypatch.setattr('boto3.client', MagicMock(return_value=boto3))
assert get_account_id() is None
def test_get_account_alias(monkeypatch):
boto3 = MagicMock()
boto3.list_account_aliases.return_value = {'AccountAliases': ['org-dummy']}
monkeypatch.setattr('boto3.client', MagicMock(return_value=boto3))
assert 'org-dummy' == get_account_alias()
def test_resolve_referenced_resource(monkeypatch):
boto3 = MagicMock()
resource = {'StackResourceDetail': {'ResourceStatus': 'CREATE_COMPLETE',
'ResourceType': 'AWS::EC2::Something',
'PhysicalResourceId': 'some-resource'}}
boto3.describe_stack_resource.return_value = resource
stack = {'StackStatus': 'CREATE_COMPLETE',
'Outputs': [{'OutputKey': 'DatabaseHost',
'OutputValue': 'localhost'}]}
boto3.describe_stacks.return_value = {'Stacks': [stack]}
monkeypatch.setattr('boto3.client', MagicMock(return_value=boto3))
ref = {'Fn::GetAtt': ['RefSecGroup', 'GroupId']}
assert ref == resolve_referenced_resource(ref, 'region')
ref = {'Stack': 'stack', 'LogicalId': 'id'}
assert 'some-resource' == resolve_referenced_resource(ref, 'region')
resource['StackResourceDetail']['ResourceStatus'] = 'CREATE_IN_PROGRESS'
    try:
        resolve_referenced_resource(ref, 'region')
    except ValueError:
        pass
    else:
        assert False, "expected ValueError for a resource still being created"
ref = {'Stack': 'stack', 'Output': 'DatabaseHost'}
assert 'localhost' == resolve_referenced_resource(ref, 'region')
stack['StackStatus'] = 'CREATE_IN_PROGRESS'
    try:
        resolve_referenced_resource(ref, 'region')
    except ValueError:
        pass
    else:
        assert False, "expected ValueError for a stack still being created"
stack['StackStatus'] = 'CREATE_COMPLETE'
del stack['Outputs']
assert resolve_referenced_resource(ref, 'region') is None
stack['Outputs'] = []
assert resolve_referenced_resource(ref, 'region') is None
def test_resolve_referenced_resource_with_update_complete_status(monkeypatch):
resource_id = 'some-resource'
boto3 = MagicMock()
boto3.describe_stack_resource.return_value = {
'StackResourceDetail': {
'ResourceStatus': 'UPDATE_COMPLETE',
'ResourceType': 'AWS::EC2::Something',
'PhysicalResourceId': resource_id
}
}
boto3.describe_stacks.return_value = {
'Stacks': [{'StackStatus': 'CREATE_COMPLETE'}]}
monkeypatch.setattr('boto3.client', MagicMock(return_value=boto3))
ref = {'Stack': 'stack', 'LogicalId': 'id'}
assert resource_id == resolve_referenced_resource(ref, 'any-region')
def test_resolve_referenced_output_when_stack_is_in_update_complete_status(
monkeypatch):
output_value = 'some-resource'
output_key = 'some-key'
boto3 = MagicMock()
boto3.describe_stacks.return_value = {
'Stacks': [
{'StackStatus': 'UPDATE_COMPLETE', 'Outputs': [
{'OutputKey': output_key, 'OutputValue': output_value}]}
]
}
monkeypatch.setattr('boto3.client', MagicMock(return_value=boto3))
ref = {'Stack': 'stack', 'Output': output_key}
assert output_value == resolve_referenced_resource(ref, 'any-region')
def test_parse_time():
assert parse_time('2015-04-14T19:09:01.000Z') == 1429038541.0
def test_required_capabilities():
assert get_required_capabilities({}) == []
data = {'Resources': {'MyRole': {'Type': 'AWS::IAM::Role',
'a': 'b'}}}
assert get_required_capabilities(data) == ['CAPABILITY_IAM']
def test_resolve_topic_arn():
assert resolve_topic_arn(None, 'arn:123') == 'arn:123'
def test_matches_any():
assert not matches_any(None, [StackReference(name='foobar', version=None)])
assert not matches_any('foobar-1', [])
assert matches_any('foobar-1',
[StackReference(name='foobar', version=None)])
assert matches_any('foobar-1',
[StackReference(name='foobar', version='1')])
assert not matches_any('foobar-1',
[StackReference(name='foobar', version='2')])
    assert matches_any('foobar-1',
                       [StackReference(name='foob.r', version=r'\d')])
def test_get_tag():
tags = [{'Key': 'aws:cloudformation:stack-id',
'Value': 'arn:aws:cf:eu-west-1:123:stack/test'},
{'Key': 'Name',
'Value': 'test-123'},
{'Key': 'StackVersion',
'Value': '123'}]
assert get_tag(tags, 'StackVersion') == '123'
assert get_tag(tags,
'aws:cloudformation:stack-id') == 'arn:aws:cf:eu-west-1:123:stack/test'
assert get_tag(tags, 'notfound') is None
|
182320
|
from django import template
import resumator
register = template.Library()
@register.simple_tag
def get_version():
return resumator.__version__
|
182328
|
import os
import shutil
import logging
import jsonpickle
from typing import List, Tuple, Optional, Dict
from medcat.cdb import CDB
from medcat.utils.decorators import check_positive
class Checkpoint(object):
jsonpickle.set_encoder_options('json', sort_keys=True, indent=2)
log = logging.getLogger(__package__)
@check_positive
def __init__(self, dir_path: str, *, steps: int = 1000, max_to_keep: int = 1, metadata: Optional[Dict] = None) -> None:
""" Initialise the checkpoint object
Args:
dir_path (str):
The path to the checkpoint directory.
steps (int):
The number of processed sentences/documents before a checkpoint is saved.
N.B.: A small number could result in error "no space left on device".
max_to_keep (int):
The maximum number of checkpoints to keep.
N.B.: A large number could result in error "no space left on device".
metadata (Optional[Dict]):
The extra training metadata need to be persisted.
"""
self._dir_path = os.path.abspath(dir_path)
self._steps = steps
self._max_to_keep = max_to_keep
self._file_paths: List[str] = []
self._count = 0
self._metadata = metadata
os.makedirs(self._dir_path, exist_ok=True)
@property
def steps(self) -> int:
return self._steps
@steps.setter # type: ignore
# [https://github.com/python/mypy/issues/1362]
@check_positive
def steps(self, value: int) -> None:
self._steps = value
@property
def max_to_keep(self) -> int:
return self._max_to_keep
@max_to_keep.setter # type: ignore
# [https://github.com/python/mypy/issues/1362]
@check_positive
def max_to_keep(self, value: int) -> None:
self._max_to_keep = value
@property
def count(self) -> int:
return self._count
@property
def metadata(self) -> Optional[Dict]:
return self._metadata
@classmethod
def restore(cls, dir_path: str) -> "Checkpoint":
if not os.path.isdir(dir_path):
raise Exception("Checkpoints not found. You need to train from scratch.")
ckpt_file_paths = cls._get_ckpt_file_paths(dir_path)
if not ckpt_file_paths:
raise Exception("Checkpoints not found. You need to train from scratch.")
metadata = cls._load_metadata(dir_path)
latest_ckpt = ckpt_file_paths[-1]
steps, count = cls._get_steps_and_count(latest_ckpt)
checkpoint = cls(dir_path, steps=steps, metadata=metadata)
checkpoint._file_paths = ckpt_file_paths
checkpoint._count = count
return checkpoint
def purge(self) -> None:
shutil.rmtree(self._dir_path)
os.makedirs(self._dir_path)
self._file_paths = []
self._count = 0
def save_metadata(self) -> None:
metadata_file_path = os.path.join(os.path.abspath(self._dir_path), "checkpoint-metadata.json")
if self._metadata is not None:
with open(metadata_file_path, "w") as f:
f.write(jsonpickle.encode(self._metadata))
self.log.info("Checkpoint metadata saved: %s", metadata_file_path)
else:
raise Exception("Checkpoints metadata not found.")
def save(self, cdb: CDB, count: int) -> None:
ckpt_file_path = os.path.join(os.path.abspath(self._dir_path), "checkpoint-%s-%s" % (self.steps, count))
while len(self._file_paths) >= self._max_to_keep:
to_remove = self._file_paths.pop(0)
os.remove(to_remove)
cdb.save(ckpt_file_path)
self.log.info("Checkpoint saved: %s", ckpt_file_path)
self._file_paths.append(ckpt_file_path)
self._count = count
async def save_async(self, cdb: CDB, count: int) -> None:
ckpt_file_path = os.path.join(os.path.abspath(self._dir_path), "checkpoint-%s-%s" % (self.steps, count))
await cdb.save_async(ckpt_file_path)
self.log.info("Checkpoint saved: %s", ckpt_file_path)
self._file_paths.append(ckpt_file_path)
self._file_paths.sort(key=lambda f: self._get_steps_and_count(f)[1])
self._count = count
while len(self._file_paths) > self._max_to_keep:
to_remove = self._file_paths.pop(0)
os.remove(to_remove)
def populate(self, cdb: CDB) -> None:
if not self._file_paths:
raise Exception("Checkpoints not found. You need to restore or train from scratch.")
cdb.load(self._file_paths[-1])
@staticmethod
def _get_ckpt_file_paths(dir_path: str) -> List[str]:
ckpt_file_paths = [os.path.abspath(os.path.join(dir_path, f)) for f in os.listdir(dir_path)]
        # keep only checkpoint files named "checkpoint-<steps>-<count>"; exclude
        # the metadata JSON, which would break the steps/count parsing below
        ckpt_file_paths = [f for f in ckpt_file_paths
                           if os.path.isfile(f) and "checkpoint-" in f and not f.endswith(".json")]
if ckpt_file_paths:
ckpt_file_paths.sort(key=lambda f: Checkpoint._get_steps_and_count(f)[1])
return ckpt_file_paths
@staticmethod
def _get_steps_and_count(file_path) -> Tuple[int, int]:
file_name_parts = os.path.basename(file_path).split('-')
return int(file_name_parts[1]), int(file_name_parts[2])
@staticmethod
def _load_metadata(dir_path: str) -> Optional[Dict]:
ckpt_file_paths = [os.path.abspath(os.path.join(dir_path, f)) for f in os.listdir(dir_path)]
        # must match the file name written by save_metadata() above
        metadata_file_paths = [f for f in ckpt_file_paths if os.path.isfile(f) and f.endswith("checkpoint-metadata.json")]
if metadata_file_paths:
with open(metadata_file_paths[0]) as f:
return jsonpickle.decode(f.read())
else:
return None
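# Usage sketch (hypothetical paths; assumes a medcat CDB instance `cdb`):
#
#   ckpt = Checkpoint("ckpts/run1", steps=1000, max_to_keep=2)
#   for count, doc in enumerate(corpus, start=1):
#       ...  # train on doc
#       if count % ckpt.steps == 0:
#           ckpt.save(cdb, count)
#   # later, resume training:
#   ckpt = Checkpoint.restore("ckpts/run1")
#   ckpt.populate(cdb)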
|
182345
|
from .mean_tau_x import mean_tau_x
from .simple import *
from .overturning import *
__all__ = [
"mean_tau_x",
"annual_scalar",
"drake_passage",
"sea_surface_temperature",
"sea_surface_salinity",
"psi_avg",
"zonal_mean",
"mixed_layer_depth",
]
|
182366
|
import numpy as np
from paddle import fluid
class MaskedMultiHeadAttention(object):
def __init__(self, model_dim, num_heads, dropout=0.0):
assert model_dim % num_heads == 0
self.model_dim = model_dim
self.num_heads = num_heads
self.per_head_dim = model_dim // num_heads
self.dropout = dropout
def _split(self, x):
"""Split state to query, key and value"""
_, seq_len, qkv_dim = x.shape
x = fluid.layers.reshape(
x, [-1, seq_len, 3, qkv_dim // 3], inplace=True)
return fluid.layers.unstack(x, axis=2)
def _split_heads(self, x):
"""Split single head for multi-heads"""
split_x = fluid.layers.reshape(
x, [0, 0, self.num_heads, self.per_head_dim], inplace=True)
split_x = fluid.layers.transpose(split_x, perm=[0, 2, 1, 3])
return split_x
def _merge_heads(self, x):
"""Merge multi-heads for single head"""
merged_x = fluid.layers.transpose(x, perm=[0, 2, 1, 3])
merged_dim = merged_x.shape[2] * merged_x.shape[3]
merged_x = fluid.layers.reshape(
merged_x, [0, 0, merged_dim], inplace=True)
return merged_x
def _apply_attn_score_mask(self, product, attn_mask):
product = product * attn_mask - 1e10 * (1 - attn_mask)
return product
def _scaled_dot_product_attention(self, query, key, value, attn_mask,
d_key, attn_bias=None, dropout=0.0):
# Q is in shape [bs, nheads, tgt_seq_len, per_head_dim]
# K and V are in shape [bs, nheads, src_seq_len, per_head_dim]
# attn_mask is in shape [bs, tgt_seq_len, src_seq_len]
product = fluid.layers.matmul(query, key, transpose_y=True,
alpha=d_key**-0.5)
if attn_bias is not None:
product += attn_bias
attn_mask = fluid.layers.expand(
fluid.layers.unsqueeze(attn_mask, axes=[1]),
[1, self.num_heads, 1, 1])
product = self._apply_attn_score_mask(product, attn_mask)
# weights is in shape [bs, nheads, tgt_seq_len, src_seq_len]
weights = fluid.layers.softmax(product)
weights = weights * attn_mask
if dropout > 0:
weights = fluid.layers.dropout(
weights, dropout, dropout_implementation='upscale_in_train')
# attn is in shape [bs, nheads, tgt_seq_len, per_head_dim]
attn = fluid.layers.matmul(weights, value)
return attn, weights
def __call__(self, x, attn_mask, past_kv=None, attn_bias=None):
# Parameters:
# qkv_fc: x_dim * model_dim * 3
# scaled_dot_product_attention: 0
# out_fc: model_dim * model_dim
# Computation (assume bs = 1):
# let N1 = tgt_seq_len * x_dim * model_dim * 3
# N2 = nheads * tgt_seq_len * src_seq_len
# N3 = nheads * tgt_seq_len * per_head_dim = tl * md
# ph = per_head_dim; md = model_dim
# sl = src_seq_len; tl = tgt_seq_len
# qkv_fc: N1 * (model_dim (mul_op) + model_dim (add_op))
# scaled_dot_product_attention:
# N2 * (2*ph(mul_op) + 2*ph(add_op) + ph(div_op) + ph(exp_op))
# + N3 * (sl (mul_op) + sl (add_op))
# out_fc: tl * md * (md (mul_op) + md (add_op))
#
# for sl = tl = 200, md = 512, ph = 64, nh = 8, around 10^12
assert len(x.shape) == 3
# TODO: add customize parameter initializer for QKV project.
c = fluid.layers.fc(x, self.model_dim * 3, num_flatten_dims=2,
bias_attr=False, name='qkv_fc')
# Q, K, V is in shape [bs, tgt_seq_len, model_dim]
# attn_mask is in shape [bs, tgt_seq_len, src_seq_len]
# past_kv is None or in [bs, 2, nheads, past_seq_len, per_head_dim]
# when past_kv is None, tgt_seq_len = src_seq_len
# otherwise, src_seq_len = past_seq_len + tgt_seq_len
query, key, value = self._split(c)
assert len(query.shape) == len(key.shape) == len(value.shape) == 3
query = self._split_heads(query)
key = self._split_heads(key)
value = self._split_heads(value)
present_kv = fluid.layers.stack([key, value], axis=1)
if past_kv is not None:
pk, pv = fluid.layers.unstack(past_kv, axis=1)
key = fluid.layers.concat([pk, key], axis=-2)
value = fluid.layers.concat([pv, value], axis=-2)
attn, attn_weights = self._scaled_dot_product_attention(
query, key, value, attn_mask, self.per_head_dim,
attn_bias=attn_bias, dropout=self.dropout)
attn = self._merge_heads(attn)
attn = fluid.layers.fc(attn, self.model_dim, num_flatten_dims=2,
bias_attr=False, name='out_fc')
return attn, present_kv, attn_weights
class TransformerDecoderBlock(object):
def __init__(self, model_dim, num_heads, ffn_dim,
dropout=0.0, normalize_before=False):
self.model_dim = model_dim
self.num_heads = num_heads
self.ffn_dim = ffn_dim
self.dropout = dropout
self.normalize_before = normalize_before
self.masked_self_attn = MaskedMultiHeadAttention(
model_dim, num_heads, dropout=dropout)
def _merge_mask(self, attn_mask, padding_mask):
pm = fluid.layers.unsqueeze(padding_mask, 2)
pm_t = fluid.layers.unsqueeze(padding_mask, 1)
new_pm = fluid.layers.matmul(pm, pm_t)
attn_mask = fluid.layers.elementwise_mul(attn_mask, new_pm, axis=0)
return attn_mask
def _with_frame_emb(self, x, frame_emb):
return x if frame_emb is None else x + frame_emb
def _pad_past_attn_mask(self, attn_mask, past_padding_mask):
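        # Cached past positions are always visible to current queries, so the
        # causal mask is left-padded with ones over the past_seq_len columns;
        # py_func is used because past_seq_len is only known at run time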
def _np_func(m, pm):
m, pm = np.array(m), np.array(pm)
pad = np.ones((m.shape[0], m.shape[1], pm.shape[1]),
dtype=m.dtype)
m_ = np.concatenate([pad, m], axis=2)
return m_
name = fluid.unique_name.generate(attn_mask.name)
new_mask = fluid.default_main_program().current_block().create_var(
name=name, dtype=attn_mask.dtype, shape=attn_mask.shape)
fluid.layers.py_func(
func=_np_func, x=[attn_mask, past_padding_mask], out=new_mask)
return new_mask
def _mlp(self, x, n_state, dropout=0.0):
# TODO: try other activation
nx = x.shape[-1]
h1 = fluid.layers.fc(x, n_state, num_flatten_dims=2, act='gelu')
if dropout > 0:
h1 = fluid.layers.dropout(
h1, dropout, dropout_implementation='upscale_in_train')
h2 = fluid.layers.fc(h1, nx, num_flatten_dims=2)
return h2
def _forward_post(self, x, frame_emb, attn_mask, padding_mask,
past_kv, past_padding_mask):
x = self._with_frame_emb(x, frame_emb)
if past_padding_mask is not None:
# [bs, src_seq_len]
padding_mask = fluid.layers.concat(
[past_padding_mask, padding_mask], axis=-1)
attn_mask = self._merge_mask(attn_mask, padding_mask)
attn, present_kv, attn_weights = self.masked_self_attn(
x, attn_mask, past_kv=past_kv)
if self.dropout > 0:
attn = fluid.layers.dropout(
attn, self.dropout, dropout_implementation='upscale_in_train')
x = x + attn
x = fluid.layers.layer_norm(
x, begin_norm_axis=2, epsilon=1e-6,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(1.)),
bias_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(0.)))
m = self._mlp(x, self.ffn_dim, dropout=self.dropout)
if self.dropout > 0:
m = fluid.layers.dropout(
m, self.dropout, dropout_implementation='upscale_in_train')
x = x + m
x = fluid.layers.layer_norm(
x, begin_norm_axis=2, epsilon=1e-6,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(1.)),
bias_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(0.)))
return x, present_kv, attn_weights
def _forward_pre(self, x, frame_emb, attn_mask, padding_mask,
past_kv, past_padding_mask):
x_ = fluid.layers.layer_norm(
x, begin_norm_axis=2, epsilon=1e-6,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(1.)),
bias_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(0.)))
x_ = self._with_frame_emb(x_, frame_emb)
if past_padding_mask is not None:
# [bs, src_seq_len]
padding_mask = fluid.layers.concat(
[past_padding_mask, padding_mask], axis=-1)
attn_mask = self._merge_mask(attn_mask, padding_mask)
attn, present_kv, attn_weights = self.masked_self_attn(
x_, attn_mask, past_kv=past_kv)
if self.dropout > 0:
attn = fluid.layers.dropout(
attn, self.dropout, dropout_implementation='upscale_in_train')
x = x + attn
x_ = fluid.layers.layer_norm(
x, begin_norm_axis=2, epsilon=1e-6,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(1.)),
bias_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(0.)))
m = self._mlp(x_, self.ffn_dim, dropout=self.dropout)
if self.dropout > 0:
m = fluid.layers.dropout(
m, self.dropout, dropout_implementation='upscale_in_train')
x = x + m
return x, present_kv, attn_weights
def __call__(self, x, frame_emb, attn_mask, padding_mask,
past_kv=None, past_padding_mask=None):
# x: [bs, tgt_seq_len, model_dim]
# frame_emb: [bs, tgt_seq_len, model_dim]
# attn_mask: [bs, tgt_seq_len, tgt_seq_len]
# padding_mask: [bs, tgt_seq_len]
# past_kv: [bs, 2, nheads, past_seq_len, per_head_dim]
# past_padding_mask: [bs, past_seq_len]
# src_seq_len = tgt_seq_len + past_seq_len
if past_padding_mask is not None:
# Now attn_mask: [bs, tgt_seq_len, src_seq_len]
attn_mask = self._pad_past_attn_mask(attn_mask, past_padding_mask)
if self.normalize_before:
return self._forward_pre(x, frame_emb, attn_mask,
padding_mask, past_kv,
past_padding_mask)
return self._forward_post(x, frame_emb, attn_mask, padding_mask,
past_kv, past_padding_mask)
class TransformerDecoder(object):
def __init__(self, num_blocks, model_dim, num_heads, ffn_dim,
tokens_per_frame=10, dropout=0.0, normalize_before=False):
self.num_blocks = num_blocks
self.tokens_per_frame = tokens_per_frame
self.blocks = []
for _ in range(num_blocks):
decoder_block = TransformerDecoderBlock(
model_dim, num_heads, ffn_dim,
dropout=dropout,
normalize_before=normalize_before)
self.blocks.append(decoder_block)
def _apply_padding_mask(self, x, padding_mask):
h = padding_mask * x - 1e10 * (1 - padding_mask)
return h
def _pooling_over_frames(self, x, padding_mask):
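        # Max-pool token states within each frame of tokens_per_frame tokens;
        # padded positions are pushed to -1e10 first so they never win the max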
_, tgt_seq_len, model_dim = x.shape
num_frames = tgt_seq_len // self.tokens_per_frame
padding_mask_ = fluid.layers.expand(
fluid.layers.unsqueeze(padding_mask, [2]),
[1, 1, model_dim])
h = self._apply_padding_mask(x, padding_mask_)
h = fluid.layers.reshape(
h, [-1, num_frames, self.tokens_per_frame, model_dim])
h = fluid.layers.reduce_max(h, dim=2)
return h
def __call__(self, x, frame_emb, attn_mask, padding_mask,
past_kv_arr=None, past_padding_mask=None):
assert x.shape[1] % self.tokens_per_frame == 0
if past_kv_arr is not None:
past_kv_arr = fluid.layers.unstack(past_kv_arr, axis=1)
else:
past_kv_arr = [None] * self.num_blocks
present_kv_lst, attn_weights_lst = [], []
for i, past_kv in enumerate(past_kv_arr):
x, present_kv, attn_weights = self.blocks[i](
x, frame_emb, attn_mask, padding_mask,
past_kv=past_kv, past_padding_mask=past_padding_mask)
present_kv_lst.append(present_kv)
attn_weights_lst.append(attn_weights)
present_kv_arr = fluid.layers.stack(present_kv_lst, axis=1)
attn_weights_arr = fluid.layers.stack(attn_weights_lst, axis=1)
frame_hid = self._pooling_over_frames(x, padding_mask)
return x, frame_hid, present_kv_arr, attn_weights_arr
|
182376
|
from .user_importer import UserImporter # noqa
from .node_importer import NodeImporter # noqa
from .subject_importer import SubjectImporter # noqa
from .period_importer import PeriodImporter # noqa
from .assignment_importer import AssignmentImporter # noqa
from .assignmentgroup_importer import AssignmentGroupImporter # noqa
from .pointrange_to_grade_importer import PointToGradeMapImporter # noqa
from .pointrange_to_grade_importer import PointRangeToGradeImporter # noqa
from .relateduser_importer import RelatedExaminerImporter # noqa
from .relateduser_importer import RelatedStudentImporter # noqa
from .candidate_examiner_importer import CandidateImporter # noqa
from .candidate_examiner_importer import ExaminerImporter # noqa
from .feedbackset_importer import FeedbackSetImporter # noqa
from .delivery_feedback_importers import DeliveryImporter # noqa
from .delivery_feedback_importers import StaticFeedbackImporter # noqa
from .delivery_feedback_importers import FileMetaImporter # noqa
from .delivery_feedback_importers import CommentFileContentImporter # noqa
from .qualifiesforexam_importer import StatusImporter # noqa
from .qualifiesforexam_importer import QualifiesForFinalExamImporter # noqa
|
182415
|
from color_utils import blendColors, hex2rgb
from utils import range_check, tup2str
class ThemeConfig:
def __init__(self, colors, wallpaper_data, light_blend_multiplier=1, dark_blend_multiplier=1):
colors_best = colors['bestColors']
tones_primary = colors['primaryTones']
tones_neutral = colors['neutralTones']
lbm = range_check(light_blend_multiplier,0,4)
dbm = range_check(dark_blend_multiplier,0,4)
tone = 30
        pywal_colors_dark = (blendColors(
            tones_neutral['8'], colors['dark']['Primary'], .01),)
for x in range(7):
str_x = str(x)
if str_x in colors_best.keys():
pywal_colors_dark += (blendColors(
colors['dark']['OnSurface'], colors_best[str_x], .55),)
else:
pywal_colors_dark += (blendColors(
colors['dark']['OnSurface'], tones_primary[str(tone)], .58),)
tone += 10
tone = 30
        pywal_colors_light = (blendColors(
            tones_neutral['98'], colors['light']['Primary'], .01),)
for x in range(7):
str_x = str(x)
if str_x in colors_best.keys():
pywal_colors_light += (blendColors(
colors['light']['OnSurface'], colors_best[str_x], .70),)
else:
pywal_colors_light += (blendColors(
colors['light']['OnSurface'], tones_primary[str(tone)], .8),)
tone += 10
# Base text states taken from Breeze Color Scheme
base_text_states = {
"Link": "#2980b9",
"Visited": "#9b59b6",
"Negative": "#da4453",
"Neutral": "#f67400",
"Positive": "#27ae60"
}
# Blend some extra colors by factor left(0.0) to right(1.0)
extras = {
"LightSurface1": blendColors(colors['light']['Background'], colors['light']['Primary'], .08*lbm),
"DarkSurface1": blendColors(colors['dark']['Background'], colors['dark']['Primary'], .05*dbm),
"LightSurface2": blendColors(colors['light']['Background'], colors['light']['Primary'], .11*lbm),
"DarkSurface2": blendColors(colors['dark']['Background'], colors['dark']['Primary'], .08*dbm),
"LightSurface3": blendColors(colors['light']['Background'], colors['light']['Primary'], .14*lbm),
"DarkSurface3": blendColors(colors['dark']['Background'], colors['dark']['Primary'], .11*dbm),
"LightSurface": blendColors(colors['light']['Surface'], colors['light']['Primary'], 0.05*lbm),
"DarkSurface": blendColors(colors['dark']['Surface'], colors['dark']['Primary'], 0.02*dbm),
"LinkOnPrimaryLight": blendColors(colors['light']['OnPrimary'], base_text_states['Link'], .5),
"LinkVisitedOnPrimaryLight": blendColors(colors['light']['OnPrimary'], base_text_states['Visited'], .8),
"NegativeOnPrimaryLight": blendColors(colors['light']['OnPrimary'], base_text_states['Negative'], .8),
"PositiveOnPrimaryLight": blendColors(colors['light']['OnPrimary'], base_text_states['Positive'], .8),
"NeutralOnPrimaryLight": blendColors(colors['light']['OnPrimary'], base_text_states['Neutral'], .8),
"LinkOnPrimaryDark": blendColors(colors['dark']['OnPrimary'], base_text_states['Link'], .5),
"LinkVisitedOnPrimaryDark": blendColors(colors['dark']['OnPrimary'], base_text_states['Visited'], .8),
"NegativeOnPrimaryDark": blendColors(colors['dark']['OnPrimary'], base_text_states['Negative'], .8),
"PositiveOnPrimaryDark": blendColors(colors['dark']['OnPrimary'], base_text_states['Positive'], .8),
"NeutralOnPrimaryDark": blendColors(colors['dark']['OnPrimary'], base_text_states['Neutral'], .8),
"LightSelectionAlt": blendColors(colors['light']['Surface'], colors['light']['Secondary'], .02*lbm),
"DarkSelectionAlt": blendColors(colors['dark']['Background'], colors['dark']['Secondary'], .3*dbm),
"LightSelectionAltActive": blendColors(colors['light']['Background'], colors['light']['Secondary'], .5),
"DarkSelectionAltActive": blendColors(colors['dark']['Background'], colors['dark']['Secondary'], .5),
}
self._light_scheme = f"""[ColorEffects:Disabled]
Color={extras['LightSurface1']}
ColorAmount=0.55
ColorEffect=3
ContrastAmount=0.65
ContrastEffect=0
IntensityAmount=0.1
IntensityEffect=0
[ColorEffects:Inactive]
ChangeSelectionColor=false
Color={colors['light']['SurfaceVariant']}
ColorAmount=1
ColorEffect=0
ContrastAmount=1
ContrastEffect=0
Enable=false
IntensityAmount=10
IntensityEffect=10
[Colors:Button]
BackgroundAlternate={colors['light']['SurfaceVariant']}
BackgroundNormal={extras['LightSelectionAlt']}
DecorationFocus={colors['light']['Primary']}
DecorationHover={colors['light']['Primary']}
ForegroundActive={colors['light']['OnSurface']}
ForegroundInactive={colors['light']['Outline']}
ForegroundLink={base_text_states['Link']}
ForegroundNegative={colors['light']['Error']}
ForegroundNeutral={base_text_states['Neutral']}
ForegroundNormal={colors['light']['OnSurface']}
ForegroundPositive={base_text_states['Positive']}
ForegroundVisited={base_text_states['Visited']}
[Colors:Header]
BackgroundNormal={extras['LightSurface3']}
[Colors:Selection]
BackgroundAlternate={colors['light']['Primary']}
BackgroundNormal={colors['light']['Primary']}
DecorationFocus={colors['light']['Primary']}
DecorationHover={colors['light']['Primary']}
ForegroundActive={colors['light']['OnPrimary']}
ForegroundInactive={colors['light']['OnPrimary']}
ForegroundLink={extras['LinkOnPrimaryLight']}
ForegroundNegative={extras['NegativeOnPrimaryLight']}
ForegroundNeutral={extras['NeutralOnPrimaryLight']}
ForegroundNormal={colors['light']['OnPrimary']}
ForegroundPositive={extras['PositiveOnPrimaryLight']}
ForegroundVisited={extras['LinkVisitedOnPrimaryLight']}
[Colors:Tooltip]
BackgroundAlternate={colors['light']['SurfaceVariant']}
BackgroundNormal={extras['LightSurface']}
DecorationFocus={colors['light']['Primary']}
DecorationHover={colors['light']['Primary']}
ForegroundActive={colors['light']['OnSurface']}
ForegroundInactive={colors['light']['Outline']}
ForegroundLink={base_text_states['Link']}
ForegroundNegative={colors['light']['Error']}
ForegroundNeutral={base_text_states['Neutral']}
ForegroundNormal={colors['light']['OnSurface']}
ForegroundPositive={base_text_states['Positive']}
ForegroundVisited={base_text_states['Visited']}
[Colors:View]
BackgroundAlternate={extras['LightSurface2']}
BackgroundNormal={extras['LightSurface']}
DecorationFocus={colors['light']['Primary']}
#-----------------------------------------------
DecorationHover={colors['light']['Primary']}
ForegroundActive={colors['light']['InverseSurface']}
ForegroundInactive={colors['light']['Outline']}
ForegroundLink={base_text_states['Link']}
ForegroundNegative={colors['light']['Error']}
ForegroundNeutral={base_text_states['Neutral']}
ForegroundNormal={colors['light']['OnSurfaceVariant']}
ForegroundPositive={base_text_states['Positive']}
ForegroundVisited={base_text_states['Visited']}
[Colors:Window]
BackgroundAlternate={extras['LightSurface']}
BackgroundNormal={extras['LightSurface3']}
DecorationFocus={colors['light']['Primary']}
DecorationHover={colors['light']['Primary']}
ForegroundActive={colors['light']['InverseSurface']}
ForegroundInactive={colors['light']['Outline']}
ForegroundLink={base_text_states['Link']}
ForegroundNegative={colors['light']['Error']}
ForegroundNeutral={base_text_states['Neutral']}
#--- Window titles, context icons
ForegroundNormal={colors['light']['OnSurfaceVariant']}
ForegroundPositive={base_text_states['Positive']}
ForegroundVisited={base_text_states['Negative']}
[General]
ColorScheme=MaterialYouLight
Name=Material You Light
shadeSortColumn=false
[KDE]
contrast=4
[WM]
activeBackground={extras['LightSurface3']}
activeBlend=#ff0000
activeForeground={colors['light']['OnSurface']}
inactiveBackground={colors['light']['SecondaryContainer']}
inactiveBlend=#ff0000
inactiveForeground={colors['light']['OnSurfaceVariant']}
"""
self._dark_scheme = f"""[ColorEffects:Disabled]
Color={extras['DarkSurface1']}
ColorAmount=0.55
ColorEffect=3
ContrastAmount=0.65
ContrastEffect=0
IntensityAmount=0.1
IntensityEffect=0
[ColorEffects:Inactive]
ChangeSelectionColor=false
Color={colors['dark']['SurfaceVariant']}
ColorAmount=-0.9
ColorEffect=0
ContrastAmount=0.1
ContrastEffect=0
Enable=true
IntensityAmount=0
IntensityEffect=0
[Colors:Button]
BackgroundAlternate={colors['dark']['SurfaceVariant']}
BackgroundNormal={extras['DarkSelectionAlt']}
DecorationFocus={colors['dark']['Primary']}
DecorationHover={colors['dark']['Primary']}
ForegroundActive={colors['dark']['OnSurface']}
ForegroundInactive={colors['dark']['Outline']}
ForegroundLink={base_text_states['Link']}
ForegroundNegative={colors['dark']['Error']}
ForegroundNeutral={base_text_states['Neutral']}
ForegroundNormal={colors['dark']['OnSurface']}
ForegroundPositive={base_text_states['Positive']}
ForegroundVisited={base_text_states['Visited']}
[Colors:Header]
BackgroundNormal={extras['DarkSurface3']}
[Colors:Selection]
BackgroundAlternate={colors['dark']['Primary']}
BackgroundNormal={colors['dark']['Primary']}
DecorationFocus={colors['dark']['Primary']}
DecorationHover={colors['dark']['Primary']}
ForegroundActive={colors['dark']['OnPrimary']}
ForegroundInactive={colors['dark']['OnPrimary']}
ForegroundLink={extras['LinkOnPrimaryDark']}
ForegroundNegative={extras['NegativeOnPrimaryDark']}
ForegroundNeutral={extras['NeutralOnPrimaryDark']}
ForegroundNormal={colors['dark']['OnPrimary']}
ForegroundPositive={extras['PositiveOnPrimaryDark']}
ForegroundVisited={extras['LinkVisitedOnPrimaryDark']}
[Colors:Tooltip]
BackgroundAlternate={colors['dark']['SurfaceVariant']}
BackgroundNormal={extras['DarkSurface']}
DecorationFocus={colors['dark']['Primary']}
DecorationHover={colors['dark']['Primary']}
ForegroundActive={colors['dark']['OnSurface']}
ForegroundInactive={colors['dark']['Outline']}
ForegroundLink={base_text_states['Link']}
ForegroundNegative={colors['dark']['Error']}
ForegroundNeutral={base_text_states['Neutral']}
ForegroundNormal={colors['dark']['OnSurface']}
ForegroundPositive={base_text_states['Positive']}
ForegroundVisited={base_text_states['Visited']}
[Colors:View]
BackgroundAlternate={extras['DarkSurface2']}
BackgroundNormal={extras['DarkSurface']}
DecorationFocus={colors['dark']['Primary']}
#-----------------------------------------------
DecorationHover={colors['dark']['Primary']}
ForegroundActive={colors['dark']['InverseSurface']}
ForegroundInactive={colors['dark']['Outline']}
ForegroundLink={base_text_states['Link']}
ForegroundNegative={colors['dark']['Error']}
ForegroundNeutral={base_text_states['Neutral']}
ForegroundNormal={colors['dark']['OnSurfaceVariant']}
ForegroundPositive={base_text_states['Positive']}
ForegroundVisited={base_text_states['Visited']}
[Colors:Window]
BackgroundAlternate={extras['DarkSurface']}
BackgroundNormal={extras['DarkSurface3']}
DecorationFocus={colors['dark']['Primary']}
DecorationHover={colors['dark']['Primary']}
ForegroundActive={colors['dark']['InverseSurface']}
ForegroundInactive={colors['dark']['Outline']}
ForegroundLink={base_text_states['Link']}
ForegroundNegative={colors['dark']['Error']}
ForegroundNeutral={base_text_states['Neutral']}
#--- Window titles, context icons
ForegroundNormal={colors['dark']['OnSurfaceVariant']}
ForegroundPositive={base_text_states['Positive']}
ForegroundVisited={base_text_states['Negative']}
[General]
ColorScheme=MaterialYouDark
Name=Material You Dark
shadeSortColumn=true
[KDE]
contrast=4
[WM]
activeBackground={extras['DarkSurface3']}
activeBlend=#ff0000
activeForeground={colors['dark']['OnSurface']}
inactiveBackground={colors['dark']['SecondaryContainer']}
inactiveBlend=#ff0000
inactiveForeground={colors['dark']['OnSecondaryContainer']}
"""
self._wal_light_scheme = {
"wallpaper": wallpaper_data,
"alpha": "100",
"special": {
"background": pywal_colors_light[0],
"foreground": colors['light']['OnSurface'],
"cursor": colors['light']['OnSurface'],
},
"colors": {
"color0": pywal_colors_light[0],
"color1": pywal_colors_light[1],
"color2": pywal_colors_light[2],
"color3": pywal_colors_light[3],
"color4": pywal_colors_light[4],
"color5": pywal_colors_light[5],
"color6": pywal_colors_light[6],
"color7": pywal_colors_light[7],
"color8": colors['light']['Secondary'],
"color9": pywal_colors_light[1],
"color10": pywal_colors_light[2],
"color11": pywal_colors_light[3],
"color12": pywal_colors_light[4],
"color13": pywal_colors_light[5],
"color14": pywal_colors_light[6],
"color15": pywal_colors_light[7]
}
}
self._wal_dark_scheme = {
"wallpaper": wallpaper_data,
"alpha": "100",
"special": {
"background": pywal_colors_dark[0],
"foreground": colors['dark']['OnSurface'],
"cursor": colors['dark']['OnSurface'],
},
"colors": {
"color0": pywal_colors_dark[0],
"color1": pywal_colors_dark[1],
"color2": pywal_colors_dark[2],
"color3": pywal_colors_dark[3],
"color4": pywal_colors_dark[4],
"color5": pywal_colors_dark[5],
"color6": pywal_colors_dark[6],
"color7": pywal_colors_dark[7],
"color8": colors['dark']['Secondary'],
"color9": pywal_colors_dark[1],
"color10": pywal_colors_dark[2],
"color11": pywal_colors_dark[3],
"color12": pywal_colors_dark[4],
"color13": pywal_colors_dark[5],
"color14": pywal_colors_dark[6],
"color15": pywal_colors_dark[7]
}
}
dark_active=colors['dark']['OnBackground']
dark_inactive=extras['DarkSurface3']
light_active=colors['light']['OnBackground']
light_inactive=extras['LightSurface3']
self._sierra_breeze_dark_colors = {
"btn_close_active_color" : tup2str(hex2rgb(blendColors(dark_active, tones_primary['80'], .7))),
"btn_minimize_active_color" : tup2str(hex2rgb(blendColors(dark_active, tones_primary['70'], .7))),
"btn_maximize_active_color" : tup2str(hex2rgb(blendColors(dark_active, tones_primary['55'], .7))),
"btn_keep_above_active_color" : tup2str(hex2rgb(blendColors(dark_active, "#118cff", .7))),
"btn_keep_below_active_color" : tup2str(hex2rgb(blendColors(dark_active, "#5d00b9", .7))),
"btn_on_all_desktops_active_color" : tup2str(hex2rgb(blendColors(dark_active, "#00b9b9", .7))),
"btn_shade_active_color" : tup2str(hex2rgb(blendColors(dark_active, "#b900b6", .7))),
"btn_inactive_color" : tup2str(hex2rgb(blendColors(dark_inactive, colors['dark']['Secondary'], .32)))
}
self._sierra_breeze_light_colors = {
"btn_close_active_color" : tup2str(hex2rgb(blendColors(tones_primary['50'],light_active, .05*lbm))),
"btn_minimize_active_color" : tup2str(hex2rgb(blendColors(tones_primary['60'],light_active, .05*lbm))),
"btn_maximize_active_color" : tup2str(hex2rgb(blendColors(tones_primary['70'],light_active, .05*lbm))),
"btn_keep_above_active_color" : tup2str(hex2rgb(blendColors("#118cff", light_active, .05*lbm))),
"btn_keep_below_active_color" : tup2str(hex2rgb(blendColors("#5d00b9", light_active, .05*lbm))),
"btn_on_all_desktops_active_color" : tup2str(hex2rgb(blendColors("#00b9b9", light_active, .05*lbm))),
"btn_shade_active_color" : tup2str(hex2rgb(blendColors("#b900b6", light_active, .05*lbm))),
"btn_inactive_color" : tup2str(hex2rgb(blendColors(light_inactive, colors['light']['Secondary'], .32)))
}
def get_light_scheme(self):
return(self._light_scheme)
def get_dark_scheme(self):
return(self._dark_scheme)
def get_wal_light_scheme(self):
return (self._wal_light_scheme)
def get_wal_dark_scheme(self):
return (self._wal_dark_scheme)
def get_sierra_breeze_dark_colors(self):
return (self._sierra_breeze_dark_colors)
def get_sierra_breeze_light_colors(self):
return (self._sierra_breeze_light_colors)
|
182422
|
from django.test import TestCase
from django.test import Client
from django.urls import reverse
class TestAnonymousUserView(TestCase):
def setUp(self):
self.client = Client()
# superuser exists, but is not authenticated
def test_basic_login_view(self):
"""
        Login view renders OK for an anonymous user
"""
ret = self.client.get(reverse('account_login'))
self.assertEqual(
ret.status_code,
200
)
|
182516
|
import math, torch
import numpy as np
from numpy.random import normal as normrnd
from scipy.stats import multivariate_normal, norm
from scipy.linalg import sqrtm, expm
from pdb import set_trace as bp
from include.DNN import DNN
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from include.dataStructures.particle import Particle
class localize:
def __init__(self, numP, su, sz, distMap, mat, wayPts, R, dim, useClas, hardClas, modelpath="./models/best.pth"):
self.np = numP
self.sz = sz
self.dists = distMap
self.dim = dim
self.wayPts = wayPts
self.pts = self.convert(wayPts)
self.nAP = mat.numAPs
self.tx = mat.Tx
self.R = R
self.start = self.wayPts[0]
self.su = su
self.path = []
self.APLocs = []
self.IDs = []
self.use = useClas
self.hard = hardClas
self.modelpath = modelpath
self.model = None
        self.confidence = [0, 0, 0, 0]  # [TP, FP, TN, FN], treating LOS (label 0) as the positive class
if self.dim == 2: self.su = su[0:2]
if self.use: self.load_model()
def print(self, samples):
for i in range(self.np):
print("pose: ", samples[i].pose, " | weight: ", samples[i].w)
def distance(self, x, y):
if len(x)==3 and len(y)==3:
return math.sqrt( (x[1]-y[1])**2 + (x[0]-y[0])**2 + (x[2]-y[2])**2 )
else:
return math.sqrt( (x[1]-y[1])**2 + (x[0]-y[0])**2 )
def MSE(self):
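        # note: despite the name, this returns the mean Euclidean error between
        # the waypoints and the estimated path, not a mean squared error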
mse = 0
for i in range(len(self.pts)):
mse += self.distance(self.wayPts[i], self.path[i])
mse = mse/len(self.pts)
return mse
def getCDF(self):
cdf = [0 for x in range(len(self.pts))]
for i in range(len(self.pts)):
cdf[i] = self.distance(self.wayPts[i], self.path[i])
return cdf
def distrib(self):
start = self.wayPts[0] ; samples = []
if self.dim == 2: start = [start[0], start[1]]
if self.dim == 3: start = start
for _ in range(self.np):
samples.append(Particle(start, 1/self.np))
return samples
def convert(self, pts):
n = len(pts)
rtPts = []
for i in range(1, n):
dx = pts[i][0] - pts[i-1][0]
dy = pts[i][1] - pts[i-1][1]
if self.dim==2: rtPts.append([dx, dy])
if self.dim==3: dz = pts[i][2] - pts[i-1][2] ; rtPts.append([dx, dy, dz])
return rtPts
    '''
    load the PyTorch checkpoint and restore the model's state dict
    '''
def load_model(self):
model = DNN()
path = self.modelpath
checkpoint = torch.load(path)
model.load_state_dict(checkpoint['state_dict'])
self.model = model
self.model.eval()
'''
classify into LOS/NLOS
'''
def classify(self, rssi, euc):
inp = torch.tensor([rssi, euc])
out = self.model(inp.float())
pred = 1 if (out[1]>out[0]) else 0
return pred
'''
weighting using the normpdf subroutine
'''
def getWeight(self, dz):
norpdf = 1
for i in range(len(dz)):
if dz[i]!=0:
norpdf *= norm.pdf(dz[i], 0, self.sz[i])
return norpdf
'''
weighting using the mvnpdf subroutine
'''
def getMultiWeight(self, dz):
idx = [i for i, e in enumerate(dz) if e != 0]
val = [] ; sig = []
if len(idx)==0:
return 1/self.np
for i in idx:
val.append(dz[i])
sig.append(self.sz[i])
mvn = multivariate_normal([0]*len(idx), np.diag(sig))
return mvn.pdf(val)
    '''
    no return value is needed: `samples` is passed as an object reference,
    so the motion update below mutates the particles in place
    '''
def motion_model(self, samples, point, su):
for i in range(self.np):
dx = point[0] - normrnd(0, su[0])
dy = point[1] - normrnd(0, su[1])
if self.dim == 2: pose = [samples[i].pose[0] + dx, samples[i].pose[1] + dy]
if self.dim == 3: dz = point[2] - normrnd(0, su[2])
if self.dim == 3: pose = [samples[i].pose[0] + dx, samples[i].pose[1] + dy, samples[i].pose[2] + dz]
samples[i].pose = pose
'''
measurement model for the particle filter
label for dMap = 1 : NLOS , 0 : LOS
'''
def measure_model(self, samples, z):
totalWt = 0 ; nAP = len(z)
for i in range(self.np):
dz = [0 for x in range(nAP)]
for j in range(nAP):
tx = self.tx[j] ; pos = samples[i].pose
d = self.distance(tx, pos)
if d <= self.R:
if self.use:
if self.hard:
label = self.classify(z[j].rssi, d)
# confidence matrix calculation
if label==0 and z[j].label==0: self.confidence[0]= self.confidence[0]+1 # true positive
                            elif label==0 and z[j].label==1: self.confidence[1]= self.confidence[1]+1 # false positive
elif label==1 and z[j].label==1: self.confidence[2]= self.confidence[2]+1 # true negative
                            elif label==1 and z[j].label==0: self.confidence[3]= self.confidence[3]+1 # false negative
if label==0:
dz[j] = abs(z[j].rssi-d)
else:
inp = torch.tensor([z[j].rssi, d])
out = self.model(inp.float()).detach().numpy()
dz[j] = out[0]*abs(z[j].rssi-d) + out[1]*abs(z[j].rssi - normrnd(self.R,3))
# confidence matrix calculation
if out[0]>out[1] and z[j].label==0: self.confidence[0]= self.confidence[0]+1 # true positive
elif out[0]>out[1] and z[j].label==1: self.confidence[1]= self.confidence[1]+1 # false positive
elif out[0]<out[1] and z[j].label==1: self.confidence[2]= self.confidence[2]+1 # true negative
elif out[0]<out[1] and z[j].label==0: self.confidence[3]= self.confidence[3]+1 # false negative
else:
dz[j] = abs(z[j].rssi-d)
wt = self.getWeight(dz)
samples[i].w *= wt
totalWt += wt
if totalWt!=0:
for i in range(self.np):
samples[i].w = samples[i].w / totalWt
else:
for i in range(self.np):
samples[i].w = 1/self.np
'''
measurement model for fast slam v1
label for dMap = 1 : NLOS , 0 : LOS
'''
def fast_measure_model(self, samples, z):
if self.dim == 2: Qt = np.diag([10,10])
if self.dim == 3: Qt = np.diag([10,10,10])
Qt = Qt.tolist() ; nAP = len(z) ; totWt = 0
for i in range(self.np):
for j in range(nAP):
tx = np.array(self.tx[j]) ; pos = np.array(samples[i].pose)
d = self.distance(tx, pos)
if d <= self.R:
# initialize particle map
if j not in samples[i].mapID:
samples[i].mapMu.append(tx)
samples[i].mapSigma.append(Qt)
samples[i].mapID.append(j)
samples[i].hashMap[j] = len(samples[i].mapID) - 1
samples[i].w = 1/self.np
# update particle map
else:
ID = samples[i].hashMap[j]
# prediction step
muHat = samples[i].mapMu[ID]
sigHat = np.array(samples[i].mapSigma[ID])
# update step
dHat = self.distance(pos, muHat)
# use classifier or not
if self.use:
if self.hard:
label = self.classify(z[j].rssi, dHat)
# confidence matrix calculation
if label==0 and z[j].label==0: self.confidence[0]= self.confidence[0]+1 # true positive
                                elif label==0 and z[j].label==1: self.confidence[1]= self.confidence[1]+1 # false positive
elif label==1 and z[j].label==1: self.confidence[2]= self.confidence[2]+1 # true negative
                                elif label==1 and z[j].label==0: self.confidence[3]= self.confidence[3]+1 # false negative
if label==0:
innov = abs(z[j].rssi-dHat)
else:
continue
else:
inp = torch.tensor([z[j].rssi, dHat])
out = self.model(inp.float()).detach().numpy()
innov = out[0]*abs(z[j].rssi - dHat) + out[1]*abs(z[j].rssi - normrnd(self.R,3))
# confidence matrix calculation
if out[0]>out[1] and z[j].label==0: self.confidence[0]= self.confidence[0]+1 # true positive
                                elif out[0]>out[1] and z[j].label==1: self.confidence[1]= self.confidence[1]+1 # false positive
elif out[0]<out[1] and z[j].label==1: self.confidence[2]= self.confidence[2]+1 # true negative
                                elif out[0]<out[1] and z[j].label==0: self.confidence[3]= self.confidence[3]+1 # false negative
else:
innov = abs(z[j].rssi - dHat)
dx = muHat[0] - pos[0] ; dy = muHat[1] - pos[1]
den = math.sqrt(dx**2 + dy**2)
H = np.array([dx/den, dy/den])
if self.dim==3:
dz = muHat[2] - pos[2]
den = math.sqrt(dx**2 + dy**2 + dz**2)
H = np.array([dx/den, dy/den, dz/den])
try:
Q = np.matmul(np.matmul(H, sigHat), H) + self.sz[j]
except:
bp()
# Kalman Gain
K = np.matmul(sigHat, H)/Q
# update pose/ covar
mu = muHat + innov*K
K = K.reshape((self.dim,1))
sig = (np.identity(self.dim) - K*H)*sigHat
samples[i].mapMu[ID] = mu.reshape((self.dim,))
samples[i].mapSigma[ID] = sig.tolist()
samples[i].w = max(samples[i].w, math.sqrt(2*math.pi*Q)*math.exp(-0.5*(innov**2)/Q))
totWt += samples[i].w
# normalize the weights
if totWt==0:
for i in range(self.np):
samples[i].w = 1/self.np
else:
for i in range(self.np):
samples[i].w = samples[i].w/totWt
'''
Resampling algorithm applicable to both the
particle filter and FastSLAM, thanks to the
common Particle structure
'''
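# The loop below draws self.np+1 uniforms, sorts them, and walks them against
# the cumulative weights Q (ordered-uniforms multinomial resampling); the copy
# step only runs when more than 20% of the drawn indices are unique, so a
# degenerate draw does not collapse the particle set.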
def resample(self, samples):
idx = [0]*self.np ; Q = [0]*self.np ; Q[0] = samples[0].w
for i in range(1, self.np):
Q[i] = samples[i].w + Q[i-1]
t = np.random.rand(self.np+1, 1)
T = np.sort(t, axis=0)
T[self.np] = 1 ; i,j = 0,0
while i<self.np and j<self.np:
if T[i] < Q[j]:
idx[i] = j
i += 1
else:
j += 1
if len(set(idx))>0.2*self.np:
for i in range(self.np):
samples[i].pose = samples[idx[i]].pose
samples[i].w = 1/self.np
samples[i].mapMu = samples[idx[i]].mapMu
samples[i].mapID = samples[idx[i]].mapID
samples[i].mapSigma = samples[idx[i]].mapSigma
samples[i].hashMap = samples[idx[i]].hashMap
'''
Calculates the effective number of particles
in the sampled distribution.
'''
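# N_eff = 1 / sum_i(w_i^2) with normalized weights w_i; it equals self.np for
# uniform weights and approaches 1 when a single particle carries all the weight.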
def neff(self, samples):
wghts = [0]*self.np ; totWt = 0
for i in range(self.np):
wghts[i] = samples[i].w
totWt += samples[i].w
den = 0
for i in range(self.np):
wghts[i] = (wghts[i]/totWt)**2
den += wghts[i]
return 1/den
'''
Calculates the unweighted mean and variance of the
sample distribution
'''
def meanVar(self, samples):
totWt = 0 ; mu = [0 for _ in range(self.dim)] ; sig = np.zeros((self.dim,self.dim))
for i in range(self.np):
mu[0] += samples[i].pose[0]
mu[1] += samples[i].pose[1]
if self.dim==3: mu[2] += samples[i].pose[2]
totWt += samples[i].w
if self.dim==2: mu = [mu[0]/self.np, mu[1]/self.np]
if self.dim==3: mu = [mu[0]/self.np, mu[1]/self.np, mu[2]/self.np]
for i in range(self.np):
if self.dim==2: x = np.array([ samples[i].pose[0]-mu[0] , samples[i].pose[1]-mu[1] ])
if self.dim==3: x = np.array([ samples[i].pose[0]-mu[0] , samples[i].pose[1]-mu[1] , samples[i].pose[2]-mu[2] ])
sig += np.matmul(x.reshape((self.dim,1)),x.reshape((1,self.dim)))
sig = sig/self.np
return mu, sig
'''
Calculates weighted mean and variance of the
sample distribution
'''
def weightedMeanVar(self, samples):
totWt = 0 ; mu = [0 for _ in range(self.dim)] ; sig = np.zeros((self.dim,self.dim))
for i in range(self.np):
mu[0] += samples[i].w*samples[i].pose[0]
mu[1] += samples[i].w*samples[i].pose[1]
if self.dim==3: mu[2] += samples[i].w*samples[i].pose[2]
totWt += samples[i].w
if self.dim==2: mu = [mu[0]/totWt, mu[1]/totWt]
if self.dim==3: mu = [mu[0]/totWt, mu[1]/totWt, mu[2]/totWt]
for i in range(self.np):
if self.dim==2: x = np.array([ samples[i].pose[0]-mu[0] , samples[i].pose[1]-mu[1] ])
if self.dim==3: x = np.array([ samples[i].pose[0]-mu[0] , samples[i].pose[1]-mu[1] , samples[i].pose[2]-mu[2] ])
sig += samples[i].w*np.matmul(x.reshape((self.dim,1)),x.reshape((1,self.dim)))
sig = sig/totWt
return mu, sig
'''
Get the maximum weighted particle and use it
to calculate the IDs of the APs discovered &
the locations of the discovered APs
'''
def getAPLocs(self, samples):
maxWeight = -math.inf ; idx = 0
for i in range(self.np):
if samples[i].w > maxWeight:
maxWeight = samples[i].w
idx = i
self.APLocs = samples[idx].mapMu
self.IDs = samples[idx].mapID
'''
Plot the pose of every particle. Currently only
useful for debugging, since animation support has
yet to be added
'''
def plot(self, samples):
x = [] ; y = []
for i in range(self.np):
x.append(samples[i].pose[0])
y.append(samples[i].pose[1])
plt.plot(x,y,'c.')
mXY,_ = self.meanVar(samples)
wmXY,_ = self.weightedMeanVar(samples)
plt.plot(mXY[0],mXY[1],'ro')
plt.plot(wmXY[0],wmXY[1],'bo')
plt.xlim([-100,300])
plt.ylim([-100,300])
plt.show()
'''
The main Particle Filter routine
'''
def particleFilter(self):
self.path.append(self.wayPts[0])
samples = self.distrib()
print("Running Particle Filter ..")
for i in range(len(self.pts)):
# provide action update
self.motion_model(samples, self.pts[i], self.su)
# provide measurement update
self.measure_model(samples, self.dists[i])
# resample only when the number of effective particles drops
if self.neff(samples) <= 1/3*self.np:
self.resample(samples)
mXY, _ = self.weightedMeanVar(samples)
self.path.append(mXY)
print("Particle Filter has finished running ..")
'''
The main FastSLAM v1 routine
'''
def FastSLAM(self):
self.path.append(self.wayPts[0])
samples = self.distrib()
print("Running Fast SLAM ..")
for i in range(len(self.pts)):
# provide action update
self.motion_model(samples, self.pts[i], self.su)
# provide measurement update
self.fast_measure_model(samples, self.dists[i])
# resample only when the number of effective particles drops
if self.neff(samples) <= 1/3*self.np:
self.resample(samples)
mXY, _ = self.weightedMeanVar(samples)
self.path.append(mXY)
self.getAPLocs(samples)
print("FastSLAM has finished running ..")
'''
####################################################################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
'''
'''
Localizer for Experimental Setup:
1. Contains only FastSlam
2. Measurement Model updated to read data from experiments
'''
class localizeExp:
def __init__(self, numP, su, sz, map, useClas, hardClas, modelpath="./models/best.pth"):
self.np = numP
self.sz = sz
self.dim = map.dim
self.wayPts = map.wayPts
self.pts = self.convert(self.wayPts)
self.TXName = map.TXName
self.numPts = map.numPts
self.numAPs = map.numAPs
self.maxZ = map.maxZ
self.dists = map.distMap
self.name2MAC = map.name2MAC
self.name2Pos = map.name2Pos
self.MAC2Name = map.MAC2Name
self.start = self.wayPts[0][:2]
self.su = su
self.path = []
self.APLocs = []
self.IDs = []
self.use = useClas
self.hard = hardClas
self.modelpath = modelpath
self.model = None
self.confidence = [0, 0, 0, 0] # true positive, false positive, true negative, false negative
if self.dim == 2: self.su = su[0:2]
if self.use: self.load_model()
def print(self, samples):
for i in range(self.np):
print("pose: ", samples[i].pose, " | weight: ", samples[i].w)
def distance(self, x, y):
if len(x)==3 and len(y)==3:
return math.sqrt( (x[1]-y[1])**2 + (x[0]-y[0])**2 + (x[2]-y[2])**2 )
else:
return math.sqrt( (x[1]-y[1])**2 + (x[0]-y[0])**2 )
def MSE(self):
# mean Euclidean error between the true waypoints and the estimated path
mse = 0
for i in range(len(self.pts)):
mse += self.distance(self.wayPts[i], self.path[i])
mse = mse/len(self.pts)
return mse
def getCDF(self):
cdf = [0 for x in range(len(self.pts))]
for i in range(len(self.pts)):
cdf[i] = self.distance(self.wayPts[i], self.path[i])
return cdf
def distrib(self):
start = self.wayPts[0] ; samples = []
if self.dim == 2: start = [start[0], start[1]]
# if self.dim == 3 the start waypoint is used as-is
for _ in range(self.np):
samples.append(Particle(start, 1/self.np))
return samples
def convert(self, pts):
n = len(pts)
rtPts = []
for i in range(1, n):
dx = pts[i][0] - pts[i-1][0]
dy = pts[i][1] - pts[i-1][1]
if self.dim==2: rtPts.append([dx, dy])
if self.dim==3: dz = pts[i][2] - pts[i-1][2] ; rtPts.append([dx, dy, dz])
return rtPts
'''
load pytorch model and save dict
'''
def load_model(self):
model = DNN()
path = self.modelpath
checkpoint = torch.load(path)
model.load_state_dict(checkpoint['state_dict'])
self.model = model
self.model.eval()
'''
classify into LOS/NLOS
'''
def classify(self, rssi, euc):
inp = torch.tensor([rssi, euc])
out = self.model(inp.float())
pred = 1 if (out[1]>out[0]) else 0
return pred
'''
weighting using the normpdf subroutine
'''
def getWeight(self, dz):
norpdf = 1
for i in range(len(dz)):
if dz[i]!=0:
norpdf *= norm.pdf(dz[i], 0, self.sz[i])
return norpdf
def rssi2Dist(self, rssi):
'''
https://stackoverflow.com/questions/11217674/how-to-calculate-distance-from-wifi-router-using-signal-strength
http://pylayers.github.io/pylayers/notebook/2-AP/CoverageMetis.html
'''
if abs(rssi) > 60: exp = (abs(rssi) - 32.44)/20
else : exp = (abs(rssi) - 12.55)/20
val = (10**exp) / 60
return val
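# The branches above appear to invert the free-space path-loss relation
# |RSSI| = 20*log10(d) + 20*log10(f) + 32.44 (d in km, f in MHz), dropping the
# frequency term and using a smaller constant for stronger signals; e.g.
# |RSSI| = 72.44 gives exp = 2 and val = 10**2 / 60 in the first branch.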
'''
weighting using the mvnpdf subroutine
'''
def getMultiWeight(self, dz):
idx = [i for i, e in enumerate(dz) if e != 0]
val = [] ; sig = []
if len(idx)==0:
return 1/self.np
for i in idx:
val.append(dz[i])
sig.append(self.sz[i])
mvn = multivariate_normal([0]*len(idx), np.diag(sig))
return mvn.pdf(val)
'''
no return value is needed: samples holds mutable
Particle objects, so in-place updates are visible
to the caller (Python passes object references)
'''
def motion_model(self, samples, point, su):
for i in range(self.np):
dx = point[0] - normrnd(0, su[0])
dy = point[1] - normrnd(0, su[1])
pose = [samples[i].pose[0] + dx, samples[i].pose[1] + dy]
samples[i].pose = pose
'''
Measurement model for FastSLAM v1.
dMap labels: 1 = NLOS, 0 = LOS
'''
def fast_measure_model(self, samples, wpID):
Qt = np.diag([5,5])
Qt = Qt.tolist() ; totWt = 0
print("Iteration: " , wpID, end='\r')
for i in range(self.np):
for j in range(len(self.name2Pos)):
name = self.TXName[j]
tx = np.array(self.name2Pos[name])
pos = np.array(samples[i].pose)
# initialize particle map
if name not in samples[i].mapID:
samples[i].mapMu.append(tx[:2])
samples[i].mapSigma.append(Qt)
samples[i].mapID.append(name)
samples[i].hashMap[name] = len(samples[i].mapID) - 1
samples[i].w = 1/self.np
# update particle map
else:
ID = samples[i].hashMap[name]
# prediction step
muHat = samples[i].mapMu[ID]
sigHat = np.array(samples[i].mapSigma[ID])
# update step
dHat = self.distance(pos, muHat)
rssiDist = self.dists[wpID][j].rssi
# use classifier or not
if self.use:
if self.hard:
label = self.classify(rssiDist, dHat)
if label==0:
innov = abs(rssiDist-dHat)
else:
continue
else:
inp = torch.tensor([rssiDist, dHat])
out = self.model(inp.float()).detach().numpy()
innov = out[0]*abs(rssiDist - dHat) + out[1]*abs(rssiDist - normrnd(15,3))
else:
innov = abs(rssiDist - dHat)
dx = muHat[0] - pos[0] ; dy = muHat[1] - pos[1]
den = math.sqrt(dx**2 + dy**2)
H = np.array([dx/den, dy/den])
try:
Q = np.matmul(np.matmul(H, sigHat), H) + self.sz[j]
except:
bp()
# Kalman Gain
K = np.matmul(sigHat, H)/Q
# update pose/ covar
mu = muHat + innov*K
K = K.reshape((self.dim,1))
sig = (np.identity(self.dim) - K*H)*sigHat
samples[i].mapMu[ID] = mu.reshape((self.dim,))
samples[i].mapSigma[ID] = sig.tolist()
samples[i].w = max(samples[i].w, math.sqrt(2*math.pi*Q)*math.exp(-0.5*(innov**2)/Q))
totWt += samples[i].w
# normalize the weights
if totWt==0:
for i in range(self.np):
samples[i].w = 1/self.np
else:
for i in range(self.np):
samples[i].w = samples[i].w/totWt
'''
Resampling algorithm applicable to both the
particle filter and FastSLAM, thanks to the
common Particle structure
'''
def resample(self, samples):
idx = [0]*self.np ; Q = [0]*self.np ; Q[0] = samples[0].w
for i in range(1, self.np):
Q[i] = samples[i].w + Q[i-1]
t = np.random.rand(self.np+1, 1)
T = np.sort(t, axis=0)
T[self.np] = 1 ; i,j = 0,0
while i<self.np and j<self.np:
if T[i] < Q[j]:
idx[i] = j
i += 1
else:
j += 1
if len(set(idx))>0.2*self.np:
for i in range(self.np):
samples[i].pose = samples[idx[i]].pose
samples[i].w = 1/self.np
samples[i].mapMu = samples[idx[i]].mapMu
samples[i].mapID = samples[idx[i]].mapID
samples[i].mapSigma = samples[idx[i]].mapSigma
samples[i].hashMap = samples[idx[i]].hashMap
'''
Calculates the effective number of particles
in the sampled distribution.
'''
def neff(self, samples):
wghts = [0]*self.np ; totWt = 0
for i in range(self.np):
wghts[i] = samples[i].w
totWt += samples[i].w
den = 0
for i in range(self.np):
wghts[i] = (wghts[i]/totWt)**2
den += wghts[i]
return 1/den
'''
Calculates the unweighted mean and variance of the
sample distribution
'''
def meanVar(self, samples):
totWt = 0 ; mu = [0 for _ in range(self.dim)] ; sig = np.zeros((self.dim,self.dim))
for i in range(self.np):
mu[0] += samples[i].pose[0]
mu[1] += samples[i].pose[1]
if self.dim==3: mu[2] += samples[i].pose[2]
totWt += samples[i].w
if self.dim==2: mu = [mu[0]/self.np, mu[1]/self.np]
if self.dim==3: mu = [mu[0]/self.np, mu[1]/self.np, mu[2]/self.np]
for i in range(self.np):
if self.dim==2: x = np.array([ samples[i].pose[0]-mu[0] , samples[i].pose[1]-mu[1] ])
if self.dim==3: x = np.array([ samples[i].pose[0]-mu[0] , samples[i].pose[1]-mu[1] , samples[i].pose[2]-mu[2] ])
sig += np.matmul(x.reshape((self.dim,1)),x.reshape((1,self.dim)))
sig = sig/self.np
return mu, sig
'''
Calculates weighted mean and variance of the
sample distribution
'''
def weightedMeanVar(self, samples):
totWt = 0 ; mu = [0 for _ in range(self.dim)] ; sig = np.zeros((self.dim,self.dim))
for i in range(self.np):
mu[0] += samples[i].w*samples[i].pose[0]
mu[1] += samples[i].w*samples[i].pose[1]
if self.dim==3: mu[2] += samples[i].w*samples[i].pose[2]
totWt += samples[i].w
if self.dim==2: mu = [mu[0]/totWt, mu[1]/totWt]
if self.dim==3: mu = [mu[0]/totWt, mu[1]/totWt, mu[2]/totWt]
for i in range(self.np):
if self.dim==2: x = np.array([ samples[i].pose[0]-mu[0] , samples[i].pose[1]-mu[1] ])
if self.dim==3: x = np.array([ samples[i].pose[0]-mu[0] , samples[i].pose[1]-mu[1] , samples[i].pose[2]-mu[2] ])
sig += samples[i].w*np.matmul(x.reshape((self.dim,1)),x.reshape((1,self.dim)))
sig = sig/totWt
return mu, sig
'''
Get the maximum weighted particle and use it
to calculate the IDs of the APs discovered &
the locations of the discovered APs
'''
def getAPLocs(self, samples):
maxWeight = -math.inf ; idx = 0
for i in range(self.np):
if samples[i].w > maxWeight:
maxWeight = samples[i].w
idx = i
self.APLocs = samples[idx].mapMu
self.IDs = samples[idx].mapID
'''
Plot the pose of every particle. Currently only
useful for debugging, since animation support has
yet to be added
'''
def plot(self):
print("Displaying Floor Plan.")
wayPts = self.wayPts
path = self.path
TX = self.APLocs
ID = self.IDs
# display the waypoints by RRT
if wayPts is not None:
rows = []; cols = []
for x,y in wayPts:
rows.append(x); cols.append(y)
plt.plot(cols, rows, 'b.-')
# display the actual AP locations
if self.TXName is not None:
rows = []; cols = []
for i in self.TXName:
rows.append(self.name2Pos[i][0]); cols.append(self.name2Pos[i][1])
plt.text(self.name2Pos[i][1], self.name2Pos[i][0], " NAME-"+str(i), color='black')
plt.plot(cols, rows, 'rx')
# display the localized path
if path is not None:
rows = []; cols = []
for i in path:
rows.append(i[0]); cols.append(i[1])
plt.plot(cols, rows, 'c.-')
# display the estimated AP locations
if TX is not None and ID is not None:
rows = []; cols = []; ctr = 0
for i in TX:
rows.append(i[0]); cols.append(i[1])
plt.text(i[1],i[0]," NAME "+str(ID[ctr]), color='red')
ctr += 1
plt.plot(cols, rows, 'rx')
plt.gca().invert_yaxis()
plt.show()
'''
The main FastSLAM v1 routine
'''
def FastSLAM(self):
self.path.append(self.wayPts[0][:2])
samples = self.distrib()
print("Running Fast SLAM ..")
for i in range(len(self.pts)):
# provide action update
self.motion_model(samples, self.pts[i], self.su)
# provide measurement update
self.fast_measure_model(samples, i)
# resample only when the number of effective particles drops
if self.neff(samples) <= 1/3*self.np:
self.resample(samples)
mXY, _ = self.weightedMeanVar(samples)
self.path.append(mXY)
self.getAPLocs(samples)
print("FastSLAM has finished running ..")
|
182549
|
import numpy as np
class Main:
def __init__(self):
self.n, self.m = map(int, input().split())
def output(self):
print(np.eye(self.n, self.m, k=0))
if __name__ == '__main__':
obj = Main()
obj.output()
|
182557
|
train = dict(
batch_size=10,
num_workers=4,
use_amp=True,
num_epochs=100,
num_iters=30000,
epoch_based=True,
lr=0.0001,
optimizer=dict(
mode="adamw",
set_to_none=True,
group_mode="r3", # ['trick', 'r3', 'all', 'finetune'],
cfg=dict(),
),
grad_acc_step=1,
sche_usebatch=True,
scheduler=dict(
warmup=dict(
num_iters=0,
),
mode="poly",
cfg=dict(
lr_decay=0.9,
min_coef=0.001,
),
),
save_num_models=1,
ms=dict(
enable=False,
extra_scales=[0.75, 1.25, 1.5],
),
grad_clip=dict(
enable=False,
mode="value", # or 'norm'
cfg=dict(),
),
ema=dict(
enable=False,
cmp_with_origin=True,
force_cpu=False,
decay=0.9998,
),
)
|
182565
|
from machine import Pin, I2C
import ssd1306
# using default address 0x3C
sda = Pin(4)
scl = Pin(5)
i2c = I2C(0,sda=sda, scl=scl, freq=400000)
display = ssd1306.SSD1306_I2C(128, 64, i2c)
display.rect(0, 0, 128, 16, 1)
display.fill_rect(0, 0, 100, 16, 1)
for row in range(1, 4):
for n in range(4):
if row % 2:
pos = 32*n
else:
pos = 16+32*n
display.fill_rect(pos, row*16, 16, 16, 1)
display.show()
|
182568
|
import cv2
import numpy as np
import scipy.ndimage
from operator import itemgetter
import math
from tkinter import *
from tkinter import messagebox
from tkinter import filedialog
from tkinter import ttk
from MyFunctions import *
Q_90=[
[3,2,2,3,5,8,10,12],
[2,2,3,4,5,12,12,11],
[3,3,3,5,8,11,14,11],
[3,3,4,6,10,17,16,12],
[4,4,7,11,14,22,21,15],
[5,7,11,13,16,12,23,18],
[10,13,16,17,21,24,24,21],
[14,18,19,20,22,20,20,20]
]
Q_50=[
[16,11,10,16,24,40,51,61],
[12,12,14,19,26,58,60,55],
[14,13,16,24,40,57,69,56],
[14,17,22,29,51,87,80,62],
[18,22,37,56,68,109,103,77],
[24,35,55,64,81,104,113,92],
[49,64,78,87,103,121,120,101],
[72,92,95,98,112,100,103,99]
]
Q_10=[
[80,60,50,80,120,200,255,255],
[55,60,70,95,130,255,255,255],
[70,65,80,120,200,255,255,255],
[70,85,110,145,255,255,255,255],
[90,110,185,255,255,255,255,255],
[120,175,255,255,255,255,255,255],
[245,255,255,255,255,255,255,255],
[255,255,255,255,255,255,255,255]
]
Q_CL=[
[16,11,10,16,24,40,51,61],
[12,12,14,19,26,58,60,55],
[14,13,16,24,40,57,69,56],
[14,17,22,29,51,87,80,62],
[18,22,37,56,68,109,103,77],
[24,35,55,64,81,104,113,92],
[49,64,78,87,103,121,120,101],
[72,92,95,98,112,100,103,99]
]
def OpenShowImage():
global img,firstimage,first_image_label
filename=filedialog.askopenfilename(initialdir = "../Forged Images/",title = "Open File",filetypes = (("png files","*.png"),("bmp files","*.bmp"),("jpeg files","*.jpg"),("All Files","*.*")))
firstimage=PhotoImage(file='{}'.format(filename))
first_image_label=Label(leftframe,image=firstimage)
first_image_label.pack()
img=cv2.imread('{}'.format(filename),0)
def AccuracyTest():
filename = filedialog.askopenfilename(initialdir = "../Forged Images/",title ="Open File",filetypes = (("png files","*.png"),("bmp files","*.bmp"),("jpeg files","*.jpg"),("All Files","*.*")))
img_for_accuracy = cv2.imread('{}'.format(filename),0)
dp=0
yp=0
yn=0
for i in range(height):
for j in range(width):
if(img_for_accuracy[i][j]==0 and img2[i][j] == 255):
yp+=1
elif(img_for_accuracy[i][j]==255 and img2[i][j] == 255):
dp+=1
elif(img_for_accuracy[i][j]==255 and img2[i][j] == 0):
yn+=1
precision = dp / (dp + yp)
recall = dp / (dp + yn)
f1 = 2 * (precision * recall) / (precision + recall)
messagebox.showinfo("Accuracy Result",f1)
def GetQuantizationMatrix(size,mainsize):
for i in range (0,size):
for j in range(0,size):
quantization_matrix[i][j]=(pow(2,size-2))
if (size != 2):
GetQuantizationMatrix(size-1,mainsize)
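# MakeDCT slides an 8x8 window over the grayscale image, takes the DCT of each
# block, quantizes it with the matrix selected in the combobox (or not at all),
# and stores the zig-zag (diagonal) feature vector together with the block's
# top-left (i, j) position in diagonaled_array.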
def MakeDCT():
global height,width
height, width = img.shape
vis0=np.zeros((height,width),np.float32)
vis0[:height,:width]=img
global quantization_matrix
quantization_matrix=[[] for i in range(0,8)]
for i in quantization_matrix:
for j in range(0,8):
i.append(0)
GetQuantizationMatrix(8,8)
global diagonaled_array
diagonaled_array=[[] for i in range((height-7)*(width-7))]
count=0
for i in range (0,height-7):
for j in range(0,width-7):
#### Make Quantization With Q_CL Matrix ####
if(quantization_matrix_selection_box.get() == "Q_CL"):
vis1 = cv2.dct(vis0[i:i+8,j:j+8])
for k in range(0,8):
for t in range(0,8):
vis1[k][t] = GetMinDistValue((vis1[k][t] / Q_CL[k][t]))
vis1 = getdiagonalarray(vis1,8,8)
#### Make Quantization With Q_90 Matrix ####
elif(quantization_matrix_selection_box.get() == "Q_90"):
vis1 = cv2.dct(vis0[i:i+8,j:j+8])
for k in range(0,8):
for t in range(0,8):
vis1[k][t] = GetMinDistValue((vis1[k][t] / Q_90[k][t]))
vis1 = getdiagonalarray(vis1,8,8)
#### Make Quantization With Q_50 Matrix ####
elif(quantization_matrix_selection_box.get() == "Q_50"):
vis1 = cv2.dct(vis0[i:i+8,j:j+8])
for k in range(0,8):
for t in range(0,8):
vis1[k][t] = GetMinDistValue((vis1[k][t] / Q_50[k][t]))
vis1 = getdiagonalarray(vis1,8,8)
#### Make Quantization With Q_10 Matrix ####
elif(quantization_matrix_selection_box.get() == "Q_10"):
vis1 = cv2.dct(vis0[i:i+8,j:j+8])
for k in range(0,8):
for t in range(0,8):
vis1[k][t] = GetMinDistValue((vis1[k][t] / Q_10[k][t]))
vis1 = getdiagonalarray(vis1,8,8)
#### Make Quantization With Divide By 16 ####
elif(quantization_matrix_selection_box.get() == "Divide by 16"):
vis1 = cv2.dct(vis0[i:i+8,j:j+8])
for k in range(0,8):
for t in range(0,8):
vis1[k][t] = GetMinDistValue((vis1[k][t] / 16))
vis1 = getdiagonalarray(vis1,8,8)
############## Not Make Quantization ##############
elif(quantization_matrix_selection_box.get() == "Not Selected"):
vis1 = getdiagonalarray(cv2.dct(vis0[i:i+8,j:j+8]), 8, 8)
#### Make Quantization With Quantization Matrix ####
elif(quantization_matrix_selection_box.get() == "QTable in Article"):
vis1 = cv2.dct(vis0[i:i+8,j:j+8])
for k in range(0,8):
for t in range(0,8):
vis1[k][t] = GetMinDistValue(vis1[k][t] / quantization_matrix[k][t])
vis1 = getdiagonalarray(vis1,8,8)
diagonaled_array[count].append(vis1)
diagonaled_array[count].append([i,j])
count+=1
messagebox.showinfo("Success","DCT Dönüşüm Tamamlandı")
number_of_vector_to_compare=10
max_euclidean_distance=1.0
threshold_distance_for_similar_blocks=5
min_count_for_similar_shift_vectors=10
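# TryToDetectForgery lexicographically sorts the block feature vectors,
# compares each block against its next few neighbours in that order (using the
# first 15 zig-zag coefficients), and records the shift vector of every pair
# that is close in feature space but far apart in the image; shift vectors
# repeated often enough mark both blocks of each matching pair as forged in
# the output mask img2.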
def TryToDetectForgery():
global hough_space,img2,result_image,result_image_label
hough_space=[]
diagonaled_array.sort(key=itemgetter(0))
for i in range(0,len(diagonaled_array) - number_of_vector_to_compare + 1):
toplam = 0.0
for j in range(i + 1,i + number_of_vector_to_compare):
for k in range(0, 15):
toplam += pow(( diagonaled_array[i][0][k] - diagonaled_array[j][0][k]), 2)
if(math.sqrt(toplam) < max_euclidean_distance):
if(math.sqrt(pow((diagonaled_array[i][1][0]-diagonaled_array[j][1][0]),2) + pow((diagonaled_array[i][1][1]-diagonaled_array[j][1][1]),2)) > threshold_distance_for_similar_blocks):
hough_space.append(diagonaled_array[i][1][0])
hough_space.append(diagonaled_array[i][1][1])
hough_space.append(diagonaled_array[j][1][0])
hough_space.append(diagonaled_array[j][1][1])
hough_space.append([abs(diagonaled_array[i][1][0]-diagonaled_array[j][1][0]),abs(diagonaled_array[i][1][1]-diagonaled_array[j][1][1])])
img2 = np.zeros((height,width,1),np.uint8)
for i in range(4,len(hough_space),5):
if(hough_space.count(hough_space[i]) > min_count_for_similar_shift_vectors):
for j in range(0,8):
for k in range(0,8):
img2[hough_space[i-4]+j,hough_space[i-3]+k,0]=255
img2[hough_space[i-2]+j,hough_space[i-1]+k,0]=255
filename = filedialog.asksaveasfilename(initialdir = "../Forged Images/",title = "Save File",filetypes = (("png files","*.png"),("bmp files","*.bmp"),("jpeg files","*.jpg"),("All Files","*.*")))
cv2.imwrite('{}'.format(filename),img2)
result_image=PhotoImage(file='{}'.format(filename))
result_image_label=Label(rightframe,image=result_image)
result_image_label.pack()
def GetNumberOfVectorToCompare():
global number_of_vector_to_compare
number_of_vector_to_compare=int(number_of_vector_to_compare_spin.get())
def GetMaxEuclideanDistance():
global max_euclidean_distance
max_euclidean_distance=float(maximum_Euclidean_distance_spin.get())
def GetThresholdDistanceForSimilarBlocks():
global threshold_distance_for_similar_blocks
threshold_distance_for_similar_blocks=int(threshold_distance_for_similar_blocks_spin.get())
def GetMinCountForSimilarShiftVectors():
global min_count_for_similar_shift_vectors
min_count_for_similar_shift_vectors=int(min_count_for_similar_shift_vectors_spin.get())
root=Tk()
root.geometry("800x325")
root.title("Copy Move Forgery Detection")
menubar=Menu(root)
filemenu=Menu(menubar,tearoff=0)
filemenu.add_command(label="Open", command=OpenShowImage)
filemenu.add_command(label="DCT to Image Blocks", command=MakeDCT)
filemenu.add_command(label="Try to Detect Forgery",command=TryToDetectForgery)
filemenu.add_separator()
filemenu.add_command(label="Exit",command=root.quit)
menubar.add_cascade(label="File",menu=filemenu)
root.config(menu=menubar)
mainframe=Frame(root)
mainframe.pack(fill=BOTH)
leftframe=Frame(mainframe,width=256,height=256)
leftframe.pack(side=LEFT,padx=15,pady=25,anchor=W)
middleframe=Frame(mainframe)
middleframe.pack(side=LEFT,anchor=CENTER)
rightframe=Frame(mainframe,width=256,height=256)
rightframe.pack(side=LEFT,anchor=E,padx=15)
quantization_matrix_selection_box_label=Label(middleframe,text="Select Quantization Matrix")
quantization_matrix_selection_box_label.pack()
quantization_matrix_selection_box=ttk.Combobox(middleframe,width=15)
quantization_matrix_selection_box['values'] = ("Not Selected","QTable in Article","Q_CL","Q_90","Q_50","Q_10","Divide by 16")
quantization_matrix_selection_box.pack(anchor=CENTER,pady=3)
quantization_matrix_selection_box.current(0)
number_of_vector_to_compare_spin_label=Label(middleframe,text="Number of Vector to Compare")
number_of_vector_to_compare_spin_label.pack(pady=3)
number_of_vector_to_compare_spin = Spinbox(middleframe, from_=0, to=100,width=5,command=GetNumberOfVectorToCompare)
number_of_vector_to_compare_spin.pack(anchor=CENTER,pady=3)
maximum_Euclidean_distance_spin_label=Label(middleframe,text="Maximum Euclidean Distance")
maximum_Euclidean_distance_spin_label.pack(pady=3)
maximum_Euclidean_distance_spin = Spinbox(middleframe, from_=0, to=100,width=5,format="%.2f",increment=0.1,command=GetMaxEuclideanDistance)
maximum_Euclidean_distance_spin.pack(anchor=CENTER)
threshold_distance_for_similar_blocks_label=Label(middleframe,text="Threshold Distance for Similar Blocks")
threshold_distance_for_similar_blocks_label.pack(pady=3)
threshold_distance_for_similar_blocks_spin = Spinbox(middleframe, from_=5, to=100,width=5,command=GetThresholdDistanceForSimilarBlocks)
threshold_distance_for_similar_blocks_spin.pack(anchor=CENTER,pady=3)
min_count_for_similar_shift_vectors_label = Label(middleframe,text="Minimum Count for Similar Shift Vectors")
min_count_for_similar_shift_vectors_label.pack(pady=3)
min_count_for_similar_shift_vectors_spin = Spinbox(middleframe, from_=25, to=1000,width=5,command=GetMinCountForSimilarShiftVectors)
min_count_for_similar_shift_vectors_spin.pack(anchor=CENTER,pady=3)
accuracy_test_button=Button(middleframe,text="Accuracy Test",bg='gray' ,width=15,command=AccuracyTest)
accuracy_test_button.pack(anchor=CENTER,pady=7)
status=Label(root,text="Made By zumrudu-anka",bd=1,relief=SUNKEN)
status.pack(side=BOTTOM,fill=X)
root.mainloop()
|
182584
|
import tensorflow as tf
def lrelu(x, trainable=None):
alpha = 0.2
return tf.maximum(alpha * x, x)
def prelu(x, trainable=True):
alpha = tf.get_variable(
name='alpha',
shape=x.get_shape()[-1],
dtype=tf.float32,
initializer=tf.constant_initializer(0.0),
trainable=trainable)
return tf.maximum(0.0, x) + alpha * tf.minimum(0.0, x)
def conv_layer(x, filter_shape, stride, trainable=True):
filter_ = tf.get_variable(
name='weight',
shape=filter_shape,
dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer(),
trainable=trainable)
return tf.nn.conv2d(
input=x,
filter=filter_,
strides=[1, stride, stride, 1],
padding='SAME')
def deconv_layer(x, filter_shape, output_shape, stride, trainable=True):
filter_ = tf.get_variable(
name='weight',
shape=filter_shape,
dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer(),
trainable=trainable)
return tf.nn.conv2d_transpose(
value=x,
filter=filter_,
output_shape=output_shape,
strides=[1, stride, stride, 1])
def max_pooling_layer(x, size, stride):
return tf.nn.max_pool(
value=x,
ksize=[1, size, size, 1],
strides=[1, stride, stride, 1],
padding='SAME')
def avg_pooling_layer(x, size, stride):
return tf.nn.avg_pool(
value=x,
ksize=[1, size, size, 1],
strides=[1, stride, stride, 1],
padding='SAME')
def full_connection_layer(x, out_dim, trainable=True):
in_dim = x.get_shape().as_list()[-1]
W = tf.get_variable(
name='weight',
shape=[in_dim, out_dim],
dtype=tf.float32,
initializer=tf.truncated_normal_initializer(stddev=0.1),
trainable=trainable)
b = tf.get_variable(
name='bias',
shape=[out_dim],
dtype=tf.float32,
initializer=tf.constant_initializer(0.0),
trainable=trainable)
return tf.add(tf.matmul(x, W), b)
def batch_normalize(x, is_training, decay=0.99, epsilon=0.001, trainable=True):
def bn_train():
batch_mean, batch_var = tf.nn.moments(x, axes=[0, 1, 2])
train_mean = tf.assign(
pop_mean, pop_mean * decay + batch_mean * (1 - decay))
train_var = tf.assign(
pop_var, pop_var * decay + batch_var * (1 - decay))
with tf.control_dependencies([train_mean, train_var]):
return tf.nn.batch_normalization(
x, batch_mean, batch_var, beta, scale, epsilon)
def bn_inference():
return tf.nn.batch_normalization(
x, pop_mean, pop_var, beta, scale, epsilon)
dim = x.get_shape().as_list()[-1]
beta = tf.get_variable(
name='beta',
shape=[dim],
dtype=tf.float32,
initializer=tf.truncated_normal_initializer(stddev=0.0),
trainable=trainable)
scale = tf.get_variable(
name='scale',
shape=[dim],
dtype=tf.float32,
initializer=tf.truncated_normal_initializer(stddev=0.1),
trainable=trainable)
pop_mean = tf.get_variable(
name='pop_mean',
shape=[dim],
dtype=tf.float32,
initializer=tf.constant_initializer(0.0),
trainable=False)
pop_var = tf.get_variable(
name='pop_var',
shape=[dim],
dtype=tf.float32,
initializer=tf.constant_initializer(1.0),
trainable=False)
return tf.cond(is_training, bn_train, bn_inference)
def flatten_layer(x):
input_shape = x.get_shape().as_list()
dim = input_shape[1] * input_shape[2] * input_shape[3]
transposed = tf.transpose(x, (0, 3, 1, 2))
return tf.reshape(transposed, [-1, dim])
def pixel_shuffle_layer(x, r, n_split):
def PS(x, r):
bs, a, b, c = x.get_shape().as_list()
x = tf.reshape(x, (bs, a, b, r, r))
x = tf.transpose(x, (0, 1, 2, 4, 3))
x = tf.split(x, a, 1)
x = tf.concat([tf.squeeze(x_) for x_ in x], 2)
x = tf.split(x, b, 1)
x = tf.concat([tf.squeeze(x_) for x_ in x], 2)
return tf.reshape(x, (bs, a*r, b*r, 1))
xc = tf.split(x, n_split, 3)
return tf.concat([PS(x_, r) for x_ in xc], 3)
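# Shape sketch: pixel_shuffle_layer performs sub-pixel upscaling, turning an
# (N, H, W, r*r*n_split) tensor into (N, H*r, W*r, n_split); e.g. with r=2 and
# n_split=64, a (16, 24, 24, 256) input becomes (16, 48, 48, 64).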
|
182609
|
from django.db import models
from . import abstract_models
class BrightcoveItem(abstract_models.AbstractBrightcoveItem):
"""
Media from Brightcove.
Brightcove is a video editing and management product which can be
found at http://brightcove.com/.
It ships with built-in APIs and players.
The BrightcoveField is a Django-specific implementation that allows
videos to be embedded. It anticipates that the video ID will be used
as a lookup value.
"""
is_full_width = models.BooleanField(default=False)
is_four_three = models.BooleanField(default=False, help_text='Does this video have a 4:3 ratio?')
|
182611
|
import torch
import torch.nn as nn
import math
import torch.nn.functional as F  # F.normalize is used for the attention maps in forward()
# wildcard import for legacy reasons
if __name__ == '__main__':
import sys
sys.path.append("..")
from models.blocks import *
from models.wide_resnet import compression, group_lowrank
# only used in the first convolution, which we do not substitute by convention
def conv_bn(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
# only used for final fully connected layers
def conv_1x1_bn(inp, oup, ConvClass):
return nn.Sequential(
ConvClass(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio, ConvClass):
super(InvertedResidual, self).__init__()
self.stride = stride
self.Conv = ConvClass
assert stride in [1, 2]
hidden_dim = round(inp * expand_ratio)
self.use_res_connect = self.stride == 1 and inp == oup
if expand_ratio == 1:
self.conv = nn.Sequential(
# dw
nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
# pw-linear
self.Conv(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
else:
self.conv = nn.Sequential(
# pw
self.Conv(inp, hidden_dim, 1, 1, 0, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
# dw
nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
# pw-linear
self.Conv(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
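# InvertedResidual is the MobileNetV2 bottleneck: an optional 1x1 pointwise
# expansion (skipped when expand_ratio == 1), a 3x3 depthwise convolution, and
# a linear 1x1 projection, with a residual connection whenever stride == 1 and
# the input/output channel counts match.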
class MobileNetV2(nn.Module):
def __init__(self, ConvClass, block=None, n_class=1000,
input_size=224, width_mult=1.):
super(MobileNetV2, self).__init__()
self.kwargs = dict(ConvClass=ConvClass, block=block, n_class=n_class,
input_size=input_size, width_mult=width_mult)
block = InvertedResidual
self.Conv = ConvClass
input_channel = 32
last_channel = 1280
interverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
# building first layer
assert input_size % 32 == 0
input_channel = int(input_channel * width_mult)
self.last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel
self.features = [conv_bn(3, input_channel, 2)]
# building inverted residual blocks
for t, c, n, s in interverted_residual_setting:
output_channel = int(c * width_mult)
for i in range(n):
if i == 0:
self.features.append(block(input_channel, output_channel, s, expand_ratio=t, ConvClass=self.Conv))
else:
self.features.append(block(input_channel, output_channel, 1, expand_ratio=t, ConvClass=self.Conv))
input_channel = output_channel
# building last several layers
self.features.append(conv_1x1_bn(input_channel, self.last_channel, self.Conv))
# make it nn.Sequential
self.features = nn.Sequential(*self.features)
# building classifier
self.classifier_conv = self.Conv(self.last_channel, n_class, 1, 1, 0, bias=True)
# the original Linear classifier is replaced by the 1x1 classifier_conv above;
# dropout (0.2) was removed for training, following the upstream GitHub repo
self._initialize_weights()
def classifier(self, x):
n, c = x.size()
x = self.classifier_conv(x.view(n,c,1,1))
n, c, _, _ = x.size()
return x.view(n,c)
def forward(self, x):
#y_orig = self.features(x)
attention_maps = []
attention = lambda x: F.normalize(x.pow(2).mean(1).view(x.size(0), -1))
y = x
for block in self.features:
y = block(y)
if isinstance(block, InvertedResidual):
if block.stride > 1:
attention_maps.append(attention(y))
#error = torch.abs(y-y_orig).max()
#assert error < 1e-2, f"Error {error} above 0.01"
x = y
x = x.mean(3).mean(2)
x = self.classifier(x)
return x, attention_maps
def compression_ratio(self):
return compression(self.__class__, self.kwargs)
def grouped_parameters(self, weight_decay):
return group_lowrank(self.named_parameters(), weight_decay,
self.compression_ratio())
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
if hasattr(m, 'weight'):
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def save_reference():
net = MobileNetV2(Conv)
net.eval()
x = torch.randn(1,3,224,224).float()
y, _ = net(x)  # forward returns (logits, attention_maps)
print(y.size())
torch.save(x, "reference_input_mobilenet.torch")
torch.save(y, "reference_output_mobilenet.torch")
torch.save(net.state_dict(), "reference_state_mobilenet.torch")
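# match_keys copies a reference state dict into this model positionally; the
# second-to-last parameter (the original Linear classifier weight of shape
# (n, m)) is reshaped to (n, m, 1, 1) so it fits the 1x1 classifier_conv.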
def match_keys(net, state):
nstate = net.state_dict()
old_keys = [k for k in state]
for i, k in enumerate(nstate):
p = state[old_keys[i]]
if i == (len(old_keys)-2):
n,m = p.size()
nstate[k] = p.view(n,m,1,1)
else:
nstate[k] = p
return nstate
def test():
import os
net = MobileNetV2(Conv)
if os.path.exists("reference_state_mobilenet.torch"):
state = torch.load("reference_state_mobilenet.torch")
state = match_keys(net, state)
net.load_state_dict(state)
net.eval()
x = torch.load("reference_input_mobilenet.torch")
else:
x = torch.randn(1,3,224,224).float()
y, _ = net(Variable(x))
print(y.size())
# check if these match the test weights
if os.path.exists("reference_output_mobilenet.torch"):
ref_output = torch.load("reference_output_mobilenet.torch")
error = torch.abs(ref_output - y).max()
print(f"Error: {error}, Max logit: {y.max()}/{ref_output.max()}, Min logit: {y.min()}/{ref_output.min()}")
state = {
'net': net.state_dict(),
'epoch': 150,
'args': None,
'width': None,
'depth': None,
'conv': 'Conv',
'blocktype': None,
'module': None,
'train_losses': None,
'train_errors': None,
'val_losses': None,
'val_errors': [28.2],
}
torch.save(state, "mobilenetv2.tonylins.t7")
def test_compression():
net = MobileNetV2(Conv)
#net = MobileNetV2(conv_function('Hashed_0.1'))
nparams = lambda x: sum([p.numel() for p in x.parameters()])
for block in net.features:
print(nparams(block))
for x in block:
print(x)
print(nparams(x))
#CompressedConv = conv_function("Hashed_0.1")
for conv in ['Shuffle_%i'%i for i in [4,8,16,32]]+['Hashed_0.01']:
print(conv)
CompressedConv = conv_function(conv)
net = MobileNetV2(CompressedConv)
print(" ", net.compression_ratio())
if __name__ == '__main__':
test()
#test_compression()
|
182614
|
from __future__ import print_function
import deepstate_base
import logrun
class KleeTest(deepstate_base.DeepStateTestCase):
def run_deepstate(self, deepstate):
(r, output) = logrun.logrun([deepstate, "build/examples/Klee", "--klee"],
"deepstate.out", 1800)
self.assertEqual(r, 0)
self.assertTrue("zero" in output)
self.assertTrue("positive" in output)
self.assertTrue("negative" in output)
|
182626
|
import unittest
from numpy import max, abs, ones, zeros, copy, sum, sqrt, hstack
from cantera import Solution, one_atm, gas_constant
import numpy as np
from spitfire import ChemicalMechanismSpec
from os.path import join, abspath
from subprocess import getoutput
test_mech_directory = abspath(join('tests', 'test_mechanisms', 'old_xmls'))
mechs = [x.replace('.xml', '') for x in getoutput('ls ' + test_mech_directory + ' | grep .xml').split('\n')]
def validate_on_mechanism(mech, temperature, pressure, test_rhs=True, test_jac=True):
xml = join(test_mech_directory, mech + '.xml')
r = ChemicalMechanismSpec(xml, 'gas').griffon
gas = Solution(xml)
ns = gas.n_species
T = temperature
p = pressure
gas.TPX = T, p, ones(ns)
y = gas.Y
rho = gas.density_mass
state = hstack((rho, T, y[:-1]))
rhsGR = np.empty(ns + 1)
rhsGRTemporary = np.empty(ns + 1)
jacGR = np.empty((ns + 1) * (ns + 1))
r.reactor_rhs_isochoric(state, 0, 0, np.ndarray(1), 0, 0, 0, 0, 0, 0, 0, False, rhsGR)
r.reactor_jac_isochoric(state, 0, 0, np.ndarray(1), 0, 0, 0, 0, 0, 0, 0, False, 0, rhsGRTemporary, jacGR)
jacGR = jacGR.reshape((ns + 1, ns + 1), order='F')
def cantera_rhs(rho_arg, T_arg, Y_arg):
gas.TDY = T_arg, rho_arg, Y_arg
w = gas.net_production_rates * gas.molecular_weights
e = gas.standard_int_energies_RT * gas.T * gas_constant / gas.molecular_weights
cv = gas.cv_mass
rhs = zeros(ns + 1)
rhs[0] = 0.
rhs[1] = - sum(w * e) / (rho_arg * cv)
rhs[2:] = w[:-1] / rho_arg
return rhs
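# cantera_rhs builds the isochoric reactor right-hand side used as reference:
# d(rho)/dt = 0, dT/dt = -sum_i(w_i * e_i) / (rho * c_v), dY_i/dt = w_i / rho,
# with w the net mass production rates and e the specific internal energies.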
rhsCN = cantera_rhs(rho, T, y)
if test_rhs:
pass_rhs = max(abs(rhsGR - rhsCN) / (abs(rhsCN) + 1.)) < 100. * sqrt(np.finfo(float).eps)
if test_jac:
jacFD = zeros((ns + 1, ns + 1))
wm1 = zeros(ns + 1)
wp1 = zeros(ns + 1)
drho = 1.e-4
dT = 1.e-2
dY = 1.e-6
state_m = hstack((rho - drho, T, y[:-1]))
state_p = hstack((rho + drho, T, y[:-1]))
r.reactor_rhs_isochoric(state_m, 0, 0, np.ndarray(1), 0, 0, 0, 0, 0, 0, 0, False, wm1)
r.reactor_rhs_isochoric(state_p, 0, 0, np.ndarray(1), 0, 0, 0, 0, 0, 0, 0, False, wp1)
jacFD[:, 0] = (- wm1 + wp1) / (2. * drho)
state_m = hstack((rho, T - dT, y[:-1]))
state_p = hstack((rho, T + dT, y[:-1]))
r.reactor_rhs_isochoric(state_m, 0, 0, np.ndarray(1), 0, 0, 0, 0, 0, 0, 0, False, wm1)
r.reactor_rhs_isochoric(state_p, 0, 0, np.ndarray(1), 0, 0, 0, 0, 0, 0, 0, False, wp1)
jacFD[:, 1] = (- wm1 + wp1) / (2. * dT)
for i in range(ns - 1):
y_m1, y_p1 = copy(y), copy(y)
y_m1[i] += - dY
y_m1[-1] -= - dY
y_p1[i] += dY
y_p1[-1] -= dY
state_m = hstack((rho, T, y_m1[:-1]))
state_p = hstack((rho, T, y_p1[:-1]))
r.reactor_rhs_isochoric(state_m, 0, 0, np.ndarray(1), 0, 0, 0, 0, 0, 0, 0, False, wm1)
r.reactor_rhs_isochoric(state_p, 0, 0, np.ndarray(1), 0, 0, 0, 0, 0, 0, 0, False, wp1)
jacFD[:, 2 + i] = (- wm1 + wp1) / (2. * dY)
gas.TDY = T, rho, y
cv = gas.cv_mass
cvi = gas.standard_cp_R * gas_constant / gas.molecular_weights
w = gas.net_production_rates * gas.molecular_weights
e = gas.standard_int_energies_RT * gas.T * gas_constant / gas.molecular_weights
gas.TDY = T + dT, rho, y
wp = gas.net_production_rates * gas.molecular_weights
cvp = gas.cv_mass
gas.TDY = T - dT, rho, y
wm = gas.net_production_rates * gas.molecular_weights
cvm = gas.cv_mass
wsensT = (wp - wm) / (2. * dT)
cvsensT = (cvp - cvm) / (2. * dT)
jacFD11 = np.copy(jacFD[1, 1])
jacSemiFD11 = - 1. / cv * (1. / rho * (sum(wsensT * e) + sum(cvi * w)) + cvsensT * rhsGR[1])
pass_jac = max(abs(jacGR - jacFD) / (abs(jacGR) + 1.)) < 1.e-3
if not pass_jac:
print('fd:')
for i in range(ns + 1):
for j in range(ns + 1):
print(f'{jacFD[i, j]:12.2e}', end=', ')
print('')
print('gr:')
for i in range(ns + 1):
for j in range(ns + 1):
print(f'{jacGR[i, j]:12.2e}', end=', ')
print('')
print('gr-fd:')
for i in range(ns + 1):
for j in range(ns + 1):
df = (jacGR[i, j] - jacFD[i, j]) / (abs(jacFD[i, j]) + 1.0)
if df > 1.e-3:
print(f'{df:12.2e}', end=', ')
else:
print(f'{"":16}', end=', ')
print('')
print('')
if test_rhs:
return pass_rhs
if test_jac:
return pass_jac
def create_test(m, T, p, test_rhs, test_jac):
def test(self):
self.assertTrue(validate_on_mechanism(m, T, p, test_rhs, test_jac))
return test
class Accuracy(unittest.TestCase):
pass
temperature_dict = {'600K': 600., '1200K': 1200.}
pressure_dict = {'1atm': one_atm, '2atm': 2. * one_atm}
for mech in mechs:
for temperature in temperature_dict:
for pressure in pressure_dict:
rhsname = 'test_rhs_' + mech + '_' + temperature + '_' + pressure
jacname = 'test_jac_' + mech + '_' + temperature + '_' + pressure
jsdname = 'test_jac_sparse_vs_dense_' + mech + '_' + temperature + '_' + pressure
setattr(Accuracy, rhsname, create_test(mech, temperature_dict[temperature], pressure_dict[pressure],
test_rhs=True, test_jac=False))
if 'methane' not in mech: # skip methane in the finite difference Jacobian test
setattr(Accuracy, jacname, create_test(mech, temperature_dict[temperature], pressure_dict[pressure],
test_rhs=False, test_jac=True))
if __name__ == '__main__':
unittest.main()
|
182658
|
import numpy as np
import pandas as pd
# -----------------------------------
# Regression
# -----------------------------------
# rmse
from sklearn.metrics import mean_squared_error
# y_true are the true values、y_pred are the predictions
y_true = [1.0, 1.5, 2.0, 1.2, 1.8]
y_pred = [0.8, 1.5, 1.8, 1.3, 3.0]
rmse = np.sqrt(mean_squared_error(y_true, y_pred))
print(rmse)
# 0.5532
# -----------------------------------
# Binary classification
# -----------------------------------
# Confusion matrix
from sklearn.metrics import confusion_matrix
# True values and predicted values are binary, i.e. either 0 or 1
y_true = [1, 0, 1, 1, 0, 1, 1, 0]
y_pred = [0, 0, 1, 1, 0, 0, 1, 1]
tp = np.sum((np.array(y_true) == 1) & (np.array(y_pred) == 1))
tn = np.sum((np.array(y_true) == 0) & (np.array(y_pred) == 0))
fp = np.sum((np.array(y_true) == 0) & (np.array(y_pred) == 1))
fn = np.sum((np.array(y_true) == 1) & (np.array(y_pred) == 0))
confusion_matrix1 = np.array([[tp, fp],
[fn, tn]])
print(confusion_matrix1)
# array([[3, 1],
# [2, 2]])
# Can also be created using the confusion_matrix() function from scikit-learn's metrics, but
# be aware that the arrangement of the confusion matrix elements may be different
confusion_matrix2 = confusion_matrix(y_true, y_pred)
print(confusion_matrix2)
# array([[2, 1],
# [2, 3]])
# -----------------------------------
# accuracy
from sklearn.metrics import accuracy_score
# True values and predicted values are binary, i.e. either 0 or 1
y_true = [1, 0, 1, 1, 0, 1, 1, 0]
y_pred = [0, 0, 1, 1, 0, 0, 1, 1]
accuracy = accuracy_score(y_true, y_pred)
print(accuracy)
# 0.625
# -----------------------------------
# logloss
from sklearn.metrics import log_loss
# True values are binary (0 or 1), predicted values are probabilities
y_true = [1, 0, 1, 1, 0, 1]
y_prob = [0.1, 0.2, 0.8, 0.8, 0.1, 0.3]
logloss = log_loss(y_true, y_prob)
print(logloss)
# 0.7136
# -----------------------------------
# Multi-class classification
# -----------------------------------
# multi-class logloss
from sklearn.metrics import log_loss
# True values are 3-class classifiers, predicted values are probabilities for each class
y_true = np.array([0, 2, 1, 2, 2])
y_pred = np.array([[0.68, 0.32, 0.00],
[0.00, 0.00, 1.00],
[0.60, 0.40, 0.00],
[0.00, 0.00, 1.00],
[0.28, 0.12, 0.60]])
logloss = log_loss(y_true, y_pred)
print(logloss)
# 0.3626
# -----------------------------------
# Multi-label classification
# -----------------------------------
# mean_f1, macro_f1, micro_f1
from sklearn.metrics import f1_score
# For calculating performance metric of multi-label classification, it is easier to handle the true / predicted values as binary matrices of record x class
# True values - [[1,2], [1], [1,2,3], [2,3], [3]]
y_true = np.array([[1, 1, 0],
[1, 0, 0],
[1, 1, 1],
[0, 1, 1],
[0, 0, 1]])
# Predicted values - [[1,3], [2], [1,3], [3], [3]]
y_pred = np.array([[1, 0, 1],
[0, 1, 0],
[1, 0, 1],
[0, 0, 1],
[0, 0, 1]])
# mean_f1 is the mean of the F1-scores for each record
mean_f1 = np.mean([f1_score(y_true[i, :], y_pred[i, :]) for i in range(len(y_true))])
# macro_f1 is the mean of the F1-scores for each class
n_class = 3
macro_f1 = np.mean([f1_score(y_true[:, c], y_pred[:, c]) for c in range(n_class)])
# micro_f1 is the F1-score calculated using the true/predicted values over every record-class pair
micro_f1 = f1_score(y_true.reshape(-1), y_pred.reshape(-1))
print(mean_f1, macro_f1, micro_f1)
# 0.5933, 0.5524, 0.6250
# Can also be calculated using a scikit-learn function
mean_f1 = f1_score(y_true, y_pred, average='samples')
macro_f1 = f1_score(y_true, y_pred, average='macro')
micro_f1 = f1_score(y_true, y_pred, average='micro')
# -----------------------------------
# Multi-class classification with ordered classes
# -----------------------------------
# quadratic weighted kappa
from sklearn.metrics import confusion_matrix, cohen_kappa_score
# Function for calculating quadratic weighted kappa
def quadratic_weighted_kappa(c_matrix):
numer = 0.0
denom = 0.0
for i in range(c_matrix.shape[0]):
for j in range(c_matrix.shape[1]):
n = c_matrix.shape[0]
wij = ((i - j) ** 2.0)
oij = c_matrix[i, j]
eij = c_matrix[i, :].sum() * c_matrix[:, j].sum() / c_matrix.sum()
numer += wij * oij
denom += wij * eij
return 1.0 - numer / denom
# y_true is the true class list, y_pred is the predicted class list
y_true = [1, 2, 3, 4, 3]
y_pred = [2, 2, 4, 4, 5]
# Calculate the confusion matrix
c_matrix = confusion_matrix(y_true, y_pred, labels=[1, 2, 3, 4, 5])
# Calculate quadratic weighted kappa
kappa = quadratic_weighted_kappa(c_matrix)
print(kappa)
# 0.6153
# Can also be calculated using a scikit-learn function
kappa = cohen_kappa_score(y_true, y_pred, weights='quadratic')
# -----------------------------------
# Recommendation
# -----------------------------------
# MAP@K
# K=3, with 5 records and 4 class types
K = 3
# True values for each record
y_true = [[1, 2], [1, 2], [4], [1, 2, 3, 4], [3, 4]]
# Predicted values for each record - as K=3, usually predict order of 3 records for each class
y_pred = [[1, 2, 4], [4, 1, 2], [1, 4, 3], [1, 2, 3], [1, 2, 4]]
# Function to calculate the average precision for each record
def apk(y_i_true, y_i_pred):
# Length of y_pred must be less than or equal to K, and all elements must be unique
assert (len(y_i_pred) <= K)
assert (len(np.unique(y_i_pred)) == len(y_i_pred))
sum_precision = 0.0
num_hits = 0.0
for i, p in enumerate(y_i_pred):
if p in y_i_true:
num_hits += 1
precision = num_hits / (i + 1)
sum_precision += precision
return sum_precision / min(len(y_i_true), K)
# Function for calculating MAP@K
def mapk(y_true, y_pred):
return np.mean([apk(y_i_true, y_i_pred) for y_i_true, y_i_pred in zip(y_true, y_pred)])
# Calculate MAP@K
print(mapk(y_true, y_pred))
# 0.65
# Even if the number of true values is the same, if the order is different then the score will be different
print(apk(y_true[0], y_pred[0]))
print(apk(y_true[1], y_pred[1]))
# 1.0, 0.5833
|
182659
|
from django.conf.urls.defaults import *
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Enable Slumber
(r'^slumber/', include('slumber.urls')),
# Uncomment the next line to enable the admin:
(r'^admin/(.*)', admin.site.root),
)
|
182662
|
from ansys.dpf import core as dpf
from ansys.dpf.core.check_version import server_meet_version
def test_unit_mesh_cache(simple_bar):
model = dpf.Model(simple_bar)
mesh = model.metadata.meshed_region
initunit = mesh.unit
assert len(mesh._cache.cached) == 1
assert mesh.unit == initunit
mesh.unit = "cm"
assert len(mesh._cache.cached) == 0
assert mesh.unit == "cm"
assert len(mesh._cache.cached) == 1
def test_named_selections_mesh_cache(simple_bar):
model = dpf.Model(simple_bar)
mesh = model.metadata.meshed_region
init = mesh.available_named_selections
assert len(mesh._cache.cached) == 1
assert mesh.available_named_selections == init
assert len(mesh._cache.cached) == 1
ns = mesh.named_selection(init[0])
assert len(mesh._cache.cached) == 2
def test_mismatch_instances_cache(simple_bar):
model = dpf.Model(simple_bar)
model2 = dpf.Model(simple_bar)
mesh = model.metadata.meshed_region
mesh2 = model2.metadata.meshed_region
initunit = mesh.unit
assert len(mesh._cache.cached) == 1
assert len(mesh2._cache.cached) == 0
assert mesh.unit == initunit
mesh.unit = "cm"
assert len(mesh._cache.cached) == 0
mesh2.unit
assert len(mesh2._cache.cached) == 1
def test_available_results_cache(simple_bar):
model = dpf.Model(simple_bar)
res_info = model.metadata.result_info
for res in res_info:
pass  # iterating caches each available result
assert len(res_info._cache.cached) == len(res_info) + 1
def test_physics_type_cache(simple_bar):
ds = dpf.DataSources(simple_bar)
provider = dpf.operators.metadata.result_info_provider(data_sources=ds)
res_info = provider.outputs.result_info()
assert len(res_info._cache.cached) == 0
res_info.unit_system
assert len(res_info._cache.cached) == 1
res_info.physics_type
if server_meet_version("3.0", ds._server):
assert len(res_info._cache.cached) == 2
else:
assert len(res_info._cache.cached) == 1
def test_server_info_cache():
if not dpf.SERVER:
dpf.start_local_server()
dpf.SERVER.info
identifier = dpf.cache.MethodIdentifier("_get_server_info", (), {})
assert identifier in dpf.SERVER._base_service._cache.cached
|
182721
|
import glob
import logging
import os
from datetime import datetime
from pywatts.core.exceptions.io_exceptions import IOException
logger = logging.getLogger()
ALLOWED_FILES = ["png", "csv", "xlsx", "pickle", "tex", "json", "h5", "pt", "md"]
class FileManager:
"""
This class is responsible for managing files in pyWATTS.
It ensures that all files for one pipeline run are in the same folder.
Moreover, it appends a timestamp to the corresponding path
:param path: Root path for the results of the pipeline
:type path: str
:param time_mode: If true, then a subfolder with the current time is created
:type time_mode: bool
"""
def __init__(self, path, time_mode=True):
self.basic_path = path
self.time_mode = time_mode
if time_mode:
self.path = os.path.join(path, datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
else:
self.path = path
def _create_path_dirs(self):
"""
Creates all directories needed to write files to the directory at self.path
"""
if self.time_mode:
os.makedirs(self.path, exist_ok=False)
else:
os.makedirs(self.path, exist_ok=True)
logger.info("Created folder %s", self.path)
def get_path(self, filename: str, path=None):
"""
Returns a path to a file. This path is inside the folder of the corresponding pipeline run.
Moreover, it is ensured that no existing data are overwritten.
:param filename: Name of the file to write
:type filename: str
:param path: Optional path extension to the file.
:return: The path, where the results should be stored.
"""
if not os.path.exists(self.path):
self._create_path_dirs()
if filename.split(".")[-1] not in ALLOWED_FILES:
message = f"{filename.split('.')[-1]} is not an allowed file type. Allowed types are {ALLOWED_FILES}."
logger.error(message)
raise IOException(message)
if os.path.split(filename)[0] != "":
logger.warning("Remove head of %s, since this contains path informations.", filename)
filename = os.path.split(filename)[1]
if path is not None:
path = os.path.join(self.path, path)
os.makedirs(path)
logger.info("Created folder %s", path)
else:
path = self.path
return_path = os.path.join(path, filename)
if os.path.isfile(return_path):
filename, extension = os.path.splitext(return_path)
number = len(glob.glob(f'{filename}*{extension}'))
logger.info("File %s already exists. We appended %s to the name", return_path, number + 1)
return_path = f"{filename}_{number + 1}{extension}"  # splitext keeps the leading dot in extension
return return_path
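# Minimal usage sketch (hypothetical paths): one timestamped folder per run,
# and duplicate file names are deduplicated rather than overwritten.
# fm = FileManager("results") # writes under results/<YYYY_MM_DD_HH_MM_SS>/
# p1 = fm.get_path("scores.csv") # .../scores.csv
# p2 = fm.get_path("scores.csv") # .../scores_2.csv once scores.csv exists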
|
182768
|
from typing import Callable
from typing import List
from typing import Optional
from typing import Sequence
from optuna._experimental import experimental
from optuna.study import Study
from optuna.trial import FrozenTrial
from optuna.visualization._pareto_front import _get_pareto_front_info
from optuna.visualization._pareto_front import _ParetoFrontInfo
from optuna.visualization.matplotlib._matplotlib_imports import _imports
if _imports.is_successful():
from optuna.visualization.matplotlib._matplotlib_imports import Axes
from optuna.visualization.matplotlib._matplotlib_imports import plt
@experimental("2.8.0")
def plot_pareto_front(
study: Study,
*,
target_names: Optional[List[str]] = None,
include_dominated_trials: bool = True,
axis_order: Optional[List[int]] = None,
constraints_func: Optional[Callable[[FrozenTrial], Sequence[float]]] = None,
targets: Optional[Callable[[FrozenTrial], Sequence[float]]] = None,
) -> "Axes":
"""Plot the Pareto front of a study.
.. seealso::
Please refer to :func:`optuna.visualization.plot_pareto_front` for an example.
Example:
The following code snippet shows how to plot the Pareto front of a study.
.. plot::
import optuna
def objective(trial):
x = trial.suggest_float("x", 0, 5)
y = trial.suggest_float("y", 0, 3)
v0 = 4 * x ** 2 + 4 * y ** 2
v1 = (x - 5) ** 2 + (y - 5) ** 2
return v0, v1
study = optuna.create_study(directions=["minimize", "minimize"])
study.optimize(objective, n_trials=50)
optuna.visualization.matplotlib.plot_pareto_front(study)
Args:
study:
A :class:`~optuna.study.Study` object whose trials are plotted for their objective
values. ``study.n_objectives`` must be either 2 or 3 when ``targets`` is :obj:`None`.
target_names:
Objective name list used as the axis titles. If :obj:`None` is specified,
"Objective {objective_index}" is used instead. If ``targets`` is specified
for a study that does not contain any completed trial,
``target_name`` must be specified.
include_dominated_trials:
A flag to include all dominated trial's objective values.
axis_order:
A list of indices indicating the axis order. If :obj:`None` is specified,
default order is used. ``axis_order`` and ``targets`` cannot be used at the same time.
.. warning::
Deprecated in v3.0.0. This feature will be removed in the future. The removal of
this feature is currently scheduled for v5.0.0, but this schedule is subject to
change. See https://github.com/optuna/optuna/releases/tag/v3.0.0.
constraints_func:
An optional function that computes the objective constraints. It must take a
:class:`~optuna.trial.FrozenTrial` and return the constraints. The return value must
be a sequence of :obj:`float` s. A value strictly larger than 0 means that a
constraint is violated. A value equal to or smaller than 0 is considered feasible.
This specification is the same as in, for example,
:class:`~optuna.integration.NSGAIISampler`.
If given, trials are classified into three categories: feasible and best, feasible but
non-best, and infeasible. Categories are shown in different colors. Here, whether a
trial is best (on Pareto front) or not is determined ignoring all infeasible trials.
targets:
A function that returns a tuple of target values to display.
The argument to this function is :class:`~optuna.trial.FrozenTrial`.
``targets`` must be :obj:`None` or return 2 or 3 values.
``axis_order`` and ``targets`` cannot be used at the same time.
If ``study.n_objectives`` is neither 2 nor 3, ``targets`` must be specified.
.. note::
Added in v3.0.0 as an experimental feature. The interface may change in newer
versions without prior notice.
See https://github.com/optuna/optuna/releases/tag/v3.0.0.
Returns:
A :class:`matplotlib.axes.Axes` object.
"""
_imports.check()
info = _get_pareto_front_info(
study, target_names, include_dominated_trials, axis_order, constraints_func, targets
)
if info.n_targets == 2:
return _get_pareto_front_2d(info)
elif info.n_targets == 3:
return _get_pareto_front_3d(info)
else:
assert False, "Must not reach here"
def _get_pareto_front_2d(info: _ParetoFrontInfo) -> "Axes":
# Set up the graph style.
plt.style.use("ggplot") # Use ggplot style sheet for similar outputs to plotly.
_, ax = plt.subplots()
ax.set_title("Pareto-front Plot")
cmap = plt.get_cmap("tab10") # Use tab10 colormap for similar outputs to plotly.
ax.set_xlabel(info.target_names[info.axis_order[0]])
ax.set_ylabel(info.target_names[info.axis_order[1]])
trial_label: str = "Trial"
if (
info.infeasible_trials_with_values is not None
and len(info.infeasible_trials_with_values) > 0
):
ax.scatter(
x=[values[info.axis_order[0]] for _, values in info.infeasible_trials_with_values],
y=[values[info.axis_order[1]] for _, values in info.infeasible_trials_with_values],
color="#cccccc",
label="Infeasible Trial",
)
trial_label = "Feasible Trial"
if info.non_best_trials_with_values is not None and len(info.non_best_trials_with_values) > 0:
ax.scatter(
x=[values[info.axis_order[0]] for _, values in info.non_best_trials_with_values],
y=[values[info.axis_order[1]] for _, values in info.non_best_trials_with_values],
color=cmap(0),
label=trial_label,
)
if info.best_trials_with_values is not None and len(info.best_trials_with_values) > 0:
ax.scatter(
x=[values[info.axis_order[0]] for _, values in info.best_trials_with_values],
y=[values[info.axis_order[1]] for _, values in info.best_trials_with_values],
color=cmap(3),
label="Best Trial",
)
if info.non_best_trials_with_values is not None and ax.has_data():
ax.legend()
return ax
def _get_pareto_front_3d(info: _ParetoFrontInfo) -> "Axes":
# Set up the graph style.
plt.style.use("ggplot") # Use ggplot style sheet for similar outputs to plotly.
fig = plt.figure()
ax = fig.add_subplot(projection="3d")
ax.set_title("Pareto-front Plot")
cmap = plt.get_cmap("tab10") # Use tab10 colormap for similar outputs to plotly.
ax.set_xlabel(info.target_names[info.axis_order[0]])
ax.set_ylabel(info.target_names[info.axis_order[1]])
ax.set_zlabel(info.target_names[info.axis_order[2]])
trial_label: str = "Trial"
if (
info.infeasible_trials_with_values is not None
and len(info.infeasible_trials_with_values) > 0
):
ax.scatter(
xs=[values[info.axis_order[0]] for _, values in info.infeasible_trials_with_values],
ys=[values[info.axis_order[1]] for _, values in info.infeasible_trials_with_values],
zs=[values[info.axis_order[2]] for _, values in info.infeasible_trials_with_values],
color="#cccccc",
label="Infeasible Trial",
)
trial_label = "Feasible Trial"
if info.non_best_trials_with_values is not None and len(info.non_best_trials_with_values) > 0:
ax.scatter(
xs=[values[info.axis_order[0]] for _, values in info.non_best_trials_with_values],
ys=[values[info.axis_order[1]] for _, values in info.non_best_trials_with_values],
zs=[values[info.axis_order[2]] for _, values in info.non_best_trials_with_values],
color=cmap(0),
label=trial_label,
)
if info.best_trials_with_values is not None and len(info.best_trials_with_values):
ax.scatter(
xs=[values[info.axis_order[0]] for _, values in info.best_trials_with_values],
ys=[values[info.axis_order[1]] for _, values in info.best_trials_with_values],
zs=[values[info.axis_order[2]] for _, values in info.best_trials_with_values],
color=cmap(3),
label="Best Trial",
)
if info.non_best_trials_with_values is not None and ax.has_data():
ax.legend()
return ax
|
182809
|
from tqdm import tqdm
from bol.utils.helper_functions import read_wav_file, convert_to_tensor
from .._model import BolModel
from bol.data import Wav2Vec2TsDataLoader
from bol.utils.resampler import resample_using_sox
class Wav2Vec2TS(BolModel):
def __init__(self, model_path, use_cuda_if_available):
        super().__init__(model_path, use_cuda_if_available)  # pass the flag through rather than the literal string "False"
self.load_jit_model()
def predict_for_files(self, file_path, verbose, dataloader=False, convert=False):
preds = []
filenames = []
        disable = not verbose
if dataloader:
            dataloader_obj = Wav2Vec2TsDataLoader(batch_size=1, num_workers=1, file_data_path=file_path, convert=convert)
dataloader = dataloader_obj.get_file_data_loader()
for batch in tqdm(dataloader, disable=disable):
wav = batch[0].squeeze(1)
file = batch[1]
pred = self._model(wav)
preds.append(pred)
filenames.extend(file)
else:
for file in tqdm(file_path, disable=disable):
wav, sample_rate = read_wav_file(file, 'sf')
if sample_rate != 16000 and convert:
wav = resample_using_sox(wav, input_type='array', output_type='array', sample_rate_in=sample_rate)
wav = convert_to_tensor(wav)
pred = self._model(wav)
preds.append(pred)
filenames.append(file)
return preds, filenames
def predict(
self,
file_path,
with_lm=False,
return_filenames=True,
apply_vad=False,
verbose=0,
convert=False
):
        if isinstance(file_path, str):
file_path = [file_path]
preds = []
filenames = []
if apply_vad:
for file in file_path:
files_split_from_vad = self.preprocess_vad(file)
preds_local, filenames_local = self.predict_for_files(
files_split_from_vad, verbose=verbose, convert=False
)
predictions = self.postprocess_vad(filenames_local, preds_local)
preds.append(predictions)
filenames.append(file)
else:
preds, filenames = self.predict_for_files(file_path, verbose=verbose, convert=convert)
predictions = dict(zip(filenames, preds))
if return_filenames:
final_preds = [
{"file": key, "transcription": value}
for key, value in predictions.items()
]
else:
final_preds = preds
return final_preds
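# Usage sketch (model path and audio file are placeholders):
# model = Wav2Vec2TS("/path/to/model.pt", use_cuda_if_available=False)
# results = model.predict(["audio.wav"], convert=True)
# ``results`` is a list of {"file": ..., "transcription": ...} dicts by default.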
|
182812
|
import pytest
from abnf.parser import *
def test_empty_literal():
parser = Literal('')
assert parser
def test_literal():
parser = Literal('moof')
source = 'moof'
node, start = parser.parse(source, 0)
assert node.value == 'moof'
@pytest.mark.parametrize("value", [None, 47, ('a', 'b', 'c'), (1, 2)])
def test_literal_bad_value(value):
with pytest.raises(TypeError):
Literal(value)
def test_literal_range_fail():
parser = Literal(('a', 'b'))
with pytest.raises(ParseError):
parser.parse('c', 0)
def test_literal_range_out_of_bounds():
parser = Literal(('a', 'b'))
with pytest.raises(ParseError):
parser.parse('a', 1)
def test_empty_literal_out_of_bounds():
parser = Literal('')
src = 'a'
with pytest.raises(ParseError):
parser.parse(src, 1)
@pytest.mark.parametrize('value, expected', [('foo', r"Literal('foo')"), ('\r', r"Literal('\x0d')")])
def test_literal_str(value, expected):
parser = Literal(value)
assert str(parser) == expected
@pytest.mark.parametrize('value, src', [('A', 'A'), ('A', 'a'), ('a', 'A'), ('a', 'a')])
def test_literal_case_insensitive(value, src):
parser = Literal(value)
node, start = parser.parse(src, 0)
assert node.value == src
@pytest.mark.parametrize("src", ['moof', 'MOOF', 'mOOf', 'mOoF'])
def test_char_val(src):
node, start = ABNFGrammarRule('char-val').parse('"moof"', 0)
visitor = CharValNodeVisitor()
parser = visitor.visit_char_val(node)
char_node, start = parser.parse(src, 0)
assert char_node and char_node.value == src
@pytest.mark.parametrize("src", ['moof', 'MOOF', 'mOOf', 'mOoF'])
def test_char_val_case_insensitive(src):
node, start = ABNFGrammarRule('char-val').parse('%i"moof"', 0)
visitor = CharValNodeVisitor()
parser = visitor.visit_char_val(node)
char_node, start = parser.parse(src, 0)
assert char_node and char_node.value.casefold() == 'moof'
def test_char_val_case_sensitive():
node, start = ABNFGrammarRule('char-val').parse('%s"MOOF"', 0)
visitor = CharValNodeVisitor()
parser = visitor.visit_char_val(node)
src = 'MOOF'
char_node, start = parser.parse(src, 0)
assert char_node and char_node.value == src
@pytest.mark.parametrize("src", ['MOOF', 'mOOf', 'mOoF'])
def test_char_val_case_sensitive_fail(src):
node, start = ABNFGrammarRule('char-val').parse('%s"moof"', 0)
visitor = CharValNodeVisitor()
parser = visitor.visit_char_val(node)
with pytest.raises(ParseError):
parser.parse(src, 0)
def test_bin_val():
src = "b01111000"
node = ABNFGrammarRule('bin-val').parse_all(src)
visitor = NumValVisitor()
parser = visitor.visit(node)
assert parser.value == 'x'
@pytest.mark.parametrize("src", ['A', 'B', 'Z'])
def test_literal_range(src):
parser = Literal(('\x41', '\x5A'))
node, start = parser.parse(src, 0)
assert node and node.value == src
@pytest.mark.parametrize("src", ['foo', 'bar'])
def test_alternation(src):
parser = Alternation(Literal('foo'), Literal('bar'))
node, start = parser.parse(src, 0)
assert node.value == src
# test repetition and match of empty elements.
@pytest.mark.parametrize("src, expected", [
('1*43', (1, 43)),
('1*', (1, None)),
('*43', (0, 43)),
('43', (43, 43)),
])
def test_rule_repeat(src, expected):
node, start = ABNFGrammarRule('repeat').parse(src, 0)
visitor = ABNFGrammarNodeVisitor(ABNFGrammarRule)
parser = visitor.visit_repeat(node)
assert (parser.min, parser.max) == expected
def test_repetition():
parser = Repetition(Repeat(1, 2), Literal('a'))
node, start = parser.parse('aa', 0)
assert [x for x in flatten(node)] == [LiteralNode('a', x, 1) for x in range(0, 2)]
def test_repetition_str():
parser = Repetition(Repeat(1, 2), Literal('a'))
assert str(parser) == "Repetition(Repeat(1, 2), Literal('a'))"
# concatenation has higher precedence than alternation; the next few tests confirm this.
@pytest.mark.parametrize("src", ['bc', 'a'])
def test_operator_precedence(src):
grammar_src = '"a" / "b" "c"'
node, start = ABNFGrammarRule('alternation').parse(grammar_src, 0)
visitor = ABNFGrammarNodeVisitor(ABNFGrammarRule)
parser = visitor.visit_alternation(node)
node, start = parser.parse(src, 0)
assert ''.join(x.value for x in flatten(node)) == src
@pytest.mark.parametrize("src", ['ac'])
def test_operator_precedence_1(src):
grammar_src = '"a" / "b" "c"'
node, start = ABNFGrammarRule('alternation').parse(grammar_src, 0)
visitor = ABNFGrammarNodeVisitor(ABNFGrammarRule)
parser = visitor.visit_alternation(node)
node, start = parser.parse(src, 0)
assert node.value == 'a'
@pytest.mark.parametrize("src", ['ac', 'bc'])
def test_operator_precedence_2(src):
grammar_src = '("a" / "b") "c"'
node, start = ABNFGrammarRule('concatenation').parse(grammar_src, 0)
visitor = ABNFGrammarNodeVisitor(ABNFGrammarRule)
parser = visitor.visit_concatenation(node)
node, start = parser.parse(src, 0)
assert ''.join(x.value for x in node) == src
def test_node_str():
node_name = 'foo'
node_children = []
node = Node(name=node_name, *node_children)
assert str(node) == 'Node(name=%s, children=%s)' % (node_name, str(node_children))
def test_node_eq():
assert Node('foo') == Node('foo')
def test_literal_node_str():
# test just exercises Node.__str__.
node = LiteralNode('a', 1, 2)
assert str(node)
def test_literal_node_children():
node = LiteralNode('', 0, 0)
assert node.children == []
def test_Alternation_str():
parser = Alternation(Literal('foo'), Literal('bar'))
assert str(parser) == "Alternation(Literal('foo'), Literal('bar'))"
def test_Concatenation_str():
parser = Concatenation(Literal('foo'), Literal('bar'))
assert str(parser) == "Concatenation(Literal('foo'), Literal('bar'))"
def test_option_str():
parser = Option(Alternation(Literal('foo')))
assert str(parser) == "Option(Alternation(Literal('foo')))"
def test_rule_undefined():
with pytest.raises(GrammarError):
Rule('undefined').parse('x', 0)
def test_rule_str():
assert str(Rule('ALPHA')) == "Rule('ALPHA')"
@pytest.mark.parametrize("src", ['a', 'b'])
def test_rule_def_alternation(src):
class TestRule(Rule):
pass
rulelist = ['moof = "a"', 'moof =/ "b"']
for rule in rulelist:
TestRule.create(rule)
node, start = TestRule('moof').parse(src, 0)
assert node and node.value == src
class XRule(Rule):
pass
# an XRule object is created, without definition.
XRule('foo')
def test_rule_rules():
assert XRule.rules() == [XRule('foo')]
@pytest.mark.parametrize("name, rule", [('foo', XRule('foo')), ('bar', None)])
def test_rule_get(name, rule):
assert XRule.get(name) is rule
def test_parse_all_pass():
src = 'moof'
node = ABNFGrammarRule('rulename').parse_all(src)
assert node.value == src
def test_parse_all_fail():
src = 'rule name'
with pytest.raises(ParseError):
ABNFGrammarRule('rulename').parse_all(src)
@pytest.mark.parametrize("num_val", ['%x2227', '%d8743', '%b0010001000100111'])
def test_unicode_num_val(num_val):
# https://github.com/declaresub/abnf/issues/1
class TestRule(Rule):
pass
value = '∧'
rule = 'combine = %s' % num_val
combine = TestRule.create(rule)
node = combine.parse_all(value)
assert node.value == value
def test_unicode_hex_val_concat():
class TestRule(Rule):
pass
value = 'Я́блоко'
rule = 'apple = %x42f.301.431.43b.43e.43a.43e'
combine = TestRule.create(rule)
print(str(combine.definition))
node = combine.parse_all(value)
assert node.value == value
def test_from_file(tmp_path):
grammar = ['foo = "foo"\r\n', 'bar = "bar"\r\n']
path = tmp_path / 'test_grammar.abnf'
path.write_text(''.join(grammar))
class FromFileRule(Rule):
pass
FromFileRule.from_file(path)
@pytest.mark.parametrize("first_match, value", [(True, 'foo'), (False, 'foobar')])
def test_alternation_first_match(first_match, value):
src = 'foobar'
parser = Alternation(Literal('foo'), Literal('foobar'), first_match=first_match)
node, start = parser.parse(src, 0)
assert node.value == value
def test_alternation_first_match1():
src = 'bar'
parser = Alternation(Literal('foo'), Literal('bar'), first_match=True)
node, start = parser.parse(src, 0)
assert node.value == src
def test_alternation_first_match_fail():
src = 'moof'
parser = Alternation(Literal('foo'), Literal('bar'), first_match=True)
with pytest.raises(ParseError):
parser.parse(src, 0)
def test_exclude_rule_identifier():
class ExcludeRule(Rule):
pass
ExcludeRule.create('foo = %x66.6f.6f')
ExcludeRule.create('keyword = foo')
ExcludeRule.create('identifier = ALPHA *(ALPHA / DIGIT )')
keyword = ExcludeRule('keyword')
identifier = ExcludeRule('identifier')
identifier.exclude_rule(keyword)
src = 'foo1'
node, start = identifier.parse(src, 0)
assert node.value == src and start == 4
def test_exclude_rule_keyword():
class ExcludeRule(Rule):
pass
ExcludeRule.create('foo = %x66.6f.6f')
ExcludeRule.create('keyword = foo')
ExcludeRule.create('identifier = ALPHA *(ALPHA / DIGIT )')
keyword = ExcludeRule('keyword')
identifier = ExcludeRule('identifier')
identifier.exclude_rule(keyword)
src = 'foo'
with pytest.raises(ParseError):
identifier.parse(src, 0)
@pytest.mark.parametrize("args", [(None, 1), (Literal('a'), None)])
def test_parseerror_bad_args(args):
with pytest.raises(ValueError):
ParseError(*args)
def test_parseerror_str():
# I'm not checking the output, just exercising ParseError.__str__ .
assert str(ParseError(Literal('a'), 1))
def test_prose_val():
class TestRule(Rule):
pass
rule = 'test-prose-val = <blah blah>'
with pytest.raises(GrammarError):
TestRule.create(rule)
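# Typical flow these tests exercise (the grammar below is illustrative only):
# class MyRule(Rule):
#     pass
# MyRule.create('greeting = "hello" SP "world"')
# node = MyRule('greeting').parse_all('hello world')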
|
182814
|
import time
import os
import timeit
import torch
import argparse
from flextensor.examples import FUNC_TABLE
from flextensor.test import test_graph_schedule_cpu_general_dx
from flextensor.train import Entity, train_op_schedule_cpu_general_dx
def run(batch_size, height, width, channel, kernel_size, output_channel, stride, padding,
model_path, epoch=5, sample_size=16, number=100, test=False):
entities = []
func = FUNC_TABLE["conv2d_channel_batch"].func
args = (batch_size, height, width, channel, kernel_size, kernel_size, output_channel, stride, padding)
entities.append(Entity("conv2d_channel_batch", args))
model_path = os.path.abspath(model_path)
if not test:
beg = time.time()
train_op_schedule_cpu_general_dx(entities, epoch, sample_size, model_path)
end = time.time()
print("{}({}):".format("conv2d_channel_batch", args))
print("train done! use {}ms".format((end - beg) * 1e3))
test_graph_schedule_cpu_general_dx(func, args, model_path, number=number)
def pytorch_baseline(batch_size, height, width, channel, kernel_size, output_channel, stride, padding, number=100):
# conv = torch.nn.Conv2d(channel, output_channel, (kernel_size, kernel_size), (stride, stride), (padding, padding), bias=False)
# conv = torch.nn.functional.conv2d
# A = torch.rand([batch_size, channel, height, width])
# W = torch.rand([output_channel, channel, kernel_size, kernel_size])
# # warm-up
# conv(A, W)
# beg = time.time()
# for i in range(number):
# conv(A, W)
# end = time.time()
    run_time = timeit.timeit(
        setup='import torch\n'
              'conv = torch.nn.functional.conv2d\n'
              f'A = torch.rand([{batch_size}, {channel}, {height}, {width}])\n'
              f'W = torch.rand([{output_channel}, {channel}, {kernel_size}, {kernel_size}])\n'
              'conv(A, W)\n',
        stmt='conv(A, W)',
        number=number)
print("pytorch use {}ms".format(run_time / number * 1e3))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--train", help="train the model", action="store_true")
parser.add_argument("-p", "--pytorch", help="run pytorch baseline", action="store_true")
parser.add_argument("-a", "--flextensor", help="run auto-scheduler", action="store_true")
parser.add_argument("-n", "--number", help="number of tests", type=int, default=100)
parser.add_argument("-f", "--model_file_path", type=str, default="../logs/test_model.pkl")
parser.add_argument("--params", help="N,H,W,C,k,K,stride,padding", type=str, default="1,14,14,512,3,512,1,1")
parser.add_argument("--epoch", type=int, default=5)
parser.add_argument("--sample", type=int, default=16)
args = parser.parse_args()
test = not args.train
use_torch = args.pytorch
use_auto = args.flextensor
try:
params = [int(x) for x in args.params.split(",")]
batch_size, height, width, channel, kernel_size, output_channel, stride, padding = params
if use_torch:
            pytorch_baseline(batch_size, height, width, channel, kernel_size, output_channel, stride, padding, args.number)
if use_auto:
run(batch_size, height, width, channel, kernel_size, output_channel, stride, padding,
args.model_file_path, args.epoch, args.sample, args.number, test)
    except Exception as e:
        raise ValueError("Bad parameters, please refer to usage") from e
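# Example invocation (the script name is a placeholder for this file):
#   python conv2d_script.py --pytorch --params 1,14,14,512,3,512,1,1 -n 100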
# arg_lst = [
# (1, 7, 7, 1024, 3, 3, 1024, 1, 1),
# # (8, 7, 7, 1024, 3, 3, 1024, 1, 1),
# # (64, 7, 7, 1024, 3, 3, 1024, 1, 1),
# # (256, 7, 7, 1024, 3, 3, 1024, 1, 1),
# (1, 14, 14, 1024, 1, 1, 512, 1, 0),
# (1, 28, 28, 256, 3, 3, 512, 1, 1),
# (1, 28, 28, 512, 1, 1, 256, 1, 0),
# (1, 56, 56, 128, 3, 3, 256, 1, 1),
# (1, 56, 56, 192, 1, 1, 128, 1, 0),
# (1, 112, 112, 64, 3, 3, 192, 1, 1),
# (1, 448, 448, 3, 7, 7, 64, 2, 3)
# ]
# names = [
# "yolo24_b1",
# # "yolo24_b8",
# # "yolo24_b64",
# # "yolo24_b256",
# "yolo19_b1",
# "yolo10_b1",
# "yolo7_b1",
# "yolo4_b1",
# "yolo3_b1",
# "yolo2_b1",
# "yolo1_b1"
# ]
# for i in range(len(arg_lst)):
# # model_path = "opt_conv2d_nchw_" + names[i] + "_cpu.pkl"
# # entities = []
# # args = arg_lst[i]
# # entities.append(Entity("conv2d_nchw", args))
# # model_path = os.path.abspath(model_path)
# # train_op_schedule_cpu_general_dx(entities, 20, 50, model_path,
# # logfile="process_conv2d_nchw_" + names[i] + "_cpu.txt", device="cuda:2")
# N, H, W, CI, k, _, CO, S, P = arg_lst[i]
#     print(names[i], pytorch_baseline(N, H, W, CI, k, CO, S, P, 10))
|
182851
|
import os
import shlex
version_dict = {
'0': 'v_1_0_1',
'1': 'v_1_0_1',
'20200603104139': '20200603104139',
'20201002000616': '20201002000616',
'20210202145952': '20210202145952',
'20210260056859': 'latest'
}
latest = "latest"
def make_goose_template(conn_str, command):
    return ' '.join(shlex.quote(arg) for arg in [
        "goose",
        "postgres",
        conn_str,
        command
    ])
path = os.path.join(os.path.dirname(__file__), "..", "migration_files")
def make_goose_migration_template(conn_str, command):
    return ' '.join(shlex.quote(arg) for arg in [
        "goose",
        "-dir",
        path,
        "postgres",
        conn_str,
        command
    ])
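# Usage sketch (the connection string is illustrative):
# cmd = make_goose_migration_template("user=me dbname=app sslmode=disable", "up")
# print(cmd)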
|
182853
|
import xml.etree.ElementTree as ET
import math
import yaml
import sys
if len(sys.argv) != 2:
print("Only one agrument expected: visit session file")
exit(1)
session_file = str(sys.argv[1])
if 'gui' in session_file:
    print('Warning: "gui" detected in session file name. VisIt saves two files, '
          'one for the GUI state and one main session file; this script needs the main one.')
tree = ET.parse(session_file)
root = tree.getroot()
indent = 0
def printRecur(root):
"""Recursively prints the tree."""
global indent
print (' '*indent + '%s: %s' % (root.tag.title(), root.attrib.get('name', root.text)))
name = str(root.attrib.get('name', root.text))
print (' '*indent + '%s' % name)
indent += 4
    for elem in root:  # Element is directly iterable; getchildren() was removed in Python 3.9
printRecur(elem)
indent -= 4
view = None
def find_view(root):
    name = str(root.attrib.get('name', root.text))
    if name == "View3DAttributes":
        global view
        view = root
    for elem in root:
        find_view(elem)
#printRecur(root)
find_view(root)
if view is None:
    print("View3DAttributes not found")
    exit(1)
view_d = {}
for field in view.iter('Field'):
name = field.attrib.get('name')
length = field.attrib.get('length')
is_double = 'double' in field.attrib.get('type')
if is_double:
values = []
tvals = field.text.split()
for v in tvals:
values.append(float(v))
view_d[name] = values
vn = view_d["viewNormal"];
mag = math.sqrt(vn[0]*vn[0] + vn[1]*vn[1] + vn[2]*vn[2])
vn[0] = vn[0] / mag;
vn[1] = vn[1] / mag;
vn[2] = vn[2] / mag;
ps = view_d["parallelScale"][0]
va = view_d["viewAngle"][0]
focus = view_d["focus"]
camera = {}
camera["camera"] = {}
camera["camera"]["position"] = [0,0,0]
camera["camera"]["position"][0] = vn[0] * (ps / math.tan(math.pi * va / 360.0)) + focus[0]
camera["camera"]["position"][1] = vn[1] * (ps / math.tan(math.pi * va / 360.0)) + focus[1]
camera["camera"]["position"][2] = vn[2] * (ps / math.tan(math.pi * va / 360.0)) + focus[2]
camera["camera"]["look_at"] = focus
camera["camera"]["up"] = view_d["viewUp"]
camera["camera"]["zoom"] = view_d["imageZoom"]
camera["camera"]["fov"] = va
print(yaml.dump(camera, default_flow_style=None))
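# Example (the script and session file names are illustrative):
#   python session_to_camera.py my_visualization.session > camera.yaml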
|
182874
|
import io
import pcpp
class FortranPreprocessor(pcpp.Preprocessor):
def __init__(self):
super().__init__()
self.add_path('.')
def parse_to_string(self, text, source):
with io.StringIO() as f:
self.parse(text, source=source)
self.write(f)
return f.getvalue()
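# Usage sketch (the source text is illustrative):
# pp = FortranPreprocessor()
# print(pp.parse_to_string("#define N 4\nreal :: a(N)\n", source="example.F90"))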
|
182876
|
from tartiflette.language.validators.query.rule import (
June2018ReleaseValidationRule,
)
from tartiflette.language.validators.query.utils import find_nodes_by_name
from tartiflette.utils.errors import graphql_error_from_nodes
class ArgumentUniqueness(June2018ReleaseValidationRule):
"""
This validator validates, for a given directive or field arguments list,
that arguments are provided only once.
More details @ https://graphql.github.io/graphql-spec/June2018/#sec-Argument-Uniqueness
"""
RULE_NAME = "argument-uniqueness"
RULE_LINK = "https://graphql.github.io/graphql-spec/June2018/#sec-Argument-Uniqueness"
RULE_NUMBER = "5.4.2"
def validate(self, arguments, path, **__):
errors = []
already_tested = []
for argument in arguments:
if argument.name.value in already_tested:
continue
with_same_name = find_nodes_by_name(arguments, argument.name.value)
if len(with_same_name) > 1:
already_tested.append(argument.name.value)
errors.append(
graphql_error_from_nodes(
message=f"Can't have multiple arguments named < {argument.name.value} >.",
path=path,
nodes=with_same_name,
extensions=self._extensions,
)
)
return errors
|
182893
|
import numpy as np
import torch
import time
import math
import random
import ray
import copy, sys
# Quaternion utility functions. Due to python relative imports and directory structure can't cleanly use cassie.quaternion_function
def inverse_quaternion(quaternion):
result = np.copy(quaternion)
result[1:4] = -result[1:4]
return result
def quaternion_product(q1, q2):
result = np.zeros(4)
result[0] = q1[0]*q2[0]-q1[1]*q2[1]-q1[2]*q2[2]-q1[3]*q2[3]
result[1] = q1[0]*q2[1]+q2[0]*q1[1]+q1[2]*q2[3]-q1[3]*q2[2]
result[2] = q1[0]*q2[2]-q1[1]*q2[3]+q1[2]*q2[0]+q1[3]*q2[1]
result[3] = q1[0]*q2[3]+q1[1]*q2[2]-q1[2]*q2[1]+q1[3]*q2[0]
return result
def rotate_by_quaternion(vector, quaternion):
q1 = np.copy(quaternion)
q2 = np.zeros(4)
q2[1:4] = np.copy(vector)
q3 = inverse_quaternion(quaternion)
q = quaternion_product(q2, q3)
q = quaternion_product(q1, q)
result = q[1:4]
return result
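# Note: this computes q * (0, v) * q^-1 with the [w, x, y, z] convention used above,
# i.e. an active rotation of ``vector`` by ``quaternion``.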
def euler2quat(z=0, y=0, x=0):
z = z/2.0
y = y/2.0
x = x/2.0
cz = math.cos(z)
sz = math.sin(z)
cy = math.cos(y)
sy = math.sin(y)
cx = math.cos(x)
sx = math.sin(x)
result = np.array([
cx*cy*cz - sx*sy*sz,
cx*sy*sz + cy*cz*sx,
cx*cz*sy - sx*cy*sz,
cx*cy*sz + sx*cz*sy])
if result[0] < 0:
result = -result
return result
@ray.remote
class eval_worker(object):
def __init__(self, id_num, env_fn, policy, num_steps, max_speed, min_speed):
self.id_num = id_num
self.cassie_env = env_fn()
self.policy = copy.deepcopy(policy)
self.num_steps = num_steps
self.max_speed = max_speed
self.min_speed = min_speed
@torch.no_grad()
def run_test(self, speed_schedule, orient_schedule):
start_t = time.time()
save_data = np.zeros(6)
state = torch.Tensor(self.cassie_env.reset_for_test(full_reset=True))
self.cassie_env.speed = 0.5
self.cassie_env.side_speed = 0
self.cassie_env.phase_add = 1
num_commands = len(orient_schedule)
count = 0
orient_ind = 0
speed_ind = 1
orient_add = 0
passed = 1
while not (speed_ind == num_commands and orient_ind == num_commands and count == self.num_steps) and passed:
# Update speed command
if count == self.num_steps:
count = 0
self.cassie_env.speed = speed_schedule[speed_ind]
self.cassie_env.speed = np.clip(self.cassie_env.speed, self.min_speed, self.max_speed)
if self.cassie_env.speed > 1.4:
self.cassie_env.phase_add = 1.5
else:
self.cassie_env.phase_add = 1
speed_ind += 1
# Update orientation command
elif count == self.num_steps // 2:
orient_add += orient_schedule[orient_ind]
orient_ind += 1
# Update orientation
            # TODO: Make an update-orientation function in each env so this will work with an arbitrary environment
quaternion = euler2quat(z=orient_add, y=0, x=0)
iquaternion = inverse_quaternion(quaternion)
curr_orient = state[1:5]
curr_transvel = state[15:18]
new_orient = quaternion_product(iquaternion, curr_orient)
if new_orient[0] < 0:
new_orient = -new_orient
new_translationalVelocity = rotate_by_quaternion(curr_transvel, iquaternion)
state[1:5] = torch.FloatTensor(new_orient)
state[15:18] = torch.FloatTensor(new_translationalVelocity)
# Get action
action = self.policy(state, True)
action = action.data.numpy()
state, reward, done, _ = self.cassie_env.step(action)
state = torch.Tensor(state)
if self.cassie_env.sim.qpos()[2] < 0.4:
passed = 0
count += 1
if passed:
save_data[0] = passed
save_data[1] = -1
else:
save_data[:] = np.array([passed, count//(self.num_steps//2), self.cassie_env.speed, orient_add,\
self.cassie_env.speed-speed_schedule[max(0, speed_ind-2)], orient_schedule[orient_ind-1]])
return self.id_num, save_data, time.time() - start_t
def eval_commands_multi(env_fn, policy, num_steps=200, num_commands=4, max_speed=3, min_speed=0, num_iters=4, num_procs=4, filename="test_eval_command.npy"):
start_t1 = time.time()
ray.init(num_cpus=num_procs)
total_data = np.zeros((num_iters, 6))
# Make all args
all_speed_schedule = np.zeros((num_iters, num_commands))
all_orient_schedule = np.zeros((num_iters, num_commands))
for i in range(num_iters):
all_speed_schedule[i, 0] = 0.5
for j in range(num_commands-1):
speed_add = random.choice([-1, 1])*random.uniform(0.4, 1.3)
if all_speed_schedule[i, j] + speed_add < min_speed or all_speed_schedule[i, j] + speed_add > max_speed:
speed_add *= -1
all_speed_schedule[i, j+1] = all_speed_schedule[i, j] + speed_add
orient_schedule = np.random.uniform(np.pi/6, np.pi/3, num_commands)
orient_sign = np.random.choice((-1, 1), num_commands)
all_orient_schedule[i, :] = orient_schedule * orient_sign
# Make and start eval workers
workers = [eval_worker.remote(i, env_fn, policy, num_steps, max_speed, min_speed) for i in range(num_procs)]
eval_ids = [workers[i].run_test.remote(all_speed_schedule[i, :], all_orient_schedule[i, :]) for i in range(num_procs)]
print("started workers")
curr_arg_ind = num_procs
curr_data_ind = 0
bar_width = 30
sys.stdout.write(progress_bar(0, num_iters, bar_width, 0))
sys.stdout.flush()
eval_start = time.time()
while curr_arg_ind < num_iters:
done_id = ray.wait(eval_ids, num_returns=1, timeout=None)[0][0]
worker_id, data, eval_time = ray.get(done_id)
total_data[curr_data_ind, :] = data
eval_ids.remove(done_id)
eval_ids.append(workers[worker_id].run_test.remote(all_speed_schedule[curr_arg_ind, :], all_orient_schedule[curr_arg_ind, :]))
curr_arg_ind += 1
curr_data_ind += 1
sys.stdout.write("\r{}".format(progress_bar(curr_data_ind, num_iters, bar_width, (time.time()-eval_start))))
sys.stdout.flush()
result = ray.get(eval_ids)
for ret_tuple in result:
total_data[curr_data_ind, :] = ret_tuple[1]
curr_data_ind += 1
sys.stdout.write("\r{}".format(progress_bar(num_iters, num_iters, bar_width, time.time()-eval_start)))
print("")
print("Got all results")
np.save(filename, total_data)
print("total time: ", time.time() - start_t1)
ray.shutdown()
def progress_bar(curr_ind, total_ind, bar_width, elapsed_time):
num_bar = int((curr_ind / total_ind) // (1/bar_width))
num_space = int(bar_width - num_bar)
outstring = "[{}]".format("-"*num_bar + " "*num_space)
outstring += " {:.2f}% complete".format(curr_ind / total_ind * 100)
if elapsed_time == 0:
time_left = "N/A"
outstring += " {:.1f} elapsed, {} left".format(elapsed_time, time_left)
else:
time_left = elapsed_time/curr_ind*(total_ind-curr_ind)
outstring += " {:.1f} elapsed, {:.1f} left".format(elapsed_time, time_left)
return outstring
def report_stats(filename):
data = np.load(filename)
num_iters = data.shape[0]
pass_rate = np.sum(data[:, 0]) / num_iters
success_inds = np.where(data[:, 0] == 1)[0]
# data[success_inds, 1] = -1
speed_fail_inds = np.where(data[:, 1] == 0)[0]
orient_fail_inds = np.where(data[:, 1] == 1)[0]
print("pass rate: ", pass_rate)
# print("speed failure: ", data[speed_fail_inds, 4])
# print("orient failure: ", data[orient_fail_inds, 5])
speed_change = data[speed_fail_inds, 4]
orient_change = data[orient_fail_inds, 5]
speed_neg_inds = np.where(speed_change < 0)
speed_pos_inds = np.where(speed_change > 0)
orient_neg_inds = np.where(orient_change < 0)
orient_pos_inds = np.where(orient_change > 0)
print("Number of speed failures: ", len(speed_fail_inds))
print("Number of orient failures: ", len(orient_fail_inds))
if len(speed_fail_inds) == 0:
avg_pos_speed = "N/A"
avg_neg_speed = "N/A"
else:
avg_pos_speed = np.mean(speed_change[speed_pos_inds])
avg_neg_speed = np.mean(speed_change[speed_neg_inds])
if len(orient_fail_inds) == 0:
avg_pos_orient = "N/A"
avg_neg_orient = "N/A"
else:
avg_pos_orient = np.mean(orient_change[orient_pos_inds])
avg_neg_orient = np.mean(orient_change[orient_neg_inds])
print("avg pos speed failure: ", avg_pos_speed)
print("avg neg speed failure: ", avg_neg_speed)
print("avg pos orient failure: ", avg_pos_orient)
print("avg neg orient failure: ", avg_neg_orient)
@torch.no_grad()
def eval_commands(cassie_env, policy, num_steps=200, num_commands=2, max_speed=3, min_speed=0, num_iters=1):
# save_data will hold whether passed or not (1 or 0), whether orient command or speed command caused failure (1, 0),
# speed and orient command at failure, and speed and orient change at failure
save_data = np.zeros((num_iters, 6))
start_t = time.time()
for j in range(num_iters):
state = torch.Tensor(cassie_env.reset_for_test())
cassie_env.speed = 0.5
cassie_env.side_speed = 0
cassie_env.phase_add = 1
speed_schedule = [0.5]
for i in range(num_commands-1):
speed_add = random.choice([-1, 1])*random.uniform(0.4, 1.3)
if speed_schedule[i] + speed_add < min_speed or speed_schedule[i] + speed_add > max_speed:
speed_add *= -1
speed_schedule.append(speed_schedule[i] + speed_add)
orient_schedule = np.random.uniform(np.pi/6, np.pi/3, num_commands)
orient_sign = np.random.choice((-1, 1), num_commands)
orient_schedule = orient_schedule * orient_sign
# print("Speed schedule: ", speed_schedule)
# print("Orient schedule: ", orient_schedule)
count = 0
orient_ind = 0
speed_ind = 1
orient_add = 0
passed = 1
while not (speed_ind == num_commands and orient_ind == num_commands and count == num_steps) and passed:
if count == num_steps:
count = 0
cassie_env.speed = speed_schedule[speed_ind]
cassie_env.speed = np.clip(cassie_env.speed, min_speed, max_speed)
if cassie_env.speed > 1.4:
cassie_env.phase_add = 1.5
else:
cassie_env.phase_add = 1
speed_ind += 1
# print("Current speed: ", cassie_env.speed, speed_ind)
elif count == num_steps // 2:
orient_add += orient_schedule[orient_ind]
orient_ind += 1
# print("Current orient add: ", orient_add, orient_ind)
# Update orientation
quaternion = euler2quat(z=orient_add, y=0, x=0)
iquaternion = inverse_quaternion(quaternion)
curr_orient = state[1:5]
curr_transvel = state[15:18]
new_orient = quaternion_product(iquaternion, curr_orient)
if new_orient[0] < 0:
new_orient = -new_orient
new_translationalVelocity = rotate_by_quaternion(curr_transvel, iquaternion)
state[1:5] = torch.FloatTensor(new_orient)
state[15:18] = torch.FloatTensor(new_translationalVelocity)
# Get action
action = policy(state, True)
action = action.data.numpy()
state, reward, done, _ = cassie_env.step(action)
state = torch.Tensor(state)
if cassie_env.sim.qpos()[2] < 0.4:
# print("Failed")
passed = 0
count += 1
if passed:
# print("passed")
save_data[j, 0] = passed
save_data[j, 1] = -1
else:
# print("didnt pass")
save_data[j, :] = np.array([passed, count//(num_steps//2), cassie_env.speed, orient_add,\
cassie_env.speed-speed_schedule[max(0, speed_ind-2)], orient_schedule[orient_ind-1]])
print("time: ", time.time() - start_t)
return save_data
def vis_commands(cassie_env, policy, num_steps=200, num_commands=4, max_speed=1, min_speed=0):
state = torch.Tensor(cassie_env.reset_for_test())
cassie_env.speed = 0.5
cassie_env.side_speed = 0
cassie_env.phase_add = 1
# orient_schedule = np.pi/4*np.arange(8)
# speed_schedule = np.random.uniform(-1.5, 1.5, 4)
speed_schedule = [0.5]
for i in range(num_commands-1):
speed_add = random.choice([-1, 1])*random.uniform(0.4, 1.3)
if speed_schedule[i] + speed_add < min_speed or speed_schedule[i] + speed_add > max_speed:
speed_add *= -1
speed_schedule.append(speed_schedule[i] + speed_add)
orient_schedule = np.random.uniform(np.pi/6, np.pi/3, num_commands)
orient_sign = np.random.choice((-1, 1), num_commands)
orient_schedule = orient_schedule * orient_sign
print("Speed schedule: ", speed_schedule)
print("Orient schedule: ", orient_schedule)
dt = 0.05
speedup = 3
count = 0
orient_ind = 0
speed_ind = 0
orient_add = 0
# print("Current orient add: ", orient_add)
render_state = cassie_env.render()
with torch.no_grad():
while render_state:
if (not cassie_env.vis.ispaused()):
# orient_add = orient_schedule[math.floor(count/num_steps)]
if count == num_steps:
count = 0
speed_ind += 1
if speed_ind >= len(speed_schedule):
print("speed Done")
exit()
cassie_env.speed = speed_schedule[speed_ind]
cassie_env.speed = np.clip(cassie_env.speed, 0, 3)
if cassie_env.speed > 1.4:
cassie_env.phase_add = 1.5
print("Current speed: ", cassie_env.speed)
elif count == num_steps // 2:
orient_ind += 1
if orient_ind >= len(orient_schedule):
print("orient Done")
exit()
orient_add += orient_schedule[orient_ind]
print("Current orient add: ", orient_add)
# Update orientation
quaternion = euler2quat(z=orient_add, y=0, x=0)
iquaternion = inverse_quaternion(quaternion)
curr_orient = state[1:5]
curr_transvel = state[15:18]
new_orient = quaternion_product(iquaternion, curr_orient)
if new_orient[0] < 0:
new_orient = -new_orient
new_translationalVelocity = rotate_by_quaternion(curr_transvel, iquaternion)
state[1:5] = torch.FloatTensor(new_orient)
state[15:18] = torch.FloatTensor(new_translationalVelocity)
# Get action
action = policy(state, True)
action = action.data.numpy()
state, reward, done, _ = cassie_env.step(action)
if cassie_env.sim.qpos()[2] < 0.4:
print("Failed")
exit()
else:
state = torch.Tensor(state)
count += 1
render_state = cassie_env.render()
time.sleep(dt / speedup)
################################
##### DEPRECATED FUNCTIONS #####
################################
@ray.remote
@torch.no_grad()
def eval_commands_worker(env_fn, policy, num_steps, num_commands, max_speed, min_speed, num_iters):
cassie_env = env_fn()
# save_data will hold whether passed or not (1 or 0), whether orient command or speed command caused failure (1, 0),
# speed and orient command at failure, and speed and orient change at failure
save_data = np.zeros((num_iters, 6))
start_t = time.time()
for j in range(num_iters):
state = torch.Tensor(cassie_env.reset_for_test())
cassie_env.speed = 0.5
cassie_env.side_speed = 0
cassie_env.phase_add = 1
speed_schedule = [0.5]
for i in range(num_commands-1):
speed_add = random.choice([-1, 1])*random.uniform(0.4, 1.3)
if speed_schedule[i] + speed_add < min_speed or speed_schedule[i] + speed_add > max_speed:
speed_add *= -1
speed_schedule.append(speed_schedule[i] + speed_add)
orient_schedule = np.random.uniform(np.pi/6, np.pi/3, num_commands)
orient_sign = np.random.choice((-1, 1), num_commands)
orient_schedule = orient_schedule * orient_sign
count = 0
orient_ind = 0
speed_ind = 1
orient_add = 0
passed = 1
while not (speed_ind == num_commands and orient_ind == num_commands and count == num_steps) and passed:
if count == num_steps:
count = 0
cassie_env.speed = speed_schedule[speed_ind]
cassie_env.speed = np.clip(cassie_env.speed, min_speed, max_speed)
if cassie_env.speed > 1.4:
cassie_env.phase_add = 1.5
else:
cassie_env.phase_add = 1
speed_ind += 1
elif count == num_steps // 2:
orient_add += orient_schedule[orient_ind]
orient_ind += 1
# Update orientation
quaternion = euler2quat(z=orient_add, y=0, x=0)
iquaternion = inverse_quaternion(quaternion)
curr_orient = state[1:5]
curr_transvel = state[15:18]
new_orient = quaternion_product(iquaternion, curr_orient)
if new_orient[0] < 0:
new_orient = -new_orient
new_translationalVelocity = rotate_by_quaternion(curr_transvel, iquaternion)
state[1:5] = torch.FloatTensor(new_orient)
state[15:18] = torch.FloatTensor(new_translationalVelocity)
# Get action
action = policy(state, True)
action = action.data.numpy()
state, reward, done, _ = cassie_env.step(action)
state = torch.Tensor(state)
if cassie_env.sim.qpos()[2] < 0.4:
passed = 0
count += 1
if passed:
save_data[j, 0] = passed
save_data[j, 1] = -1
else:
save_data[j, :] = np.array([passed, count//(num_steps//2), cassie_env.speed, orient_add,\
cassie_env.speed-speed_schedule[max(0, speed_ind-2)], orient_schedule[orient_ind-1]])
# if save_data[j, 1] == 0:
# print("speed diff: ", speed_schedule[speed_ind-1]-speed_schedule[speed_ind-2])
# print("curr speed: ", cassie_env.speed)
# print("speed schedule: ", speed_schedule)
# print("speed ind: ", speed_ind)
# print("curr schedule: ", speed_schedule[speed_ind-1])
return save_data, time.time() - start_t
# TODO: Change to create workers, then pass a single iter to each one. This way, if a worker
# finishes before the others, it can start running more iters. Could also add running stats
# of how many more tests remain, with a loading bar.
def eval_commands_multi_old(env_fn, policy, num_steps=200, num_commands=4, max_speed=3, min_speed=0, num_iters=4, num_procs=4, filename="test_eval_command.npy"):
start_t1 = time.time()
ray.init(num_cpus=num_procs)
result_ids = []
for i in range(num_procs):
curr_iters = num_iters // num_procs
if i == num_procs - 1: # is last process to get launched, do remaining iters if not evenly divided between procs
curr_iters = num_iters - i*curr_iters
print("curr iters: ", curr_iters)
args = (env_fn, policy, num_steps, num_commands, max_speed, min_speed, curr_iters)
print("Starting worker ", i)
result_ids.append(eval_commands_worker.remote(*args))
result = ray.get(result_ids)
# print(result)
print("Got all results")
total_data = np.concatenate([result[i][0] for i in range(num_procs)], axis=0)
# print("timings: ", [result[i][1] for i in range(num_procs)])
# print("sim timings: ", [result[i][2] for i in range(num_procs)])
# # max_force = np.concatenate(result, axis=1)
# print("total_data: ", total_data)
np.save(filename, total_data)
print("total time: ", time.time() - start_t1)
ray.shutdown()
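# Usage sketch (env_fn and policy are placeholders for a Cassie env constructor
# and a trained policy object):
# eval_commands_multi(env_fn, policy, num_iters=100, num_procs=4, filename="eval.npy")
# report_stats("eval.npy")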
|
182900
|
import logging_functions as lf
import time
import randomized_record
import config
CHECK_CONFIGURATION = False
TARGET_ACTIVITIES = [
"com.google.android.gm/.ConversationListActivityGmail",
"com.google.android.gm/.EmlViewerActivityGmail",
"com.google.android.gm/.GmailActivity",
"com.google.android.gm/com.android.mail.ui.settings.PublicPreferenceActivity",
"com.google.android.gm/.ui.MailboxSelectionActivityGmail",
"com.google.android.gm/.CreateShortcutActivityGmail",
"com.google.android.gm/.CreateShortcutActivityGoogleMail",
"com.google.android.gm/com.android.mail.ui.MailActivity",
"com.google.android.gm/.ComposeActivityGmailExternal",
"com.google.android.gm/.ui.MailActivityGmail",
"com.google.android.gm/.ui.MailActivityGmail",
"com.google.android.gm/.ComposeActivityGmailExternal",
#"com.amazon.mShop.android.shopping/com.amazon.mShop.details.ProductDetailsActivity",
#"com.amazon.mShop.android.shopping/com.amazon.mShop.android.home.PublicUrlActivity",
#"com.amazon.mShop.android.shopping/com.amazon.mShop.search.RetailSearchFragmentActivity",
#"com.amazon.mShop.android.shopping/com.amazon.mShop.search.SearchActivity",
]
def launch_activity(activity_target):
return lf.adb("am start -n " + activity_target, get_output=True)
def start_stop_activity(activity_target):
package_name = activity_target.split("/")[0]
print("Package name of " + activity_target + ": " + package_name)
lf.trigger_new_event(activity_target)
launch_activity(activity_target)
time.sleep(config.DELAY_AFTER_LAUNCH)
lf.kill_app(package_name)
time.sleep(config.DELAY_AFTER_KILL)
def check_config():
failed_launches = []
for activity in TARGET_ACTIVITIES:
print("\nAttempt to launch " + activity)
output = launch_activity(activity)
print(output)
if "SecurityException" in output:
failed_launches.append(activity)
time.sleep(1)
print("\n" + str(len(failed_launches)) + " failed launches occured:", failed_launches, "\n")
def main():
print("Record activity launches\n")
lf.start_logging_procedure()
if not CHECK_CONFIGURATION:
randomized_record.acquire_data_randomized(TARGET_ACTIVITIES, config.records_per_app(), start_stop_activity)
else:
check_config()
lf.stop_logging_app()
if __name__ == "__main__":
    main()
|
182911
|
from pudzu.charts import *
from pudzu.dates import *
import dateparser
# -------------
# G7 time chart
# -------------
START = dateparser.parse('1 January 1960').date()
END = datetime.date.today()
def duration(d):
return dateparser.parse(get_non(d, 'end', END.isoformat())).date() - max(START, dateparser.parse(d['start']).date())
def percentage_left(df):
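    # Fraction of the group's total office time (clamped to START..END)
    # spent under governments tagged "left" in the dataset.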
return sum((duration(d) for _,d in df[df.spectrum == "left"].iterrows()), datetime.timedelta(0)) / sum((duration(d) for _,d in df.iterrows()), datetime.timedelta(0))
df = pd.read_csv("datasets/g7.csv")
groups = df.groupby(by=lambda idx: "{} ({})".format(df['country'][idx], df['office'][idx]))
group_order = sorted(list(groups.groups), key=lambda s: percentage_left(groups.get_group(s)), reverse=True)
data = [groups.get_group(g) for g in group_order]
colorfn = lambda d: {"left": "#d62728", "right": "#393b79", "centre": "#e7ba52"}[d['spectrum']]
startfn = lambda d: dateparser.parse(d['start']).date()
endfn = lambda d: dateparser.parse(get_non(d, 'end', END.isoformat())).date()
labelfn = lambda d: Image.from_text(d['name'].split(" ")[-1], arial(10), padding=(2), fg="white", bg=colorfn(d))
labels = ["{:.0%}".format(percentage_left(df)) for df in data]
title = Image.from_text("G7 countries by time spent under left-of-centre governments (1960-present)", arial(30, bold=True), fg="white").pad((0,5,0,30),bg="black")
chart = time_chart(1200, 40, data, startfn, endfn, colorfn, interval_label_key=labelfn,
xmin=START, label_font=arial(16), labels_left=group_order, labels_right=labels, title=title,
grid_interval=DateInterval(years=10), grid_font=arial(16), grid_labels=lambda v: str(v.year)).pad(5, "black")
def box(s): return Image.new("RGBA", (20,20), colorfn({"spectrum": s}))
def label(s): return Image.from_text(s, arial(12), fg="white")
footer_row = [box("left"), label("left-of-centre"), box("centre"), label("centrist"), box("right"), label("right-of-centre"),
Image.new("RGBA", (50,0)),
Image.from_text("Colours are standard UK colours for conservatism, liberalism and social democracy.", arial(16), fg="white"),
Image.from_text("Note that they differ from the ones used in the US since 2000.", arial(16, bold=True), fg="white")]
footer = Image.from_row(footer_row, padding=3, bg="black")
img = Image.from_column([chart, footer], bg="black", padding=(0,20))
img.save("output/politics_g7.png")
|
182918
|
import datetime
import pyg_lib
author = '<NAME>'
project = 'pyg_lib'
version = pyg_lib.__version__
copyright = f'{datetime.datetime.now().year}, {author}'
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
]
html_theme = 'sphinx_rtd_theme'
autodoc_member_order = 'bysource'
intersphinx_mapping = {
'python': ('http://docs.python.org', None),
'torch': ('https://pytorch.org/docs/master', None),
}
|
182921
|
import ctypes
import windows
import windows.generated_def as gdef
from ..apiproxy import ApiProxy, NeededParameter, is_implemented
from ..error import fail_on_zero
class ShlwapiProxy(ApiProxy):
APIDLL = "Shlwapi"
default_error_check = staticmethod(fail_on_zero)
@ShlwapiProxy()
def StrStrIW(pszFirst, pszSrch):
return StrStrIW.ctypes_function(pszFirst, pszSrch)
@ShlwapiProxy()
def StrStrIA(pszFirst, pszSrch):
return StrStrIA.ctypes_function(pszFirst, pszSrch)
@ShlwapiProxy()
def IsOS(dwOS):
if not is_implemented(IsOS) and windows.system.version[0] < 6:
        # IsOS is not exported by name before Windows Vista; per the documented
        # remarks, it must be called by ordinal 437 instead.
        # https://docs.microsoft.com/en-us/windows/desktop/api/shlwapi/nf-shlwapi-isos#remarks
IsOS.proxy.func_name = 437
return IsOS.ctypes_function(dwOS)
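# Usage sketch (assumes gdef defines the OS_* constants, e.g. OS_NT):
# IsOS(gdef.OS_NT)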
|
182934
|
import torch
class TensorProvider:
    def tensors(self):
        raise NotImplementedError("Abstract.")
|
182935
|
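# For each test case: probability that a uniformly random pair (i, j), i < j,
# attains the maximum pairwise sum A[i] + A[j].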
t = int(input())
while t:
N = int(input())
A = list(map(int, input().split()))
l = []
for i in range(N):
for j in range(i+1, N):
l.append(A[i]+A[j])
c = l.count(max(l))
print(c/len(l))
    t -= 1
|
182953
|
def remove_reads_in_other_fastq(input_fastq, remove_fastq, out_file_name):
out = open(out_file_name, "w")
# Get seq ids to remove
remove_ids = set()
i = 0
for line in open(remove_fastq):
if i % 500000 == 0:
print(i)
i += 1
if line.startswith("@"):
remove_ids.add(line)
i = 0
write_line = True
for line in open(input_fastq):
if i % 100000 == 0:
print(i)
i += 1
line = line.replace("/1", "")
if line.startswith("@"):
if line in remove_ids:
write_line = False
else:
write_line = True
        if write_line:
            out.write(line)
    out.close()
if __name__ == "__main__":
remove_reads_in_other_fastq("orig.fastq", "filtered_outside_mhc.fastq", "final.fastq")
|
182978
|
import mimetypes
import os
import re
import traceback
from django.http import (
HttpResponse,
JsonResponse,
HttpResponseBadRequest,
HttpResponseServerError,
HttpResponseNotFound,
HttpResponseForbidden,
)
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.db import transaction
from django.db.utils import IntegrityError
from django.utils.datastructures import MultiValueDictKeyError
from django.http.request import HttpRequest
from django.views.decorators.http import require_GET, require_POST
from django.views.generic import TemplateView
import semver
from .models import StaticDeployment, DeploymentVersion, DeploymentCategory, ProxyDeployment
from .forms import StaticDeploymentForm, ProxyDeploymentForm
from .upload import (
handle_uploaded_static_archive,
update_symlink,
delete_hosted_deployment,
delete_hosted_version,
)
from .serialize import serialize
from .validation import (
BadInputException,
validate_deployment_name,
get_validated_form,
NotFound,
NotAuthenticated,
InvalidCredentials,
validate_subdomain,
)
from .proxy import trigger_proxy_server_update
# Used to get the name of the deployment into which a given URL points
REDIRECT_URL_RGX = re.compile("^/__HOSTED/([^/]+)/.*$")
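# e.g. "/__HOSTED/my-app/static/index.html" -> deployment subdomain "my-app"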
# Taken from https://djangosnippets.org/snippets/101/
def send_data(path, filename=None, mimetype=None):
if filename is None:
filename = os.path.basename(path)
if mimetype is None:
mimetype, encoding = mimetypes.guess_type(filename)
response = HttpResponse(content_type=mimetype)
    with open(path, "rb") as f:
        response.write(f.read())
return response
def with_caught_exceptions(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except BadInputException as e:
return HttpResponseBadRequest(str(e))
except NotFound:
return HttpResponseNotFound()
except NotAuthenticated:
return HttpResponseForbidden("You must be logged into access this view")
except InvalidCredentials:
return HttpResponseForbidden("Invalid username or password provided")
except Exception as e:
print("Uncaught error: {}".format(str(e)))
traceback.print_exc()
return HttpResponseServerError(
"An unhandled error occured while processing the request"
)
return wrapper
def with_default_success(func):
""" Decorator that returns a JSON success message if no errors occur during the request. """
def wrapper(*args, **kwargs):
func(*args, **kwargs)
return JsonResponse({"success": True, "error": False})
return wrapper
def with_login_required(func):
""" Decorator that verifies that a user is logged in and returns a Forbidden status code if
the requester is not. """
def wrapper(router: TemplateView, req: HttpRequest, *args, **kwargs):
if not req.user.is_authenticated:
raise NotAuthenticated()
return func(router, req, *args, **kwargs)
return wrapper
@require_GET
def index(_req: HttpRequest):
return HttpResponse("Site is up and running! Try `GET /deployments`.")
@require_POST
@with_caught_exceptions
@with_default_success
def login_user(req: HttpRequest):
username = None
    password = None
try:
username = req.POST["username"]
password = req.POST["password"]
except MultiValueDictKeyError:
raise BadInputException("You must supply both a username and password")
user = authenticate(req, username=username, password=password)
if user is not None:
login(req, user)
else:
raise InvalidCredentials("Invalid username or password")
def get_or_none(Model, do_raise=True, **kwargs):
""" Lookups a model given some query parameters. If a match is found, it is returned.
Otherwise, either `None` is returned or a `NotFound` exception is raised depending on
the value of `do_raise`. """
try:
return Model.objects.get(**kwargs)
except Model.DoesNotExist:
if do_raise:
raise NotFound()
else:
return None
class Deployments(TemplateView):
@with_caught_exceptions
@with_login_required
def get(self, request: HttpRequest):
all_deployments = StaticDeployment.objects.prefetch_related(
"deploymentversion_set", "categories"
).all()
deployments_data = serialize(all_deployments, json=False)
deployments_data_with_versions = [
{
**datum,
"versions": serialize(deployment_model.deploymentversion_set.all(), json=False),
"categories": serialize(deployment_model.categories.all(), json=False),
}
for (datum, deployment_model) in zip(deployments_data, all_deployments)
]
return JsonResponse(deployments_data_with_versions, safe=False)
@with_caught_exceptions
@with_login_required
def post(self, request: HttpRequest):
form = get_validated_form(StaticDeploymentForm, request)
deployment_name = form.cleaned_data["name"]
subdomain = form.cleaned_data["subdomain"]
version = form.cleaned_data["version"]
categories = form.cleaned_data["categories"].split(",")
not_found_document = form.cleaned_data["not_found_document"]
validate_deployment_name(deployment_name)
validate_subdomain(subdomain)
deployment_descriptor = None
try:
with transaction.atomic():
# Create the new deployment descriptor
deployment_descriptor = StaticDeployment(
name=deployment_name, subdomain=subdomain, not_found_document=not_found_document
)
deployment_descriptor.save()
# Create categories
for category in categories:
(category_model, _) = DeploymentCategory.objects.get_or_create(
category=category
)
deployment_descriptor.categories.add(category_model)
# Create the new version and set it as active
version_model = DeploymentVersion(
version=version, deployment=deployment_descriptor, active=True
)
version_model.save()
handle_uploaded_static_archive(request.FILES["file"], subdomain, version)
except IntegrityError as e:
if "Duplicate entry" in str(e):
raise BadInputException("`name` and `subdomain` must be unique!")
else:
raise e
return JsonResponse(
{
"name": deployment_name,
"subdomain": subdomain,
"version": version,
"url": deployment_descriptor.get_url(),
}
)
def get_query_dict(query_string: str, req: HttpRequest) -> dict:
lookup_field = req.GET.get("lookupField", "id")
if lookup_field not in ["id", "subdomain", "name"]:
raise BadInputException("The supplied `lookupField` was invalid")
return {lookup_field: query_string}
class Deployment(TemplateView):
@with_caught_exceptions
def get(self, req: HttpRequest, deployment_id=None):
query_dict = get_query_dict(deployment_id, req)
deployment = get_or_none(StaticDeployment, **query_dict)
versions = DeploymentVersion.objects.filter(deployment=deployment)
active_version = next(v for v in versions if v.active)
deployment_data = serialize(deployment, json=False)
versions_data = serialize(versions, json=False)
versions_list = list(map(lambda version_datum: version_datum["version"], versions_data))
deployment_data = {
**deployment_data,
"versions": versions_list,
"active_version": serialize(active_version, json=False)["version"],
}
return JsonResponse(deployment_data, safe=False)
@with_caught_exceptions
@with_login_required
@with_default_success
def delete(self, req: HttpRequest, deployment_id=None):
with transaction.atomic():
query_dict = get_query_dict(deployment_id, req)
deployment = get_or_none(StaticDeployment, **query_dict)
deployment_data = serialize(deployment, json=False)
# This will also recursively delete all attached versions
deployment.delete()
delete_hosted_deployment(deployment_data["subdomain"])
class DeploymentVersionView(TemplateView):
@staticmethod
def is_version_special(version: str) -> bool:
return version in ["minor", "m", "patch", "p", "major", "M"]
@staticmethod
def transform_special_version(special_version: str, previous_version: str) -> str:
if special_version in ["patch", "p"]:
return semver.bump_patch(previous_version)
elif special_version in ["minor", "m"]:
return semver.bump_minor(previous_version)
elif special_version in ["major", "M"]:
return semver.bump_major(previous_version)
else:
raise "Unreachable: `transform_special_version` should never be called with invalid special version"
@with_caught_exceptions
def get(
self, req: HttpRequest, *args, deployment_id=None, version=None
): # pylint: disable=W0221
query_dict = get_query_dict(deployment_id, req)
deployment = get_or_none(StaticDeployment, **query_dict)
version_model = get_or_none(DeploymentVersion, deployment=deployment, version=version)
return serialize(version_model)
@with_caught_exceptions
@with_login_required
def post(self, req: HttpRequest, deployment_id=None, version=None):
query_dict = get_query_dict(deployment_id, req)
deployment = get_or_none(StaticDeployment, **query_dict)
if not req.FILES["file"]:
raise BadInputException(
"No multipart file named `file` found in request; this must be provided."
)
# Assert that the new version is unique among other versions for the same deployment
        if (not self.is_version_special(version)) and DeploymentVersion.objects.filter(
            deployment=deployment, version=version
        ).exists():
raise BadInputException("The new version name must be unique.")
version_model = None
with transaction.atomic():
# Set any old active deployment as inactive
            old_version_model = DeploymentVersion.objects.filter(
                deployment=deployment, active=True
            ).first()
if old_version_model:
old_version_model.active = False
old_version_model.save()
# Transform special versions by bumping the previous semver version
if self.is_version_special(version):
try:
version = self.transform_special_version(version, old_version_model.version)
except Exception as e:
raise BadInputException(e)
# Create the new version and set it active
version_model = DeploymentVersion(version=version, deployment=deployment, active=True)
version_model.save()
deployment_data = serialize(deployment, json=False)
# Extract the supplied archive into the hosting directory
handle_uploaded_static_archive(
req.FILES["file"], deployment_data["subdomain"], version, init=False
)
# Update the `latest` version to point to this new version
update_symlink(deployment_data["subdomain"], version)
return serialize(version_model)
@with_caught_exceptions
@with_login_required
@with_default_success
def delete(self, req: HttpRequest, deployment_id=None, version=None):
with transaction.atomic():
query_dict = get_query_dict(deployment_id, req)
deployment = get_or_none(StaticDeployment, **query_dict)
deployment_data = serialize(deployment, json=False)
# Delete the entry for the deployment version from the database
DeploymentVersion.objects.filter(deployment=deployment, version=version).delete()
# If no deployment versions remain for the owning deployment, delete the deployment
delete_deployment = False
if not DeploymentVersion.objects.filter(deployment=deployment):
delete_deployment = True
deployment.delete()
if delete_deployment:
delete_hosted_deployment(deployment_data["subdomain"])
else:
delete_hosted_version(deployment_data["subdomain"], version)
@with_caught_exceptions
def not_found(req):
# This environment variable is passed in from Apache
redirect_url = req.META.get("REDIRECT_URL")
if redirect_url is None:
return HttpResponseNotFound()
# Get the name of the deployment that this 404 applies to, if any
match = REDIRECT_URL_RGX.match(redirect_url)
if match is None:
return HttpResponseNotFound()
deployment_subdomain = match[1]
# Check to see if there's a custom 404 handle for the given deployment
deployment = get_or_none(StaticDeployment, subdomain=deployment_subdomain)
    if deployment is None:
        return HttpResponseNotFound()
    not_found_document = deployment.not_found_document
    if not_found_document is None:
        return HttpResponseNotFound()
# Sandbox the retrieved pathname to be within the deployment's directory, preventing all kinds
# of potentially nasty directory traversal stuff.
deployment_dir_path = os.path.abspath(os.path.join(settings.HOST_PATH, deployment.subdomain))
    document_path = os.path.abspath(
        os.path.join(deployment_dir_path, "latest", not_found_document)
    )
common_prefix = os.path.commonprefix([deployment_dir_path, document_path])
if common_prefix != deployment_dir_path:
return HttpResponseBadRequest(
(
f"Invalid error document provided: {not_found_document}; "
"must be relative to deployment."
)
)
if not os.path.exists(document_path):
return HttpResponseBadRequest(
f"The specified 404 document {not_found_document} doesn't exist in this deployment."
)
# Since our way of serving this file loads it into memory, we block any files that are >128MB
file_size = os.path.getsize(document_path)
    if file_size > 1024 * 1024 * 128:
return HttpResponseBadRequest(
f"Custom not found document is {file_size} bytes, which is more than the 128MB limit."
)
return send_data(document_path)
class ProxyDeployments(TemplateView):
@with_caught_exceptions
@with_login_required
def get(self, request: HttpRequest):
all_proxy_deployments = ProxyDeployment.objects.all()
return serialize(all_proxy_deployments)
@with_caught_exceptions
@with_login_required
def post(self, request: HttpRequest):
form = get_validated_form(ProxyDeploymentForm, request)
name = form.cleaned_data["name"]
subdomain = form.cleaned_data["subdomain"]
use_cors_headers = form.cleaned_data["use_cors_headers"] or False
validate_deployment_name(name)
validate_subdomain(subdomain)
proxy_deployment_descriptor = ProxyDeployment(
name=name,
subdomain=subdomain,
destination_address=form.cleaned_data["destination_address"],
use_cors_headers=use_cors_headers,
)
try:
proxy_deployment_descriptor.save()
except IntegrityError as e:
if "Duplicate entry" in str(e):
raise BadInputException("`name` and `subdomain` must be unique!")
else:
raise e
trigger_proxy_server_update()
return JsonResponse(
{"name": name, "subdomain": subdomain, "url": proxy_deployment_descriptor.get_url()}
)
class ProxyDeploymentView(TemplateView):
@with_caught_exceptions
def get(self, req: HttpRequest, deployment_id=None):
query_dict = get_query_dict(deployment_id, req)
        deployment = get_or_none(ProxyDeployment, **query_dict)
return serialize(deployment)
@with_caught_exceptions
@with_login_required
@with_default_success
def delete(self, req: HttpRequest, deployment_id=None):
query_dict = get_query_dict(deployment_id, req)
proxy_deployment = get_or_none(ProxyDeployment, **query_dict)
proxy_deployment.delete()
trigger_proxy_server_update()
|
183005
|
import os, sys
from exceptions import ContainerError
import utils, logging
import py_backend
class Container:
def __init__(self, name, state, config, mounts=None):
self.log = logging.getLogger('maestro')
self.state = state
self.config = config
self.name = name
self.mounts = mounts
if 'hostname' not in self.config:
self.config['hostname'] = name
#if 'command' not in self.config:
# self.log.error("Error: No command specified for container " + name + "\n")
# raise ContainerError('No command specified in configuration')
self.backend = py_backend.PyBackend()
def create(self):
self._start_container(False)
def run(self):
self._start_container()
def rerun(self):
# Commit the current container and then use that image_id to restart.
self.state['image_id'] = self.backend.commit_container(self.state['container_id'])['Id']
self._start_container()
def start(self):
utils.status("Starting container %s - %s" % (self.name, self.state['container_id']))
self.backend.start_container(self.state['container_id'], self.mounts)
def stop(self, timeout=10):
utils.status("Stopping container %s - %s" % (self.name, self.state['container_id']))
self.backend.stop_container(self.state['container_id'], timeout=timeout)
def destroy(self, timeout=None):
self.stop(timeout)
utils.status("Destroying container %s - %s" % (self.name, self.state['container_id']))
self.backend.remove_container(self.state['container_id'])
def get_ip_address(self):
return self.backend.get_ip_address(self.state['container_id'])
def inspect(self):
return self.backend.inspect_container(self.state['container_id'])
def attach(self):
# should probably catch ctrl-c here so that the process doesn't abort
for line in self.backend.attach_container(self.state['container_id']):
sys.stdout.write(line)
def _start_container(self, start=True):
        # Create the container and record its id; optionally start it below
        self.state['container_id'] = self.backend.create_container(self.state['image_id'], self.config)
        if start:
self.start()
self.log.info('Container started: %s %s', self.name, self.state['container_id'])
|
183008
|
from dreaml.dataframe.transform import BatchTransform
from dreaml.dataframe.dataframe import DataFrame
class Linear(BatchTransform):
def func(self,target_df,a,X_df,b,Y_df,row_labels=None,col_labels=None):
"""Fetch matrices from dataframes, and return the resulting linear
combination in a dataframe"""
x = X_df.get_matrix()
y = Y_df.get_matrix()
        if row_labels is None:
            row_labels = X_df._row_index.keys()
        if col_labels is None:
            col_labels = X_df._col_index.keys()
        if x.shape != y.shape:
            raise ValueError("X_df and Y_df must have the same shape, "
                             "got {} and {}".format(x.shape, y.shape))
        return DataFrame.from_matrix(a*x + b*y, row_labels, col_labels)
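# A minimal usage sketch (hypothetical data; assumes only the DataFrame API used above):
#   import numpy as np
#   X_df = DataFrame.from_matrix(np.ones((2, 2)), ['r0', 'r1'], ['c0', 'c1'])
#   Y_df = DataFrame.from_matrix(np.eye(2), ['r0', 'r1'], ['c0', 'c1'])
#   Linear().func(None, 2.0, X_df, -1.0, Y_df)  # 2*X - Y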
|
183033
|
import numpy as np
import collections
import torch
from tqdm import tqdm
from .mean_field_elbo import MeanFieldELBO
class HierarchicalELBO(MeanFieldELBO):
def __init__(self,
config,
model,
variational_likelihood,
variational_prior,
variational_posterior,
proximity_loss=None
):
super().__init__(config, model, variational_likelihood)
self.config = config
self.p_z = model
self.q_z = variational_likelihood
self.q_nu = variational_prior
self.r_nu = variational_posterior
        self.proximity_loss = proximity_loss
def set_proximity_loss(self, proximity_loss):
self.proximity_loss = proximity_loss
@torch.no_grad()
def sample_objective(self, num_samples, batch_size=2**13):
cfg = self.config
res = []
assert num_samples >= batch_size
for _ in tqdm(range(num_samples // batch_size)):
hier_elbo = self.sample_objective_single_batch(batch_size)
res.extend(hier_elbo.tolist())
return res
@torch.no_grad()
def sample_objective_single_batch(self, batch_size, return_terms=False):
nu, log_q_nu = self.q_nu.sample_and_log_prob(num_samples=batch_size)
z = self.q_z.sample(logit=nu)
nu_0, log_r_i_nu, log_r_nu = self.r_nu.inverse_and_log_prob(nu, z)
if log_r_i_nu is not None:
dims = tuple(range(1, log_r_i_nu.ndim))
log_r_nu = log_r_i_nu.sum(dims) + log_r_nu
log_q_z = self.q_z.log_prob(logit=nu, value=z).sum((1, 2))
log_p_z = -self.config.beta * self.p_z.energy(z)
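        # Per-sample hierarchical (auxiliary-variable) ELBO:
        # log p(z) - log q(z | nu) + log r(nu | z) - log q(nu)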
hier_elbo = (log_p_z - log_q_z + log_r_nu - log_q_nu).squeeze()
if return_terms:
res = {'log_p_z': log_p_z,
'log_q_z_entropy': -log_q_z,
'log_r_nu': log_r_nu,
'log_q_nu': log_q_nu,
'nu': nu,
'nu_0': nu_0}
return {k: v.cpu().numpy() for k, v in res.items()}
else:
return hier_elbo.cpu().detach().numpy()
def compute_grad(self, annealing_temp=1.0):
cfg = self.config
nu, log_q_nu = self.q_nu.sample_and_log_prob(cfg.num_samples_grad)
z = self.q_z.sample(logit=nu)
_, log_r_i_nu, log_r_nu = self.r_nu.inverse_and_log_prob(nu, z)
dims = tuple(range(1, log_r_nu.ndim))
if log_r_i_nu is not None:
log_r_nu = log_r_i_nu.sum(dims) + log_r_nu # (num_samples,)
log_r_i_nu = log_r_i_nu.detach() # (num_samples, L, L)
E_del_nu_z_i_terms = super().compute_grad_natural_parameters(
logit=nu,
z=z,
log_r_i_nu=log_r_i_nu,
annealing_temp=annealing_temp)
hier_elbo_pre_grad = (
# yields del_theta nu(eps; theta) del_nu L_{MF}
# both have 2 lattice dimensions; sum over these
(nu * E_del_nu_z_i_terms).sum(dims)
# yields E_q(z | nu) [del_theta nu(eps; theta) * del_nu log r(nu | z)]
+ log_r_nu / annealing_temp
# yields del_theta nu(eps; theta) del_nu log q(nu; theta)
- log_q_nu
) # do not take expectation over s(epsilon), only after adding proximity loss
# loss is negative elbo
loss = -hier_elbo_pre_grad
if self.proximity_loss is not None:
tensor_dict = {'log_q_nu': log_q_nu, 'log_r_nu': log_r_nu}
loss += self.proximity_loss.compute_total_constraint(tensor_dict)
self.proximity_loss.moving_average.update(tensor_dict)
# take expectation over s(epsilon) and compute gradients
loss.mean(0).backward()
|
183131
|
from django.db.models import OuterRef, Q, Exists
from rest_framework.request import Request
from rest_framework.response import Response
from usaspending_api.common.cache_decorator import cache_response
from usaspending_api.disaster.v2.views.disaster_base import DisasterBase
from usaspending_api.financial_activities.models import FinancialAccountsByProgramActivityObjectClass
from usaspending_api.references.models import DisasterEmergencyFundCode
class DefCodeCountViewSet(DisasterBase):
"""
Obtain the count of DEF Codes related to supplied DEFC filter.
"""
endpoint_doc = "usaspending_api/api_contracts/contracts/v2/disaster/def_code/count.md"
@cache_response()
def post(self, request: Request) -> Response:
filters = [
Q(disaster_emergency_fund_id=OuterRef("pk")),
self.is_in_provided_def_codes,
self.all_closed_defc_submissions,
self.is_non_zero_total_spending,
]
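        # The Q objects are ANDed inside a correlated subquery: Exists() flags each
        # DEF code with at least one FinancialAccountsByProgramActivityObjectClass
        # row passing every filter.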
count = (
DisasterEmergencyFundCode.objects.annotate(
include=Exists(FinancialAccountsByProgramActivityObjectClass.objects.filter(*filters).values("pk"))
)
.filter(include=True)
.values("pk")
.count()
)
return Response({"count": count})
|
183137
|
from PyQt5 import QtGui, QtCore, QtWidgets
import sys
Window = QtWidgets.QMainWindow
Brush = QtGui.QBrush
Pen = QtGui.QPen
Color = QtGui.QColor
Stage = QtWidgets.QGraphicsScene
ImageItem = QtWidgets.QGraphicsPixmapItem
Image = QtGui.QPixmap
class View(QtWidgets.QGraphicsView):
def __init__(self, stage):
super().__init__(stage)
self.stage = stage
i = Image('moon.jpg')
i = i.scaledToHeight(600)
self.stage.addItem(ImageItem(i))
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
stage = Stage(0, 0, 800, 600)
view = View(stage)
view.show()
    app.exec_()  # start the Qt event loop; returns when the last window closes
|
183145
|
import cmd
class Interface(cmd.Cmd):
prompt = 'Command: '
def do_foo(self, arg):
print(arg)
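# At the "Command: " prompt, typing "foo hello" prints "hello"; the cmd module
# also provides "help"/"help foo" and, where readline is available, tab completion.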
interface = Interface()
interface.cmdloop()
|
183193
|
import os
from protocols.BLE.ble_device import BLEDevice
class BLEReplayAttackHelper:
"""
This is the helper class which is used to perform BLE Replay attack.
"""
def __init__(self, file_path):
self.file_path = file_path
self.write_requests = {}
self.run()
def run(self):
self.create_tmp_file()
self.get_write_requests()
self.replay_write_requests()
self.delete_tmp_file()
def create_tmp_file(self):
"""
Convert pcap file to txt
"""
# convert pcap file to .txt file
file_path = "\\ ".join(self.file_path.split())
os.system("tshark -X lua_script:tmp.txt -r " + file_path + " -V -T text > tmp.txt")
def get_write_requests(self):
"""
Retrieves all write requests from the given file
"""
        # Read the converted capture dump, closing the handle when done
        with open("tmp.txt", "r") as f:
            content = f.read()
# Get frames
frames = content.split("\n\n")
# Search each frame to find Write Requests
for frame in frames:
try:
# Slave address index
slave_address_index = frame.index("Slave Address:")
# Update the frame
frame = frame[slave_address_index:]
# Get the slave address
open_para_index = frame.index("(")
close_para_index = frame.index(")")
slave_address = frame[open_para_index + 1:close_para_index]
# Write request index
write_request_index = frame.index("Opcode: Write Request")
# Update the frame
frame = frame[write_request_index:]
# Handle index
handle_index = frame.index("Handle:") + 8
# Update the frame
frame = frame[handle_index:]
# Space index
space_index = frame.index(" ")
# Get the handle
handle = frame[:space_index]
# Value index
value_index = frame.index("Value:") + 7
# Get the value
value = frame[value_index:]
# Add handle-value to the list
if slave_address in self.write_requests:
self.write_requests[slave_address].append({"handle": handle, "value": value})
else:
self.write_requests[slave_address] = [{"handle": handle, "value": value}]
except ValueError:
pass
print "retrieved write requests"
def replay_write_requests(self):
"""
Replay all write requests
"""
# Replay all write request
for slave_address in self.write_requests:
# Create a connection to the device
device = BLEDevice(slave_address)
# Replay all write request belonging to the device
for handle_value_pair in self.write_requests[slave_address]:
handle = handle_value_pair["handle"]
value = handle_value_pair["value"]
device.writecmd(handle, value)
print "wrote " + value + " to handle: " + handle
def delete_tmp_file(self):
"""
Delete the created tmp file
"""
os.remove("tmp.txt")
|
183221
|
import re
import base64
import os
import xml.etree.ElementTree as ET
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, logdevinfo, is_platform_windows
def get_deviceActivator(files_found, report_folder, seeker):
data_list = []
alllines = ''
file_found = str(files_found[0])
with open(file_found, 'r') as f_in:
for line in f_in:
line = line.strip()
alllines = alllines + line
found = re.findall('<key>ActivationInfoXML</key><data>(.*)</data><key>RKCertification</key><data>', alllines)
    base64_message = found[0]
data = base64.b64decode(base64_message)
outpath = os.path.join(report_folder, "results.xml")
with open(outpath, 'wb') as f_out:
f_out.write(data)
xmlfile = outpath
tree = ET.parse(xmlfile)
root = tree.getroot()
for elem in root:
for elemx in elem:
for elemz in elemx:
data_list.append(str(elemz.text).strip())
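    # Pair consecutive entries as (key, value): zipping one iterator with itself
    # consumes two items per tuple.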
it = iter(data_list)
results = list(zip(it, it))
for x in results:
if x[0] == 'EthernetMacAddress':
logdevinfo(f"Ethernet Mac Address: {x[1]}")
if x[0] == 'BluetoothAddress':
logdevinfo(f"Bluetooth Address: {x[1]}")
if x[0] == 'WifiAddress':
logdevinfo(f"Wifi Address: {x[1]}")
if x[0] == 'ModelNumber':
logdevinfo(f"Model Number: {x[1]}")
if len(results) > 0:
report = ArtifactHtmlReport('iOS Device Activator Data')
report.start_artifact_report(report_folder, 'iOS Device Activator Data')
report.add_script()
data_headers = ('Key','Values')
report.write_artifact_data_table(data_headers, results, file_found)
report.end_artifact_report()
tsvname = 'iOS Device Activator Data'
tsv(report_folder, data_headers, results, tsvname)
else:
logfunc('No iOS Device Activator Data')
|
183235
|
import numpy as np
import mpmath  # used directly below via mpmath.quad
from rvs import Dolly, BZipf, Pareto
from math_utils import *
from plot_utils import *
def Pr_Xnk_leq_x(X, n, k, x):
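  # P{X_(k:n) <= x}: the k-th smallest of n iid draws is <= x iff at least k of
  # the n draws land at or below x, hence the binomial sum below.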
# log(INFO, "x= {}".format(x) )
cdf = 0
for i in range(k, n+1):
cdf += binom_(n, i) * X.cdf(x)**i * X.tail(x)**(n-i)
return cdf
def EXnk(X, n, k, m=1):
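  # For a nonnegative variate, E[X^m] = int_0^inf m x^(m-1) P{X > x} dx; here
  # P{X > x} = 1 - Pr_Xnk_leq_x(X, n, k, x) for the k-th order statistic of n draws.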
if k == 0:
return 0
if m == 1:
# EXnk, abserr = scipy.integrate.quad(lambda x: 1 - Pr_Xnk_leq_x(X, n, k, x), 0.0001, np.Inf) # 2*X.u_l
EXnk = float(mpmath.quad(lambda x: 1 - Pr_Xnk_leq_x(X, n, k, x), [0.0001, 10*X.u_l] ) )
else:
# EXnk, abserr = scipy.integrate.quad(lambda x: m*x**(m-1) * (1 - Pr_Xnk_leq_x(X, n, k, x)), 0.0001, np.Inf)
EXnk = float(mpmath.quad(lambda x: m*x**(m-1) * (1 - Pr_Xnk_leq_x(X, n, k, x) ), [0.0001, 10*X.u_l] ) )
return EXnk
def ECnk(X, n, k):
if k == 0:
return 0
EC = 0
for i in range(1, k):
EC += EXnk(X, n, i)
EC += (n-k+1)*EXnk(X, n, k)
return EC
def plot_cdf_X(X):
x_l, Pr_X_leq_x_l = [], []
for x in np.linspace(0, 30, 100):
x_l.append(x)
Pr_X_leq_x_l.append(X.cdf(x) )
plot.plot(x_l, Pr_X_leq_x_l, c='blue', marker='x', ls=':', mew=0.1, ms=8)
fontsize = 20
plot.legend(loc='best', framealpha=0.5, fontsize=14, numpoints=1)
plot.xlabel(r'$x$', fontsize=fontsize)
plot.ylabel(r'$\Pr\{X \leq x\}$', fontsize=fontsize)
plot.title(r'$X \sim {}$'.format(X.to_latex() ), fontsize=fontsize)
fig = plot.gcf()
fig.set_size_inches(4, 4)
plot.savefig('plot_cdf_X.png', bbox_inches='tight')
fig.clear()
log(INFO, "done.")
def redsmall_ES_wSl(k, r, D, Sl, d=None, red='coding'):
if d is None:
return D.mean()*sum([EXnk(Sl, i, i)*k.pdf(i) for i in k.v_l] )
ED_given_D_leq_doverk = lambda k: D.mean_given_leq_x(d/k)
return redsmall_ES_wSl(k, r, D, Sl, d=None, red=red) \
+ sum([(EXnk(Sl, i*r, i) - EXnk(Sl, i, i) )*ED_given_D_leq_doverk(i)*D.cdf(d/i)*k.pdf(i) for i in k.v_l] )
# + sum([(ES_k_n_pareto(i, i*r, a, alpha) - ES_k_n_pareto(i, i, a, alpha) )*ED_given_D_leq_doverk(i)*D.cdf(d/i)*k.pdf(i) for i in k.v_l] )
def redsmall_ES2_wSl(k, r, D, Sl, d=None, red='coding'):
if d is None:
return D.moment(2)*sum([EXnk(Sl, i, i, m=2)*k.pdf(i) for i in k.v_l] )
ED2_given_D_leq_doverk = lambda k: moment(D, 2, given_X_leq_x=True, x=d/k)
return redsmall_ES2_wSl(k, r, D, Sl, d=None, red=red) \
+ sum([(EXnk(Sl, i*r, i, m=2) - EXnk(Sl, i, i, m=2) )*ED2_given_D_leq_doverk(i)*D.cdf(d/i)*k.pdf(i) for i in k.v_l] )
def redsmall_EC_wSl(k, r, D, Sl, d=None, red='coding'):
if d is None:
return k.mean()*D.mean()*Sl.mean()
ED_given_D_leq_doverk = lambda k: D.mean_given_leq_x(d/k)
return redsmall_EC_wSl(k, r, D, Sl, d=None, red=red) \
+ sum([(ECnk(Sl, i*r, i) - i*Sl.mean())*ED_given_D_leq_doverk(i)*D.cdf(d/i)*k.pdf(i) for i in k.v_l] )
def ar_for_ro0(ro0, N, Cap, k, r, D, Sl):
return ro0*N*Cap/k.mean()/D.mean()/Sl.mean()
def redsmall_ET_EW_Prqing_wMGc_wSl(ro0, N, Cap, k, r, D, Sl, d, red='coding'):
'''Using the result for M/M/c to approximate E[T] in M/G/c.
[https://en.wikipedia.org/wiki/M/G/k_queue]
'''
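  # The usual M/G/k heuristic scales the M/M/c waiting time by the service-time
  # squared coefficient of variation: E[W] ~= (C^2 + 1)/2 * E[W]_{M/M/c}, with
  # C^2 = (ES2 - ES^2)/ES^2. MGc_EW_Prqing (assumed imported via math_utils) is
  # taken to implement this from ES and ES2.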
ar = ar_for_ro0(ro0, N, Cap, k, r, D, Sl)
ES = redsmall_ES_wSl(k, r, D, Sl, d, red)
ES2 = redsmall_ES2_wSl(k, r, D, Sl, d, red)
EC = redsmall_EC_wSl(k, r, D, Sl, d, red)
log(INFO, "d= {}".format(d), ES=ES, ES2=ES2, EC=EC)
EW, Prqing = MGc_EW_Prqing(ar, N*Cap*ES/EC, ES, ES2)
if EW < 0:
# log(ERROR, "!!!", EW=EW, Prqing=Prqing, ES=ES, ES2=ES2, EC=EC)
# return None, None, None
# return (ES + abs(EW))**2, None, None
return 10**6, None, None
ET = ES + EW
# log(INFO, "d= {}, ro= {}, ES= {}, EW= {}, ET= {}".format(d, ro, ES, EW, ET) )
# log(INFO, "d= {}, ro= {}".format(d, ro) )
# return round(ET, 2), round(EW, 2), round(Prqing, 2)
return ET, EW, Prqing
def redsmall_approx_ET_EW_Prqing_wMGc_wSl(ro0, N, Cap, k, r, D, Sl, d, red='coding'):
ar = ar_for_ro0(ro0, N, Cap, k, r, D, Sl)
ro = ro0
ES = redsmall_ES_wSl(k, r, D, Sl, d, red)
# ES2 = redsmall_ES2_wSl(k, r, D, Sl, d, red)
# EC = redsmall_EC_wSl(k, r, D, Sl, d, red)
log(INFO, "d= {}".format(d), ar=ar, ES=ES) # , ES2=ES2, EC=EC
EW = 1/ar * ro**2/(1 - ro)
ET = ES + EW
return ET, EW, ro
def plot_ET(N, Cap, k, r, D, Sl, red='coding'):
def plot_(ro0):
log(INFO, "ro0= {}".format(ro0) )
d_l, ET_l = [], []
for d in np.linspace(D.l_l, D.mean()*15, 7):
ET, EW, Prqing = redsmall_ET_EW_Prqing_wMGc_wSl(ro0, N, Cap, k, r, D, Sl, d, red='coding') # redsmall_ES_wSl(k, r, D, Sl, d, red)
log(INFO, "d= {}, ET= {}, EW= {}, Prqing= {}".format(d, ET, EW, Prqing) )
if ET > 150:
break
d_l.append(d)
ET_l.append(ET)
plot.plot(d_l, ET_l, label=r'$\rho_0= {}$'.format(ro0), c=next(darkcolor_c), marker=next(marker_c), ls=':', mew=0.1, ms=8)
plot_(ro0=0.8)
# plot_(ro0=0.9)
fontsize = 20
plot.legend(loc='best', framealpha=0.5, fontsize=14, numpoints=1)
plot.xlabel(r'$d$', fontsize=fontsize)
plot.ylabel(r'$E[T]$', fontsize=fontsize)
plot.title(r'$r= {}$, $k \sim {}$'.format(r, k.to_latex() ) + "\n" \
+ r'$D \sim {}$, $Sl \sim {}$'.format(D.to_latex(), Sl.to_latex() ), fontsize=fontsize)
fig = plot.gcf()
fig.set_size_inches(4, 4)
plot.savefig('plot_ET.png', bbox_inches='tight')
fig.clear()
log(INFO, "done.")
if __name__ == "__main__":
X = Dolly()
print("EX= {}".format(X.mean() ) )
def EXnk_(n, k):
EX_ = EXnk(X, n, k)
print("n= {}, k= {}, EXnk= {}".format(n, k, EX_) )
# EXnk_(n=10, k=10)
# EXnk_(n=10, k=8)
# EXnk_(n=10, k=5)
N, Cap = 20, 10
k = BZipf(1, 10)
r = 2
D = Pareto(10, 3)
Sl = Dolly()
plot_ET(N, Cap, k, r, D, Sl)
|
183267
|
from rest_framework import serializers, exceptions
from ..fields import UUIDField
from ..models import User_Group_Membership
class DeleteMembershipSerializer(serializers.Serializer):
membership_id = UUIDField(required=True)
def validate(self, attrs: dict) -> dict:
membership_id = attrs.get('membership_id')
try:
membership = User_Group_Membership.objects.get(pk=membership_id)
except User_Group_Membership.DoesNotExist:
msg = "NO_PERMISSION_OR_NOT_EXIST"
raise exceptions.ValidationError(msg)
if membership.user != self.context['request'].user:
            # It's not the requester's own membership (the leave-group case), so
            # check that they have admin privileges for this group
if not User_Group_Membership.objects.filter(group_id=membership.group_id, user=self.context['request'].user,
group_admin=True, accepted=True).exists():
msg = "NO_PERMISSION_OR_NOT_EXIST"
raise exceptions.ValidationError(msg)
attrs['membership'] = membership
return attrs
|
183282
|
import time
from enum import Enum
import yaml
from yahoofinancials import YahooFinancials
from market_watcher.config import context
class STRATEGIES(Enum):
LONG_STRADDLE = "long straddle"
SHORT_STRADDLE = "short straddle"
def get_target_stocks(file_path):
"""Reads target stocks for long/short straddle strategies."""
try:
with open(file_path) as f:
target_stocks = yaml.load(f, Loader=yaml.FullLoader)
return target_stocks
except Exception as e:
print(e)
def get_email_config():
"""Returns email notifier related configuration."""
email_config = {}
email_config["hostname"] = context.config["SMTP_HOSTNAME"]
email_config["port"] = context.config["SMTP_PORT"]
email_config["username"] = context.config["SMTP_USERNAME"]
email_config["password"] = context.config["SMTP_PASSWORD"]
email_config["sender"] = context.config["EMAIL_SENDER"]
email_config["recipients"] = context.config["EMAIL_RECIPIENTS"]
return email_config
def get_pnl_threshold_config():
    """Returns strategy alert configuration."""
    pnl_threshold = {}
    pnl_threshold["LONG_THRESHOLD"] = float(context.config["LONG_THRESHOLD"])
    pnl_threshold["SHORT_THRESHOLD"] = float(context.config["SHORT_THRESHOLD"])
    return pnl_threshold
def get_slack_config():
slack_config = {}
slack_config["long url"] = context.config["SLACK_LONG_WEBHOOK"]
slack_config["short url"] = context.config["SLACK_SHORT_WEBHOOK"]
return slack_config
class MarketWatcherEngine:
"""MarketWatcher core engine logic for scarping financial data."""
def __init__(self, target_stocks=None, notifiers=None):
self.target_stocks = target_stocks
self.notifiers = notifiers
        pnl_threshold_config = get_pnl_threshold_config()
        self.long_threshold = pnl_threshold_config["LONG_THRESHOLD"]
        self.short_threshold = pnl_threshold_config["SHORT_THRESHOLD"]
self.daily_pnls = None
    def search_for_investment_opportunities(self):
# Update interval for sending email notifications
update_timeout = int(context.config["UPDATE_TIMEOUT"])
# Remaining time until email alert
remaining_seconds = update_timeout
while context.running:
if remaining_seconds > 0:
remaining_seconds -= 1
time.sleep(1)
else:
remaining_seconds = update_timeout
self.process_latest_market_movements()
def process_latest_market_movements(self):
"""Goes through each target stock and checks if there is a
potential invetment opportunity.
If the opportunity is found trader is notified about it though email
noticication stating which options trading strategy trader should
implement.
"""
self.daily_pnls = self.get_daily_pnls()
investment_opportunities = []
for ticker in self.target_stocks:
if self.is_investment_opportunity(
self.target_stocks[ticker]["strategy"], abs(self.daily_pnls[ticker])
):
investment_opportunities.append(ticker)
investment_data = self.get_investment_data(investment_opportunities)
self.notify(investment_data)
return investment_data
def notify(self, investment_data):
"""Sends investment updates to subscribed notifiers."""
if self.notifiers:
for notifier in self.notifiers:
notifier.notify(investment_data)
def get_daily_pnls(self):
"""Returns daily pnls"""
target_stocks = list(self.target_stocks.keys())
yahoo_financials_target_stocks = YahooFinancials(target_stocks)
return yahoo_financials_target_stocks.get_current_percent_change()
def get_investment_data(self, investment_opportunities):
"""Returns two dictionaries that contain investment data for both strategies"""
long_straddle = {}
short_straddle = {}
for ticker in investment_opportunities:
if STRATEGIES.LONG_STRADDLE.value == self.target_stocks[ticker]["strategy"]:
long_straddle[ticker] = self.daily_pnls[ticker]
elif (
STRATEGIES.SHORT_STRADDLE.value
== self.target_stocks[ticker]["strategy"]
):
short_straddle[ticker] = self.daily_pnls[ticker]
return {
STRATEGIES.LONG_STRADDLE.value: long_straddle,
STRATEGIES.SHORT_STRADDLE.value: short_straddle,
}
def is_investment_opportunity(self, strategy, abs_daily_pnl):
"""Check if the stock is applicable for one of the options trading strategies."""
if STRATEGIES.LONG_STRADDLE.value == strategy:
            if abs_daily_pnl > self.long_threshold:
return True
elif STRATEGIES.SHORT_STRADDLE.value == strategy:
            if abs_daily_pnl < self.short_threshold:
return True
return False
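# A minimal wiring sketch (hypothetical file name, empty notifier list; the real
# entrypoint lives elsewhere in market_watcher):
#   engine = MarketWatcherEngine(
#       target_stocks=get_target_stocks("target_stocks.yml"), notifiers=[]
#   )
#   engine.search_for_investment_opportunities()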
|
183315
|
from django.db import models
from project.models import Project
from utils.django_utils.base_model import BaseModel
# Create your models here.
class Function(BaseModel):
    function_name = models.CharField(max_length=150, verbose_name='Global function name', help_text='Global function name')
    function_desc = models.CharField(max_length=256, blank=True, default='', verbose_name='Global function description', help_text='Global function description')
    function_body = models.TextField(verbose_name='Global function body', help_text='Global function body')
    project = models.OneToOneField(to=Project, on_delete=models.CASCADE, verbose_name='Owning project ID', help_text='Owning project ID')
    class Meta:
        db_table = 'function_info'
        verbose_name = 'Global function'
        verbose_name_plural = verbose_name
def __str__(self):
return self.function_name
|
183398
|
import torch
import numpy as np
def calculate_accuracy(y_pred, y_true):
# Inspired from https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html.
_, predicted = torch.max(y_pred, 1)
acc = (predicted == y_true).sum().item() / len(y_pred)
return acc
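# Example: three samples over two classes; row-wise argmax gives [1, 0, 1], so two
# of three predictions match y_true and the accuracy is 2/3:
#   y_pred = torch.tensor([[0.2, 0.8], [0.9, 0.1], [0.3, 0.7]])
#   y_true = torch.tensor([1, 0, 0])
#   calculate_accuracy(y_pred, y_true)  # 0.666...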
|
183412
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from six.moves import xrange
from wtte import transforms as tr
def timeline_plot(padded, title='', cmap="jet", plot=True, fig=None, ax=None):
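    # Left panel: right-padded sequences (aligned at sequence start); right panel:
    # the same data left-padded via tr.right_pad_to_left_pad, aligning at absolute time.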
if fig is None or ax is None:
fig, ax = plt.subplots(ncols=2, sharey=True, figsize=(12, 4))
ax[0].imshow(padded, interpolation='none',
aspect='auto', cmap=cmap, origin='lower')
ax[0].set_ylabel('sequence')
ax[0].set_xlabel('sequence time')
ax[1].imshow(tr.right_pad_to_left_pad(padded),
interpolation='none',
aspect='auto',
cmap=cmap,
origin='lower')
ax[1].set_ylabel('sequence')
ax[1].set_xlabel('absolute time') # (Assuming sequences end today)
fig.suptitle(title, fontsize=14)
if plot:
fig.show()
return None, None
else:
return fig, ax
def timeline_aggregate_plot(padded, title='', cmap="jet", plot=True):
fig, ax = plt.subplots(ncols=2, nrows=2, sharex=True,
sharey=False, figsize=(12, 8))
fig, ax[0] = timeline_plot(
padded, title, cmap=cmap, plot=False, fig=fig, ax=ax[0])
ax[1, 0].plot(np.nanmean(padded, axis=0), lw=0.5,
c='black', drawstyle='steps-post')
ax[1, 0].set_title('mean/timestep')
padded = tr.right_pad_to_left_pad(padded)
ax[1, 1].plot(np.nanmean(padded, axis=0), lw=0.5,
c='black', drawstyle='steps-post')
ax[1, 1].set_title('mean/timestep')
fig.suptitle(title, fontsize=14)
if plot:
fig.show()
return None, None
else:
return fig, ax
|
183487
|
import xmltodict
import pytest
from jmeter_api.basics.jsr223 import ScriptLanguage
from jmeter_api.samplers.jsr223.elements import JSR223Sampler
from jmeter_api.basics.utils import tag_wrapper
class TestJSR223SamplerRender:
def test_scriptLanguage(self):
element = JSR223Sampler(script_language=ScriptLanguage.JAVA)
rendered_doc = element.to_xml()
parsed_doc = xmltodict.parse(tag_wrapper(rendered_doc,'test_result'))
for tag in parsed_doc['test_result']['JSR223Sampler']['stringProp']:
if tag['@name'] == 'scriptLanguage':
assert tag['#text'] == 'java'
def test_cacheKey(self):
element = JSR223Sampler(cache_key=False)
rendered_doc = element.to_xml()
parsed_doc = xmltodict.parse(tag_wrapper(rendered_doc,'test_result'))
for tag in parsed_doc['test_result']['JSR223Sampler']['stringProp']:
if tag['@name'] == 'cacheKey':
assert tag['#text'] == 'false'
def test_fileName(self):
element = JSR223Sampler(filename="./jmeter_api/basics/jsr223_test.groovy")
rendered_doc = element.to_xml()
parsed_doc = xmltodict.parse(tag_wrapper(rendered_doc,'test_result'))
for tag in parsed_doc['test_result']['JSR223Sampler']['stringProp']:
if tag['@name'] == 'filename':
assert tag['#text'] == "./jmeter_api/basics/jsr223_test.groovy"
def test_script(self):
sc = """var a=2
vars.put("some value",a)
log("value added")"""
element = JSR223Sampler(script=sc)
rendered_doc = element.to_xml()
parsed_doc = xmltodict.parse(tag_wrapper(rendered_doc,'test_result'))
for tag in parsed_doc['test_result']['JSR223Sampler']['stringProp']:
if tag['@name'] == 'script':
assert tag['#text'] == sc
def test_hashtree_contain(self):
element = JSR223Sampler()
rendered_doc = element.to_xml()
assert '<hashTree />' in rendered_doc
|
183495
|
from flake8_plugin_utils import assert_error, assert_not_error
from flake8_fastapi.errors import RouteDecoratorError
from flake8_fastapi.visitors import RouteDecorator
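# `code` is expected to be supplied by pytest parametrization or a fixture defined
# elsewhere (e.g. conftest.py); only the assertion helpers are exercised here.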
def test_code_with_error(code: str):
assert_error(RouteDecorator, code, RouteDecoratorError)
def test_code_without_error(code: str):
assert_not_error(RouteDecorator, code)
|
183523
|
from dataclasses import dataclass
from enum import Enum, auto
from magma.is_definition import isdefinition
from magma.logging import root_logger
from magma.passes import DefinitionPass
_logger = root_logger()
class MultipleDefinitionException(Exception):
pass
class UniquificationMode(Enum):
WARN = auto()
ERROR = auto()
UNIQUIFY = auto()
@dataclass(frozen=True)
class _HashStruct:
defn_repr: str
is_verilog: bool
    verilog_str: str
inline_verilog: tuple
def _make_hash_struct(definition):
repr_ = repr(definition)
inline_verilog = tuple()
for inline_str, connect_references in definition.inline_verilog_strs:
connect_references = tuple(connect_references.items())
inline_verilog += (inline_str, connect_references)
if hasattr(definition, "verilogFile") and definition.verilogFile:
return _HashStruct(repr_, True, definition.verilogFile, inline_verilog)
return _HashStruct(repr_, False, "", inline_verilog)
def _hash(definition):
hash_struct = _make_hash_struct(definition)
return hash(hash_struct)
class UniquificationPass(DefinitionPass):
def __init__(self, main, mode):
super().__init__(main)
self.mode = mode
self.seen = {}
self.original_names = {}
def _rename(self, ckt, new_name):
assert ckt not in self.original_names
self.original_names[ckt] = ckt.name
type(ckt).rename(ckt, new_name)
def __call__(self, definition):
for module in definition.bind_modules:
self._run(module)
name = definition.name
key = _hash(definition)
seen = self.seen.setdefault(name, {})
if key not in seen:
if self.mode is UniquificationMode.UNIQUIFY and len(seen) > 0:
suffix = "_unq" + str(len(seen))
new_name = name + suffix
self._rename(definition, new_name)
seen[key] = [definition]
else:
if self.mode is not UniquificationMode.UNIQUIFY:
assert seen[key][0].name == name
elif name != seen[key][0].name:
new_name = seen[key][0].name
self._rename(definition, new_name)
seen[key].append(definition)
def run(self):
super().run()
duplicated = []
for name, definitions in self.seen.items():
if len(definitions) > 1:
duplicated.append((name, definitions))
UniquificationPass.handle(duplicated, self.mode)
@staticmethod
def handle(duplicated, mode):
        if len(duplicated):
            msg = f"Multiple definitions: {[name for name, _ in duplicated]}"
            if mode is UniquificationMode.ERROR:
                _logger.error(msg)
                raise MultipleDefinitionException([name for name, _ in duplicated])
            elif mode is UniquificationMode.WARN:
                _logger.warning(msg)
def _get_mode(mode_or_str):
if isinstance(mode_or_str, str):
try:
return UniquificationMode[mode_or_str]
        except KeyError as e:
            modes = [k for k in UniquificationMode.__members__]
            raise ValueError(f"Valid uniq. modes are {modes}") from e
if isinstance(mode_or_str, UniquificationMode):
return mode_or_str
raise NotImplementedError(f"Unsupported type: {type(mode_or_str)}")
def reset_names(original_names):
for ckt, original_name in original_names.items():
type(ckt).rename(ckt, original_name)
# This pass runs uniquification according to @mode and returns a dictionary
# mapping any renamed circuits to their original names. If @mode is ERROR or
# WARN the returned dictionary should be empty.
def uniquification_pass(circuit, mode_or_str):
mode = _get_mode(mode_or_str)
pass_ = UniquificationPass(circuit, mode)
pass_.run()
return pass_.original_names
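# Hedged usage sketch (assumes `Top` is a magma circuit defined elsewhere):
#   original_names = uniquification_pass(Top, "UNIQUIFY")
#   ...  # compile/emit while definition names are unique
#   reset_names(original_names)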
|