code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
from django.shortcuts import render, redirect, reverse
from django.contrib import messages
from django.shortcuts import get_object_or_404
from django.core.paginator import Paginator
from hknweb.utils import markdownify
from hknweb.utils import allow_public_access
from hknweb.events.constants import (
ACCESSLEVEL_TO_DESCRIPTION,
ATTR,
RSVPS_PER_PAGE,
)
from hknweb.events.models import Event, Rsvp, AttendanceForm
from hknweb.events.utils import format_url
from hknweb.utils import get_access_level
@allow_public_access
def show_details(request, id):
return show_details_helper(request, id, reverse("events:index"), True)
def show_details_helper(request, id, back_link: str, can_edit: bool):
event = get_object_or_404(Event, pk=id)
if event.access_level < get_access_level(request.user):
messages.warning(request, "Insufficent permission to access event.")
return redirect(back_link)
context = {
"event": event,
"event_description": markdownify(event.description),
"event_location": format_url(event.location),
"user_access_level": ACCESSLEVEL_TO_DESCRIPTION[get_access_level(request.user)],
"event_access_level": ACCESSLEVEL_TO_DESCRIPTION[event.access_level],
"back_link": back_link,
"can_edit": can_edit and request.user.has_perm("events.change_event"),
}
if not request.user.is_authenticated:
return render(request, "events/show_details.html", context)
rsvps = Rsvp.objects.filter(event=event)
waitlisted = False
waitlist_position = 0
rsvp = None
user_rsvps = rsvps.filter(user=request.user)
if user_rsvps.exists():
# Gets the rsvp object for the user
rsvp = user_rsvps.first()
# Check if waitlisted
if event.rsvp_limit:
rsvps_before = rsvps.filter(created_at__lt=rsvp.created_at).count()
waitlisted = rsvps_before >= event.rsvp_limit
# Get waitlist position
if waitlisted:
position = rsvps.filter(created_at__lt=rsvp.created_at).count()
waitlist_position = position - event.rsvp_limit + 1
# Render only non-waitlisted rsvps
rsvps = event.admitted_set()
waitlists = event.waitlist_set()
limit = event.rsvp_limit
rsvps_page = Paginator(rsvps, RSVPS_PER_PAGE).get_page(
request.GET.get("rsvps_page")
)
waitlists_page = Paginator(waitlists, RSVPS_PER_PAGE).get_page(
request.GET.get("waitlists_page")
)
data = [
{
ATTR.TITLE: "RSVPs",
ATTR.DATA: rsvps_page if len(rsvps_page) > 0 else None,
ATTR.PAGE_PARAM: "rsvps_page",
ATTR.COUNT: str(rsvps.count()) + " / {limit}".format(limit=limit),
},
]
if limit:
data.append(
{
ATTR.TITLE: "Waitlist",
ATTR.DATA: waitlists_page if len(waitlists_page) > 0 else None,
ATTR.PAGE_PARAM: "waitlists_page",
ATTR.COUNT: str(waitlists.count()),
}
)
context = {
**context,
ATTR.DATA: data,
"rsvp": rsvp,
"attendance_form": AttendanceForm.objects.filter(event=event).first(),
"waitlisted": waitlisted,
"waitlist_position": waitlist_position,
}
return render(request, "events/show_details.html", context)
| [
"django.shortcuts.render",
"hknweb.utils.get_access_level",
"hknweb.utils.markdownify",
"hknweb.events.models.AttendanceForm.objects.filter",
"hknweb.events.models.Rsvp.objects.filter",
"django.contrib.messages.warning",
"django.shortcuts.get_object_or_404",
"django.shortcuts.redirect",
"django.shor... | [((727, 758), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Event'], {'pk': 'id'}), '(Event, pk=id)\n', (744, 758), False, 'from django.shortcuts import get_object_or_404\n'), ((1495, 1527), 'hknweb.events.models.Rsvp.objects.filter', 'Rsvp.objects.filter', ([], {'event': 'event'}), '(event=event)\n', (1514, 1527), False, 'from hknweb.events.models import Event, Rsvp, AttendanceForm\n'), ((3306, 3358), 'django.shortcuts.render', 'render', (['request', '"""events/show_details.html"""', 'context'], {}), "(request, 'events/show_details.html', context)\n", (3312, 3358), False, 'from django.shortcuts import render, redirect, reverse\n'), ((612, 635), 'django.shortcuts.reverse', 'reverse', (['"""events:index"""'], {}), "('events:index')\n", (619, 635), False, 'from django.shortcuts import render, redirect, reverse\n'), ((787, 817), 'hknweb.utils.get_access_level', 'get_access_level', (['request.user'], {}), '(request.user)\n', (803, 817), False, 'from hknweb.utils import get_access_level\n'), ((827, 895), 'django.contrib.messages.warning', 'messages.warning', (['request', '"""Insufficent permission to access event."""'], {}), "(request, 'Insufficent permission to access event.')\n", (843, 895), False, 'from django.contrib import messages\n'), ((911, 930), 'django.shortcuts.redirect', 'redirect', (['back_link'], {}), '(back_link)\n', (919, 930), False, 'from django.shortcuts import render, redirect, reverse\n'), ((1001, 1031), 'hknweb.utils.markdownify', 'markdownify', (['event.description'], {}), '(event.description)\n', (1012, 1031), False, 'from hknweb.utils import markdownify\n'), ((1059, 1085), 'hknweb.events.utils.format_url', 'format_url', (['event.location'], {}), '(event.location)\n', (1069, 1085), False, 'from hknweb.events.utils import format_url\n'), ((1429, 1481), 'django.shortcuts.render', 'render', (['request', '"""events/show_details.html"""', 'context'], {}), "(request, 'events/show_details.html', context)\n", (1435, 
1481), False, 'from django.shortcuts import render, redirect, reverse\n'), ((1143, 1173), 'hknweb.utils.get_access_level', 'get_access_level', (['request.user'], {}), '(request.user)\n', (1159, 1173), False, 'from hknweb.utils import get_access_level\n'), ((2282, 2314), 'django.core.paginator.Paginator', 'Paginator', (['rsvps', 'RSVPS_PER_PAGE'], {}), '(rsvps, RSVPS_PER_PAGE)\n', (2291, 2314), False, 'from django.core.paginator import Paginator\n'), ((2390, 2426), 'django.core.paginator.Paginator', 'Paginator', (['waitlists', 'RSVPS_PER_PAGE'], {}), '(waitlists, RSVPS_PER_PAGE)\n', (2399, 2426), False, 'from django.core.paginator import Paginator\n'), ((3155, 3197), 'hknweb.events.models.AttendanceForm.objects.filter', 'AttendanceForm.objects.filter', ([], {'event': 'event'}), '(event=event)\n', (3184, 3197), False, 'from hknweb.events.models import Event, Rsvp, AttendanceForm\n')] |
import sys
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # NOQA
import seaborn # NOQA
from spherecluster import sample_vMF
plt.ion()
n_clusters = 3
mus = np.random.randn(3, n_clusters)
mus, r = np.linalg.qr(mus, mode='reduced')
kappas = [15, 15, 15]
num_points_per_class = 250
Xs = []
for nn in range(n_clusters):
new_X = sample_vMF(mus[nn], kappas[nn], num_points_per_class)
Xs.append(new_X.T)
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(
1, 1, 1, aspect='equal', projection='3d',
adjustable='box-forced', xlim=[-1.1, 1.1], ylim=[-1.1, 1.1],
zlim=[-1.1, 1.1]
)
colors = ['b', 'r', 'g']
for nn in range(n_clusters):
ax.scatter(Xs[nn][0, :], Xs[nn][1, :], Xs[nn][2, :], c=colors[nn])
ax.set_aspect('equal')
plt.axis('off')
plt.show()
def r_input(val=None):
val = val or ''
if sys.version_info[0] >= 3:
return eval(input(val))
return raw_input(val)
r_input()
| [
"numpy.linalg.qr",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.axis",
"numpy.random.randn",
"spherecluster.sample_vMF",
"matplotlib.pyplot.show"
] | [((177, 186), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (184, 186), True, 'from matplotlib import pyplot as plt\n'), ((209, 239), 'numpy.random.randn', 'np.random.randn', (['(3)', 'n_clusters'], {}), '(3, n_clusters)\n', (224, 239), True, 'import numpy as np\n'), ((249, 282), 'numpy.linalg.qr', 'np.linalg.qr', (['mus'], {'mode': '"""reduced"""'}), "(mus, mode='reduced')\n", (261, 282), True, 'import numpy as np\n'), ((468, 494), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (478, 494), True, 'from matplotlib import pyplot as plt\n'), ((801, 816), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (809, 816), True, 'from matplotlib import pyplot as plt\n'), ((817, 827), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (825, 827), True, 'from matplotlib import pyplot as plt\n'), ((383, 436), 'spherecluster.sample_vMF', 'sample_vMF', (['mus[nn]', 'kappas[nn]', 'num_points_per_class'], {}), '(mus[nn], kappas[nn], num_points_per_class)\n', (393, 436), False, 'from spherecluster import sample_vMF\n')] |
from ariadne import MutationType
from datetime import datetime as dt
from models.scope import Scope
from schemas.helpers.normalize import change_keys
from schemas.scope import ScopeCreate
mutations_resolvers = MutationType()
@mutations_resolvers.field("scopeCreate")
async def resolve_scope_create(_, info, scope) -> dict:
store_data = Scope.get_instance()
data = ScopeCreate(**scope, key=f'{scope["collection"]}{scope["action"]}')
normalize = change_keys(data.dict(exclude_none=True), key="_key")
return await store_data.create(normalize)
| [
"schemas.scope.ScopeCreate",
"models.scope.Scope.get_instance",
"ariadne.MutationType"
] | [((212, 226), 'ariadne.MutationType', 'MutationType', ([], {}), '()\n', (224, 226), False, 'from ariadne import MutationType\n'), ((344, 364), 'models.scope.Scope.get_instance', 'Scope.get_instance', ([], {}), '()\n', (362, 364), False, 'from models.scope import Scope\n'), ((376, 443), 'schemas.scope.ScopeCreate', 'ScopeCreate', ([], {'key': 'f"""{scope[\'collection\']}{scope[\'action\']}"""'}), '(**scope, key=f"{scope[\'collection\']}{scope[\'action\']}")\n', (387, 443), False, 'from schemas.scope import ScopeCreate\n')] |
from typing import Optional
import napari
import napari.layers
import numpy as np
from napari.utils.geometry import project_point_onto_plane
def point_in_bounding_box(point: np.ndarray, bounding_box: np.ndarray) -> bool:
"""Determine whether an nD point is inside an nD bounding box.
Parameters
----------
point : np.ndarray
(n,) array containing nD point coordinates to check.
bounding_box : np.ndarray
(2, n) array containing the min and max of the nD bounding box.
As returned by `Layer._extent_data`.
"""
if np.all(point > bounding_box[0]) and np.all(point < bounding_box[1]):
return True
return False
def drag_data_to_projected_distance(
start_position, end_position, view_direction, vector
):
"""Calculate the projected distance between two mouse events.
Project the drag vector between two mouse events onto a 3D vector
specified in data coordinates.
The general strategy is to
1) find mouse drag start and end positions, project them onto a
pseudo-canvas (a plane aligned with the canvas) in data coordinates.
2) project the mouse drag vector onto the (normalised) vector in data
coordinates
Parameters
----------
start_position : np.ndarray
Starting point of the drag vector in data coordinates
end_position : np.ndarray
End point of the drag vector in data coordinates
view_direction : np.ndarray
Vector defining the plane normal of the plane onto which the drag
vector is projected.
vector : np.ndarray
(3,) unit vector or (n, 3) array thereof on which to project the drag
vector from start_event to end_event. This argument is defined in data
coordinates.
Returns
-------
projected_distance : (1, ) or (n, ) np.ndarray of float
"""
# enforce at least 2d input
vector = np.atleast_2d(vector)
# Store the start and end positions in world coordinates
start_position = np.array(start_position)
end_position = np.array(end_position)
# Project the start and end positions onto a pseudo-canvas, a plane
# parallel to the rendered canvas in data coordinates.
start_position_canvas = start_position
end_position_canvas = project_point_onto_plane(
end_position, start_position_canvas, view_direction
)
# Calculate the drag vector on the pseudo-canvas.
drag_vector_canvas = np.squeeze(
end_position_canvas - start_position_canvas
)
# Project the drag vector onto the specified vector(s), return the distance
return np.einsum('j, ij -> i', drag_vector_canvas, vector).squeeze()
def point_in_layer_bounding_box(point, layer):
bbox = layer._display_bounding_box(layer._dims_displayed).T
if np.any(point < bbox[0]) or np.any(point > bbox[1]):
return False
else:
return True
def rotation_matrices_to_align_vectors(a: np.ndarray, b: np.ndarray):
"""
Find rotation matrices r such that r @ a = b
Implementation designed to avoid trig calls, a and b must be normalised.
based on https://iquilezles.org/www/articles/noacos/noacos.htm
Parameters
----------
a : np.ndarray
(1 or n, 3) normalised vector(s) of length 3.
b : np.ndarray
(1 or n, 3) normalised vector(s) of length 3.
Returns
-------
r : np.ndarray
(3, 3) rotation matrix or (n, 3, 3) array thereof.
"""
# setup
a = a.reshape(-1, 3)
b = b.reshape(-1, 3)
n_vectors = a.shape[0]
# cross product to find axis about which rotation should occur
axis = np.cross(a, b, axis=1)
# dot product equals cosine of angle between normalised vectors
cos_angle = np.einsum('ij, ij -> i', a, b)
# k is a constant which appears as a factor in the rotation matrix
k = 1 / (1 + cos_angle)
# construct rotation matrix
r = np.empty((n_vectors, 3, 3))
r[:, 0, 0] = (axis[:, 0] * axis[:, 0] * k) + cos_angle
r[:, 0, 1] = (axis[:, 1] * axis[:, 0] * k) - axis[:, 2]
r[:, 0, 2] = (axis[:, 2] * axis[:, 0] * k) + axis[:, 1]
r[:, 1, 0] = (axis[:, 0] * axis[:, 1] * k) + axis[:, 2]
r[:, 1, 1] = (axis[:, 1] * axis[:, 1] * k) + cos_angle
r[:, 1, 2] = (axis[:, 2] * axis[:, 1] * k) - axis[:, 0]
r[:, 2, 0] = (axis[:, 0] * axis[:, 2] * k) - axis[:, 1]
r[:, 2, 1] = (axis[:, 1] * axis[:, 2] * k) + axis[:, 0]
r[:, 2, 2] = (axis[:, 2] * axis[:, 2] * k) + cos_angle
return r.squeeze()
def rotation_matrix_from_z_vector(z_vector: np.ndarray):
return rotation_matrices_to_align_vectors(np.array([0, 0, 1]), z_vector)
def theta2rotz(theta: np.ndarray) -> np.ndarray:
"""
Rz = [[c(t), -s(t), 0],
[s(t), c(t), 0],
[ 0, 0, 1]]
"""
theta = np.deg2rad(np.asarray(theta).reshape(-1))
rotation_matrices = np.zeros((theta.shape[0], 3, 3), dtype=float)
cos_theta = np.cos(theta)
sin_theta = np.sin(theta)
rotation_matrices[:, 2, 2] = 1
rotation_matrices[:, (0, 1), (0, 1)] = cos_theta[:, np.newaxis]
rotation_matrices[:, 0, 1] = -sin_theta
rotation_matrices[:, 1, 0] = sin_theta
return rotation_matrices.squeeze() | [
"numpy.atleast_2d",
"numpy.cross",
"numpy.asarray",
"numpy.any",
"numpy.squeeze",
"numpy.array",
"numpy.zeros",
"numpy.einsum",
"numpy.empty",
"numpy.cos",
"numpy.sin",
"numpy.all",
"napari.utils.geometry.project_point_onto_plane"
] | [((1901, 1922), 'numpy.atleast_2d', 'np.atleast_2d', (['vector'], {}), '(vector)\n', (1914, 1922), True, 'import numpy as np\n'), ((2006, 2030), 'numpy.array', 'np.array', (['start_position'], {}), '(start_position)\n', (2014, 2030), True, 'import numpy as np\n'), ((2050, 2072), 'numpy.array', 'np.array', (['end_position'], {}), '(end_position)\n', (2058, 2072), True, 'import numpy as np\n'), ((2274, 2351), 'napari.utils.geometry.project_point_onto_plane', 'project_point_onto_plane', (['end_position', 'start_position_canvas', 'view_direction'], {}), '(end_position, start_position_canvas, view_direction)\n', (2298, 2351), False, 'from napari.utils.geometry import project_point_onto_plane\n'), ((2445, 2500), 'numpy.squeeze', 'np.squeeze', (['(end_position_canvas - start_position_canvas)'], {}), '(end_position_canvas - start_position_canvas)\n', (2455, 2500), True, 'import numpy as np\n'), ((3622, 3644), 'numpy.cross', 'np.cross', (['a', 'b'], {'axis': '(1)'}), '(a, b, axis=1)\n', (3630, 3644), True, 'import numpy as np\n'), ((3729, 3759), 'numpy.einsum', 'np.einsum', (['"""ij, ij -> i"""', 'a', 'b'], {}), "('ij, ij -> i', a, b)\n", (3738, 3759), True, 'import numpy as np\n'), ((3900, 3927), 'numpy.empty', 'np.empty', (['(n_vectors, 3, 3)'], {}), '((n_vectors, 3, 3))\n', (3908, 3927), True, 'import numpy as np\n'), ((4854, 4899), 'numpy.zeros', 'np.zeros', (['(theta.shape[0], 3, 3)'], {'dtype': 'float'}), '((theta.shape[0], 3, 3), dtype=float)\n', (4862, 4899), True, 'import numpy as np\n'), ((4916, 4929), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4922, 4929), True, 'import numpy as np\n'), ((4946, 4959), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4952, 4959), True, 'import numpy as np\n'), ((568, 599), 'numpy.all', 'np.all', (['(point > bounding_box[0])'], {}), '(point > bounding_box[0])\n', (574, 599), True, 'import numpy as np\n'), ((604, 635), 'numpy.all', 'np.all', (['(point < bounding_box[1])'], {}), '(point < bounding_box[1])\n', 
(610, 635), True, 'import numpy as np\n'), ((2789, 2812), 'numpy.any', 'np.any', (['(point < bbox[0])'], {}), '(point < bbox[0])\n', (2795, 2812), True, 'import numpy as np\n'), ((2816, 2839), 'numpy.any', 'np.any', (['(point > bbox[1])'], {}), '(point > bbox[1])\n', (2822, 2839), True, 'import numpy as np\n'), ((4594, 4613), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (4602, 4613), True, 'import numpy as np\n'), ((2607, 2658), 'numpy.einsum', 'np.einsum', (['"""j, ij -> i"""', 'drag_vector_canvas', 'vector'], {}), "('j, ij -> i', drag_vector_canvas, vector)\n", (2616, 2658), True, 'import numpy as np\n'), ((4799, 4816), 'numpy.asarray', 'np.asarray', (['theta'], {}), '(theta)\n', (4809, 4816), True, 'import numpy as np\n')] |
# 分析黑魔法防御课界面
import cv2
import sys
sys.path.append(r"C:\\Users\\SAT") # 添加自定义包的路径
from UniversalAutomaticAnswer.conf.confImp import get_yaml_file
from UniversalAutomaticAnswer.screen.screenImp import ScreenImp # 加入自定义包
from UniversalAutomaticAnswer.ocr.ocrImp import OCRImp
from UniversalAutomaticAnswer.util.filter import filterQuestion, filterLine, filterPersonState
from paddleocr import PaddleOCR
# 获取配置文件
conf_path = 'conf/conf.yml'
conf_data = get_yaml_file(conf_path)
# 初始化ocr模型
ocr = OCRImp(conf_data)
# 初始化屏幕操作模块
screen = ScreenImp(conf_data)
# left click
import win32api
import win32con
def left_click(x,y,times=4):
win32api.SetCursorPos((x,y))
import time
while times:
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,x,y,0,0)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,x,y,0,0)
times -= 1
walk_coordinate = [[330,640],[1260,630],[740,550]] # 左 右 中
card_coordinate = [[522,820],[695,798],[838,821],[987,818],[1185,830]] # ~ 1 2 3 4
# charms_coordinate = [[200,770,300,855],[630,700,676,777],[765,690,818,778],[910,700,960,775],[1060,700,1108,786],[556, 878,637, 922]] # states: steps 1 2 3 4 HP
# copy_coordinate = [[540,400,650,500],[980,345,1090,445],[1160,320,1260,420]]
win_rect, img= screen.get_screenshot()
# img_path = './img/harry_charmsclass.png'
# img = cv2.imread(img_path)
# img_steps = img[770:855,200:300]
# img1 = img[700:800,600:700]
# img2 = img[690:778,765:818] # 点击 850 716
# img3 = img[700:775,910:960]
# img4 = img[700:786,1060:1108]
# img5 = img[878:932,556:637] # 蓝条
# walk_coordinate = [[850,716],[846,712],[854,720]]
# card_coordinate = [[522,820],[695,798],[838,821],[987,818],[1122,830]] # ~ 1 2 3 4
import matplotlib.pyplot as plt
# result = ocr.ocr(img, det=True, cls=True)
# print(result)
# plt.imshow(img)
# plt.show()
# """
def is_start(img, str_start):
img_start = screen.get_startMatchBtn(img)
result_start = ocr.ocr(img_start)
content_start = ocr.ocr_content(result_start)
content_start = filterLine(content_start)
if len(content_start)>0 and content_start[0] == str_start:
time.sleep(5)
x, y = 1300, 840
left_click(win_rect[0]+x,win_rect[1]+y,2)
return True
return False
count_steps = 0
epoch_num = 3
while True:
if epoch_num == 0:
break
import time
time.sleep(2)
win_rect, img= screen.get_screenshot()
# img_path = './img/harry_darkclass3.png' #
# img = cv2.imread(img_path)
# print(img.shape)
# img = img[875:920,1185:1300] # [1185, 875, 1300, 920] 点击继续
# img = img[830:880, 1234:1414] # [1234,830,1414,880] 匹配上课
# 识别匹配上课
flag1 = is_start(img, '匹配上课')
flag2 = is_start(img, '学院活动匹配')
if flag1 or flag2: # 识别到了就跳过,重新截图
epoch_num -= 1
continue
# 识别继续按钮
img_continue = img[875:920,1185:1300]
result_continue = ocr.ocr(img_continue)
content_continue = ocr.ocr_content(result_continue)
content_continue = filterLine(content_continue)
if len(content_continue)>0 and content_continue[0] == '点击继续':
x, y = 1200, 890
left_click(win_rect[0]+x,win_rect[1]+y,2)
time.sleep(1)
continue
img_steps, img_1, img_2, img_3, img_4, img_5 = '-1', '15', '15', '15', '15', '11'
img_steps = img[800:850, 200:265]
img_1 = img[710:777, 615:665] # 1
img_2 = img[710:777, 770:820] # 2
img_3 = img[710:777, 920:970] # 3
img_4 = img[720:787, 1060:1110] # 4
img_nextcard = img[768:816, 1205:1246,::-1] # 下一张卡
img_5 = img[878:932,556:637] # 蓝条
result_steps = ocr.ocr(img_steps)
result_1 = ocr.ocr(img_1)
result_2 = ocr.ocr(img_2)
result_3 = ocr.ocr(img_3)
result_4 = ocr.ocr(img_4)
result_nextcard = ocr.ocr(img_nextcard)
result_5 = ocr.ocr(img_5)
result_steps = ocr.ocr_content(result_steps)
result_steps = filterLine(result_steps)
result_1 = ocr.ocr_content(result_1)
result_1 = filterLine(result_1)
result_2 = ocr.ocr_content(result_2)
result_2 = filterLine(result_2)
result_3 = ocr.ocr_content(result_3)
result_3 = filterLine(result_3)
result_4 = ocr.ocr_content(result_4)
result_4 = filterLine(result_4)
result_5 = ocr.ocr_content(result_5)
result_5 = filterLine(result_5)
if (result_steps!=None) and len(result_steps) > 0 and result_steps[0].isdigit():
result_steps = int(result_steps[0][0][0])
else:
result_steps = 0
if (result_1!=None) and len(result_1) > 0 and result_1[0].isdigit():
result_1 = int(result_1[0][0][0])
else:
result_1 = 15
if (result_2!=None) and len(result_2) > 0 and result_2[0].isdigit():
result_2 = int(result_2[0][0][0])
else:
result_2 = 15
if (result_3!=None) and len(result_3) > 0 and result_3[0].isdigit():
result_3 = int(result_3[0][0][0])
else:
result_3 = 15
if (result_4!=None) and len(result_4) > 0 and result_4[0].isdigit():
result_4 = int(result_4[0][0][0])
else:
result_4 = 15
if (result_5!=None) and len(result_5) > 0 and result_5[0].isdigit():
result_5 = int(result_5[0][0][0])
else:
result_5 = -1
fee = [result_1,result_2,result_3,result_4]
idx = fee.index(min(fee))
import random
# idx = random.randint(0, 3)
# if fee[idx]>7:
# continue
walk_idx = random.randint(0, 2)
x_walk, y_walk = walk_coordinate[walk_idx][0], walk_coordinate[walk_idx][1]
x_0, y_0 = card_coordinate[0][0], card_coordinate[0][1] # 伙伴卡
x, y = card_coordinate[idx+1][0], card_coordinate[idx+1][1]
if result_5 == -1 or result_5 > 5:
if count_steps % 3 == 0:
left_click(win_rect[0]+x_walk,win_rect[1]+y_walk,4) # 走一步
left_click(win_rect[0]+x_0,win_rect[1]+y_0,4) # 点击伙伴卡
count_steps += 1
left_click(win_rect[0]+x,win_rect[1]+y,4) # 点击目标卡
print('所剩步数:',result_steps)
print('卡1费用:',result_1)
print('卡2费用:',result_2)
print('卡3费用:',result_3)
print('卡4费用:',result_4)
print('剩余费用:',result_5)
print('点击位置:', x, y)
# """
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# import matplotlib.pyplot as plt
# plt.imshow(img)
# plt.show()
# cv2.imwrite('./img/harry_charmsclass.png',img) | [
"UniversalAutomaticAnswer.ocr.ocrImp.OCRImp",
"UniversalAutomaticAnswer.conf.confImp.get_yaml_file",
"win32api.SetCursorPos",
"time.sleep",
"win32api.mouse_event",
"UniversalAutomaticAnswer.screen.screenImp.ScreenImp",
"UniversalAutomaticAnswer.util.filter.filterLine",
"sys.path.append",
"random.ran... | [((36, 73), 'sys.path.append', 'sys.path.append', (['"""C:\\\\\\\\Users\\\\\\\\SAT"""'], {}), "('C:\\\\\\\\Users\\\\\\\\SAT')\n", (51, 73), False, 'import sys\n'), ((452, 476), 'UniversalAutomaticAnswer.conf.confImp.get_yaml_file', 'get_yaml_file', (['conf_path'], {}), '(conf_path)\n', (465, 476), False, 'from UniversalAutomaticAnswer.conf.confImp import get_yaml_file\n'), ((499, 516), 'UniversalAutomaticAnswer.ocr.ocrImp.OCRImp', 'OCRImp', (['conf_data'], {}), '(conf_data)\n', (505, 516), False, 'from UniversalAutomaticAnswer.ocr.ocrImp import OCRImp\n'), ((539, 559), 'UniversalAutomaticAnswer.screen.screenImp.ScreenImp', 'ScreenImp', (['conf_data'], {}), '(conf_data)\n', (548, 559), False, 'from UniversalAutomaticAnswer.screen.screenImp import ScreenImp\n'), ((639, 668), 'win32api.SetCursorPos', 'win32api.SetCursorPos', (['(x, y)'], {}), '((x, y))\n', (660, 668), False, 'import win32api\n'), ((2008, 2033), 'UniversalAutomaticAnswer.util.filter.filterLine', 'filterLine', (['content_start'], {}), '(content_start)\n', (2018, 2033), False, 'from UniversalAutomaticAnswer.util.filter import filterQuestion, filterLine, filterPersonState\n'), ((2332, 2345), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2342, 2345), False, 'import time\n'), ((2969, 2997), 'UniversalAutomaticAnswer.util.filter.filterLine', 'filterLine', (['content_continue'], {}), '(content_continue)\n', (2979, 2997), False, 'from UniversalAutomaticAnswer.util.filter import filterQuestion, filterLine, filterPersonState\n'), ((3854, 3878), 'UniversalAutomaticAnswer.util.filter.filterLine', 'filterLine', (['result_steps'], {}), '(result_steps)\n', (3864, 3878), False, 'from UniversalAutomaticAnswer.util.filter import filterQuestion, filterLine, filterPersonState\n'), ((3935, 3955), 'UniversalAutomaticAnswer.util.filter.filterLine', 'filterLine', (['result_1'], {}), '(result_1)\n', (3945, 3955), False, 'from UniversalAutomaticAnswer.util.filter import filterQuestion, filterLine, 
filterPersonState\n'), ((4012, 4032), 'UniversalAutomaticAnswer.util.filter.filterLine', 'filterLine', (['result_2'], {}), '(result_2)\n', (4022, 4032), False, 'from UniversalAutomaticAnswer.util.filter import filterQuestion, filterLine, filterPersonState\n'), ((4089, 4109), 'UniversalAutomaticAnswer.util.filter.filterLine', 'filterLine', (['result_3'], {}), '(result_3)\n', (4099, 4109), False, 'from UniversalAutomaticAnswer.util.filter import filterQuestion, filterLine, filterPersonState\n'), ((4166, 4186), 'UniversalAutomaticAnswer.util.filter.filterLine', 'filterLine', (['result_4'], {}), '(result_4)\n', (4176, 4186), False, 'from UniversalAutomaticAnswer.util.filter import filterQuestion, filterLine, filterPersonState\n'), ((4243, 4263), 'UniversalAutomaticAnswer.util.filter.filterLine', 'filterLine', (['result_5'], {}), '(result_5)\n', (4253, 4263), False, 'from UniversalAutomaticAnswer.util.filter import filterQuestion, filterLine, filterPersonState\n'), ((5353, 5373), 'random.randint', 'random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (5367, 5373), False, 'import random\n'), ((709, 772), 'win32api.mouse_event', 'win32api.mouse_event', (['win32con.MOUSEEVENTF_LEFTDOWN', 'x', 'y', '(0)', '(0)'], {}), '(win32con.MOUSEEVENTF_LEFTDOWN, x, y, 0, 0)\n', (729, 772), False, 'import win32api\n'), ((777, 838), 'win32api.mouse_event', 'win32api.mouse_event', (['win32con.MOUSEEVENTF_LEFTUP', 'x', 'y', '(0)', '(0)'], {}), '(win32con.MOUSEEVENTF_LEFTUP, x, y, 0, 0)\n', (797, 838), False, 'import win32api\n'), ((2105, 2118), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (2115, 2118), False, 'import time\n'), ((3147, 3160), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3157, 3160), False, 'import time\n')] |
import datetime
import uuid
from typing import Optional
from models.base import CustomBaseModel
class ConvertVideoIn(CustomBaseModel):
source_path: str
destination_path: str
resolution: str
codec_name: Optional[str] = None
display_aspect_ratio: Optional[str] = None
fps: Optional[int] = None
class ConvertVideoCreate(ConvertVideoIn):
id: uuid.UUID = uuid.uuid4()
created_at: datetime.datetime = datetime.datetime.now()
class ConvertVideoOut(CustomBaseModel):
result: bool
| [
"datetime.datetime.now",
"uuid.uuid4"
] | [((383, 395), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (393, 395), False, 'import uuid\n'), ((432, 455), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (453, 455), False, 'import datetime\n')] |
import logging
import uuid
from typing import Iterable
import numpy as np
import pyaudio
from cltl.backend.api.util import raw_frames_to_np
from cltl.backend.spi.audio import AudioSource
logger = logging.getLogger(__name__)
class PyAudioSource(AudioSource):
BUFFER = 8
def __init__(self, rate, channels, frame_size):
self.id = str(uuid.uuid4())[:6]
self._rate = rate
self._channels = channels
self._frame_size = frame_size
self._pyaudio = pyaudio.PyAudio()
self._active = False
self._start_time = None
self._time = None
@property
def audio(self) -> Iterable[np.array]:
return raw_frames_to_np(self, self.frame_size, self.channels, self.depth)
@property
def rate(self) -> int:
return self._rate
@property
def channels(self) -> int:
return self._channels
@property
def frame_size(self) -> int:
return self._frame_size
@property
def depth(self) -> int:
return 2
@property
def active(self):
return self._active
@property
def time(self):
return self._mic_time - self._start_time
@property
def _mic_time(self):
return self._time
@_mic_time.setter
def _mic_time(self, stream_time):
advanced = stream_time - self._time
if advanced > self._stream.get_input_latency():
logger.exception("Latency exceeded buffer (%.4fsec) - dropped frames: %.4fsec",
self._stream.get_input_latency(), advanced)
self._time = stream_time
def stop(self):
self._active = False
logger.debug("Stopped microphone (%s)", self.id)
def __enter__(self):
self._stream = self._pyaudio.open(self._rate, self._channels, pyaudio.paInt16, input=True,
frames_per_buffer=self.BUFFER * self._frame_size)
self._active = True
self._start_time = self._stream.get_time()
self._time = self._start_time
logger.debug("Opened microphone (%s) with rate: %s, channels: %s, frame_size: %s",
self.id, self._rate, self._channels, self._frame_size)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._active:
self._active = False
self._stream.close()
logger.debug("Closed microphone (%s)", self.id)
else:
logger.warning("Ignored close microphone (%s)", self.id)
def __iter__(self):
return self
def __next__(self):
if not self._active:
raise StopIteration()
data = self._stream.read(self._frame_size, exception_on_overflow=False)
self._mic_time = self._stream.get_time()
return data | [
"logging.getLogger",
"cltl.backend.api.util.raw_frames_to_np",
"pyaudio.PyAudio",
"uuid.uuid4"
] | [((199, 226), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (216, 226), False, 'import logging\n'), ((495, 512), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (510, 512), False, 'import pyaudio\n'), ((673, 739), 'cltl.backend.api.util.raw_frames_to_np', 'raw_frames_to_np', (['self', 'self.frame_size', 'self.channels', 'self.depth'], {}), '(self, self.frame_size, self.channels, self.depth)\n', (689, 739), False, 'from cltl.backend.api.util import raw_frames_to_np\n'), ((353, 365), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (363, 365), False, 'import uuid\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
# Copyright 2017 ROBOTIS CO., LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
# Author: <NAME> (Leon)
#
# ********* Sync Read and Sync Write Example *********
#
#
# Available Dynamixel model on this example : All models using Protocol 2.0
# This example is tested with two Dynamixel PRO 54-200, and an USB2DYNAMIXEL
# Be sure that Dynamixel PRO properties are already set as %% ID : 1 / Baudnum : 1 (Baudrate : 57600)
#
import os
if os.name == 'nt':
import msvcrt
def getch():
return msvcrt.getch().decode()
else:
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
def getch():
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
from dynamixel_sdk import * # Uses Dynamixel SDK library
# Control table address
ADDR_PRO_TORQUE_ENABLE = 64 # Control table address is different in Dynamixel model
ADDR_PRO_GOAL_POSITION = 116
ADDR_PRO_PRESENT_POSITION = 132
# Data Byte Length
LEN_PRO_GOAL_POSITION = 4
LEN_PRO_PRESENT_POSITION = 4
# Protocol version
PROTOCOL_VERSION = 2.0 # See which protocol version is used in the Dynamixel
# Default setting
DXL1_ID = 1 # Dynamixel#1 ID : 1
DXL2_ID = 2 # Dynamixel#1 ID : 2
BAUDRATE = 57600 # Dynamixel default baudrate : 57600
DEVICENAME = '/dev/ttyUSB0' # Check which port is being used on your controller
# ex) Windows: "COM1" Linux: "/dev/ttyUSB0" Mac: "/dev/tty.usbserial-*"
TORQUE_ENABLE = 1 # Value for enabling the torque
TORQUE_DISABLE = 0 # Value for disabling the torque
DXL_MINIMUM_POSITION_VALUE = 100 # Dynamixel will rotate between this value
DXL_MAXIMUM_POSITION_VALUE = 4000 # and this value (note that the Dynamixel would not move when the position value is out of movable range. Check e-manual about the range of the Dynamixel you use.)
DXL_MOVING_STATUS_THRESHOLD = 20 # Dynamixel moving status threshold
index = 0
dxl_goal_position = [DXL_MINIMUM_POSITION_VALUE, DXL_MAXIMUM_POSITION_VALUE] # Goal position
# Initialize PortHandler instance
# Set the port path
# Get methods and members of PortHandlerLinux or PortHandlerWindows
portHandler = PortHandler(DEVICENAME)
# Initialize PacketHandler instance
# Set the protocol version
# Get methods and members of Protocol1PacketHandler or Protocol2PacketHandler
packetHandler = PacketHandler(PROTOCOL_VERSION)
# Initialize GroupSyncWrite instance
groupSyncWrite = GroupSyncWrite(portHandler, packetHandler, ADDR_PRO_GOAL_POSITION, LEN_PRO_GOAL_POSITION)
# Initialize GroupSyncRead instace for Present Position
groupSyncRead = GroupSyncRead(portHandler, packetHandler, ADDR_PRO_PRESENT_POSITION, LEN_PRO_PRESENT_POSITION)
# Open port
if portHandler.openPort():
print("Succeeded to open the port")
else:
print("Failed to open the port")
print("Press any key to terminate...")
getch()
quit()
# Set port baudrate
if portHandler.setBaudRate(BAUDRATE):
print("Succeeded to change the baudrate")
else:
print("Failed to change the baudrate")
print("Press any key to terminate...")
getch()
quit()
# Enable Dynamixel#1 Torque
dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, DXL1_ID, ADDR_PRO_TORQUE_ENABLE, TORQUE_ENABLE)
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
print("%s" % packetHandler.getRxPacketError(dxl_error))
else:
print("Dynamixel#%d has been successfully connected" % DXL1_ID)
# Enable Dynamixel#2 Torque
dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, DXL2_ID, ADDR_PRO_TORQUE_ENABLE, TORQUE_ENABLE)
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
print("%s" % packetHandler.getRxPacketError(dxl_error))
else:
print("Dynamixel#%d has been successfully connected" % DXL2_ID)
# Add parameter storage for Dynamixel#1 present position value
dxl_addparam_result = groupSyncRead.addParam(DXL1_ID)
if dxl_addparam_result != True:
print("[ID:%03d] groupSyncRead addparam failed" % DXL1_ID)
quit()
# Add parameter storage for Dynamixel#2 present position value
dxl_addparam_result = groupSyncRead.addParam(DXL2_ID)
if dxl_addparam_result != True:
print("[ID:%03d] groupSyncRead addparam failed" % DXL2_ID)
quit()
while 1:
print("Press any key to continue! (or press ESC to quit!)")
if getch() == chr(0x1b):
break
# Allocate goal position value into byte array
param_goal_position = [DXL_LOBYTE(DXL_LOWORD(dxl_goal_position[index])), DXL_HIBYTE(DXL_LOWORD(dxl_goal_position[index])), DXL_LOBYTE(DXL_HIWORD(dxl_goal_position[index])), DXL_HIBYTE(DXL_HIWORD(dxl_goal_position[index]))]
# Add Dynamixel#1 goal position value to the Syncwrite parameter storage
dxl_addparam_result = groupSyncWrite.addParam(DXL1_ID, param_goal_position)
if dxl_addparam_result != True:
print("[ID:%03d] groupSyncWrite addparam failed" % DXL1_ID)
quit()
# Add Dynamixel#2 goal position value to the Syncwrite parameter storage
dxl_addparam_result = groupSyncWrite.addParam(DXL2_ID, param_goal_position)
if dxl_addparam_result != True:
print("[ID:%03d] groupSyncWrite addparam failed" % DXL2_ID)
quit()
# Syncwrite goal position
dxl_comm_result = groupSyncWrite.txPacket()
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
# Clear syncwrite parameter storage
groupSyncWrite.clearParam()
while 1:
# Syncread present position
dxl_comm_result = groupSyncRead.txRxPacket()
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
# Check if groupsyncread data of Dynamixel#1 is available
dxl_getdata_result = groupSyncRead.isAvailable(DXL1_ID, ADDR_PRO_PRESENT_POSITION, LEN_PRO_PRESENT_POSITION)
if dxl_getdata_result != True:
print("[ID:%03d] groupSyncRead getdata failed" % DXL1_ID)
quit()
# Check if groupsyncread data of Dynamixel#2 is available
dxl_getdata_result = groupSyncRead.isAvailable(DXL2_ID, ADDR_PRO_PRESENT_POSITION, LEN_PRO_PRESENT_POSITION)
if dxl_getdata_result != True:
print("[ID:%03d] groupSyncRead getdata failed" % DXL2_ID)
quit()
# Get Dynamixel#1 present position value
dxl1_present_position = groupSyncRead.getData(DXL1_ID, ADDR_PRO_PRESENT_POSITION, LEN_PRO_PRESENT_POSITION)
# Get Dynamixel#2 present position value
dxl2_present_position = groupSyncRead.getData(DXL2_ID, ADDR_PRO_PRESENT_POSITION, LEN_PRO_PRESENT_POSITION)
print("[ID:%03d] GoalPos:%03d PresPos:%03d\t[ID:%03d] GoalPos:%03d PresPos:%03d" % (DXL1_ID, dxl_goal_position[index], dxl1_present_position, DXL2_ID, dxl_goal_position[index], dxl2_present_position))
if not ((abs(dxl_goal_position[index] - dxl1_present_position) > DXL_MOVING_STATUS_THRESHOLD) and (abs(dxl_goal_position[index] - dxl2_present_position) > DXL_MOVING_STATUS_THRESHOLD)):
break
# Change goal position
if index == 0:
index = 1
else:
index = 0
# Clear syncread parameter storage
groupSyncRead.clearParam()
# Disable Dynamixel#1 Torque
dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, DXL1_ID, ADDR_PRO_TORQUE_ENABLE, TORQUE_DISABLE)
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
print("%s" % packetHandler.getRxPacketError(dxl_error))
# Disable Dynamixel#2 Torque
dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, DXL2_ID, ADDR_PRO_TORQUE_ENABLE, TORQUE_DISABLE)
if dxl_comm_result != COMM_SUCCESS:
print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
print("%s" % packetHandler.getRxPacketError(dxl_error))
# Close port
portHandler.closePort()
| [
"sys.stdin.fileno",
"termios.tcsetattr",
"msvcrt.getch",
"termios.tcgetattr",
"sys.stdin.read"
] | [((1293, 1311), 'sys.stdin.fileno', 'sys.stdin.fileno', ([], {}), '()\n', (1309, 1311), False, 'import sys, tty, termios\n'), ((1331, 1352), 'termios.tcgetattr', 'termios.tcgetattr', (['fd'], {}), '(fd)\n', (1348, 1352), False, 'import sys, tty, termios\n'), ((1443, 1460), 'sys.stdin.read', 'sys.stdin.read', (['(1)'], {}), '(1)\n', (1457, 1460), False, 'import sys, tty, termios\n'), ((1490, 1544), 'termios.tcsetattr', 'termios.tcsetattr', (['fd', 'termios.TCSADRAIN', 'old_settings'], {}), '(fd, termios.TCSADRAIN, old_settings)\n', (1507, 1544), False, 'import sys, tty, termios\n'), ((1225, 1239), 'msvcrt.getch', 'msvcrt.getch', ([], {}), '()\n', (1237, 1239), False, 'import msvcrt\n'), ((1406, 1424), 'sys.stdin.fileno', 'sys.stdin.fileno', ([], {}), '()\n', (1422, 1424), False, 'import sys, tty, termios\n')] |
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
def plot_1d(X_train, Y_train, X_test, Y_test, mean=None, std=None, str_figure=None, show_fig=True):
plt.rc('text', usetex=True)
fig = plt.figure(figsize=(8, 6))
ax = fig.gca()
ax.plot(X_test, Y_test, linewidth=4)
if mean is not None:
line, = ax.plot(X_test, mean, linewidth=4)
if mean is not None and std is not None:
ax.fill_between(X_test.flatten(), mean - 1.96 * std, mean + 1.96 * std, alpha=0.25, color=line.get_color())
ax.plot(X_train, Y_train, 'x', linestyle='none', markersize=10, mew=4)
ax.set_xlabel('$x$', fontsize=32)
ax.set_ylabel('$y$', fontsize=32)
ax.tick_params(labelsize=24)
ax.set_xlim([np.min(X_test), np.max(X_test)])
ax.grid()
plt.tight_layout()
if str_figure is not None:
path_figures = '../figures'
if not os.path.exists(path_figures):
os.mkdir(path_figures)
plt.savefig(
os.path.join(path_figures, str_figure + '.pdf'),
format='pdf',
transparent=True
)
if show_fig:
plt.show()
plt.close('all')
def get_parser():
parser = argparse.ArgumentParser(description='')
parser.add_argument('-f', '--function', type=str)
args = parser.parse_args()
return parser, args
def compute_nll(preds_mu, preds_sigma, X_test, Y_test, X_train):
assert len(preds_mu.shape) == len(preds_sigma.shape) == len(X_test.shape) == len(Y_test.shape) == len(X_train.shape) == 1
assert preds_mu.shape[0] == preds_sigma.shape[0] == X_test.shape[0] == Y_test.shape[0]
nll = 0.0
for mu, sigma, x, y in zip(preds_mu, preds_sigma, X_test, Y_test):
if np.any(np.abs(X_train - x) < 0.025):
continue
log_pdf = norm.logpdf(y, loc=mu, scale=sigma)
nll -= log_pdf
nll /= preds_mu.shape[0]
return nll
def compute_kl(preds_mu, preds_sigma, mean_gp, std_gp):
assert len(preds_mu.shape) == len(preds_sigma.shape) == len(mean_gp.shape) == len(std_gp.shape) == 1
assert preds_mu.shape[0] == preds_sigma.shape[0] == mean_gp.shape[0] == std_gp.shape[0]
kl = 0.0
for mu, sigma, mu_gp, sigma_gp in zip(preds_mu, preds_sigma, mean_gp, std_gp):
cur_kl = np.log(sigma_gp / (sigma + 1e-7)) + (sigma**2 + (mu - mu_gp)**2) / (2 * sigma_gp**2) - 1 / 2
kl = cur_kl
kl /= preds_mu.shape[0]
return kl
if __name__ == '__main__':
pass
| [
"os.path.exists",
"numpy.abs",
"argparse.ArgumentParser",
"numpy.log",
"os.path.join",
"numpy.max",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"scipy.stats.norm.logpdf",
"matplotlib.pyplot.tight_layout",
"numpy.min",
"os.mkdir",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.show"
... | [((212, 239), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (218, 239), True, 'import matplotlib.pyplot as plt\n'), ((251, 277), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (261, 277), True, 'import matplotlib.pyplot as plt\n'), ((833, 851), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (849, 851), True, 'import matplotlib.pyplot as plt\n'), ((1191, 1207), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (1200, 1207), True, 'import matplotlib.pyplot as plt\n'), ((1240, 1279), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""""""'}), "(description='')\n", (1263, 1279), False, 'import argparse\n'), ((1175, 1185), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1183, 1185), True, 'import matplotlib.pyplot as plt\n'), ((1848, 1883), 'scipy.stats.norm.logpdf', 'norm.logpdf', (['y'], {'loc': 'mu', 'scale': 'sigma'}), '(y, loc=mu, scale=sigma)\n', (1859, 1883), False, 'from scipy.stats import norm\n'), ((781, 795), 'numpy.min', 'np.min', (['X_test'], {}), '(X_test)\n', (787, 795), True, 'import numpy as np\n'), ((797, 811), 'numpy.max', 'np.max', (['X_test'], {}), '(X_test)\n', (803, 811), True, 'import numpy as np\n'), ((936, 964), 'os.path.exists', 'os.path.exists', (['path_figures'], {}), '(path_figures)\n', (950, 964), False, 'import os\n'), ((978, 1000), 'os.mkdir', 'os.mkdir', (['path_figures'], {}), '(path_figures)\n', (986, 1000), False, 'import os\n'), ((1035, 1082), 'os.path.join', 'os.path.join', (['path_figures', "(str_figure + '.pdf')"], {}), "(path_figures, str_figure + '.pdf')\n", (1047, 1082), False, 'import os\n'), ((1779, 1798), 'numpy.abs', 'np.abs', (['(X_train - x)'], {}), '(X_train - x)\n', (1785, 1798), True, 'import numpy as np\n'), ((2322, 2356), 'numpy.log', 'np.log', (['(sigma_gp / (sigma + 1e-07))'], {}), '(sigma_gp / (sigma + 1e-07))\n', (2328, 
2356), True, 'import numpy as np\n')] |
import random
import cocos
from cocos.tiles import TileSet, RectCell, RectMapLayer
from cocos.director import director
from cocos.layer.scrolling import ScrollingManager
import pyglet
from game import Game
from views import WorldMap, CharacterView2
class MainLayer(cocos.layer.Layer):
is_event_handler = True
def __init__(self):
super(MainLayer, self).__init__()
# World/map management
self.seed = random.Random()
self.game = Game(seed=self.seed, world_width=30, world_height=15)
# Children
scroller = ScrollingManager()
scroller.add(WorldMap(self.game.world))
for character in self.game.characters:
scroller.add(CharacterView2(character))
self.add(scroller)
self.schedule(self.update)
def update(self, dt):
self.game.update(dt)
def on_key_press(self, symbol, modifiers):
print("Pressed " + str(symbol))
if __name__ == '__main__':
director.init(width=800, height=600, resizable=False, autoscale=False)
director.set_show_FPS(True)
main_layer = MainLayer()
main_scene = cocos.scene.Scene(main_layer)
director.run(main_scene)
| [
"cocos.scene.Scene",
"random.Random",
"cocos.director.director.run",
"views.CharacterView2",
"cocos.director.director.init",
"views.WorldMap",
"game.Game",
"cocos.layer.scrolling.ScrollingManager",
"cocos.director.director.set_show_FPS"
] | [((977, 1047), 'cocos.director.director.init', 'director.init', ([], {'width': '(800)', 'height': '(600)', 'resizable': '(False)', 'autoscale': '(False)'}), '(width=800, height=600, resizable=False, autoscale=False)\n', (990, 1047), False, 'from cocos.director import director\n'), ((1052, 1079), 'cocos.director.director.set_show_FPS', 'director.set_show_FPS', (['(True)'], {}), '(True)\n', (1073, 1079), False, 'from cocos.director import director\n'), ((1131, 1160), 'cocos.scene.Scene', 'cocos.scene.Scene', (['main_layer'], {}), '(main_layer)\n', (1148, 1160), False, 'import cocos\n'), ((1166, 1190), 'cocos.director.director.run', 'director.run', (['main_scene'], {}), '(main_scene)\n', (1178, 1190), False, 'from cocos.director import director\n'), ((435, 450), 'random.Random', 'random.Random', ([], {}), '()\n', (448, 450), False, 'import random\n'), ((471, 524), 'game.Game', 'Game', ([], {'seed': 'self.seed', 'world_width': '(30)', 'world_height': '(15)'}), '(seed=self.seed, world_width=30, world_height=15)\n', (475, 524), False, 'from game import Game\n'), ((572, 590), 'cocos.layer.scrolling.ScrollingManager', 'ScrollingManager', ([], {}), '()\n', (588, 590), False, 'from cocos.layer.scrolling import ScrollingManager\n'), ((612, 637), 'views.WorldMap', 'WorldMap', (['self.game.world'], {}), '(self.game.world)\n', (620, 637), False, 'from views import WorldMap, CharacterView2\n'), ((711, 736), 'views.CharacterView2', 'CharacterView2', (['character'], {}), '(character)\n', (725, 736), False, 'from views import WorldMap, CharacterView2\n')] |
from flask import Blueprint
blueprint = Blueprint('board', __name__)
from rboard.board import routes
| [
"flask.Blueprint"
] | [((41, 69), 'flask.Blueprint', 'Blueprint', (['"""board"""', '__name__'], {}), "('board', __name__)\n", (50, 69), False, 'from flask import Blueprint\n')] |
from src.commons.big_query.copy_job_async.result_check.result_check_request import \
ResultCheckRequest
from src.commons.big_query.copy_job_async.task_creator import TaskCreator
class BigQueryJobReference(object):
def __init__(self, project_id, job_id, location):
self.project_id = project_id
self.job_id = job_id
self.location = location
def __str__(self):
return "BigQueryJobReference(projectId:{}, job_id:{}, location: {})" \
.format(self.project_id, self.job_id, self.location)
def __repr__(self):
return self.__str__()
def __eq__(self, other):
return type(other) is BigQueryJobReference \
and self.project_id == other.project_id \
and self.job_id == other.job_id \
and self.location == other.location
def __ne__(self, other):
return not (self == other)
def create_post_copy_action(self, copy_job_request):
TaskCreator.create_copy_job_result_check(
ResultCheckRequest(
task_name_suffix=copy_job_request.task_name_suffix,
copy_job_type_id=copy_job_request.copy_job_type_id,
job_reference=self,
retry_count=copy_job_request.retry_count,
post_copy_action_request=copy_job_request.post_copy_action_request
)
)
def to_json(self):
return dict(project_id=self.project_id,
job_id=self.job_id,
location=self.location)
@classmethod
def from_json(cls, json):
return BigQueryJobReference(project_id=json["project_id"],
job_id=json["job_id"],
location=json["location"])
| [
"src.commons.big_query.copy_job_async.result_check.result_check_request.ResultCheckRequest"
] | [((1021, 1286), 'src.commons.big_query.copy_job_async.result_check.result_check_request.ResultCheckRequest', 'ResultCheckRequest', ([], {'task_name_suffix': 'copy_job_request.task_name_suffix', 'copy_job_type_id': 'copy_job_request.copy_job_type_id', 'job_reference': 'self', 'retry_count': 'copy_job_request.retry_count', 'post_copy_action_request': 'copy_job_request.post_copy_action_request'}), '(task_name_suffix=copy_job_request.task_name_suffix,\n copy_job_type_id=copy_job_request.copy_job_type_id, job_reference=self,\n retry_count=copy_job_request.retry_count, post_copy_action_request=\n copy_job_request.post_copy_action_request)\n', (1039, 1286), False, 'from src.commons.big_query.copy_job_async.result_check.result_check_request import ResultCheckRequest\n')] |
from django.contrib import admin, messages
from django.shortcuts import render
from django.utils.translation import gettext_lazy as _
from inline_actions.actions import DefaultActionsMixin, ViewAction
from inline_actions.admin import InlineActionsMixin, InlineActionsModelAdminMixin
from . import forms
from .models import Article, Author, AuthorProxy
class UnPublishActionsMixin(object):
def get_inline_actions(self, request, obj=None):
actions = super(UnPublishActionsMixin, self).get_inline_actions(request, obj)
if obj:
if obj.status == Article.DRAFT:
actions.append('publish')
elif obj.status == Article.PUBLISHED:
actions.append('unpublish')
return actions
def publish(self, request, obj, parent_obj=None):
obj.status = Article.PUBLISHED
obj.save()
messages.info(request, _("Article published."))
publish.short_description = _("Publish") # type: ignore
def unpublish(self, request, obj, parent_obj=None):
obj.status = Article.DRAFT
obj.save()
messages.info(request, _("Article unpublished."))
unpublish.short_description = _("Unpublish") # type: ignore
class TogglePublishActionsMixin(object):
def get_inline_actions(self, request, obj=None):
actions = super(TogglePublishActionsMixin, self).get_inline_actions(
request=request, obj=obj
)
actions.append('toggle_publish')
return actions
def toggle_publish(self, request, obj, parent_obj=None):
if obj.status == Article.DRAFT:
obj.status = Article.PUBLISHED
else:
obj.status = Article.DRAFT
obj.save()
status = 'unpublished' if obj.status == Article.DRAFT else 'published'
messages.info(request, _("Article {}.".format(status)))
def get_toggle_publish_label(self, obj):
label = 'publish' if obj.status == Article.DRAFT else 'unpublish'
return 'Toggle {}'.format(label)
def get_toggle_publish_css(self, obj):
return 'button object-tools' if obj.status == Article.DRAFT else 'default'
class ChangeTitleActionsMixin(object):
def get_inline_actions(self, request, obj=None):
actions = super(ChangeTitleActionsMixin, self).get_inline_actions(request, obj)
actions.append('change_title')
return actions
def change_title(self, request, obj, parent_obj=None):
# explictly check whether the submit button has been pressed
if '_save' in request.POST:
form = forms.ChangeTitleForm(request.POST, instance=obj)
form.save()
return None # return back to list view
elif '_back' in request.POST:
return None # return back to list view
else:
form = forms.ChangeTitleForm(instance=obj)
return render(request, 'change_title.html', context={'form': form})
class ArticleInline(
DefaultActionsMixin,
UnPublishActionsMixin,
TogglePublishActionsMixin,
InlineActionsMixin,
admin.TabularInline,
):
model = Article
fields = (
'title',
'status',
)
readonly_fields = (
'title',
'status',
)
def has_add_permission(self, request, obj=None):
return False
class ArticleNoopInline(InlineActionsMixin, admin.TabularInline):
model = Article
fields = (
'title',
'status',
)
readonly_fields = (
'title',
'status',
)
def get_inline_actions(self, request, obj=None):
actions = super(ArticleNoopInline, self).get_inline_actions(
request=request, obj=obj
)
actions.append('noop_action')
return actions
def noop_action(self, request, obj, parent_obj=None):
pass
@admin.register(AuthorProxy)
class AuthorMultipleInlinesAdmin(InlineActionsModelAdminMixin, admin.ModelAdmin):
inlines = [ArticleInline, ArticleNoopInline]
list_display = ('name',)
inline_actions = None
@admin.register(Author)
class AuthorAdmin(InlineActionsModelAdminMixin, admin.ModelAdmin):
inlines = [ArticleInline]
list_display = ('name',)
inline_actions = None
@admin.register(Article)
class ArticleAdmin(
UnPublishActionsMixin,
TogglePublishActionsMixin,
ChangeTitleActionsMixin,
ViewAction,
InlineActionsModelAdminMixin,
admin.ModelAdmin,
):
list_display = ('title', 'status', 'author')
| [
"django.shortcuts.render",
"django.contrib.admin.register",
"django.utils.translation.gettext_lazy"
] | [((3830, 3857), 'django.contrib.admin.register', 'admin.register', (['AuthorProxy'], {}), '(AuthorProxy)\n', (3844, 3857), False, 'from django.contrib import admin, messages\n'), ((4047, 4069), 'django.contrib.admin.register', 'admin.register', (['Author'], {}), '(Author)\n', (4061, 4069), False, 'from django.contrib import admin, messages\n'), ((4225, 4248), 'django.contrib.admin.register', 'admin.register', (['Article'], {}), '(Article)\n', (4239, 4248), False, 'from django.contrib import admin, messages\n'), ((953, 965), 'django.utils.translation.gettext_lazy', '_', (['"""Publish"""'], {}), "('Publish')\n", (954, 965), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1186, 1200), 'django.utils.translation.gettext_lazy', '_', (['"""Unpublish"""'], {}), "('Unpublish')\n", (1187, 1200), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2880, 2940), 'django.shortcuts.render', 'render', (['request', '"""change_title.html"""'], {'context': "{'form': form}"}), "(request, 'change_title.html', context={'form': form})\n", (2886, 2940), False, 'from django.shortcuts import render\n'), ((895, 918), 'django.utils.translation.gettext_lazy', '_', (['"""Article published."""'], {}), "('Article published.')\n", (896, 918), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1124, 1149), 'django.utils.translation.gettext_lazy', '_', (['"""Article unpublished."""'], {}), "('Article unpublished.')\n", (1125, 1149), True, 'from django.utils.translation import gettext_lazy as _\n')] |
"""Commands module common setup."""
from importlib import import_module
from typing import Sequence
def available_commands():
"""Index available commands."""
return [
{"name": "help", "summary": "Print available commands"},
{"name": "provision", "summary": "Provision an agent"},
{"name": "start", "summary": "Start a new agent process"},
]
def load_command(command: str):
"""Load the module corresponding with a named command."""
module = None
module_path = None
for cmd in available_commands():
if cmd["name"] == command:
module = cmd["name"]
module_path = cmd.get("module")
break
if module and not module_path:
module_path = f"{__package__}.{module}"
if module_path:
return import_module(module_path)
def run_command(command: str, argv: Sequence[str] = None):
"""Execute a named command with command line arguments."""
module = load_command(command) or load_command("help")
module.execute(argv)
| [
"importlib.import_module"
] | [((802, 828), 'importlib.import_module', 'import_module', (['module_path'], {}), '(module_path)\n', (815, 828), False, 'from importlib import import_module\n')] |
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.15.2)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x02\x05\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\x00\x00\x00\
\x09\x70\x48\x59\x73\x00\x00\x12\x74\x00\x00\x12\x74\x01\xde\x66\
\x1f\x78\x00\x00\x01\x9a\x49\x44\x41\x54\x58\x47\xc5\x94\x3b\x4e\
\x03\x41\x10\x44\x7d\x01\x22\x12\x02\x9c\x20\x0e\x40\xc2\x2d\xe0\
\x42\xdc\x84\x63\x10\x70\x25\x32\x62\x42\xa3\xb2\x54\xab\x47\x6f\
\xf5\x78\x96\x9f\x83\x27\xe1\xe9\xea\xee\xb7\xe3\xc5\xbb\xd7\xb7\
\xfd\xe1\x9c\x4c\x0b\xdc\x3f\xdd\xc5\x73\x32\x93\xa9\x4c\x09\x68\
\xb0\x49\x75\x31\x93\x49\xfc\x89\xc0\xe3\xf3\x65\xcc\x24\x4e\x0a\
\x6c\x19\xcc\xec\xcd\xcb\xc3\x42\xca\x9a\x4d\x02\xa9\x4e\x98\x95\
\xec\xc5\xc7\xd5\x91\x91\xc4\xbf\x08\x8c\x24\x86\x02\x75\x60\xca\
\x54\xd8\xf3\xab\x02\xa9\x9e\x60\xcf\xd9\x05\xfc\x35\x74\xcb\xdf\
\xaf\x6f\xd7\x02\x0a\x8b\x3a\xa8\xe6\x46\xb0\x77\xb4\x7c\x25\xa0\
\xb0\xaf\x8c\x43\x98\x99\xe1\x54\xaf\x97\xeb\xef\x45\x80\xcb\xab\
\x40\xf7\x14\x1d\xec\x4d\x75\x2f\x17\x51\x80\x03\x74\xfd\x3f\x11\
\x10\xac\xf1\xe9\xc5\x49\x01\x7d\xde\x2a\x20\x38\x43\xfd\xa2\x2e\
\x17\xab\x77\x80\x8d\x6e\x66\x66\x16\xce\xf0\x62\x51\xe7\x7d\x11\
\x10\x6c\xdc\xfa\xf6\x13\xce\x11\x5a\xee\x1b\xa6\xc4\x50\xa0\xd6\
\xcc\x4c\x46\x30\xe7\x1b\x18\x0a\xb0\x41\xb0\xd6\x65\xba\x9c\x60\
\x46\x8b\x2d\xc1\x4c\x2b\x90\xae\x9f\xf5\x4a\xcd\xa6\xbc\x9e\xbc\
\x4a\xb4\x02\x3c\xaf\xb5\x0e\xe6\xb5\x44\x0f\x91\xea\x94\x58\x04\
\x18\x64\x38\xd5\x7c\x3b\x75\x81\xe1\x02\x9e\x73\xa6\x33\x51\x80\
\xd7\xcf\x73\xe1\x73\xd3\x49\xb8\x9e\xce\x4c\x2b\x90\xce\x78\x5e\
\x19\x49\xd4\x5a\xed\x3d\x0a\x30\xe0\xa7\xe7\x99\x60\x93\xd0\x0b\
\x45\xd4\xd7\x89\x90\x3a\x67\x25\x50\x3f\xfb\x8c\x68\xa1\x7f\x54\
\xcc\xac\x44\x9d\xb5\x12\xa8\xd4\x86\xb4\xdc\xa8\xa6\xcc\x16\x89\
\x5d\x0a\x18\x06\xcd\x8c\x80\x18\xdd\x06\xe7\xb5\x02\x0c\x91\x59\
\x01\xd1\x49\x30\x13\xbf\x02\x06\x12\x49\xa2\x2e\x37\x49\x82\xf5\
\xe5\xdf\x70\x2b\x5a\x48\x52\x66\x86\x6f\x0b\xfc\x0e\xfb\xc3\x27\
\x2f\x90\x9e\xc6\xb7\x8c\xf7\x21\x00\x00\x00\x00\x49\x45\x4e\x44\
\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x13\
\x0e\xb7\x46\xa2\
\x00\x69\
\x00\x6e\x00\x76\x00\x65\x00\x6e\x00\x74\x00\x6f\x00\x72\x00\x79\x00\x5f\x00\x76\x00\x61\x00\x6c\x00\x69\x00\x64\x00\x61\x00\x74\
\x00\x6f\x00\x72\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x40\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x40\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x7e\xb7\x66\x8e\xd2\
"
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
rcc_version = 1
qt_resource_struct = qt_resource_struct_v1
else:
rcc_version = 2
qt_resource_struct = qt_resource_struct_v2
def qInitResources():
QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| [
"PyQt5.QtCore.qVersion",
"PyQt5.QtCore.qUnregisterResourceData",
"PyQt5.QtCore.qRegisterResourceData"
] | [((3776, 3877), 'PyQt5.QtCore.qRegisterResourceData', 'QtCore.qRegisterResourceData', (['rcc_version', 'qt_resource_struct', 'qt_resource_name', 'qt_resource_data'], {}), '(rcc_version, qt_resource_struct,\n qt_resource_name, qt_resource_data)\n', (3804, 3877), False, 'from PyQt5 import QtCore\n'), ((3907, 4010), 'PyQt5.QtCore.qUnregisterResourceData', 'QtCore.qUnregisterResourceData', (['rcc_version', 'qt_resource_struct', 'qt_resource_name', 'qt_resource_data'], {}), '(rcc_version, qt_resource_struct,\n qt_resource_name, qt_resource_data)\n', (3937, 4010), False, 'from PyQt5 import QtCore\n'), ((3543, 3560), 'PyQt5.QtCore.qVersion', 'QtCore.qVersion', ([], {}), '()\n', (3558, 3560), False, 'from PyQt5 import QtCore\n')] |
#!/usr/bin/env python
"""
Given one or more DCC experiment IDs, looks at all read2s that were submitted and updates each r2 file
object such that it's paired_with property points to the correct r1. This works by looking at the aliases
in the r2 file object to see if there is one with _R2_001 in it. If so, it sets paired_with to be
the same alias, but with that segment replace with _R1_001. Thus, this script is nice if submissions
went wrong with regard to the file pairings, and this is one way to fix that.
"""
import argparse
import encode_utils.connection as euc
import re
def get_parser():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-i", "--infile", required=True, help="""
The input file with a DCC experiment on each line.""")
return parser
def main():
conn = euc.Connection("prod")
reg = re.compile("_R2_001")
parser = get_parser()
args = parser.parse_args()
ids = []
fh = open(args.infile)
for line in fh:
line = line.strip()
if not line or line.startswith("#"):
continue
ids.append(line)
for i in ids:
h = conn.get_fastqfile_replicate_hash(exp_id=i)
for bio_rep in h:
for tech_rep in h[bio_rep]:
read_files = h[bio_rep][tech_rep].get(2)
# read_files is a list of file objects
if not read_files:
continue
for r in read_files:
aliases = r["aliases"]
for a in aliases:
match = reg.search(a)
if match:
paired_with_name = a.replace(reg.pattern, "_R1_001")
payload = {conn.ENCID_KEY: a}
payload["paired_with"] = paired_with_name
try:
conn.patch(payload=payload)
except Exception:
break
break
if __name__ == "__main__":
main()
| [
"encode_utils.connection.Connection",
"argparse.ArgumentParser",
"re.compile"
] | [((615, 659), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (638, 659), False, 'import argparse\n'), ((829, 851), 'encode_utils.connection.Connection', 'euc.Connection', (['"""prod"""'], {}), "('prod')\n", (843, 851), True, 'import encode_utils.connection as euc\n'), ((862, 883), 're.compile', 're.compile', (['"""_R2_001"""'], {}), "('_R2_001')\n", (872, 883), False, 'import re\n')] |
import copy
import pytest
from river import utils
from river import ensemble
estimator = ensemble.SRPClassifier(
n_models=3, # Smaller ensemble than the default to avoid bottlenecks
seed=42)
@pytest.mark.parametrize('estimator, check', [
pytest.param(
estimator,
check,
id=f'{estimator}:{check.__name__}'
)
for check in utils.estimator_checks.yield_checks(estimator)
# Skipping this test since shuffling features is expected to impact SRP
if check.__name__ not in {'check_shuffle_features_no_impact'}
])
def test_check_estimator(estimator, check):
check(copy.deepcopy(estimator))
| [
"river.utils.estimator_checks.yield_checks",
"river.ensemble.SRPClassifier",
"pytest.param",
"copy.deepcopy"
] | [((93, 136), 'river.ensemble.SRPClassifier', 'ensemble.SRPClassifier', ([], {'n_models': '(3)', 'seed': '(42)'}), '(n_models=3, seed=42)\n', (115, 136), False, 'from river import ensemble\n'), ((618, 642), 'copy.deepcopy', 'copy.deepcopy', (['estimator'], {}), '(estimator)\n', (631, 642), False, 'import copy\n'), ((258, 324), 'pytest.param', 'pytest.param', (['estimator', 'check'], {'id': 'f"""{estimator}:{check.__name__}"""'}), "(estimator, check, id=f'{estimator}:{check.__name__}')\n", (270, 324), False, 'import pytest\n'), ((372, 418), 'river.utils.estimator_checks.yield_checks', 'utils.estimator_checks.yield_checks', (['estimator'], {}), '(estimator)\n', (407, 418), False, 'from river import utils\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"The core logic of how plugins integrate with `popen_nspawn`"
import functools
import subprocess
from contextlib import contextmanager
from typing import Callable, ContextManager, Iterable, Tuple, Union
from antlir.subvol_utils import Subvol
from .args import PopenArgs, _NspawnOpts
from .cmd import _nspawn_setup, _NspawnSetup, _nspawn_subvol_setup
from .plugins import NspawnPlugin
_PopenResult = Tuple[subprocess.Popen, subprocess.Popen]
_SetupSubvolCtxMgr = Callable[[_NspawnOpts], ContextManager[Subvol]]
_NspawnSetupCtxMgr = Callable[
[_NspawnOpts, PopenArgs], ContextManager[_NspawnSetup]
]
_PostSetupPopenCtxMgr = Callable[[_NspawnSetup], ContextManager[_PopenResult]]
@contextmanager
def _setup_subvol(opts: _NspawnOpts) -> Iterable[Subvol]:
with _nspawn_subvol_setup(opts) as subvol:
yield subvol
@contextmanager
def _setup(
subvol: Subvol, opts: _NspawnOpts, popen_args: PopenArgs
) -> Iterable[_NspawnSetup]:
with _nspawn_setup(subvol, opts, popen_args) as setup:
yield setup
@contextmanager
def _popen_plugin_driver(
opts: _NspawnOpts,
popen_args: PopenArgs,
post_setup_popen: _PostSetupPopenCtxMgr,
plugins: Iterable[NspawnPlugin],
) -> _PopenResult:
# Apply the plugins
setup = _setup
setup_subvol = _setup_subvol
for p in plugins:
if p.wrap_setup_subvol is not None:
setup_subvol = functools.partial(p.wrap_setup_subvol, setup_subvol)
if p.wrap_setup is not None:
setup = functools.partial(p.wrap_setup, setup)
if p.wrap_post_setup_popen is not None:
post_setup_popen = functools.partial(
p.wrap_post_setup_popen, post_setup_popen
)
with setup_subvol(opts) as subvol, setup(
subvol, opts, popen_args
) as setup, post_setup_popen(setup) as popen_res:
yield popen_res
| [
"functools.partial"
] | [((1594, 1646), 'functools.partial', 'functools.partial', (['p.wrap_setup_subvol', 'setup_subvol'], {}), '(p.wrap_setup_subvol, setup_subvol)\n', (1611, 1646), False, 'import functools\n'), ((1704, 1742), 'functools.partial', 'functools.partial', (['p.wrap_setup', 'setup'], {}), '(p.wrap_setup, setup)\n', (1721, 1742), False, 'import functools\n'), ((1822, 1882), 'functools.partial', 'functools.partial', (['p.wrap_post_setup_popen', 'post_setup_popen'], {}), '(p.wrap_post_setup_popen, post_setup_popen)\n', (1839, 1882), False, 'import functools\n')] |
# -*- coding: utf-8 -*-
import time
import pandas as pd
# self-made
import manage_mysql
def horseDB(table,search):
con = manage_mysql.connect()
c = con.cursor()
column=[]
value=[]
for i in range(len(search)):
if i%2 == 0:
column.append(search[i])
else:
value.append(search[i])
sql='SELECT * FROM '+table+' where '
for i in range(len(column)):
if i != 0:
sql+=' and '
sql+=column[i]+' = "'+str(value[i])+'"'
result = pd.read_sql(sql,con)
con.close()
return result
def fukusho(group,search,limit):
sql='SELECT '+group+',count(*),sum(fukusho),sum(fukusho)/count(*) From mergedresult '
if search['column'] == '':
sql+='where '
if limit != 0:
ut=time.time()
epoch=int(ut)-60*60*24*limit
else:
epoch=0
sql+='epoch >= '+str(epoch)+' '
elif search['column'] == 'sex':
sql+='where '
sql+=search['column']+' = "'+search['value']+'" '
elif search['column'] == 'age':
sql+='where '
if search['value'] == 7:
sql+='year - birth >= '+str(search['value'])+' '
else:
sql+='year - birth = '+str(search['value'])+' '
elif search['column'] == 'road':
sql+='where '
value=search['value'].split("_")
roadbed=value[1]
roadCondition=value[2]
if roadbed == 'turf':
roadbed = '0'
elif roadbed == 'dirt':
roadbed = '1'
sql+='roadbed = "'+roadbed+'" and '
if roadCondition == 'good':
sql+='(roadCondition = "0") '
elif roadCondition == 'bad':
sql+='(roadCondition = "1" or roadCondition = "2" or roadCondition = "3") '
elif search['column'] == 'distance_category':
sql+='where distance_category = "'
value=search['value'].split("_")
category=value[1]
if category == 'sprint':
category = '0'
elif category == 'mile':
category = '1'
elif category == 'intermediate':
category = '2'
elif category == 'long':
category = '3'
elif category == 'extended':
category = '4'
sql+=category+'" '
elif search['column'] == 'win_class':
sql+='where '
value=search['value'].split("_")
grade=value[1]
sql+=search['column']+' = "'+str(grade)+'" '
elif search['column'] == 'track':
sql+='where '
track=search['value']
track=track.split("_")
course=track[1]
roadbed=track[2]
if roadbed == 'turf':
roadbed = '0'
elif roadbed == 'dirt':
roadbed = '1'
sql+='course = "'+course+'" and roadbed = "'+roadbed+'" '
if len(track) == 4: # (04 or 08) and turf
inout=track[3]
if course == '04':
if inout == 'in':
sql+=' and distance like "%in" '
elif inout == 'out':
sql+=' and distance like "%out" '
elif course == '08':
if inout == 'in':
sql+=' and distance like "%in" '
elif inout == 'out':
sql+=' and distance like "%out" '
elif search['column'] == 'rotation_epoch':
sql+='where '
value=search['value'].split("_")
rotationEpoch=value[2]
if rotationEpoch == 'short': # threshold: 6weeks
sql+=search['column']+' <= 60*60*24*7*6 and '+search['column']+' != 0 '
elif rotationEpoch == 'long':
sql+=search['column']+' > 60*60*24*7*6 '
elif search['column'] == 'rotation_roadbed':
sql+='where '
value=search['value'].split("_")
rotationRoadbed=value[2]
if rotationRoadbed == 'toTurf':
sql+=search['column']+' = 1 and roadbed = "0" '
elif rotationRoadbed == 'toDirt':
sql+=search['column']+' = 1 and roadbed = "1" '
elif search['column'] == 'rotation_distance':
sql+='where '
value=search['value'].split("_")
rotationDistance=value[2]
if rotationDistance == 'shortening':
sql+='distance/(distance-'+search['column']+') < 0.9 '
elif rotationDistance == 'extension':
sql+='distance/(distance-'+search['column']+') > 1.1 '
sql+='GROUP BY '+group
print(sql)
con = manage_mysql.connect()
result = pd.read_sql(sql,con)
con.commit()
con.close()
return result
| [
"pandas.read_sql",
"manage_mysql.connect",
"time.time"
] | [((129, 151), 'manage_mysql.connect', 'manage_mysql.connect', ([], {}), '()\n', (149, 151), False, 'import manage_mysql\n'), ((524, 545), 'pandas.read_sql', 'pd.read_sql', (['sql', 'con'], {}), '(sql, con)\n', (535, 545), True, 'import pandas as pd\n'), ((4468, 4490), 'manage_mysql.connect', 'manage_mysql.connect', ([], {}), '()\n', (4488, 4490), False, 'import manage_mysql\n'), ((4504, 4525), 'pandas.read_sql', 'pd.read_sql', (['sql', 'con'], {}), '(sql, con)\n', (4515, 4525), True, 'import pandas as pd\n'), ((799, 810), 'time.time', 'time.time', ([], {}), '()\n', (808, 810), False, 'import time\n')] |
"""
Code for working with data.
In-memory format (as a list):
- board: Tensor (8, 8, 2) [bool; one-hot]
- move: Tensor (64,) [bool; one-hot]
- value: Tensor () [float32]
On-disk format (to save space and quicken loading):
- board: int64
- move: int64
- value: float32
"""
from typing import Dict, Tuple
import tensorflow as tf # type: ignore
from board import BOARD_SHAPE, BOARD_SQUARES, Board, Loc
EXAMPLE_SPEC = {
"board": tf.io.FixedLenFeature([2], tf.int64),
"move": tf.io.FixedLenFeature([], tf.int64),
"value": tf.io.FixedLenFeature([], tf.float32),
}
# Hack to allow storing bitboards efficiently as tf.Int64.
# Necessary because boards are all valid uint64 but not necessarily valid int64.
# Taken from: https://stackoverflow.com/questions/20766813/how-to-convert-signed-to-
# unsigned-integer-in-python
def _signed_representation(unsigned: int) -> int:
"""Convert an "unsigned" int to its equivalent C "signed" representation."""
return (unsigned & ((1 << 63) - 1)) - (unsigned & (1 << 63))
def _unsigned_representation(signed: int) -> int:
"""Convert a "signed" int to its equivalent C "unsigned" representation."""
return signed & 0xFFFFFFFFFFFFFFFF
# See: https://stackoverflow.com/questions/48333210/tensorflow-how-to-convert-an-
# integer-tensor-to-the-corresponding-binary-tensor
def decode_bitboard(encoded: tf.Tensor) -> tf.Tensor:
"""
Convert from uint64 board representation to a tf.Tensor board.
"""
flat = tf.math.mod(
tf.bitwise.right_shift(encoded, tf.range(BOARD_SQUARES, dtype=tf.int64)), 2
)
board = tf.reshape(flat, BOARD_SHAPE)
# Hack to allow using rot90 on a 2D tensor
return tf.image.rot90(tf.expand_dims(board, axis=-1), k=2)[:, :, 0]
def serialize_example(board: Board, move: Loc, value: float) -> str:
"""
Serialize a single training example into a string.
"""
black = _signed_representation(int(board.black))
white = _signed_representation(int(board.white))
features = {
"board": tf.train.Feature(int64_list=tf.train.Int64List(value=[black, white])),
"move": tf.train.Feature(int64_list=tf.train.Int64List(value=[move.as_int])),
"value": tf.train.Feature(float_list=tf.train.FloatList(value=[value])),
}
ex = tf.train.Example(features=tf.train.Features(feature=features))
return ex.SerializeToString()
def preprocess_example(
serialized: str
) -> Tuple[Dict[str, tf.Tensor], Dict[str, tf.Tensor]]:
"""
Turn a serialized example into the training-ready format.
"""
example = tf.io.parse_single_example(serialized, EXAMPLE_SPEC)
bitboards = example["board"]
black_bb = bitboards[0]
white_bb = bitboards[1]
black = decode_bitboard(black_bb)
white = decode_bitboard(white_bb)
board = tf.stack([black, white], axis=-1)
move = tf.one_hot(example["move"], BOARD_SQUARES)
# TODO: better solution to multi-input Keras model training
return (
{"board": board},
{"policy_softmax": move, "tf_op_layer_Tanh": example["value"]},
)
| [
"tensorflow.one_hot",
"tensorflow.io.parse_single_example",
"tensorflow.train.Int64List",
"tensorflow.range",
"tensorflow.train.Features",
"tensorflow.io.FixedLenFeature",
"tensorflow.train.FloatList",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.stack"
] | [((441, 477), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[2]', 'tf.int64'], {}), '([2], tf.int64)\n', (462, 477), True, 'import tensorflow as tf\n'), ((491, 526), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (512, 526), True, 'import tensorflow as tf\n'), ((541, 578), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.float32'], {}), '([], tf.float32)\n', (562, 578), True, 'import tensorflow as tf\n'), ((1621, 1650), 'tensorflow.reshape', 'tf.reshape', (['flat', 'BOARD_SHAPE'], {}), '(flat, BOARD_SHAPE)\n', (1631, 1650), True, 'import tensorflow as tf\n'), ((2598, 2650), 'tensorflow.io.parse_single_example', 'tf.io.parse_single_example', (['serialized', 'EXAMPLE_SPEC'], {}), '(serialized, EXAMPLE_SPEC)\n', (2624, 2650), True, 'import tensorflow as tf\n'), ((2828, 2861), 'tensorflow.stack', 'tf.stack', (['[black, white]'], {'axis': '(-1)'}), '([black, white], axis=-1)\n', (2836, 2861), True, 'import tensorflow as tf\n'), ((2873, 2915), 'tensorflow.one_hot', 'tf.one_hot', (["example['move']", 'BOARD_SQUARES'], {}), "(example['move'], BOARD_SQUARES)\n", (2883, 2915), True, 'import tensorflow as tf\n'), ((1559, 1598), 'tensorflow.range', 'tf.range', (['BOARD_SQUARES'], {'dtype': 'tf.int64'}), '(BOARD_SQUARES, dtype=tf.int64)\n', (1567, 1598), True, 'import tensorflow as tf\n'), ((1725, 1755), 'tensorflow.expand_dims', 'tf.expand_dims', (['board'], {'axis': '(-1)'}), '(board, axis=-1)\n', (1739, 1755), True, 'import tensorflow as tf\n'), ((2332, 2367), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'features'}), '(feature=features)\n', (2349, 2367), True, 'import tensorflow as tf\n'), ((2081, 2121), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': '[black, white]'}), '(value=[black, white])\n', (2099, 2121), True, 'import tensorflow as tf\n'), ((2168, 2207), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': 
'[move.as_int]'}), '(value=[move.as_int])\n', (2186, 2207), True, 'import tensorflow as tf\n'), ((2255, 2288), 'tensorflow.train.FloatList', 'tf.train.FloatList', ([], {'value': '[value]'}), '(value=[value])\n', (2273, 2288), True, 'import tensorflow as tf\n')] |
from typing import List
import requests
from pathlib import Path
from datetime import date, datetime
from bs4 import BeautifulSoup
from helper.classes import Channel, Program
from helper.utils import get_channel_by_name, get_epg_datetime
TIMEZONE_OFFSET = "+0800"
PROGRAM_URL = "https://epg.beinsports.com/utctime_id.php?cdate={date}&offset=+8&mins=00&category=sports&id=123"
def get_all_channels():
return [Channel(
"channels_1",
"beInSPORTS1.Id",
"beIN SPORTS 1",
"",
True),
Channel(
"channels_2",
"beInSPORTS2.Id",
"beIN SPORTS 2",
"",
True)]
def get_programs_by_channel(channel_name: str, *args) -> List[Program]:
# TODO: Accept days as input and increment the date_input in an outer for
# loop
date_input = date.today()
datetime_today = datetime.now().replace(
hour=0, minute=0, second=0, microsecond=0)
url = PROGRAM_URL.format(
date=date_input)
channel = get_channel_by_name(channel_name, Path(__file__).stem)
try:
r = requests.get(url)
except requests.exceptions.RequestException as e:
raise SystemExit(e)
if r.status_code != 200:
raise Exception(r.raise_for_status())
soup = BeautifulSoup(r.text, features="html.parser")
divs = soup.find_all("div", {"id": channel.id})
programs = []
for div in divs:
line = div.find_all("li", {"parent": "slider_1"})
for value in line:
time_period = str(value.find("p", {"class": "time"}).string)
time_start, time_end = time_period.split("-")
start_hour, start_minute = time_start.split(":")
start_time = datetime_today.replace(
hour=int(start_hour), minute=int(start_minute))
end_hour, end_minute = time_end.split(":")
end_time = datetime_today.replace(
hour=int(end_hour), minute=int(end_minute))
obj = Program(
channel_name=channel.tvg_id,
title=value.find("p", {"class": "title"}).string,
description=value.find("p", {"class": "format"}).string,
start=get_epg_datetime(start_time, TIMEZONE_OFFSET),
stop=get_epg_datetime(end_time, TIMEZONE_OFFSET)
)
programs.append(obj)
return programs
| [
"helper.classes.Channel",
"pathlib.Path",
"helper.utils.get_epg_datetime",
"requests.get",
"bs4.BeautifulSoup",
"datetime.datetime.now",
"datetime.date.today"
] | [((833, 845), 'datetime.date.today', 'date.today', ([], {}), '()\n', (843, 845), False, 'from datetime import date, datetime\n'), ((1278, 1323), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r.text'], {'features': '"""html.parser"""'}), "(r.text, features='html.parser')\n", (1291, 1323), False, 'from bs4 import BeautifulSoup\n'), ((415, 481), 'helper.classes.Channel', 'Channel', (['"""channels_1"""', '"""beInSPORTS1.Id"""', '"""beIN SPORTS 1"""', '""""""', '(True)'], {}), "('channels_1', 'beInSPORTS1.Id', 'beIN SPORTS 1', '', True)\n", (422, 481), False, 'from helper.classes import Channel, Program\n'), ((532, 598), 'helper.classes.Channel', 'Channel', (['"""channels_2"""', '"""beInSPORTS2.Id"""', '"""beIN SPORTS 2"""', '""""""', '(True)'], {}), "('channels_2', 'beInSPORTS2.Id', 'beIN SPORTS 2', '', True)\n", (539, 598), False, 'from helper.classes import Channel, Program\n'), ((1090, 1107), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1102, 1107), False, 'import requests\n'), ((867, 881), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (879, 881), False, 'from datetime import date, datetime\n'), ((1047, 1061), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1051, 1061), False, 'from pathlib import Path\n'), ((2204, 2249), 'helper.utils.get_epg_datetime', 'get_epg_datetime', (['start_time', 'TIMEZONE_OFFSET'], {}), '(start_time, TIMEZONE_OFFSET)\n', (2220, 2249), False, 'from helper.utils import get_channel_by_name, get_epg_datetime\n'), ((2272, 2315), 'helper.utils.get_epg_datetime', 'get_epg_datetime', (['end_time', 'TIMEZONE_OFFSET'], {}), '(end_time, TIMEZONE_OFFSET)\n', (2288, 2315), False, 'from helper.utils import get_channel_by_name, get_epg_datetime\n')] |
#! /usr/bin/env python3
"""Converts cpplint output to JUnit XML format."""
import argparse
import collections
import os
import re
import sys
from typing import Dict, List
from xml.etree import ElementTree
from exitstatus import ExitStatus
class CpplintError(object):
def __init__(self, file: str, line: int, message: str) -> None:
"""Constructor.
Args:
file: File error originated on.
line: Line error originated on.
message: Error message.
"""
self.file = file
self.line = line
self.message = message
def parse_arguments() -> argparse.Namespace:
parser = argparse.ArgumentParser(description='Converts cpplint output to JUnit XML format.')
parser.add_argument('input_file', type=str, help='cpplint stdout text file.')
parser.add_argument('output_file', type=str, help='JUnit XML output file.')
return parser.parse_args()
def parse_cpplint(file_name: str) -> Dict[str, List[CpplintError]]:
"""Parses a cpplint output file.
Args:
file_name: cpplint output file.
Returns:
Parsed errors grouped by file name.
Raises:
IOError: File does not exist (More specifically FileNotFoundError on Python 3).
"""
with open(file_name, 'rt') as file:
lines = file.readlines()
errors = collections.defaultdict(list)
for line in lines:
line = line.rstrip()
match = re.search(r'(\S+):(\d+):\s+(.+)', line)
if match is not None:
error = CpplintError(file=match.group(1),
line=int(match.group(2)),
message=match.group(3))
errors[error.file].append(error)
return errors
def generate_test_suite(errors: Dict[str, List[CpplintError]]) -> ElementTree.ElementTree:
"""Creates a JUnit XML tree from parsed cpplint errors.
Args:
errors: Parsed cpplint errors.
Returns:
XML test suite.
"""
test_suite = ElementTree.Element('testsuite')
test_suite.attrib['errors'] = str(len(errors))
test_suite.attrib['failures'] = str(0)
test_suite.attrib['name'] = 'cpplint errors'
test_suite.attrib['tests'] = str(len(errors))
test_suite.attrib['time'] = str(1)
for file_name, errors in errors.items():
test_case = ElementTree.SubElement(test_suite,
'testcase',
name=os.path.relpath(file_name))
for error in errors:
ElementTree.SubElement(test_case,
'error',
file=os.path.relpath(error.file),
line=str(error.line),
message='{}: {}'.format(error.line, error.message))
return ElementTree.ElementTree(test_suite)
def main() -> ExitStatus: # pragma: no cover
"""Main function.
Returns:
Exit code.
"""
args = parse_arguments()
try:
errors = parse_cpplint(args.input_file)
except IOError as e:
print(str(e))
return ExitStatus.failure
if len(errors) > 0:
tree = generate_test_suite(errors)
tree.write(args.output_file, encoding='utf-8', xml_declaration=True)
return ExitStatus.success
if __name__ == '__main__': # pragma: no cover
sys.exit(main())
| [
"argparse.ArgumentParser",
"xml.etree.ElementTree.Element",
"xml.etree.ElementTree.ElementTree",
"collections.defaultdict",
"os.path.relpath",
"re.search"
] | [((656, 744), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Converts cpplint output to JUnit XML format."""'}), "(description=\n 'Converts cpplint output to JUnit XML format.')\n", (679, 744), False, 'import argparse\n'), ((2053, 2085), 'xml.etree.ElementTree.Element', 'ElementTree.Element', (['"""testsuite"""'], {}), "('testsuite')\n", (2072, 2085), False, 'from xml.etree import ElementTree\n'), ((2894, 2929), 'xml.etree.ElementTree.ElementTree', 'ElementTree.ElementTree', (['test_suite'], {}), '(test_suite)\n', (2917, 2929), False, 'from xml.etree import ElementTree\n'), ((1349, 1378), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (1372, 1378), False, 'import collections\n'), ((1460, 1501), 're.search', 're.search', (['"""(\\\\S+):(\\\\d+):\\\\s+(.+)"""', 'line'], {}), "('(\\\\S+):(\\\\d+):\\\\s+(.+)', line)\n", (1469, 1501), False, 'import re\n'), ((2522, 2548), 'os.path.relpath', 'os.path.relpath', (['file_name'], {}), '(file_name)\n', (2537, 2548), False, 'import os\n'), ((2709, 2736), 'os.path.relpath', 'os.path.relpath', (['error.file'], {}), '(error.file)\n', (2724, 2736), False, 'import os\n')] |
import unittest
import requests_mock
from werkzeug.test import EnvironBuilder
from werkzeug.wrappers import Request
from perimeterx import px_constants
from perimeterx.px_config import PxConfig
from perimeterx.px_context import PxContext
from perimeterx.px_proxy import PxProxy
class Test_PXProxy(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.config = PxConfig({'app_id': 'PXfake_app_id'})
cls.headers = {'X-FORWARDED-FOR': '127.0.0.1',
'remote-addr': '127.0.0.1',
'content_length': '100'}
def test_should_reverse_request(self):
builder = EnvironBuilder(headers=self.headers, path='/fake_app_id/init.js')
env = builder.get_environ()
request = Request(env)
context = PxContext(request, self.config)
px_proxy = PxProxy(self.config)
should_reverse = px_proxy.should_reverse_request(context.uri)
self.assertTrue(should_reverse)
should_reverse = px_proxy.should_reverse_request(context.uri)
self.assertTrue(should_reverse)
should_reverse = px_proxy.should_reverse_request(context.uri)
self.assertTrue(should_reverse)
@requests_mock.mock()
def test_send_reverse_client_request(self, mock):
content = 'client js content'
builder = EnvironBuilder(headers=self.headers, path='/fake_app_id/init.js')
env = builder.get_environ()
request = Request(env)
context = PxContext(request, self.config)
headers = {'host': px_constants.CLIENT_HOST,
px_constants.FIRST_PARTY_HEADER: '1',
px_constants.ENFORCER_TRUE_IP_HEADER: context.ip,
px_constants.FIRST_PARTY_FORWARDED_FOR: '127.0.0.1'}
mock.get(url='https://client.perimeterx.net/PXfake_app_id/main.min.js', text=content, request_headers=headers,
status_code=200, reason='OK')
px_proxy = PxProxy(self.config)
status, headers, body = px_proxy.send_reverse_client_request(config=self.config, ctx=context)
self.assertEqual(content, body.decode("utf-8"))
@requests_mock.mock()
def test_send_reverse_captcha_request(self, mock):
content = 'captcha js content'
builder = EnvironBuilder(headers=self.headers, path='/fake_app_id/captcha/captcha.js', query_string='a=c&u=cfe74220-f484-11e8-9b14-d7280325a290&v=0701bb80-f482-11e8-8a31-a37cf9620569&m=0')
env = builder.get_environ()
request = Request(env)
context = PxContext(request, self.config)
headers = {'host': px_constants.CAPTCHA_HOST,
px_constants.FIRST_PARTY_HEADER: '1',
px_constants.ENFORCER_TRUE_IP_HEADER: context.ip,
px_constants.FIRST_PARTY_FORWARDED_FOR: '127.0.0.1'}
mock.get(
url='https://captcha.px-cdn.net/PXfake_app_id/captcha.js?a=c&u=cfe74220-f484-11e8-9b14-d7280325a290&v=0701bb80-f482-11e8-8a31-a37cf9620569&m=0',
text=content, request_headers=headers, status_code=200, reason='OK')
px_proxy = PxProxy(self.config)
status, headers, body = px_proxy.send_reverse_captcha_request(config=self.config, ctx=context)
self.assertEqual(content, body.decode("utf-8"))
@requests_mock.mock()
def test_send_reverse_xhr_request(self, mock):
content = 'xhr content'
builder = EnvironBuilder(headers=self.headers, path='/fake_app_id/xhr/api/v1/collector', method='POST')
env = builder.get_environ()
request = Request(env)
context = PxContext(request, self.config)
headers = {'host': self.config.collector_host,
px_constants.FIRST_PARTY_HEADER: '1',
px_constants.ENFORCER_TRUE_IP_HEADER: context.ip,
px_constants.FIRST_PARTY_FORWARDED_FOR: '127.0.0.1'}
mock.post(url='https://collector-pxfake_app_id.perimeterx.net/api/v1/collector', text=content,
request_headers=headers, status_code=200, reason='OK')
px_proxy = PxProxy(self.config)
status, headers, body = px_proxy.send_reverse_xhr_request(config=self.config, ctx=context, body=content)
self.assertEqual(content, body.decode("utf-8"))
| [
"perimeterx.px_context.PxContext",
"requests_mock.mock",
"perimeterx.px_config.PxConfig",
"werkzeug.test.EnvironBuilder",
"perimeterx.px_proxy.PxProxy",
"werkzeug.wrappers.Request"
] | [((1201, 1221), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (1219, 1221), False, 'import requests_mock\n'), ((2137, 2157), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (2155, 2157), False, 'import requests_mock\n'), ((3304, 3324), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (3322, 3324), False, 'import requests_mock\n'), ((385, 422), 'perimeterx.px_config.PxConfig', 'PxConfig', (["{'app_id': 'PXfake_app_id'}"], {}), "({'app_id': 'PXfake_app_id'})\n", (393, 422), False, 'from perimeterx.px_config import PxConfig\n'), ((640, 705), 'werkzeug.test.EnvironBuilder', 'EnvironBuilder', ([], {'headers': 'self.headers', 'path': '"""/fake_app_id/init.js"""'}), "(headers=self.headers, path='/fake_app_id/init.js')\n", (654, 705), False, 'from werkzeug.test import EnvironBuilder\n'), ((761, 773), 'werkzeug.wrappers.Request', 'Request', (['env'], {}), '(env)\n', (768, 773), False, 'from werkzeug.wrappers import Request\n'), ((792, 823), 'perimeterx.px_context.PxContext', 'PxContext', (['request', 'self.config'], {}), '(request, self.config)\n', (801, 823), False, 'from perimeterx.px_context import PxContext\n'), ((843, 863), 'perimeterx.px_proxy.PxProxy', 'PxProxy', (['self.config'], {}), '(self.config)\n', (850, 863), False, 'from perimeterx.px_proxy import PxProxy\n'), ((1332, 1397), 'werkzeug.test.EnvironBuilder', 'EnvironBuilder', ([], {'headers': 'self.headers', 'path': '"""/fake_app_id/init.js"""'}), "(headers=self.headers, path='/fake_app_id/init.js')\n", (1346, 1397), False, 'from werkzeug.test import EnvironBuilder\n'), ((1453, 1465), 'werkzeug.wrappers.Request', 'Request', (['env'], {}), '(env)\n', (1460, 1465), False, 'from werkzeug.wrappers import Request\n'), ((1484, 1515), 'perimeterx.px_context.PxContext', 'PxContext', (['request', 'self.config'], {}), '(request, self.config)\n', (1493, 1515), False, 'from perimeterx.px_context import PxContext\n'), ((1952, 1972), 'perimeterx.px_proxy.PxProxy', 'PxProxy', 
(['self.config'], {}), '(self.config)\n', (1959, 1972), False, 'from perimeterx.px_proxy import PxProxy\n'), ((2270, 2474), 'werkzeug.test.EnvironBuilder', 'EnvironBuilder', ([], {'headers': 'self.headers', 'path': '"""/fake_app_id/captcha/captcha.js"""', 'query_string': '"""a=c&u=cfe74220-f484-11e8-9b14-d7280325a290&v=0701bb80-f482-11e8-8a31-a37cf9620569&m=0"""'}), "(headers=self.headers, path='/fake_app_id/captcha/captcha.js',\n query_string=\n 'a=c&u=cfe74220-f484-11e8-9b14-d7280325a290&v=0701bb80-f482-11e8-8a31-a37cf9620569&m=0'\n )\n", (2284, 2474), False, 'from werkzeug.test import EnvironBuilder\n'), ((2516, 2528), 'werkzeug.wrappers.Request', 'Request', (['env'], {}), '(env)\n', (2523, 2528), False, 'from werkzeug.wrappers import Request\n'), ((2547, 2578), 'perimeterx.px_context.PxContext', 'PxContext', (['request', 'self.config'], {}), '(request, self.config)\n', (2556, 2578), False, 'from perimeterx.px_context import PxContext\n'), ((3118, 3138), 'perimeterx.px_proxy.PxProxy', 'PxProxy', (['self.config'], {}), '(self.config)\n', (3125, 3138), False, 'from perimeterx.px_proxy import PxProxy\n'), ((3426, 3524), 'werkzeug.test.EnvironBuilder', 'EnvironBuilder', ([], {'headers': 'self.headers', 'path': '"""/fake_app_id/xhr/api/v1/collector"""', 'method': '"""POST"""'}), "(headers=self.headers, path=\n '/fake_app_id/xhr/api/v1/collector', method='POST')\n", (3440, 3524), False, 'from werkzeug.test import EnvironBuilder\n'), ((3575, 3587), 'werkzeug.wrappers.Request', 'Request', (['env'], {}), '(env)\n', (3582, 3587), False, 'from werkzeug.wrappers import Request\n'), ((3606, 3637), 'perimeterx.px_context.PxContext', 'PxContext', (['request', 'self.config'], {}), '(request, self.config)\n', (3615, 3637), False, 'from perimeterx.px_context import PxContext\n'), ((4087, 4107), 'perimeterx.px_proxy.PxProxy', 'PxProxy', (['self.config'], {}), '(self.config)\n', (4094, 4107), False, 'from perimeterx.px_proxy import PxProxy\n')] |
import collections
import functools
import json
import logging
import multiprocessing
import os
import time
from collections import OrderedDict
from queue import PriorityQueue, Empty
from typing import List, Tuple, Any
from itertools import cycle, islice
import minerl.herobraine.env_spec
from minerl.herobraine.hero import spaces
import cv2
import os
import numpy as np
import gym
logger = logging.getLogger(__name__)
from minerl.data.version import assert_version, assert_prefix
import copy
import tqdm
import queue
import minerl.data.util
from minerl.data.util import forever, minibatch_gen
import concurrent
from IPython import embed
if os.name != "nt":
class WindowsError(OSError):
pass
def tree_slice(tree, slc):
if isinstance(tree, OrderedDict):
return OrderedDict(
[(k, tree_slice(v, slc)) for k, v in tree.items()]
)
else:
return tree[slc]
class DataPipeline:
"""
Creates a data pipeline object used to itterate through the MineRL-v0 dataset
"""
def __init__(self,
data_directory: os.path,
environment: str,
num_workers: int,
worker_batch_size: int,
min_size_to_dequeue: int,
random_seed=42):
"""
Sets up a tensorflow dataset to load videos from a given data directory.
:param data_directory:
:type data_directory:
:param num_workers:
:type num_workers:
:param worker_batch_size:
:type worker_batch_size:
:param min_size_to_dequeue:
:type min_size_to_dequeue:
:param random_seed:
"""
self.seed = random_seed
self.data_dir = data_directory
self.environment = environment
self.number_of_workers = num_workers
self.worker_batch_size = worker_batch_size
self.size_to_dequeue = min_size_to_dequeue
self.processing_pool = multiprocessing.Pool(self.number_of_workers)
self._env_spec = gym.envs.registration.spec(self.environment)._kwargs['env_spec']
self._action_space = gym.envs.registration.spec(self.environment)._kwargs['action_space']
self._observation_space = gym.envs.registration.spec(self.environment)._kwargs['observation_space']
@property
def spec(self) -> minerl.herobraine.env_spec.EnvSpec:
return self._env_spec
@property
def action_space(self):
"""
Returns: action space of current MineRL environment
"""
return self._action_space
@property
def observation_space(self):
"""
Returns: action space of current MineRL environment
"""
return self._observation_space
# return result
def load_data(self, stream_name: str, skip_interval=0, include_metadata=False, video_name='recording.mp4'):
"""Iterates over an individual trajectory named stream_name.
Args:
stream_name (str): The stream name desired to be iterated through.
skip_interval (int, optional): How many sices should be skipped.. Defaults to 0.
include_metadata (bool, optional): Whether or not meta data about the loaded trajectory should be included.. Defaults to False.
Yields:
A tuple of (state, player_action, reward_from_action, next_state, is_next_state_terminal).
These are tuples are yielded in order of the episode.
"""
if '/' in stream_name:
file_dir = stream_name
else:
file_dir = os.path.join(self.data_dir, stream_name)
if DataPipeline._is_blacklisted(stream_name):
raise RuntimeError("This stream is corrupted (and will be removed in the next version of the data!)")
seq = DataPipeline._load_data_pyfunc(file_dir, -1, None, self.environment, skip_interval=skip_interval,
include_metadata=include_metadata, video_name=video_name)
if include_metadata:
observation_seq, action_seq, reward_seq, next_observation_seq, done_seq, meta = seq
else:
observation_seq, action_seq, reward_seq, next_observation_seq, done_seq = seq
# make a copty
gym_spec = gym.envs.registration.spec(self.environment)
target_space = copy.deepcopy(gym_spec._kwargs['observation_space'])
x = list(target_space.spaces.items())
target_space.spaces = collections.OrderedDict(
sorted(x, key=lambda x:
x[0] if x[0] is not 'pov' else 'z')
)
# Now we just need to slice the dict.
for idx in tqdm.tqdm(range(len(reward_seq))):
# Wrap in dict
action_dict = tree_slice(action_seq, idx)
observation_dict = tree_slice(observation_seq, idx)
next_observation_dict = tree_slice(next_observation_seq, idx)
yield_list = [observation_dict, action_dict, reward_seq[idx], next_observation_dict, done_seq[idx]]
yield yield_list + [meta] if include_metadata else yield_list
def get_trajectory_names(self):
"""Gets all the trajectory names
Returns:
A list of experiment names: [description]
"""
return [os.path.basename(x) for x in self._get_all_valid_recordings(self.data_dir)]
############################
# PRIVATE METHODS #
############################
@staticmethod
def read_frame(cap):
try:
ret, frame = cap.read()
if ret:
cv2.cvtColor(frame, code=cv2.COLOR_BGR2RGB, dst=frame)
frame = np.asarray(np.clip(frame, 0, 255), dtype=np.uint8)
return ret, frame
except Exception as err:
logger.error("error reading capture device:", err)
raise err
@staticmethod
def _roundrobin(*iterables):
"roundrobin('ABC', 'D', 'EF') --> A D E B F C"
# Recipe credited to <NAME>
pending = len(iterables)
nexts = cycle(iter(it).next for it in iterables)
while pending:
try:
for next in nexts:
yield next()
except StopIteration:
pending -= 1
nexts = cycle(islice(nexts, pending))
# Todo: Make data pipeline split files per push.
@staticmethod
def _load_data_pyfunc(file_dir: str, max_seq_len: int, data_queue, env_str="", skip_interval=0,
include_metadata=False, video_name='recording.mp4'):
"""
Enqueueing mechanism for loading a trajectory from a file onto the data_queue
:param file_dir: file path to data directory
:param skip_interval: Number of time steps to skip between each sample
:param max_seq_len: Number of time steps in each enqueued batch
:param data_queue: multiprocessing data queue, or None to return streams directly
:param include_metadata: whether or not to return an additional tuple containing metadata
:return:
"""
logger.debug("Loading from file {}".format(file_dir))
video_path = str(os.path.join(file_dir, video_name))
numpy_path = str(os.path.join(file_dir, 'rendered.npz'))
meta_path = str(os.path.join(file_dir, 'metadata.json'))
try:
# Start video decompression
cap = cv2.VideoCapture(video_path)
# Load numpy file
state = np.load(numpy_path, allow_pickle=True)
# Load metadata file
with open(meta_path) as file:
meta = json.load(file)
if 'stream_name' not in meta:
meta['stream_name'] = file_dir
action_dict = collections.OrderedDict([(key, state[key]) for key in state if key.startswith('action$')])
reward_vec = state['reward']
info_dict = collections.OrderedDict([(key, state[key]) for key in state if key.startswith('observation$')])
# Recursively sorts nested dicts
def recursive_sort(dct):
for key in list(dct.keys()):
if isinstance(dct[key], OrderedDict):
dct[key] = recursive_sort(dct[key])
dct[key] = OrderedDict(sorted(dct[key].items()))
return dct
def unflatten(dct, sep='$'):
out_dict = OrderedDict({})
for k, v in dct.items():
keys = k.split(sep)
cur_dict = out_dict
for key in keys[:-1]:
if key not in cur_dict:
cur_dict[key] = OrderedDict({})
cur_dict = cur_dict[key]
cur_dict[keys[-1]] = v
# Sort dict recursively
recursive_sort(out_dict)
return out_dict
# There is no action or reward for the terminal state of an episode.
# Hence in Publish.py we shorten the action and reward vector to reflect this.
# We know FOR SURE that the last video frame corresponds to the last state (from Universal.json).
num_states = len(reward_vec) + 1
max_frame_num = meta['true_video_frame_count']
frames = []
frame_num, stop_idx = 0, 0
# Advance video capture past first i-frame to start of experiment
cap = cv2.VideoCapture(video_path)
# for _ in range(max_frame_num - num_states):
# ret, _ = DataPipeline.read_frame(cap)
# frame_num += 1
# if not ret:
# raise RuntimeError()
# Rendered Frames
# Loop through the video and construct frames
# of observations to be sent via the multiprocessing queue
# in chunks of worker_batch_size to the batch_iter loop.
while True:
ret = True
start_idx = stop_idx
# Collect up to worker_batch_size number of frames
try:
# Go until max_seq_len +1 for S_t, A_t, -> R_t, S_{t+1}, D_{t+1}
while ret and frame_num < max_frame_num and (len(frames) < max_seq_len + 1 or max_seq_len == -1):
ret, frame = DataPipeline.read_frame(cap)
frames.append(frame)
frame_num += 1
except Exception as err:
logger.error("error reading capture device:", err)
raise err
if len(frames) <= 1:
break
if frame_num == max_frame_num:
frames[-1] = frames[-2]
# Next sarsd pair index
stop_idx = start_idx + len(frames) - 1
# print('Num frames in batch:', stop_idx - start_idx)
# Load non-image data from npz
current_observation_data = OrderedDict()
action_data = OrderedDict()
next_observation_data = OrderedDict()
try:
for key in list(info_dict.keys()) + ['observation$pov']:
if 'pov' in key:
current_observation_data[key] = np.asanyarray(frames[:-1])
next_observation_data[key] = np.asanyarray(frames[1:])
else:
current_observation_data[key] = np.asanyarray(info_dict[key][start_idx:stop_idx])
next_observation_data[key] = np.asanyarray(info_dict[key][start_idx + 1:stop_idx + 1])
# We are getting (S_t, A_t -> R_t), S_{t+1}, D_{t+1} so there are less actions and rewards
for key in action_dict:
action_data[key] = np.asanyarray(action_dict[key][start_idx: stop_idx])
reward_data = np.asanyarray(reward_vec[start_idx:stop_idx], dtype=np.float32)
done_data = [False for _ in range(len(reward_data))]
if frame_num == max_frame_num:
done_data[-1] = True
except Exception as err:
logger.error("error drawing batch from npz file:", err)
raise err
# unflatten these dictioanries.
current_observation_data = unflatten(current_observation_data)['observation']
action_data = unflatten(action_data)['action']
next_observation_data = unflatten(next_observation_data)['observation']
batches = [current_observation_data, action_data, reward_data, next_observation_data,
np.array(done_data, dtype=np.bool)]
if include_metadata:
batches += [meta]
if data_queue is None:
return batches
else:
data_queue.put(batches)
logger.debug("Enqueued from file {}".format(file_dir))
if not ret:
break
else:
frames = [frames[-1]]
logger.error("Finished")
return None
except WindowsError as e:
logger.debug("Caught windows error {} - this is expected when closing the data pool".format(e))
return None
except FileNotFoundError as e:
print("File not found!")
raise e
except Exception as e:
logger.error("Exception caught on file \"{}\" by a worker of the data pipeline.".format(file_dir))
logger.error(repr(e))
return None
    def batch_iter(self,
                   batch_size: int,
                   seq_len: int,
                   num_epochs: int = -1,
                   preload_buffer_size: int = 2,
                   seed: int = None,
                   include_metadata: bool = False):
        """Returns batches of sequences length SEQ_LEN of the data of size BATCH_SIZE.

        Trajectories are loaded by a background job streamer into a bounded
        queue and re-chunked into fixed-shape (s, a, r, s', d) minibatches.
        Args:
            batch_size (int): The batch size.
            seq_len (int): The size of sequences to produce.
            num_epochs (int, optional): The number of epochs to iterate over the data. Defaults to -1 (forever).
            preload_buffer_size (int, optional): Number of trajectories buffered ahead of the consumer. Defaults to 2.
            seed (int, optional): NOT IMPLEMENTED. Defaults to None.
            include_metadata (bool, optional): Include metadata on the source trajectory. Defaults to False.
        Returns:
            Generator: A generator that yields (obs, act, reward, next_obs, done) batch tuples.
        """
        # Todo: Not implemented/
        for epoch in (range(num_epochs) if num_epochs > 0 else forever()):
            # Bounded queue: the loader blocks once preload_buffer_size
            # trajectories are waiting, capping memory use.
            trajectory_queue = queue.Queue(maxsize=preload_buffer_size)
            def traj_iter():
                # NOTE: closes over ``jobs``, which is assigned below this
                # def — one queue item is consumed per scheduled job.
                for _ in jobs:
                    s, a, r, sp1, d = trajectory_queue.get()
                    yield dict(
                        obs=s,
                        act=a,
                        reward=r,
                        next_obs=sp1,
                        done=d
                    )
            # Each job is an argument tuple for the module-level ``job``
            # worker: (file_dir, max_seq_len=-1, data_queue=None).
            jobs = [(f, -1, None) for f in self._get_all_valid_recordings(self.data_dir)]
            np.random.shuffle(jobs)
            trajectory_loader = minerl.data.util.OrderedJobStreamer(
                job,
                jobs,
                trajectory_queue,
                # executor=concurrent.futures.ThreadPoolExecutor,
                max_workers=preload_buffer_size
            )
            trajectory_loader.start()
            # Re-chunk whole trajectories into (batch_size, seq_len) segments.
            for seg_batch in minibatch_gen(traj_iter(), batch_size=batch_size, nsteps=seq_len):
                yield seg_batch['obs'], seg_batch['act'], seg_batch['reward'], seg_batch['next_obs'], seg_batch['done']
            trajectory_loader.shutdown()
@staticmethod
def _is_blacklisted(path):
for p in [
'tempting_capers_shapeshifter-14'
]:
if p in path:
return True
return False
@staticmethod
def _get_all_valid_recordings(path):
directoryList = []
# return nothing if path is a file
if os.path.isfile(path):
return []
# Skip this file.
if DataPipeline._is_blacklisted(path):
return []
# add dir to directory list if it contains .txt files
if len([f for f in os.listdir(path) if f.endswith('.mp4')]) > 0:
if len([f for f in os.listdir(path) if f.endswith('.npz')]) > 0:
assert_prefix(path)
directoryList.append(path)
for d in os.listdir(path):
new_path = os.path.join(path, d)
if os.path.isdir(new_path):
directoryList += DataPipeline._get_all_valid_recordings(new_path)
directoryList = np.array(directoryList)
np.random.shuffle(directoryList)
return directoryList.tolist()
###
# DEPRECATED API
###
def seq_iter(self, num_epochs=-1, max_sequence_len=32, queue_size=None, seed=None, include_metadata=False):
"""DEPRECATED METHOD FOR SAMPLING DATA FROM THE MINERL DATASET.
This function is now :code:`DataPipeline.batch_iter()`
"""
raise DeprecationWarning(
"The `DataPipeline.seq_iter` method is deprecated! Please use DataPipeline.batch_iter()."
"\nNOTE: The new method `DataPipeline.batch_iter` has a different return signature! "
"\n\t Please see how to use it @ http://www.minerl.io/docs/tutorials/data_sampling.html")
def sarsd_iter(self, num_epochs=-1, max_sequence_len=32, queue_size=None, seed=None, include_metadata=False):
"""
Returns a generator for iterating through (state, action, reward, next_state, is_terminal)
tuples in the dataset.
Loads num_workers files at once as defined in minerl.data.make() and return up to
max_sequence_len consecutive samples wrapped in a dict observation space
Args:
num_epochs (int, optional): number of epochs to iterate over or -1
to loop forever. Defaults to -1
max_sequence_len (int, optional): maximum number of consecutive samples - may be less. Defaults to 32
seed (int, optional): seed for random directory walk - note, specifying seed as well as a finite num_epochs
will cause the ordering of examples to be the same after every call to seq_iter
queue_size (int, optional): maximum number of elements to buffer at a time, each worker may hold an
additional item while waiting to enqueue. Defaults to 16*self.number_of_workers or 2*
self.number_of_workers if max_sequence_len == -1
include_metadata (bool, optional): adds an additional member to the tuple containing metadata about the
stream the data was loaded from. Defaults to False
Yields:
A tuple of (state, player_action, reward_from_action, next_state, is_next_state_terminal, (metadata)).
Each element is in the format of the environment action/state/reward space and contains as many
samples are requested.
"""
raise DeprecationWarning(
"The `DataPipeline.sarsd_iter` method is deprecated! Please use DataPipeline.batch_iter().")
def job(arg):
    """Worker entry point: forward a packed argument tuple to
    DataPipeline._load_data_pyfunc (kept at module level so worker
    frameworks can reference it by name)."""
    return DataPipeline._load_data_pyfunc(*arg)
| [
"logging.getLogger",
"numpy.clip",
"numpy.asanyarray",
"numpy.array",
"copy.deepcopy",
"minerl.data.version.assert_prefix",
"os.listdir",
"os.path.isdir",
"minerl.data.util.forever",
"collections.OrderedDict",
"os.path.isfile",
"cv2.cvtColor",
"itertools.islice",
"gym.envs.registration.spe... | [((393, 420), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (410, 420), False, 'import logging\n'), ((1962, 2006), 'multiprocessing.Pool', 'multiprocessing.Pool', (['self.number_of_workers'], {}), '(self.number_of_workers)\n', (1982, 2006), False, 'import multiprocessing\n'), ((4271, 4315), 'gym.envs.registration.spec', 'gym.envs.registration.spec', (['self.environment'], {}), '(self.environment)\n', (4297, 4315), False, 'import gym\n'), ((4339, 4391), 'copy.deepcopy', 'copy.deepcopy', (["gym_spec._kwargs['observation_space']"], {}), "(gym_spec._kwargs['observation_space'])\n", (4352, 4391), False, 'import copy\n'), ((16574, 16594), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (16588, 16594), False, 'import os\n'), ((17024, 17040), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (17034, 17040), False, 'import os\n'), ((17234, 17257), 'numpy.array', 'np.array', (['directoryList'], {}), '(directoryList)\n', (17242, 17257), True, 'import numpy as np\n'), ((17266, 17298), 'numpy.random.shuffle', 'np.random.shuffle', (['directoryList'], {}), '(directoryList)\n', (17283, 17298), True, 'import numpy as np\n'), ((3574, 3614), 'os.path.join', 'os.path.join', (['self.data_dir', 'stream_name'], {}), '(self.data_dir, stream_name)\n', (3586, 3614), False, 'import os\n'), ((5273, 5292), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (5289, 5292), False, 'import os\n'), ((7174, 7208), 'os.path.join', 'os.path.join', (['file_dir', 'video_name'], {}), '(file_dir, video_name)\n', (7186, 7208), False, 'import os\n'), ((7235, 7273), 'os.path.join', 'os.path.join', (['file_dir', '"""rendered.npz"""'], {}), "(file_dir, 'rendered.npz')\n", (7247, 7273), False, 'import os\n'), ((7299, 7338), 'os.path.join', 'os.path.join', (['file_dir', '"""metadata.json"""'], {}), "(file_dir, 'metadata.json')\n", (7311, 7338), False, 'import os\n'), ((7412, 7440), 'cv2.VideoCapture', 
'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (7428, 7440), False, 'import cv2\n'), ((7492, 7530), 'numpy.load', 'np.load', (['numpy_path'], {'allow_pickle': '(True)'}), '(numpy_path, allow_pickle=True)\n', (7499, 7530), True, 'import numpy as np\n'), ((9478, 9506), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (9494, 9506), False, 'import cv2\n'), ((15107, 15116), 'minerl.data.util.forever', 'forever', ([], {}), '()\n', (15114, 15116), False, 'from minerl.data.util import forever, minibatch_gen\n'), ((15150, 15190), 'queue.Queue', 'queue.Queue', ([], {'maxsize': 'preload_buffer_size'}), '(maxsize=preload_buffer_size)\n', (15161, 15190), False, 'import queue\n'), ((15635, 15658), 'numpy.random.shuffle', 'np.random.shuffle', (['jobs'], {}), '(jobs)\n', (15652, 15658), True, 'import numpy as np\n'), ((17065, 17086), 'os.path.join', 'os.path.join', (['path', 'd'], {}), '(path, d)\n', (17077, 17086), False, 'import os\n'), ((17102, 17125), 'os.path.isdir', 'os.path.isdir', (['new_path'], {}), '(new_path)\n', (17115, 17125), False, 'import os\n'), ((2033, 2077), 'gym.envs.registration.spec', 'gym.envs.registration.spec', (['self.environment'], {}), '(self.environment)\n', (2059, 2077), False, 'import gym\n'), ((2127, 2171), 'gym.envs.registration.spec', 'gym.envs.registration.spec', (['self.environment'], {}), '(self.environment)\n', (2153, 2171), False, 'import gym\n'), ((2230, 2274), 'gym.envs.registration.spec', 'gym.envs.registration.spec', (['self.environment'], {}), '(self.environment)\n', (2256, 2274), False, 'import gym\n'), ((5579, 5633), 'cv2.cvtColor', 'cv2.cvtColor', (['frame'], {'code': 'cv2.COLOR_BGR2RGB', 'dst': 'frame'}), '(frame, code=cv2.COLOR_BGR2RGB, dst=frame)\n', (5591, 5633), False, 'import cv2\n'), ((7630, 7645), 'json.load', 'json.load', (['file'], {}), '(file)\n', (7639, 7645), False, 'import json\n'), ((8437, 8452), 'collections.OrderedDict', 'OrderedDict', (['{}'], {}), '({})\n', (8448, 8452), 
False, 'from collections import OrderedDict\n'), ((11044, 11057), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (11055, 11057), False, 'from collections import OrderedDict\n'), ((11088, 11101), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (11099, 11101), False, 'from collections import OrderedDict\n'), ((11142, 11155), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (11153, 11155), False, 'from collections import OrderedDict\n'), ((16943, 16962), 'minerl.data.version.assert_prefix', 'assert_prefix', (['path'], {}), '(path)\n', (16956, 16962), False, 'from minerl.data.version import assert_version, assert_prefix\n'), ((5669, 5691), 'numpy.clip', 'np.clip', (['frame', '(0)', '(255)'], {}), '(frame, 0, 255)\n', (5676, 5691), True, 'import numpy as np\n'), ((12009, 12072), 'numpy.asanyarray', 'np.asanyarray', (['reward_vec[start_idx:stop_idx]'], {'dtype': 'np.float32'}), '(reward_vec[start_idx:stop_idx], dtype=np.float32)\n', (12022, 12072), True, 'import numpy as np\n'), ((12814, 12848), 'numpy.array', 'np.array', (['done_data'], {'dtype': 'np.bool'}), '(done_data, dtype=np.bool)\n', (12822, 12848), True, 'import numpy as np\n'), ((6292, 6314), 'itertools.islice', 'islice', (['nexts', 'pending'], {}), '(nexts, pending)\n', (6298, 6314), False, 'from itertools import cycle, islice\n'), ((11921, 11972), 'numpy.asanyarray', 'np.asanyarray', (['action_dict[key][start_idx:stop_idx]'], {}), '(action_dict[key][start_idx:stop_idx])\n', (11934, 11972), True, 'import numpy as np\n'), ((16804, 16820), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (16814, 16820), False, 'import os\n'), ((8708, 8723), 'collections.OrderedDict', 'OrderedDict', (['{}'], {}), '({})\n', (8719, 8723), False, 'from collections import OrderedDict\n'), ((11355, 11381), 'numpy.asanyarray', 'np.asanyarray', (['frames[:-1]'], {}), '(frames[:-1])\n', (11368, 11381), True, 'import numpy as np\n'), ((11439, 11464), 'numpy.asanyarray', 'np.asanyarray', 
(['frames[1:]'], {}), '(frames[1:])\n', (11452, 11464), True, 'import numpy as np\n'), ((11555, 11604), 'numpy.asanyarray', 'np.asanyarray', (['info_dict[key][start_idx:stop_idx]'], {}), '(info_dict[key][start_idx:stop_idx])\n', (11568, 11604), True, 'import numpy as np\n'), ((11662, 11719), 'numpy.asanyarray', 'np.asanyarray', (['info_dict[key][start_idx + 1:stop_idx + 1]'], {}), '(info_dict[key][start_idx + 1:stop_idx + 1])\n', (11675, 11719), True, 'import numpy as np\n'), ((16881, 16897), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (16891, 16897), False, 'import os\n')] |
# Copyright 2021 MosaicML. All Rights Reserved.
"""The CPU device used for training."""
from __future__ import annotations
import logging
from contextlib import contextmanager
from typing import Any, Dict, Generator, TypeVar, Union
import torch
from composer.core import Precision
from composer.trainer.devices.device import Device, T_nnModule
logger = logging.getLogger(__name__)
__all__ = ["DeviceCPU"]
T_nnModule = TypeVar("T_nnModule", bound=torch.nn.Module)
class DeviceCPU(Device):
    """An extension of :class:`~composer.trainer.devices.device.Device` for CPUs.
    This class takes no arguments.
    """
    # Distributed backend used for CPU collectives.
    dist_backend = "gloo"
    _device = torch.device('cpu')
    def module_to_device(self, module: T_nnModule) -> T_nnModule:
        """Move *module* onto the CPU and return it."""
        target = self._device
        return module.to(target)
    def tensor_to_device(self, tensor: torch.Tensor) -> torch.Tensor:
        """Move *tensor* onto the CPU and return it."""
        target = self._device
        return tensor.to(target)
    @contextmanager
    def precision_context(self, precision: Union[str, Precision]) -> Generator[None, None, None]:
        """Yield a context with *precision* active; only FP32 is valid on CPU."""
        precision = Precision(precision)
        if precision != Precision.FP32:
            raise ValueError(f"Precision {precision} not supported for a CPU")
        yield
    def state_dict(self) -> Dict[str, Any]:
        """Return an empty dict: a CPU device carries no RNG state."""
        return {}
    def load_state_dict(self, state: Dict[str, Any]) -> None:
        """Reject non-empty state; there is nothing to restore on a CPU device."""
        if state:
            raise ValueError("CPU device has no state.")
| [
"logging.getLogger",
"composer.core.Precision",
"torch.device",
"typing.TypeVar"
] | [((359, 386), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (376, 386), False, 'import logging\n'), ((426, 470), 'typing.TypeVar', 'TypeVar', (['"""T_nnModule"""'], {'bound': 'torch.nn.Module'}), "('T_nnModule', bound=torch.nn.Module)\n", (433, 470), False, 'from typing import Any, Dict, Generator, TypeVar, Union\n'), ((665, 684), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (677, 684), False, 'import torch\n'), ((1040, 1060), 'composer.core.Precision', 'Precision', (['precision'], {}), '(precision)\n', (1049, 1060), False, 'from composer.core import Precision\n')] |
import os
import re
import json
import ast
import csv
import sys
import shutil
# ["allreduce-lambf16", "reducescatter-lamb-allgatherf16", "test-lambf16"] + \
# ["allreduce-adamf16", "reducescatter-adam-allgatherf16", "test-adamf16"] +\
all_binaries = ["adam-ar-c", "adam-rs-c-ag", "adam-fuse-rs-c-ag"] + \
["lamb-ar-c", "lamb-rs-c-ag", "lamb-fuse-rs-c-ag"] + \
["python3 optimbench.py --optimizer FusedLAMB --fp16", "python3 optimbench.py --optimizer FusedLAMB", "python3 optimbench.py --optimizer FusedAdam --fp16", "python3 optimbench.py --optimizer FusedAdam"] + \
["multi-process-adam-scattered lamb", "multi-process-adam-scattered adam", "multi-process-adam-scatteredf16 adam", "multi-process-adam-scatteredf16 lamb"]
all_gpus = [2**i for i in range(1, 9)]
all_channels = [2,16,32,64,80]
all_algos = ["ring", "tree"]
all_protocols = ["ll", "ll128", "simple", "default"]
all_sizes = [2**i for i in range(10, 30+1)] + [335708160]
bert_layer_size = 335708160
def slurp(file_path):
    """Return the entire contents of *file_path* as a string.

    Uses a context manager so the handle is always closed — the original
    leaked the file descriptor if ``read()`` raised.
    """
    with open(file_path, "r") as f:
        return f.read()
# Extractors that recover run configuration fields from a job's name.
def binary_from_job_name(job_name):
    """Return the binary name embedded in *job_name* as ``binary=<name>-p1``."""
    matches = re.findall(r'binary=(.+)-p1', job_name)
    return matches[0]
def gpu_from_job_name(job_name):
    """Return the GPU-count field (``gpu=<n>-``) of *job_name* as a string."""
    matches = re.findall(r'gpu=(.+?)-', job_name)
    return matches[0]
def channels_from_job_name(job_name):
    """Return the channel-count field (``channels=<n>-``) of *job_name* as a string."""
    matches = re.findall(r'channels=(.+?)-', job_name)
    return matches[0]
def algo_from_job_name(job_name):
    """Return the NCCL algorithm field (``algo=<name>-``) of *job_name*."""
    matches = re.findall(r'algo=(.+?)-', job_name)
    return matches[0]
def protocol_from_job_name(job_name):
    """Return the NCCL protocol field (``protocol=<name>!``) of *job_name*."""
    matches = re.findall(r'protocol=(.+?)!', job_name)
    return matches[0]
# Parse the measurement records a binary prints to stdout.
def process_stdout(stdout_txt):
    """Parse every ``{...}`` record in *stdout_txt* into a dict keyed by the
    record's "SZ" field.

    The records use bare (unquoted) keys, so quotes are injected around each
    key before ``ast.literal_eval``; leftover whitespace around keys is then
    stripped.
    """
    records = {}
    for raw in re.findall(r"{.+}", stdout_txt):
        quoted = raw.replace("{", '{"').replace(":", '":').replace(",", ',"')
        parsed = ast.literal_eval(quoted)
        parsed = {key.strip(): value for key, value in parsed.items()}
        records[parsed["SZ"]] = parsed
    return records
# A Dictionary of Binary X # of GPUs X # of Channel X Algorithms X Protocols,
# with an empty leaf dict per (binary, gpu, channel, algo, protocol) slot.
full_data_dict = {
    binary: {
        gpu: {
            channel: {
                algo: {protocol: {} for protocol in all_protocols}
                for algo in all_algos
            }
            for channel in all_channels
        }
        for gpu in all_gpus
    }
    for binary in all_binaries
}
def process_dir(_dir):
    """Parse one experiment directory and record its results in full_data_dict.

    Reads the run command from ``json.json`` to recover the configuration
    (binary, gpus, channels, algo, protocol), parses ``stdout.txt`` into
    per-size measurement records, and stores them under the matching slot.
    Directories whose stdout yields no records are skipped.
    """
    command = slurp(os.path.join(_dir, "json.json"))
    # Identify which benchmark binary produced this run.
    binary = ""
    for b in all_binaries:
        if b in command:
            binary = b
            break
    gpus = int(re.findall(r"-np (\d+)", command)[0])
    channels = int(re.findall(r"NCCL_MIN_NCHANNELS=(\d+)", command)[0])
    algo = re.findall(r"NCCL_ALGO=(\w+)", command)[0].lower()
    if "NCCL_PROTO" in command:
        protocol = re.findall(r"NCCL_PROTO=([\w\d]+)", command)[0].lower()
    else:
        # No explicit protocol means NCCL chose its default.
        protocol = "default"
    assert binary in all_binaries, "Possible invalid binary name '%s'"%binary
    assert gpus in all_gpus, "Possible invalid number of gpus '%s'"%gpus
    assert channels in all_channels, "Possible invalid number of channels '%s'"%channels
    assert algo in all_algos, "Possible invalid number of algo '%s'"%algo
    assert protocol in all_protocols, "Possible invalid number of protocol '%s'"%protocol
    stdout_txt = slurp(os.path.join(_dir, "stdout.txt"))
    data = process_stdout(stdout_txt)
    # Skip runs that produced no parsable output instead of overwriting the
    # slot with an empty dict. (Removed an unused `prev_data` read and an
    # unnecessary `global` — only item assignment is performed below.)
    if len(data) == 0:
        return
    full_data_dict[binary][gpus][channels][algo][protocol] = data
def get_time(d):
    """Return the timing value from a measurement record, checking the known
    time keys in priority order ("TotalTime", "Total", "Time")."""
    for time_key in ("TotalTime", "Total", "Time"):
        if time_key in d:
            return d[time_key]
    raise Exception("Time not found in " + str(d))
def process_results_dir(results_dir):
    """Run process_dir on every subdirectory of *results_dir*."""
    for entry in os.listdir(results_dir):
        entry_path = os.path.join(results_dir, entry)
        if not os.path.isdir(entry_path):
            continue
        process_dir(entry_path)
| [
"os.listdir",
"os.path.join",
"ast.literal_eval",
"os.path.isdir",
"re.findall"
] | [((1630, 1660), 're.findall', 're.findall', (['"""{.+}"""', 'stdout_txt'], {}), "('{.+}', stdout_txt)\n", (1640, 1660), False, 'import re\n'), ((2585, 2616), 'os.path.join', 'os.path.join', (['_dir', '"""json.json"""'], {}), "(_dir, 'json.json')\n", (2597, 2616), False, 'import os\n'), ((4062, 4085), 'os.listdir', 'os.listdir', (['results_dir'], {}), '(results_dir)\n', (4072, 4085), False, 'import os\n'), ((1152, 1190), 're.findall', 're.findall', (['"""binary=(.+)-p1"""', 'job_name'], {}), "('binary=(.+)-p1', job_name)\n", (1162, 1190), False, 'import re\n'), ((1239, 1273), 're.findall', 're.findall', (['"""gpu=(.+?)-"""', 'job_name'], {}), "('gpu=(.+?)-', job_name)\n", (1249, 1273), False, 'import re\n'), ((1327, 1366), 're.findall', 're.findall', (['"""channels=(.+?)-"""', 'job_name'], {}), "('channels=(.+?)-', job_name)\n", (1337, 1366), False, 'import re\n'), ((1416, 1451), 're.findall', 're.findall', (['"""algo=(.+?)-"""', 'job_name'], {}), "('algo=(.+?)-', job_name)\n", (1426, 1451), False, 'import re\n'), ((1505, 1544), 're.findall', 're.findall', (['"""protocol=(.+?)!"""', 'job_name'], {}), "('protocol=(.+?)!', job_name)\n", (1515, 1544), False, 'import re\n'), ((1818, 1837), 'ast.literal_eval', 'ast.literal_eval', (['i'], {}), '(i)\n', (1834, 1837), False, 'import ast\n'), ((3511, 3543), 'os.path.join', 'os.path.join', (['_dir', '"""stdout.txt"""'], {}), "(_dir, 'stdout.txt')\n", (3523, 3543), False, 'import os\n'), ((4107, 4135), 'os.path.join', 'os.path.join', (['results_dir', 'd'], {}), '(results_dir, d)\n', (4119, 4135), False, 'import os\n'), ((4147, 4171), 'os.path.isdir', 'os.path.isdir', (['full_path'], {}), '(full_path)\n', (4160, 4171), False, 'import os\n'), ((2765, 2798), 're.findall', 're.findall', (['"""-np (\\\\d+)"""', 'command'], {}), "('-np (\\\\d+)', command)\n", (2775, 2798), False, 'import re\n'), ((2822, 2870), 're.findall', 're.findall', (['"""NCCL_MIN_NCHANNELS=(\\\\d+)"""', 'command'], {}), "('NCCL_MIN_NCHANNELS=(\\\\d+)', 
command)\n", (2832, 2870), False, 'import re\n'), ((2886, 2925), 're.findall', 're.findall', (['"""NCCL_ALGO=(\\\\w+)"""', 'command'], {}), "('NCCL_ALGO=(\\\\w+)', command)\n", (2896, 2925), False, 'import re\n'), ((2988, 3033), 're.findall', 're.findall', (['"""NCCL_PROTO=([\\\\w\\\\d]+)"""', 'command'], {}), "('NCCL_PROTO=([\\\\w\\\\d]+)', command)\n", (2998, 3033), False, 'import re\n')] |
from typing import Type, TypeVar, MutableMapping, Any, Iterable
from datapipelines import DataSource, DataSink, PipelineContext, Query, validate_query
from cassiopeia_championgg.dto import ChampionGGStatsListDto, ChampionGGStatsDto
from cassiopeia.datastores.uniquekeys import convert_region_to_platform
from .common import SimpleKVDiskService
T = TypeVar("T")
class ChampionGGDiskService(SimpleKVDiskService):
    """Disk-backed key/value cache for champion.gg statistics.

    Uses datapipelines' dispatch decorators: the generic ``get``/``put``
    stubs fan out to the type-specific handlers registered on them below
    (e.g. ``get_champion_list`` handles ``ChampionGGStatsListDto``).
    """
    @DataSource.dispatch
    def get(self, type: Type[T], query: MutableMapping[str, Any], context: PipelineContext = None) -> T:
        pass
    @DataSource.dispatch
    def get_many(self, type: Type[T], query: MutableMapping[str, Any], context: PipelineContext = None) -> Iterable[T]:
        pass
    @DataSink.dispatch
    def put(self, type: Type[T], item: T, context: PipelineContext = None) -> None:
        pass
    @DataSink.dispatch
    def put_many(self, type: Type[T], items: Iterable[T], context: PipelineContext = None) -> None:
        pass
    # Query validator: "patch" is required; "elo" defaults to the combined
    # high-elo bucket.
    _validate_get_gg_champion_list_query = Query. \
        has("patch").as_(str).also. \
        can_have("elo").with_default(lambda *args, **kwargs: "PLATINUM_DIAMOND_MASTER_CHALLENGER", supplies_type=str)
    @get.register(ChampionGGStatsListDto)
    @validate_query(_validate_get_gg_champion_list_query, convert_region_to_platform)
    def get_champion_list(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> ChampionGGStatsListDto:
        """Load the cached champion stats list for (patch, elo) from disk."""
        patch = query["patch"]
        elo = query["elo"]
        # The cache key combines the DTO class name with the query fields.
        key = "{clsname}.{patch}.{elo}".format(clsname=ChampionGGStatsListDto.__name__,
                                              patch=patch,
                                              elo=elo)
        data = self._get(key)
        # Stored rows are plain dicts; rehydrate each into a stats DTO.
        data["data"] = [ChampionGGStatsDto(champion) for champion in data["data"]]
        return ChampionGGStatsListDto(data)
    @put.register(ChampionGGStatsListDto)
    def put_champion_list(self, item: ChampionGGStatsListDto, context: PipelineContext = None) -> None:
        """Persist a champion stats list under its (patch, elo) cache key."""
        key = "{clsname}.{patch}.{elo}".format(clsname=ChampionGGStatsListDto.__name__,
                                              patch=item["patch"],
                                              elo=item["elo"])
        self._put(key, item)
| [
"typing.TypeVar",
"cassiopeia_championgg.dto.ChampionGGStatsListDto",
"cassiopeia_championgg.dto.ChampionGGStatsDto",
"datapipelines.validate_query",
"datapipelines.Query.has"
] | [((351, 363), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (358, 363), False, 'from typing import Type, TypeVar, MutableMapping, Any, Iterable\n'), ((1234, 1319), 'datapipelines.validate_query', 'validate_query', (['_validate_get_gg_champion_list_query', 'convert_region_to_platform'], {}), '(_validate_get_gg_champion_list_query, convert_region_to_platform\n )\n', (1248, 1319), False, 'from datapipelines import DataSource, DataSink, PipelineContext, Query, validate_query\n'), ((1830, 1858), 'cassiopeia_championgg.dto.ChampionGGStatsListDto', 'ChampionGGStatsListDto', (['data'], {}), '(data)\n', (1852, 1858), False, 'from cassiopeia_championgg.dto import ChampionGGStatsListDto, ChampionGGStatsDto\n'), ((1756, 1784), 'cassiopeia_championgg.dto.ChampionGGStatsDto', 'ChampionGGStatsDto', (['champion'], {}), '(champion)\n', (1774, 1784), False, 'from cassiopeia_championgg.dto import ChampionGGStatsListDto, ChampionGGStatsDto\n'), ((1021, 1039), 'datapipelines.Query.has', 'Query.has', (['"""patch"""'], {}), "('patch')\n", (1030, 1039), False, 'from datapipelines import DataSource, DataSink, PipelineContext, Query, validate_query\n')] |
"""
Tests for pika.frame
"""
try:
import unittest2 as unittest
except ImportError:
import unittest
from pika import exceptions
from pika import frame
from pika import spec
class FrameTests(unittest.TestCase):
    """Round-trip tests for pika frame marshalling and decoding.

    The class constants below are exact AMQP wire bytes (frame type, channel,
    length, payload, frame-end 0xCE) used as fixtures — written as ``str``
    literals, which indicates Python 2-era code. NOTE(review): test method
    names end in ``_test`` (nose-style) rather than starting with ``test_``.
    """
    # Basic.Ack method frame for channel 1, delivery tag 100.
    BASIC_ACK = ('\x01\x00\x01\x00\x00\x00\r\x00<\x00P\x00\x00\x00\x00\x00\x00'
                 '\x00d\x00\xce')
    # Body frame carrying BODY_FRAME_VALUE on channel 1.
    BODY_FRAME = '\x03\x00\x01\x00\x00\x00\x14I like it that sound\xce'
    BODY_FRAME_VALUE = 'I like it that sound'
    # Content header with body size 100 and delivery_mode=2.
    CONTENT_HEADER = ('\x02\x00\x01\x00\x00\x00\x0f\x00<\x00\x00\x00'
                      '\x00\x00\x00\x00\x00\x00d\x10\x00\x02\xce')
    HEARTBEAT = '\x08\x00\x00\x00\x00\x00\x00\xce'
    PROTOCOL_HEADER = 'AMQP\x00\x00\t\x01'
    def frame_marshal_not_implemented_test(self):
        # The Frame base class cannot be marshalled directly.
        frame_obj = frame.Frame(0x000A000B, 1)
        self.assertRaises(NotImplementedError, frame_obj.marshal)
    def frame_underscore_marshal_test(self):
        basic_ack = frame.Method(1, spec.Basic.Ack(100))
        self.assertEqual(basic_ack.marshal(), self.BASIC_ACK)
    def headers_marshal_test(self):
        header = frame.Header(1, 100,
                             spec.BasicProperties(delivery_mode=2))
        self.assertEqual(header.marshal(), self.CONTENT_HEADER)
    def body_marshal_test(self):
        body = frame.Body(1, 'I like it that sound')
        self.assertEqual(body.marshal(), self.BODY_FRAME)
    def heartbeat_marshal_test(self):
        heartbeat = frame.Heartbeat()
        self.assertEqual(heartbeat.marshal(), self.HEARTBEAT)
    def protocol_header_marshal_test(self):
        protocol_header = frame.ProtocolHeader()
        self.assertEqual(protocol_header.marshal(), self.PROTOCOL_HEADER)
    # decode_frame returns (bytes_consumed, frame_or_None); the pairs of
    # tests below check the instance type and the consumed-byte count.
    def decode_protocol_header_instance_test(self):
        self.assertIsInstance(frame.decode_frame(self.PROTOCOL_HEADER)[1],
                              frame.ProtocolHeader)
    def decode_protocol_header_bytes_test(self):
        self.assertEqual(frame.decode_frame(self.PROTOCOL_HEADER)[0], 8)
    def decode_method_frame_instance_test(self):
        self.assertIsInstance(frame.decode_frame(self.BASIC_ACK)[1],
                              frame.Method)
    def decode_protocol_header_failure_test(self):
        # A truncated/invalid header decodes to (0, None).
        self.assertEqual(frame.decode_frame('AMQPa'), (0, None))
    def decode_method_frame_bytes_test(self):
        self.assertEqual(frame.decode_frame(self.BASIC_ACK)[0], 21)
    def decode_method_frame_method_test(self):
        self.assertIsInstance(frame.decode_frame(self.BASIC_ACK)[1].method,
                              spec.Basic.Ack)
    def decode_header_frame_instance_test(self):
        self.assertIsInstance(frame.decode_frame(self.CONTENT_HEADER)[1],
                              frame.Header)
    def decode_header_frame_bytes_test(self):
        self.assertEqual(frame.decode_frame(self.CONTENT_HEADER)[0], 23)
    def decode_header_frame_properties_test(self):
        frame_value = frame.decode_frame(self.CONTENT_HEADER)[1]
        self.assertIsInstance(frame_value.properties, spec.BasicProperties)
    def decode_frame_decoding_failure_test(self):
        self.assertEqual(frame.decode_frame('\x01\x00\x01\x00\x00\xce'),
                         (0, None))
    def decode_frame_decoding_no_end_byte_test(self):
        # Incomplete frame (missing 0xCE terminator): nothing is consumed.
        self.assertEqual(frame.decode_frame(self.BASIC_ACK[:-1]), (0, None))
    def decode_frame_decoding_wrong_end_byte_test(self):
        # A wrong terminator byte is a hard protocol error, not a retry.
        self.assertRaises(exceptions.InvalidFrameError,
                          frame.decode_frame,
                          self.BASIC_ACK[:-1] + 'A')
    def decode_body_frame_instance_test(self):
        self.assertIsInstance(frame.decode_frame(self.BODY_FRAME)[1],
                              frame.Body)
    def decode_body_frame_fragment_test(self):
        self.assertEqual(frame.decode_frame(self.BODY_FRAME)[1].fragment,
                         self.BODY_FRAME_VALUE)
    def decode_body_frame_fragment_consumed_bytes_test(self):
        self.assertEqual(frame.decode_frame(self.BODY_FRAME)[0], 28)
    def decode_heartbeat_frame_test(self):
        self.assertIsInstance(frame.decode_frame(self.HEARTBEAT)[1],
                              frame.Heartbeat)
    def decode_heartbeat_frame_bytes_consumed_test(self):
        self.assertEqual(frame.decode_frame(self.HEARTBEAT)[0], 8)
    def decode_frame_invalid_frame_type_test(self):
        # Frame type 0x09 is not defined by AMQP.
        self.assertRaises(exceptions.InvalidFrameError,
                          frame.decode_frame,
                          '\x09\x00\x00\x00\x00\x00\x00\xce')
| [
"pika.frame.ProtocolHeader",
"pika.spec.Basic.Ack",
"pika.spec.BasicProperties",
"pika.frame.Body",
"pika.frame.Frame",
"pika.frame.decode_frame",
"pika.frame.Heartbeat"
] | [((756, 778), 'pika.frame.Frame', 'frame.Frame', (['(655371)', '(1)'], {}), '(655371, 1)\n', (767, 778), False, 'from pika import frame\n'), ((1271, 1308), 'pika.frame.Body', 'frame.Body', (['(1)', '"""I like it that sound"""'], {}), "(1, 'I like it that sound')\n", (1281, 1308), False, 'from pika import frame\n'), ((1426, 1443), 'pika.frame.Heartbeat', 'frame.Heartbeat', ([], {}), '()\n', (1441, 1443), False, 'from pika import frame\n'), ((1577, 1599), 'pika.frame.ProtocolHeader', 'frame.ProtocolHeader', ([], {}), '()\n', (1597, 1599), False, 'from pika import frame\n'), ((931, 950), 'pika.spec.Basic.Ack', 'spec.Basic.Ack', (['(100)'], {}), '(100)\n', (945, 950), False, 'from pika import spec\n'), ((1119, 1156), 'pika.spec.BasicProperties', 'spec.BasicProperties', ([], {'delivery_mode': '(2)'}), '(delivery_mode=2)\n', (1139, 1156), False, 'from pika import spec\n'), ((2217, 2244), 'pika.frame.decode_frame', 'frame.decode_frame', (['"""AMQPa"""'], {}), "('AMQPa')\n", (2235, 2244), False, 'from pika import frame\n'), ((2904, 2943), 'pika.frame.decode_frame', 'frame.decode_frame', (['self.CONTENT_HEADER'], {}), '(self.CONTENT_HEADER)\n', (2922, 2943), False, 'from pika import frame\n'), ((3099, 3142), 'pika.frame.decode_frame', 'frame.decode_frame', (["'\\x01\\x00\\x01\\x00\\x00Î'"], {}), "('\\x01\\x00\\x01\\x00\\x00Î')\n", (3117, 3142), False, 'from pika import frame\n'), ((3263, 3302), 'pika.frame.decode_frame', 'frame.decode_frame', (['self.BASIC_ACK[:-1]'], {}), '(self.BASIC_ACK[:-1])\n', (3281, 3302), False, 'from pika import frame\n'), ((1757, 1797), 'pika.frame.decode_frame', 'frame.decode_frame', (['self.PROTOCOL_HEADER'], {}), '(self.PROTOCOL_HEADER)\n', (1775, 1797), False, 'from pika import frame\n'), ((1929, 1969), 'pika.frame.decode_frame', 'frame.decode_frame', (['self.PROTOCOL_HEADER'], {}), '(self.PROTOCOL_HEADER)\n', (1947, 1969), False, 'from pika import frame\n'), ((2057, 2091), 'pika.frame.decode_frame', 'frame.decode_frame', 
(['self.BASIC_ACK'], {}), '(self.BASIC_ACK)\n', (2075, 2091), False, 'from pika import frame\n'), ((2329, 2363), 'pika.frame.decode_frame', 'frame.decode_frame', (['self.BASIC_ACK'], {}), '(self.BASIC_ACK)\n', (2347, 2363), False, 'from pika import frame\n'), ((2622, 2661), 'pika.frame.decode_frame', 'frame.decode_frame', (['self.CONTENT_HEADER'], {}), '(self.CONTENT_HEADER)\n', (2640, 2661), False, 'from pika import frame\n'), ((2782, 2821), 'pika.frame.decode_frame', 'frame.decode_frame', (['self.CONTENT_HEADER'], {}), '(self.CONTENT_HEADER)\n', (2800, 2821), False, 'from pika import frame\n'), ((3606, 3641), 'pika.frame.decode_frame', 'frame.decode_frame', (['self.BODY_FRAME'], {}), '(self.BODY_FRAME)\n', (3624, 3641), False, 'from pika import frame\n'), ((3946, 3981), 'pika.frame.decode_frame', 'frame.decode_frame', (['self.BODY_FRAME'], {}), '(self.BODY_FRAME)\n', (3964, 3981), False, 'from pika import frame\n'), ((4064, 4098), 'pika.frame.decode_frame', 'frame.decode_frame', (['self.HEARTBEAT'], {}), '(self.HEARTBEAT)\n', (4082, 4098), False, 'from pika import frame\n'), ((4234, 4268), 'pika.frame.decode_frame', 'frame.decode_frame', (['self.HEARTBEAT'], {}), '(self.HEARTBEAT)\n', (4252, 4268), False, 'from pika import frame\n'), ((2450, 2484), 'pika.frame.decode_frame', 'frame.decode_frame', (['self.BASIC_ACK'], {}), '(self.BASIC_ACK)\n', (2468, 2484), False, 'from pika import frame\n'), ((3761, 3796), 'pika.frame.decode_frame', 'frame.decode_frame', (['self.BODY_FRAME'], {}), '(self.BODY_FRAME)\n', (3779, 3796), False, 'from pika import frame\n')] |
import logging
from datetime import datetime, timedelta
import requests
from core.utils.customClasses import UserFilter
from core.utils.default_responses import (api_accepted_202,
api_bad_request_400,
api_block_by_policy_451,
api_created_201,
api_payment_required_402)
from core.utils.func import REF_PERCANTAGE, create_ref_link
from django.contrib.auth import authenticate
from django.shortcuts import get_object_or_404
from rest_framework import generics, permissions
from rest_framework.authtoken.models import Token
from rest_framework.generics import GenericAPIView
from rest_framework.mixins import UpdateModelMixin
from rest_framework.response import Response
from rest_framework.views import APIView
from apps.blog.models import PostAction, PostBought
from apps.blog.serializers import PostGetShortSerializers
from .models import *
from .serializers import *
class UserActivationView(APIView):
    """Relay an activation GET (from an e-mail link) as a POST to the
    auth backend's activation endpoint on the same host."""

    def get(self, request, uid, token):
        scheme = 'https://' if request.is_secure() else 'http://'
        activation_url = scheme + request.get_host() + "/auth/users/activate/"
        payload = {'uid': uid, 'token': token}
        result = requests.post(activation_url, data=payload)
        return Response(result.text)
class UserMeRetrieveAPI(generics.RetrieveAPIView):
    # Returns the authenticated user's own profile (UserMeSerializer shape).
    queryset = User.objects.all()
    serializer_class = UserMeSerializer
    def get_object(self):
        # Always the requesting user; any URL pk is ignored.
        return self.request.user
class UserRetrieveAPI(generics.RetrieveAPIView):
    # Returns the requesting user's own profile (UserOwnProfileGetSerializer shape).
    queryset = User.objects.all()
    serializer_class = UserOwnProfileGetSerializer
    def get_object(self):
        # Always the requesting user; any URL pk is ignored.
        return self.request.user
class UserSearchRetrieveAPI(generics.RetrieveAPIView):
    # User search endpoint: short serializer, filtered via UserFilter.
    queryset = User.objects.all()
    serializer_class = UserShortRetrieveSeriliazer
    filterset_class = UserFilter
class UserProfileRetrieveAPI(generics.RetrieveAPIView):
    """Public profile endpoint: user data plus a page of their posts.

    Each post is annotated with the requester's view of it: whether it is
    unlocked ("payed"), liked (and the like's pk), and favourited.
    """
    queryset = User.objects.all()
    serializer_class = UserGetSerializer

    def retrieve(self, request, username):
        # 404 instead of an unhandled DoesNotExist (500) for unknown usernames.
        user = get_object_or_404(User, username=username)
        req_user = request.user
        data_compare = request.GET.get('datetime', 0)
        # BUG FIX: query parameters arrive as strings; without int() the
        # slice results[offset:limit + offset] raised TypeError whenever a
        # client actually supplied limit/offset.
        limit = int(request.GET.get('limit', 50))
        offset = int(request.GET.get('offset', 0))
        results = []
        # An active subscription is one whose end date is still in the future.
        sub_check = Subscription.objects.filter(
            target=user, source=req_user, end_date__gte=datetime.now()).exists()
        sub_dict = {
            'subscribed': sub_check
        }
        if data_compare == 0:
            for post in user.user_post.filter(archived=False).order_by('-publication_date'):
                post_data = PostGetShortSerializers(
                    instance=post, context={'request': request}).data
                res_dict = {'post': post_data}
                if post.access_level == 1:
                    # Pay-per-view post: unlocked only if it was bought.
                    res_dict['post']['payed'] = PostBought.objects.filter(
                        post=post, user=user).exists()
                else:
                    # Subscription-level post: unlocked by an active subscription.
                    res_dict['post']['payed'] = sub_check
                # Default to "not liked", then overwrite from the first
                # like action found (same outcome as the old loop/else maze).
                res_dict['post']['liked'] = False
                res_dict['post']['like_id'] = None
                for action in PostAction.objects.filter(
                        post=post, user=request.user):
                    if action.like:
                        res_dict['post']['liked'] = True
                        res_dict['post']['like_id'] = action.pk
                        break
                res_dict['post']['favourite'] = request.user in post.favourites.all()
                results.append(res_dict)
        return api_accepted_202({
            **self.serializer_class(instance=user, context={'request': request}).data,
            **{'posts': results[offset:limit + offset]},
            **sub_dict
        })

    def get_serializer_context(self):
        return {'request': self.request}
class UserCardListAPI(generics.ListAPIView):
    # Lists the payment cards belonging to the requesting user.
    serializer_class = CardGetSerializer
    def get_queryset(self):
        user = self.request.user
        return Card.objects.filter(
            user=user
        )
class UserBlockedListAPI(generics.ListAPIView):
    # Lists the users the requester has blocked.
    serializer_class = UserShortRetrieveSeriliazer
    def get_queryset(self):
        user = self.request.user
        return user.blocked_users.all()
class UserSettingsRetrieveAPI(generics.RetrieveAPIView):
    # Returns the requesting user's settings (SettingsSerializer shape).
    serializer_class = SettingsSerializer
    queryset = User.objects.all()
    def get_object(self):
        return self.request.user
class UserLoginAPI(generics.GenericAPIView):
    """Token login: exchange email/password for a DRF auth token."""
    permission_classes = permissions.AllowAny,
    serializer_class = UserCreationSerializer

    def post(self, request):
        user = authenticate(
            username=request.data['email'],
            password=request.data['password'],
        )
        # Guard clause: reject bad credentials before touching the token table.
        if user is None:
            return api_bad_request_400(
                {
                    "non_field_errors": [
                        "Невозможно войти с предоставленными учетными данными."
                    ]
                }
            )
        token, _ = Token.objects.get_or_create(user=user)
        return api_created_201({"auth_token": str(token)})
class UserCreateAPI(generics.GenericAPIView):
    """Register a new user, optionally crediting a referrer, and return an
    auth token. Any failure is reported as a 451 "already exists"."""
    permission_classes = permissions.AllowAny,
    serializer_class = UserCreationSerializer

    def post(self, request):
        try:
            # Resolve the referrer first so a bad pk fails before creation.
            if request.data.get('referrer'):
                ref_user = User.objects.get(pk=request.data['referrer'])
            else:
                ref_user = None
            username = request.data['username']
            user, created = User.objects.get_or_create(
                email=request.data['email'],
                username=username,
                ref_link=create_ref_link(username),
                referrer=ref_user
            )
            assert created, "Already exists"
            user.set_password(request.data['password'])
            # BUG FIX: only register the referral when a referrer exists.
            # Previously ref_user.repheral_users.add(user) ran unconditionally,
            # raised AttributeError when ref_user was None, and every
            # referrer-less signup was wrongly answered "already exists".
            # (The earlier argument-less .add() call was a no-op and is removed.)
            if ref_user is not None:
                ref_user.repheral_users.add(user)
            user.save()
            token, created = Token.objects.get_or_create(user=user)
            return api_created_201(
                {
                    "auth_token": str(token)
                }
            )
        except Exception as e:
            logging.error(e)
            return api_block_by_policy_451({"info": "already exists"})
class UserAPI(generics.DestroyAPIView):
    # Deletes the requesting user's own account.
    queryset = User.objects.all()
    serializer_class = UserCreationSerializer
    def get_object(self):
        return self.request.user
class UserPartialUpdateAPI(GenericAPIView, UpdateModelMixin):
    # PUT performs a partial (PATCH-style) update of the requesting user.
    queryset = User.objects.all()
    serializer_class = UserPartialSerializer
    def get_object(self):
        return self.request.user
    def put(self, request, *args, **kwargs):
        return self.partial_update(request, *args, **kwargs)
class CreateSubscriptioAPI(generics.CreateAPIView):
    # Creates a subscription via SubscriptionCreateSerializer; a failed
    # balance assertion inside the serializer maps to HTTP 451.
    queryset = User.objects.all()
    serializer_class = SubscriptionCreateSerializer
    def create(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.data)
        try:
            serializer.is_valid(raise_exception=True)
        except AssertionError:
            # NOTE(review): assumes the serializer asserts on insufficient
            # balance — confirm against SubscriptionCreateSerializer.
            return api_block_by_policy_451({"status": "not enought credits"})
        self.perform_create(serializer)
        return Response(serializer.data)
    def get_serializer_context(self):
        return {'request': self.request}
class UserSubscription(GenericAPIView):
    """Subscribe the requesting user to the user identified by pk.

    Credits the target (and, when present, the target's referrer) and
    records a Subscription row; answers 402 when the requester cannot
    afford the target's subscription price.
    """
    queryset = User.objects.all()
    serializer_class = SubscriptionCreateSerializer

    def post(self, request, pk):
        user = request.user
        subscribe_target = get_object_or_404(User, pk=pk)
        if user.credit_amount > subscribe_target.subscribtion_price:
            user.my_subscribes.add(subscribe_target)
            subscribe_target.fans_amount += 1
            subscribe_target.earned_credits_amount += subscribe_target.subscribtion_price
            subscribe_target.save()
            referrer = subscribe_target.referrer
            if referrer:
                # The referrer earns a fixed percentage of each subscription.
                referrer.earned_credits_amount += subscribe_target.subscribtion_price * REF_PERCANTAGE
                referrer.save()
            # NOTE(review): user.credit_amount is never decremented here —
            # confirm the charge happens elsewhere.
            user.save()
            subscription_datetime = datetime.now()
            end_datetime = subscription_datetime + timedelta(
                days=subscribe_target.subscribtion_duration
            )
            # BUG FIX: the old code called .timestamp() on the timedelta
            # (which has no such method) instead of on the shifted datetime,
            # raising AttributeError on every successful subscription.
            # The redundant .save() after .create() is also dropped.
            Subscription.objects.create(
                source=user,
                target=subscribe_target,
                start_date=subscription_datetime.timestamp(),
                end_date=end_datetime.timestamp()
            )
            return api_accepted_202(
                {
                    "subscriber": user.pk,
                    "subscribed": subscribe_target.pk
                }
            )
        return api_payment_required_402(
            {
                "need_to_pay": subscribe_target.subscribtion_price - user.credit_amount
            }
        )
class CardRetrieveAPI(generics.RetrieveAPIView):
    # Retrieves a single payment card by pk.
    queryset = Card.objects.all()
    serializer_class = CardGetSerializer
class CardCreateAPI(generics.CreateAPIView):
    # Creates a payment card for the requesting user.
    queryset = Card.objects.all()
    serializer_class = CardCreationSerializer
    def get_serializer_context(self):
        return {'request': self.request}
class CardAPI(generics.RetrieveUpdateDestroyAPIView):
    # Retrieve / update / delete a single payment card by pk.
    queryset = Card.objects.all()
    serializer_class = CardCreationSerializer
class CardPartialUpdateAPI(GenericAPIView, UpdateModelMixin):
    # PUT performs a partial (PATCH-style) update of a card.
    queryset = Card.objects.all()
    serializer_class = CardCreationSerializer
    def put(self, request, *args, **kwargs):
        return self.partial_update(request, *args, **kwargs)
class DonationRetrieveAPI(generics.RetrieveAPIView):
    # Retrieves a single donation by pk.
    queryset = Donation.objects.all()
    serializer_class = DonationGetSerializer
class AddBlockedUserAPI(generics.GenericAPIView):
    """Add a user to, or remove a user from, the requester's block list."""
    permission_classes = (permissions.IsAuthenticated, )
    queryset = User.objects.all()
    serializer_class = UserBlockSerializer

    def get_object(self):
        return self.request.user

    def put(self, request, *args, **kwargs):
        target = User.objects.get(username=request.data['username'])
        blocked = request.data['block']
        blocklist = self.request.user.blocked_users
        if blocked:
            blocklist.add(target)
        else:
            blocklist.remove(target)
        self.request.user.save()
        return Response({
            'user': target.pk,
            'block': blocked,
        })
class DonationCreateAPI(generics.CreateAPIView):
    """Create a donation; answers 451 when the sender lacks credits."""
    queryset = Donation.objects.all()
    serializer_class = DonationCreationSerializer

    def get_serializer_context(self):
        # BUG FIX: this method was defined twice in the class body; the
        # first (identical) definition was dead code and is removed.
        return {'request': self.request}

    def create(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.data)
        try:
            serializer.is_valid(raise_exception=True)
        except ValueError:
            # NOTE(review): assumes the serializer raises ValueError on
            # insufficient balance — confirm against DonationCreationSerializer.
            return api_block_by_policy_451({"status": "not enought credits"})
        self.perform_create(serializer)
        return Response(serializer.data)
class PaymentRetrieveAPI(generics.RetrieveAPIView):
    # Retrieves a single payment by pk.
    queryset = Payment.objects.all()
    serializer_class = PaymentGetSerializer
class PaymentCreateAPI(generics.CreateAPIView):
    # Creates a payment record.
    queryset = Payment.objects.all()
    serializer_class = PaymentCreationSerializer
    def get_serializer_context(self):
        return {'request': self.request}
class PendingUserCreateAPI(generics.CreateAPIView):
    # Creates a pending-user record (pre-registration).
    queryset = PendingUser.objects.all()
    serializer_class = PendingUserCreationSerializer
    def get_object(self):
        return self.request.user
class UserOnlineRetrieveAPI(generics.RetrieveAPIView):
    # Retrieves a single online-status record by pk.
    queryset = UserOnline.objects.all()
    serializer_class = UserOnlineGetSerializer
class UserOnlineCreateAPI(generics.GenericAPIView):
    """Record a "user is online" ping for the requesting user."""
    queryset = UserOnline.objects.all()
    serializer_class = UserOnlineCreationSerializer

    def get_serializer_context(self):
        return {'request': self.request}

    def post(self, request):
        serializer = self.get_serializer(data=request.data)
        try:
            serializer.is_valid(raise_exception=True)
        except AssertionError:
            return api_bad_request_400({"status": "bad request"})
        # NOTE(review): Django's built-in manager method is update_or_create;
        # create_or_update must be a custom manager method — confirm it exists.
        UserOnline.objects.create_or_update(
            user=request.user
        )
        return Response(serializer.data)
class DonationPayedUserRetrieveAPI(generics.ListAPIView):
    # Lists donations *received by* the requesting user, newest first.
    queryset = Donation.objects.all()
    serializer_class = DonationGetSerializer
    def get_queryset(self):
        reciever = self.request.user
        # NOTE: the model field is spelled "reciever" (sic) here.
        return Donation.objects.filter(
            reciever=reciever
        ).order_by('-datetime')
class DonationPayedUserToRetrieveAPI(generics.ListAPIView):
    # Lists donations *sent by* the requesting user, newest first.
    queryset = Donation.objects.all()
    serializer_class = DonationGetSerializer
    def get_queryset(self):
        sender = self.request.user
        return Donation.objects.filter(
            sender=sender
        ).order_by('-datetime')
class PaymentUserHistoryRetrieveAPI(generics.ListAPIView):
    # Lists payments made with any of the requesting user's cards, newest first.
    queryset = Payment.objects.all()
    serializer_class = PaymentGetSerializer
    def get_queryset(self):
        sender = self.request.user
        return Payment.objects.filter(
            card__user=sender
        ).order_by('-datetime')
class PayStatsHistoryRetrieveAPI(APIView):
    # Aggregates the requesting user's current-month income:
    # donations received plus subscription revenue.
    def get(self, request, *args,):
        current_month = datetime.now().month
        user = request.user
        # NOTE(review): other views in this module filter donations with
        # `reciever=` (sic); `receiver=` here may not match the model field —
        # confirm the spelling. The doubled `datetime__datetime__` lookup
        # also looks suspicious (elsewhere the field is ordered as plain
        # `-datetime`).
        donations = Donation.objects.filter(
            receiver=user,
            datetime__datetime__date__month=current_month,
        ).order_by('-datetime')
        donation_amount = sum((donation.amount for donation in donations))
        subscriptions = Subscription.objects.filter(
            target=user,
            start_date__date__month=current_month,
        ).order_by('-start_date')
        # NOTE(review): this sums the *duration* once per subscription
        # (len(subscriptions) * user.subscribtion_duration) — presumably the
        # intent was the subscription price; confirm before relying on it.
        subscription_amount = sum((
            user.subscribtion_duration
            for _ in range(len(subscriptions))
        ))
        result_sum = subscription_amount + donation_amount
        result = {
            'result_sum': result_sum,
            'donations': DonationGetSerializer(
                instance=donations,
                many=True
            ).data,
            'subscriptions': SubscriptionGetSerializer(
                instance=subscriptions,
                many=True
            ).data,
        }
        return Response(result)
| [
"django.contrib.auth.authenticate",
"core.utils.default_responses.api_bad_request_400",
"requests.post",
"apps.blog.serializers.PostGetShortSerializers",
"core.utils.func.create_ref_link",
"core.utils.default_responses.api_block_by_policy_451",
"core.utils.default_responses.api_payment_required_402",
... | [((1350, 1389), 'requests.post', 'requests.post', (['post_url'], {'data': 'post_data'}), '(post_url, data=post_data)\n', (1363, 1389), False, 'import requests\n'), ((1435, 1452), 'rest_framework.response.Response', 'Response', (['content'], {}), '(content)\n', (1443, 1452), False, 'from rest_framework.response import Response\n'), ((5407, 5454), 'django.contrib.auth.authenticate', 'authenticate', ([], {'username': 'email', 'password': 'password'}), '(username=email, password=password)\n', (5419, 5454), False, 'from django.contrib.auth import authenticate\n'), ((8081, 8106), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (8089, 8106), False, 'from rest_framework.response import Response\n'), ((8404, 8434), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['User'], {'pk': 'pk'}), '(User, pk=pk)\n', (8421, 8434), False, 'from django.shortcuts import get_object_or_404\n'), ((9560, 9664), 'core.utils.default_responses.api_payment_required_402', 'api_payment_required_402', (["{'need_to_pay': subscribe_target.subscribtion_price - user.credit_amount}"], {}), "({'need_to_pay': subscribe_target.\n subscribtion_price - user.credit_amount})\n", (9584, 9664), False, 'from core.utils.default_responses import api_accepted_202, api_bad_request_400, api_block_by_policy_451, api_created_201, api_payment_required_402\n'), ((11236, 11250), 'rest_framework.response.Response', 'Response', (['data'], {}), '(data)\n', (11244, 11250), False, 'from rest_framework.response import Response\n'), ((11806, 11831), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (11814, 11831), False, 'from rest_framework.response import Response\n'), ((13195, 13220), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (13203, 13220), False, 'from rest_framework.response import Response\n'), ((15249, 15265), 'rest_framework.response.Response', 
'Response', (['result'], {}), '(result)\n', (15257, 15265), False, 'from rest_framework.response import Response\n'), ((5507, 5545), 'rest_framework.authtoken.models.Token.objects.get_or_create', 'Token.objects.get_or_create', ([], {'user': 'user'}), '(user=user)\n', (5534, 5545), False, 'from rest_framework.authtoken.models import Token\n'), ((5710, 5815), 'core.utils.default_responses.api_bad_request_400', 'api_bad_request_400', (["{'non_field_errors': ['Невозможно войти с предоставленными учетными данными.']}"], {}), "({'non_field_errors': [\n 'Невозможно войти с предоставленными учетными данными.']})\n", (5729, 5815), False, 'from core.utils.default_responses import api_accepted_202, api_bad_request_400, api_block_by_policy_451, api_created_201, api_payment_required_402\n'), ((6808, 6846), 'rest_framework.authtoken.models.Token.objects.get_or_create', 'Token.objects.get_or_create', ([], {'user': 'user'}), '(user=user)\n', (6835, 6846), False, 'from rest_framework.authtoken.models import Token\n'), ((8998, 9012), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (9010, 9012), False, 'from datetime import datetime, timedelta\n'), ((9380, 9456), 'core.utils.default_responses.api_accepted_202', 'api_accepted_202', (["{'subscriber': user.pk, 'subscribed': subscribe_target.pk}"], {}), "({'subscriber': user.pk, 'subscribed': subscribe_target.pk})\n", (9396, 9456), False, 'from core.utils.default_responses import api_accepted_202, api_bad_request_400, api_block_by_policy_451, api_created_201, api_payment_required_402\n'), ((14252, 14266), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (14264, 14266), False, 'from datetime import datetime, timedelta\n'), ((3338, 3393), 'apps.blog.models.PostAction.objects.filter', 'PostAction.objects.filter', ([], {'post': 'post', 'user': 'request.user'}), '(post=post, user=request.user)\n', (3363, 3393), False, 'from apps.blog.models import PostAction, PostBought\n'), ((7021, 7037), 'logging.error', 
'logging.error', (['e'], {}), '(e)\n', (7034, 7037), False, 'import logging\n'), ((7057, 7108), 'core.utils.default_responses.api_block_by_policy_451', 'api_block_by_policy_451', (["{'info': 'already exists'}"], {}), "({'info': 'already exists'})\n", (7080, 7108), False, 'from core.utils.default_responses import api_accepted_202, api_bad_request_400, api_block_by_policy_451, api_created_201, api_payment_required_402\n'), ((7967, 8025), 'core.utils.default_responses.api_block_by_policy_451', 'api_block_by_policy_451', (["{'status': 'not enought credits'}"], {}), "({'status': 'not enought credits'})\n", (7990, 8025), False, 'from core.utils.default_responses import api_accepted_202, api_bad_request_400, api_block_by_policy_451, api_created_201, api_payment_required_402\n'), ((11692, 11750), 'core.utils.default_responses.api_block_by_policy_451', 'api_block_by_policy_451', (["{'status': 'not enought credits'}"], {}), "({'status': 'not enought credits'})\n", (11715, 11750), False, 'from core.utils.default_responses import api_accepted_202, api_bad_request_400, api_block_by_policy_451, api_created_201, api_payment_required_402\n'), ((13048, 13094), 'core.utils.default_responses.api_bad_request_400', 'api_bad_request_400', (["{'status': 'bad request'}"], {}), "({'status': 'bad request'})\n", (13067, 13094), False, 'from core.utils.default_responses import api_accepted_202, api_bad_request_400, api_block_by_policy_451, api_created_201, api_payment_required_402\n'), ((2804, 2872), 'apps.blog.serializers.PostGetShortSerializers', 'PostGetShortSerializers', ([], {'instance': 'post', 'context': "{'request': request}"}), "(instance=post, context={'request': request})\n", (2827, 2872), False, 'from apps.blog.serializers import PostGetShortSerializers\n'), ((6532, 6557), 'core.utils.func.create_ref_link', 'create_ref_link', (['username'], {}), '(username)\n', (6547, 6557), False, 'from core.utils.func import REF_PERCANTAGE, create_ref_link\n'), ((2550, 2564), 
'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2562, 2564), False, 'from datetime import datetime, timedelta\n'), ((3099, 3146), 'apps.blog.models.PostBought.objects.filter', 'PostBought.objects.filter', ([], {'post': 'post', 'user': 'user'}), '(post=post, user=user)\n', (3124, 3146), False, 'from apps.blog.models import PostAction, PostBought\n'), ((9235, 9289), 'datetime.timedelta', 'timedelta', ([], {'days': 'subscribe_target.subscribtion_duration'}), '(days=subscribe_target.subscribtion_duration)\n', (9244, 9289), False, 'from datetime import datetime, timedelta\n')] |
"""
The system RoBERTa trains on the AGB dataset with softmax loss function.
At every 1000 training steps, the model is evaluated on the AGB dev set.
"""
from torch.utils.data import DataLoader
from sentence_transformers import models, losses
from sentence_transformers import SentencesDataset, LoggingHandler, SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, LabelGenerationEvaluator
from sentence_transformers.readers import *
import logging
import torch
import os
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S',
                    level=logging.INFO,
                    handlers=[LoggingHandler()])
#### /print debug information to stdout
# # Read the dataset
# Walk every saved model under run1..run5 and re-evaluate it on the AGB test set.
root_dir = "/data/salmasian/sentence_transformers"
for i in range(1, 6):
    run_dir = os.path.join(root_dir, f"run{i}")
    for model_dir in sorted(os.listdir(run_dir)):
        curr_dir = os.path.join(run_dir, model_dir)
        # skip non-consecutive models
        if f"og_{i}" not in curr_dir:
            continue
        print(f"Working on model {model_dir}")
        # Delete when we re-evaluate...
        # Remove stale prediction outputs so the evaluator writes fresh ones.
        labels_file = os.path.join(curr_dir, "prediction_labels.csv")
        pred_file = os.path.join(curr_dir, "prediction_results.csv")
        if os.path.isfile(labels_file):
            os.remove(os.path.join(curr_dir, "prediction_labels.csv"))
        if os.path.isfile(pred_file):
            os.remove(os.path.join(curr_dir, "prediction_results.csv"))
        # Model path
        model_save_path = curr_dir
        batch_size = 24
        agb_reader = TestAGBReader('datasets/og-test')
        train_num_labels = agb_reader.get_num_labels()
        model = SentenceTransformer(model_save_path, device="cuda:0")
        # Rebuild the softmax classification head and load its trained weights.
        train_loss = losses.SoftmaxLoss(model=model,
                                        sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
                                        num_labels=train_num_labels)
        train_loss.classifier = torch.load(os.path.join(model_save_path, "2_Softmax/pytorch_model.bin"))
        print("test")
        test_dir = "/data/daumiller/sentence-transformers/examples/datasets/og-test"
        # Evaluate document by document; each test file becomes one batch run.
        for fn in sorted(os.listdir(test_dir)):
            examples = agb_reader.get_examples(fn)
            if not examples:
                continue
            # Hack to avoid problems with docs almost as long as batch size
            if len(examples) == batch_size + 1:
                batch_size_used = batch_size - 3
            else:
                batch_size_used = batch_size
            test_data = SentencesDataset(examples=examples, model=model, shorten=True)
            test_dataloader = DataLoader(test_data, shuffle=False, batch_size=batch_size_used)
            evaluator = LabelGenerationEvaluator(test_dataloader, softmax_model=train_loss)
            model.evaluate(evaluator, model_save_path)
| [
"os.listdir",
"sentence_transformers.SentenceTransformer",
"sentence_transformers.SentencesDataset",
"os.path.join",
"sentence_transformers.LoggingHandler",
"os.path.isfile",
"torch.utils.data.DataLoader",
"sentence_transformers.evaluation.LabelGenerationEvaluator"
] | [((914, 947), 'os.path.join', 'os.path.join', (['root_dir', 'f"""run{i}"""'], {}), "(root_dir, f'run{i}')\n", (926, 947), False, 'import os\n'), ((976, 995), 'os.listdir', 'os.listdir', (['run_dir'], {}), '(run_dir)\n', (986, 995), False, 'import os\n'), ((1017, 1049), 'os.path.join', 'os.path.join', (['run_dir', 'model_dir'], {}), '(run_dir, model_dir)\n', (1029, 1049), False, 'import os\n'), ((1256, 1303), 'os.path.join', 'os.path.join', (['curr_dir', '"""prediction_labels.csv"""'], {}), "(curr_dir, 'prediction_labels.csv')\n", (1268, 1303), False, 'import os\n'), ((1324, 1372), 'os.path.join', 'os.path.join', (['curr_dir', '"""prediction_results.csv"""'], {}), "(curr_dir, 'prediction_results.csv')\n", (1336, 1372), False, 'import os\n'), ((1385, 1412), 'os.path.isfile', 'os.path.isfile', (['labels_file'], {}), '(labels_file)\n', (1399, 1412), False, 'import os\n'), ((1496, 1521), 'os.path.isfile', 'os.path.isfile', (['pred_file'], {}), '(pred_file)\n', (1510, 1521), False, 'import os\n'), ((1803, 1856), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['model_save_path'], {'device': '"""cuda:0"""'}), "(model_save_path, device='cuda:0')\n", (1822, 1856), False, 'from sentence_transformers import SentencesDataset, LoggingHandler, SentenceTransformer\n'), ((746, 762), 'sentence_transformers.LoggingHandler', 'LoggingHandler', ([], {}), '()\n', (760, 762), False, 'from sentence_transformers import SentencesDataset, LoggingHandler, SentenceTransformer\n'), ((2134, 2194), 'os.path.join', 'os.path.join', (['model_save_path', '"""2_Softmax/pytorch_model.bin"""'], {}), "(model_save_path, '2_Softmax/pytorch_model.bin')\n", (2146, 2194), False, 'import os\n'), ((2329, 2349), 'os.listdir', 'os.listdir', (['test_dir'], {}), '(test_dir)\n', (2339, 2349), False, 'import os\n'), ((2717, 2779), 'sentence_transformers.SentencesDataset', 'SentencesDataset', ([], {'examples': 'examples', 'model': 'model', 'shorten': '(True)'}), '(examples=examples, 
model=model, shorten=True)\n', (2733, 2779), False, 'from sentence_transformers import SentencesDataset, LoggingHandler, SentenceTransformer\n'), ((2810, 2874), 'torch.utils.data.DataLoader', 'DataLoader', (['test_data'], {'shuffle': '(False)', 'batch_size': 'batch_size_used'}), '(test_data, shuffle=False, batch_size=batch_size_used)\n', (2820, 2874), False, 'from torch.utils.data import DataLoader\n'), ((2899, 2966), 'sentence_transformers.evaluation.LabelGenerationEvaluator', 'LabelGenerationEvaluator', (['test_dataloader'], {'softmax_model': 'train_loss'}), '(test_dataloader, softmax_model=train_loss)\n', (2923, 2966), False, 'from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, LabelGenerationEvaluator\n'), ((1436, 1483), 'os.path.join', 'os.path.join', (['curr_dir', '"""prediction_labels.csv"""'], {}), "(curr_dir, 'prediction_labels.csv')\n", (1448, 1483), False, 'import os\n'), ((1545, 1593), 'os.path.join', 'os.path.join', (['curr_dir', '"""prediction_results.csv"""'], {}), "(curr_dir, 'prediction_results.csv')\n", (1557, 1593), False, 'import os\n')] |
import ipywidgets as widgets
from traitlets import Unicode, Int, validate
import os
import json
from datetime import datetime,timedelta
from IPython.display import Javascript
from IPython.display import HTML
from cognipy.ontology import Ontology
from IPython.display import clear_output
# Module-level guard: inject the editor JavaScript at most once per kernel.
_JS_initialized = False
def _InitJS():
    # Load edit.js (shipped next to this module) into the notebook front-end
    # and greet the user. Safe to call repeatedly; only the first call acts.
    # NOTE(review): `display` is assumed to be the IPython display function
    # injected into builtins by the notebook environment — it is not
    # imported explicitly here.
    global _JS_initialized
    if _JS_initialized:
        return
    with open(os.path.dirname(os.path.abspath(__file__))+"/edit.js", 'r') as file:
        _JS_initialized = True
        display( Javascript(file.read()) )
        display( HTML("Welcome to CogniPy") )
class OntoeditWidget(widgets.DOMWidget):
    # CNL editor widget: traitlets synced with the JS counterpart in edit.js.
    _view_name = Unicode('OntoeditView').tag(sync=True)
    _model_name = Unicode('OntoeditModel').tag(sync=True)
    _view_module = Unicode('ontoedit').tag(sync=True)
    _model_module = Unicode('ontoedit').tag(sync=True)
    # value: full editor text; cursor: caret offset into value.
    value = Unicode('').tag(sync=True)
    cursor = Int(0).tag(sync=True)
    # dot: counter bumped by the front-end (observed to trigger reloads).
    dot = Int(0).tag(sync=True)
    # hints: "<br/>"-joined HTML list of completions shown to the user;
    # hintsX: offset where the word being completed starts;
    # hintT: common prefix of the completions, auto-insertable.
    hints = Unicode('').tag(sync=True)
    hintsX = Int(0).tag(sync=True)
    hintT = Unicode('').tag(sync=True)
def escape(html):
    """Return *html* with HTML-special characters replaced by entities."""
    # '&' must be handled first so already-inserted entities are not re-escaped.
    replacements = (
        ('&', '&amp;'),
        ('<', '&lt;'),
        ('>', '&gt;'),
        ('"', '&quot;'),
        ("'", '&#39;'),
    )
    for old, new in replacements:
        html = html.replace(old, new)
    return html
from functools import reduce
def getcommonletters(strlist):
    """Join the characters at positions (up to the shortest string's length)
    where every string in *strlist* has the same character.

    Replaces the pre-2.5 ``(a == b) and a or None`` ternary hack inside a
    ``reduce`` with a plain all-equal test — same result, readable.
    """
    return ''.join(
        chars[0] for chars in zip(*strlist)
        if all(c == chars[0] for c in chars)
    )
def findcommonstart(strlist):
    """Return the longest common prefix of the strings in *strlist*.

    The original hand-rolled fixed-point loop (repeatedly appending the
    common-letters string until it stabilises) converges to the common
    prefix; ``os.path.commonprefix`` computes it directly and does not
    mutate its argument, matching the old copy-first behaviour.
    """
    return os.path.commonprefix(strlist)
def CnlEditBox(snap_filename,ontol = None, height='300px'):
    """Build a CNL editor widget backed by the snapshot file *snap_filename*.

    Returns a VBox of [error output, editor widget, output]; the box gets a
    ``getvalue()`` attribute returning the current editor text. When *ontol*
    is None the ontology is (re)loaded from the snapshot file on every edit.
    """
    _InitJS()
    e=widgets.Output()
    onto = ontol
    def reload_onto():
        # Reload the ontology from the snapshot (only when none was supplied)
        # and surface any load error into the output area `e`.
        nonlocal onto,ontol
        if ontol is None:
            if not os.path.exists(snap_filename):
                # No snapshot yet: start from a minimal valid ontology.
                onto = Ontology("cnl/string","Every thing is a thing.")
            else:
                onto = Ontology("cnl/file",snap_filename,stop_on_error=False)
        with e:
            clear_output()
            if onto.get_load_error() is not None:
                print(str(onto.get_load_error()))
    reload_onto()
    if not os.path.exists(snap_filename):
        # Create an empty snapshot so later open(..., 'r'/'w') calls succeed.
        open(snap_filename, 'a').close()
    def autoCompl(s):
        # Autocomplete within the current (last, possibly unfinished) sentence:
        # everything after the last '.' with leading whitespace stripped.
        pos=s.rfind('.', 0, len(s))
        pos=0 if pos<0 else pos+1
        inn=s[pos:len(s)].lstrip(' \n\t')
        ac= onto.autocomplete(inn)
        return ac
    reloading = False
    def onChange(change):
        # Traitlet observer: persists edits, reloads the ontology, and
        # computes autocomplete hints as the cursor moves.
        # print(change)
        nonlocal reloading
        if change.name=="value":
            if reloading:
                reloading = False
            # Retry the write until it succeeds (file may be locked briefly).
            # NOTE(review): the bare except retries forever on a permanent
            # write failure.
            while True:
                try:
                    with open(snap_filename, 'w') as file:
                        file.write(change.new)
                    break
                except:
                    continue
            reload_onto()
        elif change.name=="cursor":
            # Recompute hints for the text up to the caret.
            s = change.owner.value[0:change.new]
            acl=[]
            if onto is None:
                return
            #acl=['!!!SYNTAX ERROR!!!\r\n'+syntax_error]
            else:
                acl=autoCompl(s)
            acl.sort()
            options=[escape(x) for x in acl]
            # oopts: completions that are words (not '<'-prefixed markup).
            oopts = [o for o in acl if o[0]!='<']
            change.owner.hints="<br/>".join(options)
            # Hint word starts after the last separator before the caret.
            pos = max(s.rfind(i) for i in [' ','\t', '\n', '.'])
            change.owner.hintsX=pos+1
            change.owner.hintT=findcommonstart(oopts)
        elif change.name=="dot":
            # Front-end "dot" event: mark the next value change as a reload.
            reloading = True
    txt = None
    with open(snap_filename, 'r') as file:
        txt = file.read()
    w=OntoeditWidget(
        value = txt,
        placeholder='Type something',
        disabled=False,
        layout=widgets.Layout(width='90%', height= '100%'),
        style={'description_width': 'initial'}
    )
    o=widgets.Output()
    w.observe(onChange, names=['cursor','value','dot'])
    xx= widgets.VBox([e,w,o], layout={'height': height})
    xx.getvalue=lambda : w.value
    return xx
def CnlQueryForConcept(snap_filename,onto):
    """Build a one-line CNL query editor for *onto*, persisted to *snap_filename*.

    Returns a VBox of [editor widget, output]; the box gets a ``getvalue()``
    attribute returning the current query text. Completions are computed as
    if the query continued "Every-single-thing that is ...".
    """
    _InitJS()
    if not os.path.exists(snap_filename):
        # Create an empty snapshot so later open(..., 'r'/'w') calls succeed.
        open(snap_filename, 'a').close()
    def autoCompl(onto,s):
        # pos is computed but unused here: the whole text is treated as the
        # body of a single query sentence.
        pos=s.rfind('.', 0, len(s))
        pos=0 if pos<0 else pos+1
        return onto.autocomplete("Every-single-thing that is "+s)
    def onChange(change):
        # Traitlet observer: persists edits and recomputes hints on cursor moves.
        # print(change)
        if change.name=="value":
            # Retry the write until it succeeds (file may be locked briefly).
            # NOTE(review): the bare except retries forever on a permanent
            # write failure.
            while True:
                try:
                    with open(snap_filename, 'w') as file:
                        file.write(change.new)
                    break
                except:
                    continue
        elif change.name=="cursor":
            s = change.owner.value[0:change.new]
            acl=autoCompl(onto,s)
            acl.sort()
            options=[escape(x) for x in acl]
            # oopts: completions that are words (not '<'-prefixed markup).
            oopts = [o for o in acl if o[0]!='<']
            change.owner.hints="<br/>".join(options)
            # Hint word starts after the last separator before the caret.
            pos = max(s.rfind(i) for i in [' ','\t', '\n', '.'])
            change.owner.hintsX=pos+1
            change.owner.hintT=findcommonstart(oopts)
    txt = None
    with open(snap_filename, 'r') as file:
        txt = file.read()
    w=OntoeditWidget(
        value = txt,
        placeholder='Type something',
        disabled=False,
        layout=widgets.Layout(width='90%', height= '100%'),
        style={'description_width': 'initial'}
    )
    w.observe(onChange, names=['cursor','value'])
    o=widgets.Output()
    xx= widgets.VBox([w,o], layout={'height': '100px'})
    xx.getvalue=lambda : w.value
    return xx
| [
"os.path.exists",
"ipywidgets.VBox",
"functools.reduce",
"ipywidgets.Output",
"IPython.display.clear_output",
"traitlets.Int",
"ipywidgets.Layout",
"os.path.abspath",
"IPython.display.HTML",
"traitlets.Unicode",
"cognipy.ontology.Ontology"
] | [((1833, 1849), 'ipywidgets.Output', 'widgets.Output', ([], {}), '()\n', (1847, 1849), True, 'import ipywidgets as widgets\n'), ((4196, 4212), 'ipywidgets.Output', 'widgets.Output', ([], {}), '()\n', (4210, 4212), True, 'import ipywidgets as widgets\n'), ((4277, 4327), 'ipywidgets.VBox', 'widgets.VBox', (['[e, w, o]'], {'layout': "{'height': height}"}), "([e, w, o], layout={'height': height})\n", (4289, 4327), True, 'import ipywidgets as widgets\n'), ((5872, 5888), 'ipywidgets.Output', 'widgets.Output', ([], {}), '()\n', (5886, 5888), True, 'import ipywidgets as widgets\n'), ((5897, 5945), 'ipywidgets.VBox', 'widgets.VBox', (['[w, o]'], {'layout': "{'height': '100px'}"}), "([w, o], layout={'height': '100px'})\n", (5909, 5945), True, 'import ipywidgets as widgets\n'), ((2404, 2433), 'os.path.exists', 'os.path.exists', (['snap_filename'], {}), '(snap_filename)\n', (2418, 2433), False, 'import os\n'), ((4444, 4473), 'os.path.exists', 'os.path.exists', (['snap_filename'], {}), '(snap_filename)\n', (4458, 4473), False, 'import os\n'), ((568, 594), 'IPython.display.HTML', 'HTML', (['"""Welcome to CogniPy"""'], {}), "('Welcome to CogniPy')\n", (572, 594), False, 'from IPython.display import HTML\n'), ((656, 679), 'traitlets.Unicode', 'Unicode', (['"""OntoeditView"""'], {}), "('OntoeditView')\n", (663, 679), False, 'from traitlets import Unicode, Int, validate\n'), ((713, 737), 'traitlets.Unicode', 'Unicode', (['"""OntoeditModel"""'], {}), "('OntoeditModel')\n", (720, 737), False, 'from traitlets import Unicode, Int, validate\n'), ((772, 791), 'traitlets.Unicode', 'Unicode', (['"""ontoedit"""'], {}), "('ontoedit')\n", (779, 791), False, 'from traitlets import Unicode, Int, validate\n'), ((827, 846), 'traitlets.Unicode', 'Unicode', (['"""ontoedit"""'], {}), "('ontoedit')\n", (834, 846), False, 'from traitlets import Unicode, Int, validate\n'), ((874, 885), 'traitlets.Unicode', 'Unicode', (['""""""'], {}), "('')\n", (881, 885), False, 'from traitlets import Unicode, Int, 
validate\n'), ((914, 920), 'traitlets.Int', 'Int', (['(0)'], {}), '(0)\n', (917, 920), False, 'from traitlets import Unicode, Int, validate\n'), ((946, 952), 'traitlets.Int', 'Int', (['(0)'], {}), '(0)\n', (949, 952), False, 'from traitlets import Unicode, Int, validate\n'), ((980, 991), 'traitlets.Unicode', 'Unicode', (['""""""'], {}), "('')\n", (987, 991), False, 'from traitlets import Unicode, Int, validate\n'), ((1020, 1026), 'traitlets.Int', 'Int', (['(0)'], {}), '(0)\n', (1023, 1026), False, 'from traitlets import Unicode, Int, validate\n'), ((1054, 1065), 'traitlets.Unicode', 'Unicode', (['""""""'], {}), "('')\n", (1061, 1065), False, 'from traitlets import Unicode, Int, validate\n'), ((4068, 4110), 'ipywidgets.Layout', 'widgets.Layout', ([], {'width': '"""90%"""', 'height': '"""100%"""'}), "(width='90%', height='100%')\n", (4082, 4110), True, 'import ipywidgets as widgets\n'), ((5694, 5736), 'ipywidgets.Layout', 'widgets.Layout', ([], {'width': '"""90%"""', 'height': '"""100%"""'}), "(width='90%', height='100%')\n", (5708, 5736), True, 'import ipywidgets as widgets\n'), ((1438, 1482), 'functools.reduce', 'reduce', (['(lambda a, b: a == b and a or None)', 'x'], {}), '(lambda a, b: a == b and a or None, x)\n', (1444, 1482), False, 'from functools import reduce\n'), ((1963, 1992), 'os.path.exists', 'os.path.exists', (['snap_filename'], {}), '(snap_filename)\n', (1977, 1992), False, 'import os\n'), ((2017, 2066), 'cognipy.ontology.Ontology', 'Ontology', (['"""cnl/string"""', '"""Every thing is a thing."""'], {}), "('cnl/string', 'Every thing is a thing.')\n", (2025, 2066), False, 'from cognipy.ontology import Ontology\n'), ((2107, 2163), 'cognipy.ontology.Ontology', 'Ontology', (['"""cnl/file"""', 'snap_filename'], {'stop_on_error': '(False)'}), "('cnl/file', snap_filename, stop_on_error=False)\n", (2115, 2163), False, 'from cognipy.ontology import Ontology\n'), ((424, 449), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (439, 449), 
False, 'import os\n'), ((2206, 2220), 'IPython.display.clear_output', 'clear_output', ([], {}), '()\n', (2218, 2220), False, 'from IPython.display import clear_output\n')] |
import subprocess
import os
from glim.core import Facade
from glim import Log
from glim import paths
# Fallback settings for JSLint; `source` is the directory scanned for *.js files.
DEFAULT_CONFIG = {
    'source': os.path.join(paths.APP_PATH, 'assets/js'),
}
class JSLint(object):
    """Run the ``jslint`` tool over every .js file below the configured source
    directory, reporting results through glim's Log facade."""

    def __init__(self, config):
        """Merge user-supplied *config* over a copy of DEFAULT_CONFIG.

        Copying fixes a bug in the original, which assigned the module-level
        DEFAULT_CONFIG dict directly and then mutated it, leaking per-instance
        overrides into every later instance.
        """
        self.config = dict(DEFAULT_CONFIG)
        for key, value in config.items():
            self.config[key] = value
        # Log the effective configuration (the original logged the literal
        # string "config" instead of the values).
        Log.debug("config: %s" % self.config)

    def check(self):
        """Lint all .js files under the configured source directory.

        Shells out to: find <source> -name '*.js' -print0 | xargs -0 jslint
        The dead `command`/`arguments` locals from the original were removed;
        they were computed but never used to build the final command line.
        """
        try:
            cmd = "find %s -name '*.js' -print0 | xargs -0 jslint" % self.config['source']
            Log.debug("cmd: %s" % cmd)
            p = subprocess.Popen(cmd,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 shell=True)
            out, err = p.communicate()
            Log.info("Linting javascript..")
            Log.write(out)
            Log.error(err)
        except Exception as e:
            Log.error(e)
class JSLintFacade(Facade):
    # Expose JSLint through glim's Facade pattern; `accessor` names the
    # wrapped class that the facade proxies to.
    accessor = JSLint
| [
"glim.Log.write",
"subprocess.Popen",
"os.path.join",
"glim.Log.debug",
"glim.Log.error",
"glim.Log.info"
] | [((134, 175), 'os.path.join', 'os.path.join', (['paths.APP_PATH', '"""assets/js"""'], {}), "(paths.APP_PATH, 'assets/js')\n", (146, 175), False, 'import os\n'), ((328, 347), 'glim.Log.debug', 'Log.debug', (['"""config"""'], {}), "('config')\n", (337, 347), False, 'from glim import Log\n'), ((454, 488), 'glim.Log.debug', 'Log.debug', (["('command: %s' % command)"], {}), "('command: %s' % command)\n", (463, 488), False, 'from glim import Log\n'), ((492, 530), 'glim.Log.debug', 'Log.debug', (["('arguments: %s' % arguments)"], {}), "('arguments: %s' % arguments)\n", (501, 530), False, 'from glim import Log\n'), ((730, 756), 'glim.Log.debug', 'Log.debug', (["('cmd: %s' % cmd)"], {}), "('cmd: %s' % cmd)\n", (739, 756), False, 'from glim import Log\n'), ((764, 850), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'shell': '(True)'}), '(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell\n =True)\n', (780, 850), False, 'import subprocess\n'), ((906, 938), 'glim.Log.info', 'Log.info', (['"""Linting javascript.."""'], {}), "('Linting javascript..')\n", (914, 938), False, 'from glim import Log\n'), ((942, 956), 'glim.Log.write', 'Log.write', (['out'], {}), '(out)\n', (951, 956), False, 'from glim import Log\n'), ((960, 974), 'glim.Log.error', 'Log.error', (['err'], {}), '(err)\n', (969, 974), False, 'from glim import Log\n'), ((1003, 1015), 'glim.Log.error', 'Log.error', (['e'], {}), '(e)\n', (1012, 1015), False, 'from glim import Log\n')] |
import os
import pandas as pd
import numpy as np
import torch
from torchvision import transforms
from torch.utils.data import Dataset
import matplotlib.pyplot as plt
from skimage import io
import pdb
class FrameDataset(Dataset):
    """Torch dataset over frames listed in a CSV of (relative path, value) rows.

    Each item is a (transformed image tensor, float target tensor) pair; the
    image is resized to 66x220 and normalized with ImageNet statistics.
    """

    def __init__(self, csv_file, train_dir):
        self.labels = pd.read_csv(csv_file)
        self.train_dir = train_dir
        # Resize to 66x220 and normalize with ImageNet channel statistics.
        self.transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize((66, 220)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])

    def show_img(self, img, denormalize=True):
        """Display a (C, H, W) tensor, undoing the normalization by default."""
        if denormalize:
            undo = transforms.Normalize(
                mean=[-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.225],
                std=[1 / 0.229, 1 / 0.224, 1 / 0.225])
            img = undo(img)
        plt.imshow(np.transpose(img.numpy(), (1, 2, 0)))
        plt.show()

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, index):
        row = self.labels.iloc[index]
        image = io.imread(os.path.join(self.train_dir, row[0]))
        y_label = torch.tensor(float(row[1]))
        if self.transform:
            image = self.transform(image)
        return (image, y_label)
| [
"torchvision.transforms.ToPILImage",
"pandas.read_csv",
"os.path.join",
"skimage.io.imread",
"torchvision.transforms.Normalize",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor",
"matplotlib.pyplot.show"
] | [((297, 318), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {}), '(csv_file)\n', (308, 318), True, 'import pandas as pd\n'), ((695, 813), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.225]', 'std': '[1 / 0.229, 1 / 0.224, 1 / 0.225]'}), '(mean=[-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.225],\n std=[1 / 0.229, 1 / 0.224, 1 / 0.225])\n', (715, 813), False, 'from torchvision import transforms\n'), ((923, 933), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (931, 933), True, 'import matplotlib.pyplot as plt\n'), ((1044, 1100), 'os.path.join', 'os.path.join', (['self.train_dir', 'self.labels.iloc[index][0]'], {}), '(self.train_dir, self.labels.iloc[index][0])\n', (1056, 1100), False, 'import os\n'), ((1117, 1136), 'skimage.io.imread', 'io.imread', (['img_path'], {}), '(img_path)\n', (1126, 1136), False, 'from skimage import io\n'), ((412, 435), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (433, 435), False, 'from torchvision import transforms\n'), ((449, 477), 'torchvision.transforms.Resize', 'transforms.Resize', (['(66, 220)'], {}), '((66, 220))\n', (466, 477), False, 'from torchvision import transforms\n'), ((490, 511), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (509, 511), False, 'from torchvision import transforms\n'), ((525, 600), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (545, 600), False, 'from torchvision import transforms\n')] |
# Authors: <NAME>
# License: MIT
import theano
import theano.tensor as TT
def pairwise_theano_tensor_prepare(dtype):
    """Compile a theano function computing all pairwise Euclidean distances
    between the rows of a matrix via explicit broadcasting."""
    mat = TT.matrix(dtype=str(dtype))
    # (n, 1, d) - (n, d) broadcasts to the full (n, n, d) difference tensor.
    diffs = mat[:, None, :] - mat
    dists = TT.sqrt(TT.sum(TT.sqr(diffs), axis=2))
    name = 'pairwise_theano_broadcast_' + dtype
    fn = theano.function([mat],
                         theano.Out(dists, borrow=True),
                         allow_input_downcast=True, name=name)
    fn.__name__ = name
    return fn
def pairwise_theano_blas_prepare(dtype):
    """Compile a theano function for pairwise Euclidean distances using the
    BLAS-friendly identity ||xi - xj||^2 = ||xi||^2 + ||xj||^2 - 2 * xi.xj.

    Bug fix: the original computed sqrt(2 * X_norm_2 - dot(X, X.T)), which
    broadcasts the squared norms over a single axis and drops the factor of 2
    on the dot product, so it is not the Euclidean distance matrix.
    """
    X = TT.matrix(dtype=str(dtype))
    sq_norms = (X ** 2).sum(axis=1)
    # Row norms broadcast down columns, column norms across rows.
    sq_dists = sq_norms[:, None] + sq_norms[None, :] - 2 * TT.dot(X, X.T)
    # Clamp tiny negatives caused by floating-point cancellation before sqrt.
    dists = TT.sqrt(TT.maximum(sq_dists, 0))
    name = 'pairwise_theano_blas_' + dtype
    rval = theano.function([X],
                        theano.Out(dists, borrow=True),
                        allow_input_downcast=True, name=name)
    rval.__name__ = name
    return rval
# Pre-compiled benchmark callables: the broadcasting and BLAS variants, each
# in single and double precision.
benchmarks = (
    pairwise_theano_tensor_prepare('float32'),
    pairwise_theano_tensor_prepare('float64'),
    pairwise_theano_blas_prepare('float32'),
    pairwise_theano_blas_prepare('float64'),
)
| [
"theano.tensor.sqr",
"theano.Out",
"theano.tensor.dot"
] | [((359, 389), 'theano.Out', 'theano.Out', (['dists'], {'borrow': '(True)'}), '(dists, borrow=True)\n', (369, 389), False, 'import theano\n'), ((765, 795), 'theano.Out', 'theano.Out', (['dists'], {'borrow': '(True)'}), '(dists, borrow=True)\n', (775, 795), False, 'import theano\n'), ((204, 229), 'theano.tensor.sqr', 'TT.sqr', (['(X[:, None, :] - X)'], {}), '(X[:, None, :] - X)\n', (210, 229), True, 'import theano.tensor as TT\n'), ((647, 661), 'theano.tensor.dot', 'TT.dot', (['X', 'X.T'], {}), '(X, X.T)\n', (653, 661), True, 'import theano.tensor as TT\n')] |
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
import pytest
from renormalizer.mps import Mps, Mpo, MpDm, ThermalProp
from renormalizer.mps.backend import np
from renormalizer.tests.parameter import holstein_model
from renormalizer.utils import Quantity
# On-site creation operator a^dagger acting on the middle molecule of the chain.
creation_operator = Mpo.onsite(
    holstein_model, r"a^\dagger", dof_set={holstein_model.mol_num // 2}
)
def check_property(mp):
    """Assert that *mp* is normalized, carries exactly one electron on the
    middle molecule, and has no phonon occupation."""
    expected_e = np.zeros(holstein_model.mol_num)
    expected_e[holstein_model.mol_num // 2] = 1
    assert mp.norm == pytest.approx(1)
    assert np.allclose(mp.e_occupations, expected_e)
    assert np.allclose(mp.ph_occupations, 0)
def test_mps():
    """Applying a^dagger to the electronic ground state should place one
    electron on the middle molecule."""
    excited = creation_operator @ Mps.ground_state(holstein_model, max_entangled=False)
    check_property(excited)
def test_mpo():
    """Same excitation check as test_mps, but on a thermal density matrix."""
    # Start from the maximum-entangled electronic ground-state density matrix.
    gs_dm = MpDm.max_entangled_gs(holstein_model)
    # Inverse temperature for 10 K; dividing by 1j below turns real-time
    # propagation into imaginary-time (thermal) evolution.
    beta = Quantity(10, "K").to_beta()
    tp = ThermalProp(gs_dm, exact=True, space="GS")
    tp.evolve(None, 500, beta / 1j)
    gs_dm = tp.latest_mps
    # Creating an electron on the thermal state should reproduce the same
    # occupation pattern asserted by check_property.
    mp = creation_operator @ gs_dm
    check_property(mp)
| [
"pytest.approx",
"renormalizer.mps.backend.np.allclose",
"renormalizer.mps.backend.np.zeros",
"renormalizer.mps.Mps.ground_state",
"renormalizer.utils.Quantity",
"renormalizer.mps.Mpo.onsite",
"renormalizer.mps.MpDm.max_entangled_gs",
"renormalizer.mps.ThermalProp"
] | [((309, 388), 'renormalizer.mps.Mpo.onsite', 'Mpo.onsite', (['holstein_model', '"""a^\\\\dagger"""'], {'dof_set': '{holstein_model.mol_num // 2}'}), "(holstein_model, 'a^\\\\dagger', dof_set={holstein_model.mol_num // 2})\n", (319, 388), False, 'from renormalizer.mps import Mps, Mpo, MpDm, ThermalProp\n'), ((447, 479), 'renormalizer.mps.backend.np.zeros', 'np.zeros', (['holstein_model.mol_num'], {}), '(holstein_model.mol_num)\n', (455, 479), False, 'from renormalizer.mps.backend import np\n'), ((589, 639), 'renormalizer.mps.backend.np.allclose', 'np.allclose', (['mp.e_occupations', 'electron_occupation'], {}), '(mp.e_occupations, electron_occupation)\n', (600, 639), False, 'from renormalizer.mps.backend import np\n'), ((651, 684), 'renormalizer.mps.backend.np.allclose', 'np.allclose', (['mp.ph_occupations', '(0)'], {}), '(mp.ph_occupations, 0)\n', (662, 684), False, 'from renormalizer.mps.backend import np\n'), ((716, 769), 'renormalizer.mps.Mps.ground_state', 'Mps.ground_state', (['holstein_model'], {'max_entangled': '(False)'}), '(holstein_model, max_entangled=False)\n', (732, 769), False, 'from renormalizer.mps import Mps, Mpo, MpDm, ThermalProp\n'), ((861, 898), 'renormalizer.mps.MpDm.max_entangled_gs', 'MpDm.max_entangled_gs', (['holstein_model'], {}), '(holstein_model)\n', (882, 898), False, 'from renormalizer.mps import Mps, Mpo, MpDm, ThermalProp\n'), ((947, 989), 'renormalizer.mps.ThermalProp', 'ThermalProp', (['gs_dm'], {'exact': '(True)', 'space': '"""GS"""'}), "(gs_dm, exact=True, space='GS')\n", (958, 989), False, 'from renormalizer.mps import Mps, Mpo, MpDm, ThermalProp\n'), ((561, 577), 'pytest.approx', 'pytest.approx', (['(1)'], {}), '(1)\n', (574, 577), False, 'import pytest\n'), ((910, 927), 'renormalizer.utils.Quantity', 'Quantity', (['(10)', '"""K"""'], {}), "(10, 'K')\n", (918, 927), False, 'from renormalizer.utils import Quantity\n')] |
# -*- coding: utf-8 -*-
import sys
import numpy as np
import torch
from torch.autograd import Variable
from pytorch2keras.converter import pytorch_to_keras
import torchvision
import os.path as osp
import os
os.environ['KERAS_BACKEND'] = 'tensorflow'
from keras import backend as K
K.clear_session()
K.set_image_dim_ordering('tf')
import test
import tensorflow as tf
import torch
from torch import nn
from torchsummary import summary
from torch.autograd import Variable
import tensorflow
from tensorflow.python.keras.backend import get_session
from tensorflow.python.keras.models import load_model
from tensorflow.python.framework import graph_util, graph_io
from keras.utils import plot_model
# K.set_image_data_format('channels_first')
import cv2
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
def softmax(x):
    """Numerically stable softmax.

    Shifts the input by its maximum before exponentiating so that large
    logits do not overflow ``np.exp`` (the original produced inf/inf = NaN for
    inputs around 1000).  The shift cancels in the ratio, so the result is
    mathematically identical to exp(x) / sum(exp(x)).
    """
    shifted = x - np.max(x)
    exp_x = np.exp(shifted)
    softmax_x = exp_x / np.sum(exp_x)
    return softmax_x
def check_error(output, k_model, input_np, epsilon=1e-3):
    """Compare a pytorch forward result against the converted keras model.

    output:   sequence whose first element is a torch tensor (pytorch result).
    k_model:  keras model; ``predict(input_np)[0]`` is compared elementwise.
    input_np: numpy input fed to the keras model.
    epsilon:  maximum tolerated absolute elementwise difference.

    Returns the maximum absolute error.  Bug fix: the original used
    ``np.max(pytorch_output - keras_output)`` on the signed difference, which
    silently ignores deviations where keras exceeds pytorch (negative diffs).
    """
    pytorch_output = output[0].data.cpu().numpy()
    keras_output = k_model.predict(input_np)[0]
    error = np.max(np.abs(pytorch_output - keras_output))
    print('Error:', error)
    assert error < epsilon
    return error
import numpy as np
def normalization0_1(data):
    """Rescale *data* to [0, 1], then standardize with ImageNet statistics.

    The last axis of *data* must have length 3 (RGB channels) so the
    per-channel mean/std broadcast correctly.
    """
    lo, hi = np.min(data), np.max(data)
    scaled = (data - lo) / (hi - lo)
    imagenet_mean = [0.485, 0.456, 0.406]
    imagenet_std = [0.229, 0.224, 0.225]
    return np.divide(np.subtract(scaled, imagenet_mean), imagenet_std)
def h5_to_pb(h5_model, output_dir, model_name, out_prefix="output_", ):
    """Freeze a loaded keras .h5 model into a TF1 .pb graph file.

    h5_model:   keras model already loaded in the current TF session.
    output_dir: directory the .pb is written into (created if missing).
    model_name: file name of the resulting frozen graph (e.g. 'model.pb').
    out_prefix: prefix used to alias the model's first output tensor.
    """
    if osp.exists(output_dir) == False:
        os.mkdir(output_dir)
    out_nodes = ["output_0_1"] ##get from init_graph
    # out_nodes.append(out_prefix + str(0))
    # Alias the first model output so it can be located by name in the graph.
    tf.identity(h5_model.output[0], out_prefix + str(0))
    sess = get_session()
    init_graph = sess.graph.as_graph_def() ##get out_nodes
    # Bake the session's variable values into constants, then serialize.
    main_graph = graph_util.convert_variables_to_constants(sess, init_graph, out_nodes)
    graph_io.write_graph(main_graph, output_dir, name=model_name, as_text=False)
if __name__ == '__main__':
    # step1: load the trained pytorch model
    model = torch.load("/home/dp/Desktop/algorithms/Pelee.Pytorch/weights/Pelee_COCO_size304_epoch40.pth")
    model = model.cuda()  ##cuda
    summary(model, (3, 304, 304))  ##summary(model, (channels, pic_h, pic_w))
    model.eval()

    # step2: convert pytorch .pth to keras .h5 and verify the conversion
    input_np = np.random.uniform(0, 1, (1, 3, 304, 304))
    input_var = Variable(torch.FloatTensor(input_np)).cuda()  ##cuda
    # input_var = Variable(torch.FloatTensor(input_np))
    k_model = pytorch_to_keras(model, input_var, (3, 304, 304,), verbose=True, name_policy='short')
    k_model.summary()
    k_model.save('my_model.h5')
    output = model(input_var)
    check_error(output, k_model, input_np)  ## check the error between .pth and .h5

    # step3: load .h5 and freeze it to .pb
    tf.keras.backend.clear_session()
    tf.keras.backend.set_learning_phase(0)  # must precede exporting the graph
    my_model = load_model('my_model.h5')
    h5_to_pb(my_model, output_dir='./model/', model_name='model.pb')

    # step4: load .pb and run inference on the sample images
    pb_path = './model/model.pb'
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        graph_def = tf.GraphDef()
        with tf.gfile.GFile(pb_path, 'rb') as f:
            graph_def.ParseFromString(f.read())
            _ = tf.import_graph_def(graph_def, name="")
        pic_file = './datasets/data'
        pic_list = os.listdir(pic_file)
        for name in pic_list:
            img_path = '{}/{}'.format(pic_file, name)
            im = cv2.imread(img_path)
            im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
            img = cv2.resize(im, (304, 304))
            img = np.asarray(img, dtype=np.float32)
            img = normalization0_1(img)
            img_data = np.transpose(img, (2, 0, 1))
            img_input = np.asarray(img_data, dtype=np.float32)[np.newaxis, :, :, :]
            input = sess.graph.get_tensor_by_name("input_0:0")
            output = sess.graph.get_tensor_by_name("output_0_1:0")
            pre_label = sess.run([output], feed_dict={input: img_input})
            pre_label = pre_label[0][0]
            # print(pre_label)
            pre_label = np.argmax(softmax(pre_label))
            print('------------------------')
            # Bug fix: the loop variable is `name`; the original referenced an
            # undefined `pic_name`, raising NameError on the first image.
            print('{} prelabel is {}'.format(name, pre_label))
| [
"tensorflow.gfile.GFile",
"os.path.exists",
"os.listdir",
"tensorflow.Session",
"numpy.asarray",
"numpy.subtract",
"numpy.max",
"numpy.exp",
"tensorflow.python.keras.backend.get_session",
"tensorflow.GraphDef",
"keras.backend.clear_session",
"tensorflow.python.keras.models.load_model",
"nump... | [((284, 301), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (299, 301), True, 'from keras import backend as K\n'), ((302, 332), 'keras.backend.set_image_dim_ordering', 'K.set_image_dim_ordering', (['"""tf"""'], {}), "('tf')\n", (326, 332), True, 'from keras import backend as K\n'), ((828, 837), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (834, 837), True, 'import numpy as np\n'), ((1375, 1412), 'numpy.max', 'np.max', (['(pytorch_output - keras_output)'], {}), '(pytorch_output - keras_output)\n', (1381, 1412), True, 'import numpy as np\n'), ((2050, 2063), 'tensorflow.python.keras.backend.get_session', 'get_session', ([], {}), '()\n', (2061, 2063), False, 'from tensorflow.python.keras.backend import get_session\n'), ((2141, 2211), 'tensorflow.python.framework.graph_util.convert_variables_to_constants', 'graph_util.convert_variables_to_constants', (['sess', 'init_graph', 'out_nodes'], {}), '(sess, init_graph, out_nodes)\n', (2182, 2211), False, 'from tensorflow.python.framework import graph_util, graph_io\n'), ((2216, 2292), 'tensorflow.python.framework.graph_io.write_graph', 'graph_io.write_graph', (['main_graph', 'output_dir'], {'name': 'model_name', 'as_text': '(False)'}), '(main_graph, output_dir, name=model_name, as_text=False)\n', (2236, 2292), False, 'from tensorflow.python.framework import graph_util, graph_io\n'), ((2392, 2496), 'torch.load', 'torch.load', (['"""/home/dp/Desktop/algorithms/Pelee.Pytorch/weights/Pelee_COCO_size304_epoch40.pth"""'], {}), "(\n '/home/dp/Desktop/algorithms/Pelee.Pytorch/weights/Pelee_COCO_size304_epoch40.pth'\n )\n", (2402, 2496), False, 'import torch\n'), ((2524, 2553), 'torchsummary.summary', 'summary', (['model', '(3, 304, 304)'], {}), '(model, (3, 304, 304))\n', (2531, 2553), False, 'from torchsummary import summary\n'), ((2684, 2725), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1, 3, 304, 304)'], {}), '(0, 1, (1, 3, 304, 304))\n', (2701, 2725), True, 'import numpy as 
np\n'), ((2865, 2954), 'pytorch2keras.converter.pytorch_to_keras', 'pytorch_to_keras', (['model', 'input_var', '(3, 304, 304)'], {'verbose': '(True)', 'name_policy': '"""short"""'}), "(model, input_var, (3, 304, 304), verbose=True, name_policy\n ='short')\n", (2881, 2954), False, 'from pytorch2keras.converter import pytorch_to_keras\n'), ((3162, 3194), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (3192, 3194), True, 'import tensorflow as tf\n'), ((3199, 3237), 'tensorflow.keras.backend.set_learning_phase', 'tf.keras.backend.set_learning_phase', (['(0)'], {}), '(0)\n', (3234, 3237), True, 'import tensorflow as tf\n'), ((3261, 3286), 'tensorflow.python.keras.models.load_model', 'load_model', (['"""my_model.h5"""'], {}), "('my_model.h5')\n", (3271, 3286), False, 'from tensorflow.python.keras.models import load_model\n'), ((862, 875), 'numpy.sum', 'np.sum', (['exp_x'], {}), '(exp_x)\n', (868, 875), True, 'import numpy as np\n'), ((1549, 1561), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (1555, 1561), True, 'import numpy as np\n'), ((1564, 1576), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (1570, 1576), True, 'import numpy as np\n'), ((1708, 1731), 'numpy.subtract', 'np.subtract', (['data', 'mean'], {}), '(data, mean)\n', (1719, 1731), True, 'import numpy as np\n'), ((1822, 1844), 'os.path.exists', 'osp.exists', (['output_dir'], {}), '(output_dir)\n', (1832, 1844), True, 'import os.path as osp\n'), ((1863, 1883), 'os.mkdir', 'os.mkdir', (['output_dir'], {}), '(output_dir)\n', (1871, 1883), False, 'import os\n'), ((3435, 3447), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3445, 3447), True, 'import tensorflow as tf\n'), ((3525, 3538), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (3536, 3538), True, 'import tensorflow as tf\n'), ((3750, 3770), 'os.listdir', 'os.listdir', (['pic_file'], {}), '(pic_file)\n', (3760, 3770), False, 'import os\n'), ((1596, 1608), 'numpy.min', 'np.min', 
(['data'], {}), '(data)\n', (1602, 1608), True, 'import numpy as np\n'), ((3553, 3582), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['pb_path', '"""rb"""'], {}), "(pb_path, 'rb')\n", (3567, 3582), True, 'import tensorflow as tf\n'), ((3653, 3692), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (3672, 3692), True, 'import tensorflow as tf\n'), ((3872, 3892), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (3882, 3892), False, 'import cv2\n'), ((3910, 3945), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (3922, 3945), False, 'import cv2\n'), ((3964, 3990), 'cv2.resize', 'cv2.resize', (['im', '(304, 304)'], {}), '(im, (304, 304))\n', (3974, 3990), False, 'import cv2\n'), ((4009, 4042), 'numpy.asarray', 'np.asarray', (['img'], {'dtype': 'np.float32'}), '(img, dtype=np.float32)\n', (4019, 4042), True, 'import numpy as np\n'), ((4106, 4134), 'numpy.transpose', 'np.transpose', (['img', '(2, 0, 1)'], {}), '(img, (2, 0, 1))\n', (4118, 4134), True, 'import numpy as np\n'), ((2751, 2778), 'torch.FloatTensor', 'torch.FloatTensor', (['input_np'], {}), '(input_np)\n', (2768, 2778), False, 'import torch\n'), ((3465, 3498), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3496, 3498), True, 'import tensorflow as tf\n'), ((4159, 4197), 'numpy.asarray', 'np.asarray', (['img_data'], {'dtype': 'np.float32'}), '(img_data, dtype=np.float32)\n', (4169, 4197), True, 'import numpy as np\n')] |
import json
from typing import List, Literal, Union
class Product:
    """A purchasable item: display *name*, checkout *code*, and unit *price*."""

    def __init__(self, name: str, code: str, price: float):
        self.code = code
        self.price = price
        self.name = name
# Breakdown coupon's description into quantifiable attributes
# For example: BOGO on coffee can be translated as an object
# with certain properties should look like
# bogo_obj.target = "CF1"
# bogo_obj.apply_on = "CF1"
# bogo_obj.discount = 100
# bogo_obj.discount_type = "percent"
# bogo_obj.trigger_limit = 1
# bogo_obj.limit = 0 # no limit
# bogo_obj.apply_all = False
class Coupon:
    """A discount rule broken down into quantifiable attributes.

    target:        product code whose presence triggers the coupon.
    apply_on:      product code the discount is applied to.
    discount:      discount amount (percent value or fixed currency amount).
    discount_type: "percent" or "fixed".  Typing fix: the original annotated
                   this as ``Union["percent", "fixed"]``, which is invalid --
                   ``Union`` takes types, not string values; ``Literal`` is
                   the correct construct for a closed set of values.
    trigger_limit: number of target items needed before the discount applies.
    limit:         maximum number of applications (0 means unlimited).
    apply_all:     whether the discount applies to every matching item.
    """

    def __init__(
        self,
        name: str,
        description: str,
        target: str,
        apply_on: str,
        discount: float,
        discount_type: Literal["percent", "fixed"],
        trigger_limit: int,
        limit: int,
        apply_all: bool,
    ):
        self.name = name
        self.description = description
        self.target = target
        self.apply_on = apply_on
        self.discount = discount
        self.discount_type = discount_type
        self.trigger_limit = trigger_limit
        self.limit = limit
        self.apply_all = apply_all
class BasketItem:
    """One product line in a basket, optionally carrying an applied coupon.

    *discount* is the absolute amount subtracted from the basket total for
    this line; *should_apply* marks whether the coupon is still eligible.
    """

    def __init__(
        self,
        product: Product,
        coupon: Coupon = None,
        discount: float = 0.00,
        should_apply: bool = True,
    ):
        self.coupon = coupon
        self.discount = discount
        self.should_apply = should_apply
        self.product = product
class Basket:
    """A collection of BasketItems with discount-aware totalling."""

    def __init__(self, items: List[BasketItem] = None):
        # Guard against the None default: the original stored None directly,
        # so Basket().total() and Basket().to_dict() raised TypeError when
        # iterating the items.
        self.basket_items = items if items is not None else []

    def to_dict(self):
        """Serialize the basket (items plus grand total) to plain dicts.

        Nested objects (Product/Coupon) are flattened through their __dict__
        by the json round-trip below.
        """
        return {
            "basket_items": json.loads(
                json.dumps(self.basket_items, default=lambda x: x.__dict__)
            ),
            "total": self.total(),
        }

    def total(self):
        """Sum item prices minus their per-line discounts, rounded to cents."""
        total = 0.00
        for item in self.basket_items:
            total += item.product.price
            total -= item.discount
        return round(total, 2)
| [
"json.dumps"
] | [((1689, 1748), 'json.dumps', 'json.dumps', (['self.basket_items'], {'default': '(lambda x: x.__dict__)'}), '(self.basket_items, default=lambda x: x.__dict__)\n', (1699, 1748), False, 'import json\n')] |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# -----------------------------------------------------------------------------------
import asyncio
def get_running_loop():
    """Return the event loop running in the current thread.

    Uses the public accessor on Python 3.7+; on 3.5/3.6, where it does not
    exist, falls back to the private ``asyncio._get_running_loop`` and raises
    RuntimeError itself when no loop is running.
    """
    try:
        getter = asyncio.get_running_loop
    except AttributeError:  # 3.5 / 3.6
        loop = asyncio._get_running_loop()  # pylint: disable=protected-access
        if loop is None:
            raise RuntimeError('No running event loop')
        return loop
    return getter()
| [
"asyncio.get_running_loop",
"asyncio._get_running_loop"
] | [((402, 428), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (426, 428), False, 'import asyncio\n'), ((484, 511), 'asyncio._get_running_loop', 'asyncio._get_running_loop', ([], {}), '()\n', (509, 511), False, 'import asyncio\n')] |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
qa test for elevators
Authors: likejiao(<EMAIL>)
Date: 2019/06/16 19:30:16
"""
import sys
import time
import copy
import traceback
from metagym.liftsim.environment.env import LiftSim
from metagym.liftsim.environment.mansion.person_generators.generator_proxy import PersonGenerator
from metagym.liftsim.environment.mansion.mansion_config import MansionConfig
from metagym.liftsim.environment.mansion.utils import ElevatorState, MansionState, ElevatorAction
from metagym.liftsim.environment.mansion.mansion_manager import MansionManager
from rule_benchmark.dispatcher import Rule_dispatcher
# Module-level state read/written via `global` in state_check below.
fail_flag = False
stop_count = 10
def state_check(state, next_state, action):
global fail_flag
global stop_count
try:
assert isinstance(state, MansionState)
# for e in state.ElevatorStates:
for i in range(len(state.ElevatorStates)):
ele = copy.deepcopy(state.ElevatorStates[i])
assert isinstance(ele, ElevatorState)
next_ele = copy.deepcopy(next_state.ElevatorStates[i])
assert isinstance(next_ele, ElevatorState)
act = copy.deepcopy(action[i])
assert isinstance(act, ElevatorAction)
# type
ele_Floor = ele.Floor
ele_Velocity = ele.Velocity
ele_LoadWeight = ele.LoadWeight
next_ele_Floor = next_ele.Floor
next_ele_Velocity = next_ele.Velocity
next_ele_LoadWeight = next_ele.LoadWeight
assert isinstance(ele_Floor, float)
assert isinstance(ele.MaximumFloor, int)
assert isinstance(ele_Velocity, float)
assert isinstance(ele.MaximumSpeed, float)
assert isinstance(ele.Direction, int)
assert isinstance(ele.CurrentDispatchTarget, int)
assert isinstance(ele.DispatchTargetDirection, int)
assert isinstance(ele_LoadWeight, float)
assert isinstance(ele.MaximumLoad, int)
assert isinstance(ele.OverloadedAlarm, float)
assert isinstance(ele.DoorIsOpening, bool)
assert isinstance(ele.DoorIsClosing, bool)
assert isinstance(ele.ReservedTargetFloors, list)
# change
ele_Floor = round(ele_Floor, 2)
ele_Velocity = round(ele_Velocity, 2)
ele_LoadWeight = round(ele_LoadWeight, 2)
next_ele_Velocity = round(next_ele_Velocity, 2)
ele_Velocity = round(ele_Velocity, 2)
next_ele_LoadWeight = round(next_ele_LoadWeight, 2)
# range
assert ele_Floor > 0 and ele_Floor <= ele.MaximumFloor
assert ele_Velocity >= (0 - ele.MaximumSpeed) and ele_Velocity <= ele.MaximumSpeed
assert ele.Direction in [-1, 0, 1]
assert ele.CurrentDispatchTarget >= -1 and ele.CurrentDispatchTarget <= ele.MaximumFloor
assert ele.DispatchTargetDirection in [-1, 1]
assert ele_LoadWeight >= 0 and ele_LoadWeight <= ele.MaximumLoad
assert ele.OverloadedAlarm >= 0 and ele.OverloadedAlarm <= 2.0
assert ele.DoorState >= 0 and ele.DoorState <= 1
assert ele.DoorIsClosing in [True, False]
assert ele.DoorIsOpening in [True, False]
for t in ele.ReservedTargetFloors:
assert t >= 1 and t <= ele.MaximumFloor
#relation
if(ele_Velocity == 0 and ele.Direction != 0):
assert (ele_Floor % 1) == 0 or \
(ele_Floor % 1 != 0 and next_ele.Direction == 0)
if(round(ele_Floor, 1) % 1 != 0 and ele.Direction != 0):
assert ele_Velocity != 0 or next_ele_Velocity != 0 or\
next_ele.Direction == 0 or ele_Floor == ele.CurrentDispatchTarget
assert (ele.DoorIsClosing and ele.DoorIsOpening) == False
if(ele.DoorState < 1 and ele.DoorState > 0):
assert (ele.DoorIsClosing or ele.DoorIsOpening) == True
assert ele_Floor % 1 == 0
# if(ele.DoorState in [0.0, 1.0]):
# assert (ele.DoorIsClosing or ele.DoorIsOpening) == False # ignore
if(ele.DoorState in [0.0, 1.0]):
if((ele.DoorIsClosing or ele.DoorIsOpening) == True):
if(next_ele.DoorState in [0.0, 1.0]):
assert (next_ele.DoorIsClosing or next_ele.DoorIsOpening) == False
if((ele_Floor % 1 != 0) or ((ele.DoorIsClosing and ele.DoorIsOpening) == True)):
assert ele.DoorState == 0.0
assert ele.DoorIsClosing == False or next_ele.DoorIsClosing == False
assert ele.DoorIsOpening == False
if(ele_Velocity != 0.0 and ele.Direction != 0):
assert ele.DoorState == 0.0
if(ele_Velocity != 0.0 and len(ele.ReservedTargetFloors) > 0):
assert ele_LoadWeight > 0
if(ele_Velocity != 0.0 and ele_LoadWeight > 0):
assert len(ele.ReservedTargetFloors) > 0
if(next_ele.OverloadedAlarm > 0 and ele.OverloadedAlarm == 0):
assert next_ele_LoadWeight >= ele.MaximumLoad - 200
if(len(ele.ReservedTargetFloors) != 0):
assert ele_LoadWeight >= 20
# dynamic check
delta_Floor = round(next_ele_Floor - ele_Floor, 2)
assert delta_Floor * next_ele_Velocity >= 0 or delta_Floor * ele_Velocity >= 0
target_list = ele.ReservedTargetFloors[:]
# if(ele.CurrentDispatchTarget != 0):
# target_list.append(ele.CurrentDispatchTarget)
if(delta_Floor > 0 and ele_Velocity != 0.0 and ele_Floor % 1 != 0): # going up
min_target = min(target_list) if len(target_list) > 0 else ele.MaximumFloor + 1
assert ele_Floor <= min_target
assert next_ele_Velocity > 0 or ele_Velocity > 0 or ele.Direction == 0
if(delta_Floor < 0 and ele_Velocity != 0.0 and ele_Floor % 1 != 0): # going down
max_target = max(target_list) if len(target_list) > 0 else 0
assert ele_Floor >= max_target
assert next_ele_Velocity < 0 or ele_Velocity < 0 or ele.Direction == 0
# if(delta_Floor == 0):
# assert next_ele_Velocity == 0 or ele_Velocity * next_ele_Velocity <= 0
if((next_ele_LoadWeight - ele_LoadWeight) > 0.01):
assert ele.DoorState > 0 or ele.DoorIsOpening or ele.DoorIsClosing
if((next_ele_LoadWeight - ele_LoadWeight) < -0.01):
assert ele.DoorState > 0 or ele.DoorIsOpening or ele.DoorIsClosing
if(ele.OverloadedAlarm < next_ele.OverloadedAlarm):
assert ele.DoorState > 0 or ele.DoorIsOpening or ele.DoorIsClosing
assert len(next_ele.ReservedTargetFloors) == len(ele.ReservedTargetFloors) #?????
# assert next_ele_LoadWeight >= ele_LoadWeight # not right
if(len(next_ele.ReservedTargetFloors) > len(ele.ReservedTargetFloors)):
assert (next_ele_LoadWeight - ele_LoadWeight) >= 0 #!!!
assert ele.DoorState > 0 or ele.DoorIsOpening or ele.DoorIsClosing
if(len(next_ele.ReservedTargetFloors) < len(ele.ReservedTargetFloors)):
# assert (next_ele_LoadWeight - ele_LoadWeight) < 0 # not right
assert ele.DoorState > 0 or ele.DoorIsOpening or ele.DoorIsClosing
# if(ele.OverloadedAlarm > 0):
# assert ele.ReservedTargetFloors == next_ele.ReservedTargetFloors
# assert ele_LoadWeight == next_ele_LoadWeight
# assert ele.DoorState > 0 or ele.DoorIsOpening or ele.DoorIsClosing
if(fail_flag):
stop_count -= 1
if(stop_count == 0):
print('\n\nSome error appear before several steps, please check\n\n')
exit(1)
except AssertionError:
_, _, tb = sys.exc_info()
traceback.print_tb(tb) # Fixed format
tb_info = traceback.extract_tb(tb)
filename, line, func, text = tb_info[-1]
print('An error occurred on line {} in statement {}'.format(line, text))
print('\n========================== ele num: ', i)
print('\nlast: ', ele)
print('\nthis: ', next_ele)
print('\n========================== please check\n\n')
fail_flag = True
def print_state(state, action):
assert isinstance(state, MansionState)
print('Num\tact\tact.dir\tFloor\t\tMaxF\tV\t\tMaxV\tDir\tTarget\tTDir\tLoad\tMaxL\tOver\tDoor\topening\tclosing\tReservedTargetFloors')
i = 0
for i in range(len(state.ElevatorStates)):
ele = state.ElevatorStates[i]
act = action[i]
assert isinstance(ele, ElevatorState)
assert isinstance(act, ElevatorAction)
print(i,"\t|",act.TargetFloor,"\t|",act.DirectionIndicator,"\t|",
'%2.4f'%ele.Floor,"\t|",ele.MaximumFloor,"\t|",
'%2.7f'%ele.Velocity,"\t|",ele.MaximumSpeed,"\t|",
ele.Direction,"\t|",ele.CurrentDispatchTarget,"\t|",ele.DispatchTargetDirection,"\t|",
int(ele.LoadWeight),"\t|",ele.MaximumLoad,"\t|",'%.2f'%ele.OverloadedAlarm,"\t|",
ele.DoorState,"\t|",int(ele.DoorIsOpening),"\t|",int(ele.DoorIsClosing),"\t|",ele.ReservedTargetFloors)
i += 1
print('------------------RequiringUpwardFloors', state.RequiringUpwardFloors)
print('------------------RequiringDownwardFloors', state.RequiringDownwardFloors)
print('')
# time.sleep(2)
def print_next_state(state):
assert isinstance(state, MansionState)
print('Num\tact\tact.dir\tFloor\t\tMaxF\tV\tMaxV\tDir\tTarget\tTDir\tLoad\tMaxL\tOver\tDoor\topening\tclosing\tRT')
i = 0
for i in range(len(state.ElevatorStates)):
ele = state.ElevatorStates[i]
# act = action[i]
assert isinstance(ele, ElevatorState)
# assert isinstance(act, ElevatorAction)
i += 1
print(i,"\t|",' ',"\t|",' ',"\t|",
'%.2f'%ele.Floor,"\t|",ele.MaximumFloor,"\t|",
'%.1f'%ele.Velocity,"\t|",ele.MaximumSpeed,"\t|",
ele.Direction,"\t|",ele.CurrentDispatchTarget,"\t|",ele.DispatchTargetDirection,"\t|",
'%.1f'%ele.LoadWeight,"\t|",ele.MaximumLoad,"\t|",ele.OverloadedAlarm,"\t|",
ele.DoorState,"\t|",int(ele.DoorIsOpening),"\t|",int(ele.DoorIsClosing),"\t|",ele.ReservedTargetFloors)
print('------------------RequiringUpwardFloors', state.RequiringUpwardFloors)
print('------------------RequiringDownwardFloors', state.RequiringDownwardFloors)
print('')
# time.sleep(2)
def run_mansion_main(mansion_env, policy_handle, iteration):
last_state = mansion_env.reset()
# policy_handle.link_mansion(mansion_env.attribute)
# policy_handle.load_settings()
i = 0
acc_reward = 0.0
# = copy.deepcopy(mansion_env.state)
while i < iteration:
i += 1
# state = mansion_env.state
action = policy_handle.policy(last_state)
state, r, _, _ = mansion_env.step(elevatoraction_to_list(action))
# output_info = policy_handle.feedback(last_state, action, r)
acc_reward += r
# if(isinstance(output_info, dict) and len(output_info) > 0):
# mansion_env.log_notice("%s", output_info)
if(i % 3600 == 0):
print(
"Accumulated Reward: %f, Mansion Status: %s",
acc_reward, mansion_env.statistics)
acc_reward = 0.0
print_state(state, action)
print('reward: %f' % r)
state_check(last_state, state, action)
last_state = copy.deepcopy(state)
# run main program with args
def run_qa_test(configfile, iterations, controlpolicy, set_seed=None):
print('configfile:', configfile) # configuration file for running elevators
print('iterations:', iterations) # total number of iterations
print('controlpolicy:', controlpolicy) # policy type: rule_benchmark or others
mansion_env = LiftSim(configfile)
if(set_seed):
mansion_env.seed(set_seed)
if controlpolicy == 'rule_benchmark':
dispatcher = Rule_dispatcher(mansion_env, iterations)
elif controlpolicy == 'rl_benchmark':
pass
run_mansion_main(mansion_env, dispatcher, iterations)
return 0
def run_time_step_abnormal_test(configfile, iterations, controlpolicy, set_seed=None):
try:
run_qa_test(configfile, iterations, controlpolicy, set_seed=set_seed)
except AssertionError:
print('run_time_step_abnormal_test pass')
def run_action_abnormal_test(action_target_floor, action_target_direction, set_seed):
flag = True
try:
env = LiftSim()
if(set_seed):
env.seed(set_seed)
state = env.reset()
action = [ElevatorAction(action_target_floor, action_target_direction) for i in range(4)]
next_state, reward, _, _ = env.step(elevatoraction_to_list(action))
except AssertionError:
flag = False
print('abnormal action: ', action_target_floor, type(action_target_floor) \
, action_target_direction, type(action_target_direction))
print('run_action_abnormal_test pass')
if (flag):
print('abnormal action: ', action_target_floor, type(action_target_floor) \
, action_target_direction, type(action_target_direction))
print('run_action_abnormal_test fail')
assert False
def elevatoraction_to_list(action):
action_list = []
for a in action:
action_list.append(a.TargetFloor)
action_list.append(a.DirectionIndicator)
return action_list
if __name__ == "__main__":
if (len(sys.argv) == 2):
set_seed = int(sys.argv[1])
else:
set_seed = None
run_time_step_abnormal_test('metagym/liftsim/tests/conf/config_time_step_more_than_1.ini', 100, 'rule_benchmark', set_seed)
run_action_abnormal_test(-2, 1, set_seed)
run_action_abnormal_test(10000, -1, set_seed)
run_action_abnormal_test(5.0, 1, set_seed)
run_action_abnormal_test('5', 1, set_seed)
run_action_abnormal_test(5, 4, set_seed)
run_action_abnormal_test(5, '-1', set_seed)
run_qa_test('metagym/liftsim/config.ini', 4000, 'rule_benchmark', set_seed)
run_qa_test('metagym/liftsim/tests/conf/config1.ini', 4000, 'rule_benchmark', set_seed) # 1 elevator
run_qa_test('metagym/liftsim/tests/conf/config2.ini', 4000, 'rule_benchmark', set_seed) # 100 floors 20 elevator 0.3 time_step
run_qa_test('metagym/liftsim/tests/conf/config3.ini', 4000, 'rule_benchmark', set_seed) # quick person generator
run_qa_test('metagym/liftsim/tests/conf/config4.ini', 4000, 'rule_benchmark', set_seed) # 1.0 time_step
| [
"metagym.liftsim.environment.env.LiftSim",
"rule_benchmark.dispatcher.Rule_dispatcher",
"traceback.print_tb",
"sys.exc_info",
"copy.deepcopy",
"metagym.liftsim.environment.mansion.utils.ElevatorAction",
"traceback.extract_tb"
] | [((12809, 12828), 'metagym.liftsim.environment.env.LiftSim', 'LiftSim', (['configfile'], {}), '(configfile)\n', (12816, 12828), False, 'from metagym.liftsim.environment.env import LiftSim\n'), ((12438, 12458), 'copy.deepcopy', 'copy.deepcopy', (['state'], {}), '(state)\n', (12451, 12458), False, 'import copy\n'), ((12947, 12987), 'rule_benchmark.dispatcher.Rule_dispatcher', 'Rule_dispatcher', (['mansion_env', 'iterations'], {}), '(mansion_env, iterations)\n', (12962, 12987), False, 'from rule_benchmark.dispatcher import Rule_dispatcher\n'), ((13498, 13507), 'metagym.liftsim.environment.env.LiftSim', 'LiftSim', ([], {}), '()\n', (13505, 13507), False, 'from metagym.liftsim.environment.env import LiftSim\n'), ((1504, 1542), 'copy.deepcopy', 'copy.deepcopy', (['state.ElevatorStates[i]'], {}), '(state.ElevatorStates[i])\n', (1517, 1542), False, 'import copy\n'), ((1616, 1659), 'copy.deepcopy', 'copy.deepcopy', (['next_state.ElevatorStates[i]'], {}), '(next_state.ElevatorStates[i])\n', (1629, 1659), False, 'import copy\n'), ((1733, 1757), 'copy.deepcopy', 'copy.deepcopy', (['action[i]'], {}), '(action[i])\n', (1746, 1757), False, 'import copy\n'), ((8648, 8662), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (8660, 8662), False, 'import sys\n'), ((8671, 8693), 'traceback.print_tb', 'traceback.print_tb', (['tb'], {}), '(tb)\n', (8689, 8693), False, 'import traceback\n'), ((8727, 8751), 'traceback.extract_tb', 'traceback.extract_tb', (['tb'], {}), '(tb)\n', (8747, 8751), False, 'import traceback\n'), ((13608, 13668), 'metagym.liftsim.environment.mansion.utils.ElevatorAction', 'ElevatorAction', (['action_target_floor', 'action_target_direction'], {}), '(action_target_floor, action_target_direction)\n', (13622, 13668), False, 'from metagym.liftsim.environment.mansion.utils import ElevatorState, MansionState, ElevatorAction\n')] |
import os
import wave
from array import array
from struct import pack
from sys import byteorder
import pyaudio
import soundfile
from .emotion_recognition import EmotionRecognizer
from .utils import get_best_estimators
THRESHOLD = 500
CHUNK_SIZE = 1024
FORMAT = pyaudio.paInt16
RATE = 16000
SILENCE = 30
def is_silent(snd_data):
"Returns 'True' if below the 'silent' threshold"
return max(snd_data) < THRESHOLD
def normalize(snd_data):
"Average the volume out"
MAXIMUM = 16384
times = float(MAXIMUM)/max(abs(i) for i in snd_data)
r = array('h')
for i in snd_data:
r.append(int(i*times))
return r
def trim(snd_data):
"Trim the blank spots at the start and end"
def _trim(snd_data):
snd_started = False
r = array('h')
for i in snd_data:
if not snd_started and abs(i)>THRESHOLD:
snd_started = True
r.append(i)
elif snd_started:
r.append(i)
return r
# Trim to the left
snd_data = _trim(snd_data)
# Trim to the right
snd_data.reverse()
snd_data = _trim(snd_data)
snd_data.reverse()
return snd_data
def add_silence(snd_data, seconds):
"Add silence to the start and end of 'snd_data' of length 'seconds' (float)"
r = array('h', [0 for i in range(int(seconds*RATE))])
r.extend(snd_data)
r.extend([0 for i in range(int(seconds*RATE))])
return r
def record():
"""
Record a word or words from the microphone and
return the data as an array of signed shorts.
Normalizes the audio, trims silence from the
start and end, and pads with 0.5 seconds of
blank sound to make sure VLC et al can play
it without getting chopped off.
"""
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT, channels=1, rate=RATE,
input=True, output=True,
frames_per_buffer=CHUNK_SIZE)
num_silent = 0
snd_started = False
r = array('h')
while 1:
# little endian, signed short
snd_data = array('h', stream.read(CHUNK_SIZE))
if byteorder == 'big':
snd_data.byteswap()
r.extend(snd_data)
silent = is_silent(snd_data)
if silent and snd_started:
num_silent += 1
elif not silent and not snd_started:
snd_started = True
if snd_started and num_silent > SILENCE:
break
sample_width = p.get_sample_size(FORMAT)
stream.stop_stream()
stream.close()
p.terminate()
r = normalize(r)
r = trim(r)
r = add_silence(r, 0.5)
return sample_width, r
def record_to_file(path):
"Records from the microphone and outputs the resulting data to 'path'"
sample_width, data = record()
data = pack('<' + ('h'*len(data)), *data)
wf = wave.open(path, 'wb')
wf.setnchannels(1)
wf.setsampwidth(sample_width)
wf.setframerate(RATE)
wf.writeframes(data)
wf.close()
def get_estimators_name(estimators):
result = [ '"{}"'.format(estimator.__class__.__name__) for estimator, _, _ in estimators ]
return ','.join(result), {estimator_name.strip('"'): estimator for estimator_name, (estimator, _, _) in zip(result, estimators)}
if __name__ == "__main__":
estimators = get_best_estimators(True)
estimators_str, estimator_dict = get_estimators_name(estimators)
import argparse
parser = argparse.ArgumentParser(description="""
Testing emotion recognition system using your voice,
please consider changing the model and/or parameters as you wish.
""")
parser.add_argument("-e", "--emotions", help=
"""Emotions to recognize separated by a comma ',', available emotions are
"neutral", "calm", "happy" "sad", "angry", "fear", "disgust", "ps" (pleasant surprise)
and "boredom", default is "sad,neutral,happy"
""", default="sad,neutral,happy")
parser.add_argument("-m", "--model", help=
"""
The model to use, 8 models available are: {},
default is "BaggingClassifier"
""".format(estimators_str), default="BaggingClassifier")
# Parse the arguments passed
args = parser.parse_args()
features = ["mfcc", "chroma", "mel"]
detector = EmotionRecognizer(estimator_dict[args.model], emotions=args.emotions.split(","), features=features, verbose=0)
detector.train()
print("Test accuracy score: {:.3f}%".format(detector.test_score()*100))
# print("Please talk")
filename = "test.wav"
record_to_file(filename)
| [
"wave.open",
"pyaudio.PyAudio",
"array.array",
"argparse.ArgumentParser"
] | [((564, 574), 'array.array', 'array', (['"""h"""'], {}), "('h')\n", (569, 574), False, 'from array import array\n'), ((1774, 1791), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (1789, 1791), False, 'import pyaudio\n'), ((1974, 1984), 'array.array', 'array', (['"""h"""'], {}), "('h')\n", (1979, 1984), False, 'from array import array\n'), ((2821, 2842), 'wave.open', 'wave.open', (['path', '"""wb"""'], {}), "(path, 'wb')\n", (2830, 2842), False, 'import wave\n'), ((3407, 3689), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""\n Testing emotion recognition system using your voice, \n please consider changing the model and/or parameters as you wish.\n """'}), '(description=\n """\n Testing emotion recognition system using your voice, \n please consider changing the model and/or parameters as you wish.\n """\n )\n', (3430, 3689), False, 'import argparse\n'), ((776, 786), 'array.array', 'array', (['"""h"""'], {}), "('h')\n", (781, 786), False, 'from array import array\n')] |
# coding=utf-8
import os
import json
# 获取最新模型预测数据文件夹
def get_latest_model_predict_data_dir(new_epochs_ckpt_dir=None):
# 获取文件下最新文件路径
def new_report(test_report):
lists = os.listdir(test_report) # 列出目录的下所有文件和文件夹保存到lists
lists.sort(key=lambda fn: os.path.getmtime(test_report + "/" + fn)) # 按时间排序
file_new = os.path.join(test_report, lists[-1]) # 获取最新的文件保存到file_new
return file_new
if new_epochs_ckpt_dir is None:
# 获取分类预测输出文件路径
input_new_epochs = os.path.join(
os.path.abspath(os.path.join(os.path.dirname(__file__), "output")), "sequnce_infer_out")
# 获取最新周期文件路径
new_ckpt_dir = new_report(input_new_epochs)
input_new_epochs_ckpt = os.path.join(input_new_epochs, new_ckpt_dir)
# 获取最新周期下最新模型文件路径
new_epochs_ckpt_dir = new_report(input_new_epochs_ckpt)
if not os.path.exists(new_ckpt_dir):
raise ValueError("路径不存在!{}".format(new_epochs_ckpt_dir))
return new_epochs_ckpt_dir
# dict is comes from raw_data all_50_schemas
schemas_dict_relation_2_object_subject_type = {
'Road_status':[('Status','Road')],
'Lane_status':[('Status','Lane')],
'Road_position':[('Position_of_road','Road')],
# 'At':[('Road','Road')],
# 'PRIOR':[('Road','Road')],
# 'PAST':[('Road','Road')],
# 'Bet':[('Road','Road')],
'Lane_of_Road':[('Road','Lane')],
'Lane_direction':[('Direction_of_lane','Lane')],
'Lane_position':[('Position_of_lane','Lane')],
'Road_direction':[('Direction_of_road','Road')],
#'Lane_number':[('Number','Lane')]
# '父亲': [('人物', '人物')],
# '妻子': [('人物', '人物')],
# '母亲': [('人物', '人物')],
# '丈夫': [('人物', '人物')],
# '祖籍': [('地点', '人物')],
# '总部地点': [('地点', '企业')],
# '出生地': [('地点', '人物')],
# '目': [('目', '生物')],
# '面积': [('Number', '行政区')],
# '简称': [('Text', '机构')],
# '上映时间': [('Date', '影视作品')],
# '所属专辑': [('音乐专辑', '歌曲')],
# '注册资本': [('Number', '企业')],
# '首都': [('城市', '国家')],
# '导演': [('人物', '影视作品')],
# '字': [('Text', '历史人物')],
# '身高': [('Number', '人物')],
# '出品公司': [('企业', '影视作品')],
# '修业年限': [('Number', '学科专业')],
# '出生日期': [('Date', '人物')],
# '制片人': [('人物', '影视作品')],
# '编剧': [('人物', '影视作品')],
# '国籍': [('国家', '人物')],
# '海拔': [('Number', '地点')],
# '连载网站': [('网站', '网络小说')],
# '朝代': [('Text', '历史人物')],
# '民族': [('Text', '人物')],
# '号': [('Text', '历史人物')],
# '出版社': [('出版社', '书籍')],
# '主持人': [('人物', '电视综艺')],
# '专业代码': [('Text', '学科专业')],
# '歌手': [('人物', '歌曲')],
# '作词': [('人物', '歌曲')],
# '主角': [('人物', '网络小说')],
# '董事长': [('人物', '企业')],
# '成立日期': [('Date', '机构'), ('Date', '企业')],
# '毕业院校': [('学校', '人物')],
# '占地面积': [('Number', '机构')],
# '官方语言': [('语言', '国家')],
# '邮政编码': [('Text', '行政区')],
# '人口数量': [('Number', '行政区')],
# '所在城市': [('城市', '景点')],
# '作者': [('人物', '图书作品')],
# '作曲': [('人物', '歌曲')],
# '气候': [('气候', '行政区')],
# '嘉宾': [('人物', '电视综艺')],
# '主演': [('人物', '影视作品')],
# '改编自': [('作品', '影视作品')],
# '创始人': [('人物', '企业')]
}
class File_Management(object):
"""读取TXT文件,以列表形式返回文件内容"""
def __init__(self, TEST_DATA_DIR=None, MODEL_OUTPUT_DIR=None, Competition_Mode=True):
self.TEST_DATA_DIR = TEST_DATA_DIR
self.MODEL_OUTPUT_DIR = get_latest_model_predict_data_dir(MODEL_OUTPUT_DIR)
self.Competition_Mode = Competition_Mode
def file_path_and_name(self):
text_sentence_file_path = os.path.join(self.TEST_DATA_DIR, "text_and_one_predicate.txt")
token_in_file_path = os.path.join(self.TEST_DATA_DIR, "token_in_not_UNK_and_one_predicate.txt")
predicate_token_label_file_path = os.path.join(self.MODEL_OUTPUT_DIR, "token_label_predictions.txt")
file_path_list = [text_sentence_file_path, token_in_file_path, predicate_token_label_file_path]
file_name_list = ["text_sentence_list", "token_in_not_NUK_list ", "token_label_list",]
if not self.Competition_Mode:
spo_out_file_path = os.path.join(self.TEST_DATA_DIR, "spo_out.txt")
if os.path.exists(spo_out_file_path):
file_path_list.append(spo_out_file_path)
file_name_list.append("reference_spo_list")
return file_path_list, file_name_list
def read_file_return_content_list(self):
file_path_list, file_name_list = self.file_path_and_name()
content_list_summary = []
for file_path in file_path_list:
with open(file_path, "r", encoding='utf-8') as f:
content_list = f.readlines()
content_list = [content.replace("\n", "") for content in content_list]
content_list_summary.append(content_list)
if self.Competition_Mode:
content_list_length_summary = [(file_name, len(content_list)) for content_list, file_name in
zip(content_list_summary, file_name_list)]
file_line_number = self._check_file_line_numbers(content_list_length_summary)
print("Competition_Mode=True, check file line pass!")
print("输入文件行数一致,行数是: ", file_line_number)
else:
file_line_number = len(content_list_summary[0])
print("first file line number: ", file_line_number)
print("do not check file line! if you need check file line, set Competition_Mode=True")
print("\n")
return content_list_summary, file_line_number
def _check_file_line_numbers(self, content_list_length_summary):
content_list_length_file_one = content_list_length_summary[0][1]
for file_name, file_line_number in content_list_length_summary:
assert file_line_number == content_list_length_file_one
return content_list_length_file_one
class Sorted_relation_and_entity_list_Management(File_Management):
"""
生成按概率大小排序的可能关系列表和按照原始句子中顺序排序的实体列表
"""
def __init__(self, TEST_DATA_DIR, MODEL_OUTPUT_DIR, Competition_Mode=False):
File_Management.__init__(self, TEST_DATA_DIR=TEST_DATA_DIR, MODEL_OUTPUT_DIR=MODEL_OUTPUT_DIR, Competition_Mode=Competition_Mode)
# 关系列表 把模型输出的实数值对应为标签
#self.relationship_label_list = ['丈夫', '上映时间', '专业代码', '主持人', '主演', '主角', '人口数量', '作曲', '作者', '作词', '修业年限', '出品公司', '出版社', '出生地', '出生日期', '创始人', '制片人', '占地面积', '号', '嘉宾', '国籍', '妻子', '字', '官方语言', '导演', '总部地点', '成立日期', '所在城市', '所属专辑', '改编自', '朝代', '歌手', '母亲', '毕业院校', '民族', '气候', '注册资本', '海拔', '父亲', '目', '祖籍', '简称', '编剧', '董事长', '身高', '连载网站', '邮政编码', '面积', '首都']
self.relationship_label_list = ['Road_status','Lane_status','At','PRIOR', 'PAST', 'Bet', 'LaneOfRoad','Lane_direction','Lane_position','Road_direction','Lane_number']
self.Competition_Mode = Competition_Mode
print("test数据输入路径是:\t{}".format(self.TEST_DATA_DIR))
print("最新模型预测结果路径是:\t{}".format(self.MODEL_OUTPUT_DIR))
def get_input_list(self,):
content_list_summary, self.file_line_number = self.read_file_return_content_list()
if len(content_list_summary) == 4:
[text_sentence_list, token_in_not_NUK_list, token_label_list, reference_spo_list] = content_list_summary
elif len(content_list_summary) == 3:
[text_sentence_list, token_in_not_NUK_list, token_label_list] = content_list_summary
reference_spo_list = [None] * len(text_sentence_list)
else:
raise ValueError("check code!")
print(reference_spo_list)
return text_sentence_list, token_in_not_NUK_list, token_label_list, reference_spo_list
#合并由WordPiece切分的词和单字
def _merge_WordPiece_and_single_word(self, entity_sort_list):
# [..['B-SUB', '新', '地', '球', 'ge', '##nes', '##is'] ..]---> [..('SUB', '新地球genesis')..]
entity_sort_tuple_list = []
for a_entity_list in entity_sort_list:
entity_content = ""
entity_type = None
for idx, entity_part in enumerate(a_entity_list):
if idx == 0:
entity_type = entity_part
if entity_type[:2] not in ["B-", "I-"]:
break
else:
if entity_part.startswith("##"):
entity_content += entity_part.replace("##", "")
else:
entity_content += entity_part
if entity_content != "":
entity_sort_tuple_list.append((entity_type[2:], entity_content))
return entity_sort_tuple_list
# 把spo_out.txt 的[SPO_SEP] 分割形式转换成标准列表字典形式
# 妻子 人物 人物 杨淑慧 周佛海[SPO_SEP]丈夫 人物 人物 周佛海 杨淑慧 ---> dict
def preprocessing_reference_spo_list(self, refer_spo_str):
refer_spo_list = refer_spo_str.split("[SPO_SEP]")
refer_spo_list = [spo.split(" ") for spo in refer_spo_list]
refer_spo_list = [dict([('predicate', spo[0]),
('object_type', spo[2]), ('subject_type', spo[1]),
('object', spo[4]), ('subject', spo[3])]) for spo in refer_spo_list]
print(refer_spo_list)
refer_spo_list.sort(key= lambda item:item['predicate'])
return refer_spo_list
# 把模型输出实体标签按照原句中相对位置输出
def model_token_label_2_entity_sort_tuple_list(self, token_in_not_UNK_list, predicate_token_label_list):
"""
:param token_in_not_UNK: ['紫', '菊', '花', '草', '是', '菊', '目', ',', '菊', '科', ',', '松', '果', '菊', '属', '的', '植', '物']
:param predicate_token_label: ['B-SUB', 'I-SUB', 'I-SUB', 'I-SUB', 'O', 'B-OBJ', 'I-OBJ', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O']
:return: [('SUB', '紫菊花草'), ('OBJ', '菊目')]
"""
# 除去模型输出的特殊符号
def preprocessing_model_token_lable(predicate_token_label_list, token_in_list_lenth):
# ToDo:检查错误,纠错
if predicate_token_label_list[0] == "[CLS]":
predicate_token_label_list = predicate_token_label_list[1:] # y_predict.remove('[CLS]')
if len(predicate_token_label_list) > token_in_list_lenth: # 只取输入序列长度即可
predicate_token_label_list = predicate_token_label_list[:token_in_list_lenth]
return predicate_token_label_list
# 预处理标注数据列表
predicate_token_label_list = preprocessing_model_token_lable(predicate_token_label_list, len(token_in_not_UNK_list))
entity_sort_list = []
entity_part_list = []
#TODO:需要检查以下的逻辑判断,可能写的不够完备充分
for idx, token_label in enumerate(predicate_token_label_list):
# 如果标签为 "O"
if token_label == "O":
# entity_part_list 不为空,则直接提交
if len(entity_part_list) > 0:
entity_sort_list.append(entity_part_list)
entity_part_list = []
# 如果标签以字符 "B-" 开始
if token_label.startswith("B-"):
# 如果 entity_part_list 不为空,则先提交原来 entity_part_list
if len(entity_part_list) > 0:
entity_sort_list.append(entity_part_list)
entity_part_list = []
entity_part_list.append(token_label)
entity_part_list.append(token_in_not_UNK_list[idx])
# 如果到了标签序列最后一个标签处
if idx == len(predicate_token_label_list) - 1:
entity_sort_list.append(entity_part_list)
# 如果标签以字符 "I-" 开始 或者等于 "[##WordPiece]"
if token_label.startswith("I-") or token_label == "[##WordPiece]":
# entity_part_list 不为空,则把该标签对应的内容并入 entity_part_list
if len(entity_part_list) > 0:
entity_part_list.append(' ') #英文需要这一行,中文不需要这一行
entity_part_list.append(token_in_not_UNK_list[idx])
# 如果到了标签序列最后一个标签处
if idx == len(predicate_token_label_list) - 1:
entity_sort_list.append(entity_part_list)
# 如果遇到 [SEP] 分隔符,说明需要处理的标注部分已经结束
if token_label == "[SEP]":
break
entity_sort_tuple_list = self._merge_WordPiece_and_single_word(entity_sort_list)
print(entity_sort_tuple_list)
return entity_sort_tuple_list
# 生成排好序的关系列表和实体列表
def produce_relationship_and_entity_sort_list(self):
text_sentence_list, token_in_not_NUK_list, token_label_list, reference_spo_list = self.get_input_list()
for [text_sentence, token_in_not_UNK, token_label, refer_spo_str] in\
zip(text_sentence_list, token_in_not_NUK_list, token_label_list, reference_spo_list):
text = text_sentence.split("\t")[0]
text_predicate = text_sentence.split("\t")[1]
token_in = token_in_not_UNK.split("\t")[0].split(" ")
token_in_predicate = token_in_not_UNK.split("\t")[1]
assert text_predicate == token_in_predicate
token_label_out = token_label.split(" ")
entity_sort_tuple_list = self.model_token_label_2_entity_sort_tuple_list(token_in, token_label_out)
if self.Competition_Mode:
yield text, text_predicate, entity_sort_tuple_list, None
else:
if refer_spo_str is not None:
refer_spo_list = self.preprocessing_reference_spo_list(refer_spo_str)
else:
refer_spo_list = []
yield text, text_predicate, entity_sort_tuple_list, refer_spo_list
# 打印排好序的关系列表和实体列表
def show_produce_relationship_and_entity_sort_list(self):
idx = 0
for text, text_predicate, entity_sort_tuple_list, refer_spo_list in self.produce_relationship_and_entity_sort_list():
print("序号: ", idx + 1)
print("原句: ", text)
print("预测的关系: ", text_predicate)
print("预测的实体: ", entity_sort_tuple_list)
print("参考的 spo_slit:", refer_spo_list)
print("\n")
idx += 1
if idx == 100:
break
def produce_output_file(self, OUT_RESULTS_DIR=None, keep_empty_spo_list=False):
filename = "subject_predicate_object_predict_output.json"
output_dict = dict()
for text, text_predicate, entity_sort_tuple_list, refer_spo_list in self.produce_relationship_and_entity_sort_list():
object_type, subject_type = schemas_dict_relation_2_object_subject_type[text_predicate][0]
subject_list = [value for name, value in entity_sort_tuple_list if name == "SUB"]
subject_list = list(set(subject_list))
subject_list = [value for value in subject_list if len(value) >= 2]
object_list = [value for name, value in entity_sort_tuple_list if name == "OBJ"]
object_list = list(set(object_list))
object_list = [value for value in object_list if len(value) >= 2]
if len(subject_list) == 0 or len(object_list) == 0:
output_dict.setdefault(text, [])
for subject_value in subject_list:
for object_value in object_list:
output_dict.setdefault(text, []).append({"object_type": object_type, "predicate": text_predicate,
"object": object_value, "subject_type": subject_type,
"subject": subject_value})
if keep_empty_spo_list:
filename = "keep_empty_spo_list_" + filename
if OUT_RESULTS_DIR is None:
out_path = filename
else:
out_path = os.path.join(OUT_RESULTS_DIR, filename)
print("生成结果的输出路径是:\t{}".format(out_path))
if not os.path.exists(OUT_RESULTS_DIR):
os.makedirs(OUT_RESULTS_DIR)
result_json_write_f = open(out_path, "w", encoding='utf-8')
count_line_number = 0
count_empty_line_number = 0
for text, spo_list in output_dict.items():
count_line_number += 1
line_dict = dict()
line_dict["text"] = text
line_dict["spo_list"] = spo_list
line_json = json.dumps(line_dict, ensure_ascii=False)
if len(spo_list) == 0:
count_empty_line_number += 1
if keep_empty_spo_list:
result_json_write_f.write(line_json + "\n")
else:
if len(spo_list) > 0:
result_json_write_f.write(line_json + "\n")
print("empty_line: {}, line: {}, percentage: {:.2f}%".format(count_empty_line_number, count_line_number,
(count_empty_line_number / count_line_number) * 100))
if __name__=='__main__':
TEST_DATA_DIR = "bin/subject_object_labeling/sequence_labeling_data/test"
# MODEL_OUTPUT_DIR = "output/sequnce_infer_out/epochs9/ckpt20000"
MODEL_OUTPUT_DIR = None
OUT_RESULTS_DIR = "output/final_text_spo_list_result"
Competition_Mode = True
spo_list_manager = Sorted_relation_and_entity_list_Management(TEST_DATA_DIR, MODEL_OUTPUT_DIR, Competition_Mode=Competition_Mode)
spo_list_manager.produce_output_file(OUT_RESULTS_DIR=OUT_RESULTS_DIR, keep_empty_spo_list=True) | [
"os.path.exists",
"os.listdir",
"os.makedirs",
"json.dumps",
"os.path.join",
"os.path.dirname",
"os.path.getmtime"
] | [((186, 209), 'os.listdir', 'os.listdir', (['test_report'], {}), '(test_report)\n', (196, 209), False, 'import os\n'), ((340, 376), 'os.path.join', 'os.path.join', (['test_report', 'lists[-1]'], {}), '(test_report, lists[-1])\n', (352, 376), False, 'import os\n'), ((733, 777), 'os.path.join', 'os.path.join', (['input_new_epochs', 'new_ckpt_dir'], {}), '(input_new_epochs, new_ckpt_dir)\n', (745, 777), False, 'import os\n'), ((879, 907), 'os.path.exists', 'os.path.exists', (['new_ckpt_dir'], {}), '(new_ckpt_dir)\n', (893, 907), False, 'import os\n'), ((3499, 3561), 'os.path.join', 'os.path.join', (['self.TEST_DATA_DIR', '"""text_and_one_predicate.txt"""'], {}), "(self.TEST_DATA_DIR, 'text_and_one_predicate.txt')\n", (3511, 3561), False, 'import os\n'), ((3591, 3665), 'os.path.join', 'os.path.join', (['self.TEST_DATA_DIR', '"""token_in_not_UNK_and_one_predicate.txt"""'], {}), "(self.TEST_DATA_DIR, 'token_in_not_UNK_and_one_predicate.txt')\n", (3603, 3665), False, 'import os\n'), ((3708, 3774), 'os.path.join', 'os.path.join', (['self.MODEL_OUTPUT_DIR', '"""token_label_predictions.txt"""'], {}), "(self.MODEL_OUTPUT_DIR, 'token_label_predictions.txt')\n", (3720, 3774), False, 'import os\n'), ((4045, 4092), 'os.path.join', 'os.path.join', (['self.TEST_DATA_DIR', '"""spo_out.txt"""'], {}), "(self.TEST_DATA_DIR, 'spo_out.txt')\n", (4057, 4092), False, 'import os\n'), ((4108, 4141), 'os.path.exists', 'os.path.exists', (['spo_out_file_path'], {}), '(spo_out_file_path)\n', (4122, 4141), False, 'import os\n'), ((15561, 15600), 'os.path.join', 'os.path.join', (['OUT_RESULTS_DIR', 'filename'], {}), '(OUT_RESULTS_DIR, filename)\n', (15573, 15600), False, 'import os\n'), ((15666, 15697), 'os.path.exists', 'os.path.exists', (['OUT_RESULTS_DIR'], {}), '(OUT_RESULTS_DIR)\n', (15680, 15697), False, 'import os\n'), ((15711, 15739), 'os.makedirs', 'os.makedirs', (['OUT_RESULTS_DIR'], {}), '(OUT_RESULTS_DIR)\n', (15722, 15739), False, 'import os\n'), ((16097, 16138), 'json.dumps', 
'json.dumps', (['line_dict'], {'ensure_ascii': '(False)'}), '(line_dict, ensure_ascii=False)\n', (16107, 16138), False, 'import json\n'), ((270, 310), 'os.path.getmtime', 'os.path.getmtime', (["(test_report + '/' + fn)"], {}), "(test_report + '/' + fn)\n", (286, 310), False, 'import os\n'), ((568, 593), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (583, 593), False, 'import os\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# $Id: InstructionTestGen.py $
"""
Instruction Test Generator.
"""
from __future__ import print_function;
__copyright__ = \
"""
Copyright (C) 2012-2017 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
"""
__version__ = "$Revision: 118412 $";
# pylint: disable=C0103,R0913
# Standard python imports.
import io;
import os;
from optparse import OptionParser
import random;
import sys;
## @name Exit codes
## @{
RTEXITCODE_SUCCESS = 0;     # Everything went fine.
RTEXITCODE_SYNTAX = 2;      # Invalid command line syntax.
## @}
## @name Various C macros we're used to.
## @{
UINT8_MAX = 0xff                    # Largest unsigned 8-bit value.
UINT16_MAX = 0xffff                 # Largest unsigned 16-bit value.
UINT32_MAX = 0xffffffff             # Largest unsigned 32-bit value.
UINT64_MAX = 0xffffffffffffffff     # Largest unsigned 64-bit value.
def RT_BIT_32(iBit): # pylint: disable=C0103
    """ Returns a mask with only 32-bit style bit number iBit set. """
    return 2 ** iBit;
def RT_BIT_64(iBit): # pylint: disable=C0103
    """ Returns a mask with only 64-bit style bit number iBit set. """
    return 2 ** iBit;
## @}
## @name ModR/M
## @{
X86_MODRM_RM_MASK = 0x07;       # The r/m field, in place (bits 0-2).
X86_MODRM_REG_MASK = 0x38;      # The reg field, in place (bits 3-5).
X86_MODRM_REG_SMASK = 0x07;     # The reg field mask after shifting down.
X86_MODRM_REG_SHIFT = 3;        # Shift count for the reg field.
X86_MODRM_MOD_MASK = 0xc0;      # The mod field, in place (bits 6-7).
X86_MODRM_MOD_SMASK = 0x03;     # The mod field mask after shifting down.
X86_MODRM_MOD_SHIFT = 6;        # Shift count for the mod field.
## @}
## @name SIB
## @{
X86_SIB_BASE_MASK = 0x07;       # The base register field, in place (bits 0-2).
X86_SIB_INDEX_MASK = 0x38;      # The index register field, in place (bits 3-5).
X86_SIB_INDEX_SMASK = 0x07;     # The index field mask after shifting down.
X86_SIB_INDEX_SHIFT = 3;        # Shift count for the index field.
X86_SIB_SCALE_MASK = 0xc0;      # The scale field, in place (bits 6-7).
X86_SIB_SCALE_SMASK = 0x03;     # The scale field mask after shifting down.
X86_SIB_SCALE_SHIFT = 6;        # Shift count for the scale field.
## @}
## @name Prefixes
## @{
X86_OP_PRF_CS = 0x2e;           # CS segment override prefix.
X86_OP_PRF_SS = 0x36;           # SS segment override prefix.
X86_OP_PRF_DS = 0x3e;           # DS segment override prefix.
X86_OP_PRF_ES = 0x26;           # ES segment override prefix.
X86_OP_PRF_FS = 0x64;           # FS segment override prefix.
X86_OP_PRF_GS = 0x65;           # GS segment override prefix.
X86_OP_PRF_SIZE_OP = 0x66;      # Operand size override prefix.
X86_OP_PRF_SIZE_ADDR = 0x67;    # Address size override prefix.
X86_OP_PRF_LOCK = 0xf0;         # LOCK prefix.
X86_OP_PRF_REPNZ = 0xf2;        # REPNZ/REPNE prefix.
X86_OP_PRF_REPZ = 0xf3;         # REPZ/REPE prefix.
X86_OP_REX_B = 0x41;            # REX with only the B bit set.
X86_OP_REX_X = 0x42;            # REX with only the X bit set.
X86_OP_REX_R = 0x44;            # REX with only the R bit set.
X86_OP_REX_W = 0x48;            # REX with only the W (64-bit operand) bit set.
## @}
## @name General registers
## Register indexes as used by ModR/M/SIB encoding (REX extends to 8-15).
## @{
X86_GREG_xAX = 0
X86_GREG_xCX = 1
X86_GREG_xDX = 2
X86_GREG_xBX = 3
X86_GREG_xSP = 4
X86_GREG_xBP = 5
X86_GREG_xSI = 6
X86_GREG_xDI = 7
X86_GREG_x8 = 8
X86_GREG_x9 = 9
X86_GREG_x10 = 10
X86_GREG_x11 = 11
X86_GREG_x12 = 12
X86_GREG_x13 = 13
X86_GREG_x14 = 14
X86_GREG_x15 = 15
## @}
## @name Register names.
## Indexed by the X86_GREG_xXX values above.  The *NoSp tables have None in
## the stack pointer slot (index 4) so xSP cannot be picked by accident.
## @{
g_asGRegs64NoSp = ('rax', 'rcx', 'rdx', 'rbx', None, 'rbp', 'rsi', 'rdi', 'r8', 'r9', 'r10', 'r11', 'r12', 'r13', 'r14', 'r15');
g_asGRegs64 = ('rax', 'rcx', 'rdx', 'rbx', 'rsp', 'rbp', 'rsi', 'rdi', 'r8', 'r9', 'r10', 'r11', 'r12', 'r13', 'r14', 'r15');
g_asGRegs32NoSp = ('eax', 'ecx', 'edx', 'ebx', None, 'ebp', 'esi', 'edi',
                   'r8d', 'r9d', 'r10d', 'r11d', 'r12d', 'r13d', 'r14d', 'r15d');
g_asGRegs32 = ('eax', 'ecx', 'edx', 'ebx', 'esp', 'ebp', 'esi', 'edi',
               'r8d', 'r9d', 'r10d', 'r11d', 'r12d', 'r13d', 'r14d', 'r15d');
g_asGRegs16NoSp = ('ax', 'cx', 'dx', 'bx', None, 'bp', 'si', 'di',
                   'r8w', 'r9w', 'r10w', 'r11w', 'r12w', 'r13w', 'r14w', 'r15w');
g_asGRegs16 = ('ax', 'cx', 'dx', 'bx', 'sp', 'bp', 'si', 'di',
               'r8w', 'r9w', 'r10w', 'r11w', 'r12w', 'r13w', 'r14w', 'r15w');
# The 8-bit tables: g_asGRegs8 is the legacy (no REX) set including the high
# byte registers; g_asGRegs8Rex has the REX-encodable set first, with the
# high byte registers appended at indexes 16-19.
g_asGRegs8 = ('al', 'cl', 'dl', 'bl', 'ah', 'ch', 'dh', 'bh');
g_asGRegs8Rex = ('al', 'cl', 'dl', 'bl', 'spl', 'bpl', 'sil', 'dil',
                 'r8b', 'r9b', 'r10b', 'r11b', 'r12b', 'r13b', 'r14b', 'r15b',
                 'ah', 'ch', 'dh', 'bh');
## @}
## @name EFLAGS/RFLAGS/EFLAGS
## @{
X86_EFL_CF = RT_BIT_32(0);          # Carry flag.
X86_EFL_CF_BIT = 0;
X86_EFL_1 = RT_BIT_32(1);           # Reserved, always reads as 1.
X86_EFL_PF = RT_BIT_32(2);          # Parity flag.
X86_EFL_AF = RT_BIT_32(4);          # Auxiliary carry flag.
X86_EFL_AF_BIT = 4;
X86_EFL_ZF = RT_BIT_32(6);          # Zero flag.
X86_EFL_ZF_BIT = 6;
X86_EFL_SF = RT_BIT_32(7);          # Sign flag.
X86_EFL_SF_BIT = 7;
X86_EFL_TF = RT_BIT_32(8);          # Trap flag.
X86_EFL_IF = RT_BIT_32(9);          # Interrupt enable flag.
X86_EFL_DF = RT_BIT_32(10);         # Direction flag.
X86_EFL_OF = RT_BIT_32(11);         # Overflow flag.
X86_EFL_OF_BIT = 11;
X86_EFL_IOPL = (RT_BIT_32(12) | RT_BIT_32(13));     # I/O privilege level (two bits).
X86_EFL_NT = RT_BIT_32(14);         # Nested task flag.
X86_EFL_RF = RT_BIT_32(16);         # Resume flag.
X86_EFL_VM = RT_BIT_32(17);         # Virtual 8086 mode flag.
X86_EFL_AC = RT_BIT_32(18);         # Alignment check flag.
X86_EFL_VIF = RT_BIT_32(19);        # Virtual interrupt flag.
X86_EFL_VIP = RT_BIT_32(20);        # Virtual interrupt pending.
X86_EFL_ID = RT_BIT_32(21);         # CPUID detection flag.
X86_EFL_LIVE_MASK = 0x003f7fd5;     # Mask of the defined (non-reserved) bits.
X86_EFL_RA1_MASK = RT_BIT_32(1);    # Mask of the reserved-as-1 bits.
X86_EFL_IOPL_SHIFT = 12;            # Shift count for the IOPL field.
X86_EFL_STATUS_BITS = ( X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF );  # Arithmetic status flags.
## @}
## @name Random
## @{
# Build a 32-bit seed from four OS-supplied random bytes.  Note: the previous
# code used (os.urandom(4)).encode('hex'), which only works on Python 2 -
# bytes objects have no .encode() on Python 3.  Assembling the integer via
# bytearray works identically on both interpreter generations.
g_iMyRandSeed = sum(bValue << (8 * iByte) for iByte, bValue in enumerate(bytearray(os.urandom(4))));
#g_iMyRandSeed = 286523426;
#g_iMyRandSeed = 1994382324;
## Module wide pseudo random generator; uncomment one of the fixed seeds
## above to reproduce a previous run.
g_oMyRand = random.Random(g_iMyRandSeed);
#g_oMyRand = random.SystemRandom();
def randU8():
    """ Unsigned 8-bit random number (taken from g_oMyRand). """
    return g_oMyRand.getrandbits(8);
def randU16():
    """ Unsigned 16-bit random number (taken from g_oMyRand). """
    return g_oMyRand.getrandbits(16);
def randU32():
    """ Unsigned 32-bit random number (taken from g_oMyRand). """
    return g_oMyRand.getrandbits(32);
def randU64():
    """ Unsigned 64-bit random number (taken from g_oMyRand). """
    return g_oMyRand.getrandbits(64);
def randUxx(cBits):
    """ Unsigned 8-, 16-, 32-, or 64-bit random number (cBits wide). """
    return g_oMyRand.getrandbits(cBits);
def randSxx(cBits):
    """
    Signed 8-, 16-, 32-, or 64-bit random number.

    NOTE(review): this converts sign/magnitude style rather than two's
    complement: the result range is -(2^(cBits-1)-1)..2^(cBits-1)-1, and zero
    is produced by two bit patterns (0 and the sign bit alone).  Changing it
    would alter the generated number streams, so it is only flagged here.
    """
    uVal = randUxx(cBits);
    iRet = uVal & ((1 << (cBits - 1)) - 1);     # Strip the sign bit.
    if iRet != uVal:                            # Sign bit was set -> negate.
        iRet = -iRet;
    return iRet;
def randUxxList(cBits, cElements):
    """ Returns a list of cElements unsigned cBits-bit random numbers. """
    auValues = [];
    for _ in range(cElements):
        auValues.append(randUxx(cBits));
    return auValues;
## @}
## @name Instruction Emitter Helpers
## @{
def calcRexPrefixForTwoModRmRegs(iReg, iRm, bOtherRexPrefixes = 0):
    """
    Calculates the REX prefix byte needed to encode the given ModR/M reg and
    r/m register indexes, merging in any extra REX bits from the caller.
    Returns a single-byte list, or an empty list when no prefix is required.
    """
    bRexByte = bOtherRexPrefixes \
             | (X86_OP_REX_R if iReg >= 8 else 0) \
             | (X86_OP_REX_B if iRm >= 8 else 0);
    return [bRexByte,] if bRexByte != 0 else [];
def calcModRmForTwoRegs(iReg, iRm):
    """
    Encodes a ModR/M byte for register-direct addressing (mod = 3) with the
    given reg and r/m register indexes.
    Returns an array with one byte in it.
    """
    bModRm  = 0x3 << X86_MODRM_MOD_SHIFT;
    bModRm |= (iReg << X86_MODRM_REG_SHIFT) & X86_MODRM_REG_MASK;
    bModRm |= iRm & X86_MODRM_RM_MASK;
    return [bModRm,];
## @}
## @name Misc
## @{
def convU32ToSigned(u32):
    """ Converts a 32-bit unsigned value to its 32-bit signed equivalent. """
    # Values with the sign bit set wrap around (subtract 2^32).
    return u32 if u32 < 0x80000000 else u32 - UINT32_MAX - 1;
def rotateLeftUxx(cBits, uVal, cShift):
    """ Rotate a xx-bit wide unsigned number to the left. """
    assert cShift < cBits;
    # Select the mask matching the operand width.
    if cBits == 8:
        uWidthMask = UINT8_MAX;
    elif cBits == 16:
        uWidthMask = UINT16_MAX;
    elif cBits == 32:
        uWidthMask = UINT32_MAX;
    else:
        assert cBits == 64;
        uWidthMask = UINT64_MAX;
    uVal &= uWidthMask;
    # Bits shifted out on the left re-enter on the right.
    return ((uVal << cShift) | (uVal >> (cBits - cShift))) & uWidthMask;
def rotateRightUxx(cBits, uVal, cShift):
    """ Rotate a xx-bit wide unsigned number to the right. """
    assert cShift < cBits;
    # Select the mask matching the operand width.
    if cBits == 8:
        uWidthMask = UINT8_MAX;
    elif cBits == 16:
        uWidthMask = UINT16_MAX;
    elif cBits == 32:
        uWidthMask = UINT32_MAX;
    else:
        assert cBits == 64;
        uWidthMask = UINT64_MAX;
    uVal &= uWidthMask;
    # Bits shifted out on the right re-enter on the left.
    return ((uVal >> cShift) | (uVal << (cBits - cShift))) & uWidthMask;
def gregName(iReg, cBits, fRexByteRegs = True):
    """ Gets the name of a general register by index and width. """
    asNamesByWidth = { 64: g_asGRegs64, 32: g_asGRegs32, 16: g_asGRegs16, };
    if cBits in asNamesByWidth:
        return asNamesByWidth[cBits][iReg];
    assert cBits == 8;
    # Byte registers come in two flavors: with REX the SPL..DIL/R8B..R15B set
    # is used, otherwise the legacy AL..BH set.
    return g_asGRegs8Rex[iReg] if fRexByteRegs else g_asGRegs8[iReg];
## @}
class TargetEnv(object):
    """
    Target Runtime Environment.

    Bundles the instruction set (16/32/64-bit), CPU mode and privilege ring
    the generated test code targets, and derives operand/address sizing and
    general register information from them.
    """

    ## @name CPU Modes
    ## @{
    ksCpuMode_Real = 'real';
    ksCpuMode_Protect = 'prot';
    ksCpuMode_Paged = 'paged';
    ksCpuMode_Long = 'long';
    ksCpuMode_V86 = 'v86';
    ## @}

    ## @name Instruction set.
    ## @{
    ksInstrSet_16 = '16';
    ksInstrSet_32 = '32';
    ksInstrSet_64 = '64';
    ## @}

    def __init__(self, sName,
                 sInstrSet = ksInstrSet_32,
                 sCpuMode = ksCpuMode_Paged,
                 iRing = 3,
                 ):
        self.sName = sName;
        self.sInstrSet = sInstrSet;
        self.sCpuMode = sCpuMode;
        self.iRing = iRing;
        # Pre-select the register name tables matching the instruction set.
        self.asGRegs = g_asGRegs64 if self.is64Bit() else g_asGRegs32;
        self.asGRegsNoSp = g_asGRegs64NoSp if self.is64Bit() else g_asGRegs32NoSp;

    def isUsingIprt(self):
        """ Whether it's an IPRT environment or not. """
        return self.sName.startswith('iprt');

    def is64Bit(self):
        """ Whether it's a 64-bit environment or not. """
        return self.sInstrSet == self.ksInstrSet_64;

    def getDefOpBits(self):
        """ Get the default operand size as a bit count. """
        if self.sInstrSet == self.ksInstrSet_16:
            return 16;
        return 32;

    def getDefOpBytes(self):
        """ Get the default operand size as a byte count. """
        # Note: floor division ('//') so the result stays an integer on
        # Python 3 as well; identical to '/' under Python 2.
        return self.getDefOpBits() // 8;

    def getMaxOpBits(self):
        """ Get the max operand size as a bit count. """
        if self.sInstrSet == self.ksInstrSet_64:
            return 64;
        return 32;

    def getMaxOpBytes(self):
        """ Get the max operand size as a byte count. """
        return self.getMaxOpBits() // 8;

    def getDefAddrBits(self):
        """ Get the default address size as a bit count. """
        if self.sInstrSet == self.ksInstrSet_16:
            return 16;
        if self.sInstrSet == self.ksInstrSet_32:
            return 32;
        return 64;

    def getDefAddrBytes(self):
        """ Get the default address size as a byte count. """
        return self.getDefAddrBits() // 8;

    def getGRegCount(self, cbEffBytes = 4):
        """ Get the number of general registers. """
        if self.sInstrSet == self.ksInstrSet_64:
            if cbEffBytes == 1:
                # AL..R15B plus the REX-less AH, CH, DH, BH.
                return 16 + 4;
            return 16;
        return 8;

    def randGRegNoSp(self, cbEffBytes = 4):
        """ Returns a random general register number, excluding the SP register. """
        iReg = randU16() % self.getGRegCount(cbEffBytes);
        while iReg == X86_GREG_xSP:
            iReg = randU16() % self.getGRegCount(cbEffBytes);
        return iReg;

    def randGRegNoSpList(self, cItems, cbEffBytes = 4):
        """ List of cItems randGRegNoSp values. """
        aiRegs = [];
        for _ in range(cItems):
            aiRegs.append(self.randGRegNoSp(cbEffBytes));
        return aiRegs;

    def getAddrModes(self):
        """ Gets a list of addressing modes (16, 32, or/and 64), native one first. """
        if self.sInstrSet == self.ksInstrSet_16:
            return [16, 32];
        if self.sInstrSet == self.ksInstrSet_32:
            return [32, 16];
        return [64, 32];

    def is8BitHighGReg(self, cbEffOp, iGReg):
        """ Checks if the given register is a high 8-bit general register (AH, CH, DH or BH). """
        assert cbEffOp in [1, 2, 4, 8];
        if cbEffOp == 1:
            if iGReg >= 16:         # indexes 16-19 of g_asGRegs8Rex are AH..BH.
                return True;
            if iGReg >= 4 and not self.is64Bit():   # legacy table: 4-7 are AH..BH.
                return True;
        return False;

    def gregNameBits(self, iReg, cBits):
        """ Gets the name of the given register for the specified width (bits). """
        return gregName(iReg, cBits, self.is64Bit());

    def gregNameBytes(self, iReg, cbWidth):
        """ Gets the name of the given register for the specified width (in bytes). """
        return gregName(iReg, cbWidth * 8, self.is64Bit());
## Target environments.
## Table of the selectable target environments, keyed by target name.
g_dTargetEnvs = {
    'iprt-r3-32': TargetEnv('iprt-r3-32', TargetEnv.ksInstrSet_32, TargetEnv.ksCpuMode_Protect, 3),
    'iprt-r3-64': TargetEnv('iprt-r3-64', TargetEnv.ksInstrSet_64, TargetEnv.ksCpuMode_Long, 3),
    'bs2-r0-64': TargetEnv('bs2-r0-64', TargetEnv.ksInstrSet_64, TargetEnv.ksCpuMode_Long, 0),
    'bs2-r0-64-big': TargetEnv('bs2-r0-64-big', TargetEnv.ksInstrSet_64, TargetEnv.ksCpuMode_Long, 0),
    'bs2-r0-32-big': TargetEnv('bs2-r0-32-big', TargetEnv.ksInstrSet_32, TargetEnv.ksCpuMode_Protect, 0),
};
class InstrTestBase(object):
    """
    Base class for testing one instruction.
    """

    def __init__(self, sName, sInstr = None):
        self.sName = sName;
        # Default the mnemonic to the first word of the test name.
        self.sInstr = sInstr if sInstr else sName.split()[0];

    def isApplicable(self, oGen):
        """
        Tests if the instruction test is applicable to the selected environment.
        """
        _ = oGen;
        return True;

    def generateTest(self, oGen, sTestFnName):
        """
        Emits the test assembly code.  Overridden by subclasses.
        """
        oGen.write(';; @todo not implemented. This is for the linter: %s, %s\n' % (oGen, sTestFnName));
        return True;

    def generateInputs(self, cbEffOp, cbMaxOp, oGen, fLong = False):
        """ Generate a list of input values. """
        if fLong:
            #
            # Try do extremes as well as different ranges of random numbers.
            #
            # Note: floor division ('//') throughout - plain '/' would produce
            # floats like 127.5 on Python 3 instead of the intended integer
            # boundary values (identical to '/' under Python 2).
            #
            auRet = [0, 1, ];
            if cbMaxOp >= 1:
                auRet += [ UINT8_MAX // 2, UINT8_MAX // 2 + 1, UINT8_MAX ];
            if cbMaxOp >= 2:
                auRet += [ UINT16_MAX // 2, UINT16_MAX // 2 + 1, UINT16_MAX ];
            if cbMaxOp >= 4:
                auRet += [ UINT32_MAX // 2, UINT32_MAX // 2 + 1, UINT32_MAX ];
            if cbMaxOp >= 8:
                auRet += [ UINT64_MAX // 2, UINT64_MAX // 2 + 1, UINT64_MAX ];

            # Pad with random numbers of assorted widths until the size-dependent quota is met.
            if oGen.oOptions.sTestSize == InstructionTestGen.ksTestSize_Tiny:
                for cBits, cValues in ( (8, 4), (16, 4), (32, 8), (64, 8) ):
                    if cBits < cbMaxOp * 8:
                        auRet += randUxxList(cBits, cValues);
                cWanted = 16;
            elif oGen.oOptions.sTestSize == InstructionTestGen.ksTestSize_Medium:
                for cBits, cValues in ( (8, 8), (16, 8), (24, 2), (32, 16), (40, 1), (48, 1), (56, 1), (64, 16) ):
                    if cBits < cbMaxOp * 8:
                        auRet += randUxxList(cBits, cValues);
                cWanted = 64;
            else:
                for cBits, cValues in ( (8, 16), (16, 16), (24, 4), (32, 64), (40, 4), (48, 4), (56, 4), (64, 64) ):
                    if cBits < cbMaxOp * 8:
                        auRet += randUxxList(cBits, cValues);
                cWanted = 168;
            if len(auRet) < cWanted:
                auRet += randUxxList(cbEffOp * 8, cWanted - len(auRet));
        else:
            #
            # Short list, just do some random numbers.
            #
            # NOTE(review): cbMaxOp is a byte count while randUxxList expects a
            # bit count; 'cbMaxOp * 8' may have been intended here.  Left
            # unchanged as it only narrows the random value range - verify.
            #
            auRet = [];
            if oGen.oOptions.sTestSize == InstructionTestGen.ksTestSize_Tiny:
                auRet += randUxxList(cbMaxOp, 1);
            elif oGen.oOptions.sTestSize == InstructionTestGen.ksTestSize_Medium:
                auRet += randUxxList(cbMaxOp, 2);
            else:
                auRet = [];
                for cBits in (8, 16, 32, 64):
                    if cBits < cbMaxOp * 8:
                        auRet += randUxxList(cBits, 1);
        return auRet;
class InstrTest_MemOrGreg_2_Greg(InstrTestBase):
"""
Instruction reading memory or general register and writing the result to a
general register.
"""
def __init__(self, sName, fnCalcResult, sInstr = None, acbOpVars = None):
InstrTestBase.__init__(self, sName, sInstr);
self.fnCalcResult = fnCalcResult;
self.acbOpVars = [ 1, 2, 4, 8 ] if not acbOpVars else list(acbOpVars);
self.fTestRegForm = True;
self.fTestMemForm = True;
## @name Test Instruction Writers
## @{
def writeInstrGregGreg(self, cbEffOp, iOp1, iOp2, oGen):
""" Writes the instruction with two general registers as operands. """
oGen.write(' %s %s, %s\n'
% ( self.sInstr, oGen.gregNameBytes(iOp1, cbEffOp), oGen.gregNameBytes(iOp2, cbEffOp),));
return True;
    def writeInstrGregPureRM(self, cbEffOp, iOp1, cAddrBits, iOp2, iMod, offDisp, oGen):
        """ Writes the instruction with a general register and a pure ModR/M memory operand (no SIB). """
        oGen.write(' ');
        if iOp2 == 13 and iMod == 0 and cAddrBits == 64:
            oGen.write('altrexb '); # Alternative encoding for rip relative addressing.
        oGen.write('%s %s, [' % (self.sInstr, oGen.gregNameBytes(iOp1, cbEffOp),));
        if (iOp2 == 5 or iOp2 == 13) and iMod == 0:
            # r/m 5/13 with mod 0 is not a register reference but disp32 / rip-relative:
            # address the per-size data label instead.
            oGen.write('VBINSTST_NAME(g_u%sData)' % (cbEffOp * 8,))
            if oGen.oTarget.is64Bit():
                oGen.write(' wrt rip');
        else:
            # Emit the displacement prefix matching the mod value, then the base register.
            if iMod == 1:
                oGen.write('byte %d + ' % (offDisp,));
            elif iMod == 2:
                oGen.write('dword %d + ' % (offDisp,));
            else:
                assert iMod == 0;
            if cAddrBits == 64:
                oGen.write(g_asGRegs64[iOp2]);
            elif cAddrBits == 32:
                oGen.write(g_asGRegs32[iOp2]);
            elif cAddrBits == 16:
                assert False; ## @todo implement 16-bit addressing.
            else:
                assert False, str(cAddrBits);
        oGen.write(']\n');
        return True;
    def writeInstrGregSibLabel(self, cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen):
        """ Writes the instruction taking a register and a label (base only w/o reg), SIB form. """
        assert offDisp is None; assert iBaseReg in [5, 13]; assert iIndexReg == 4; assert cAddrBits != 16;
        if cAddrBits == 64:
            # Note! Cannot test this in 64-bit mode in any sensible way because the disp is 32-bit
            #       and we cannot (yet) make assumptions about where we're loaded.
            ## @todo Enable testing this in environments where we can make assumptions (boot sector).
            oGen.write(' %s %s, [VBINSTST_NAME(g_u%sData) xWrtRIP]\n'
                       % ( self.sInstr, oGen.gregNameBytes(iOp1, cbEffOp), cbEffOp * 8,));
        else:
            # altsibxN forces the SIB encoding with the given scale.
            oGen.write(' altsibx%u %s %s, [VBINSTST_NAME(g_u%sData) xWrtRIP] ; iOp1=%s cbEffOp=%s\n'
                       % ( iScale, self.sInstr, oGen.gregNameBytes(iOp1, cbEffOp), cbEffOp * 8, iOp1, cbEffOp));
        return True;
    def writeInstrGregSibScaledReg(self, cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen):
        """ Writes the instruction taking a register and disp+scaled register (no base reg), SIB form. """
        assert iBaseReg in [5, 13]; assert iIndexReg != 4; assert cAddrBits != 16;
        # Note! Using altsibxN to force scaled encoding. This is only really a
        #       necessity for iScale=1, but doesn't hurt for the rest.
        oGen.write(' altsibx%u %s %s, [%s * %#x'
                   % (iScale, self.sInstr, oGen.gregNameBytes(iOp1, cbEffOp), oGen.gregNameBits(iIndexReg, cAddrBits), iScale,));
        if offDisp is not None:
            oGen.write(' + %#x' % (offDisp,));
        oGen.write(']\n');
        _ = iBaseReg;   # Only sanity-checked above; this form has no base register.
        return True;
    def writeInstrGregSibBase(self, cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen):
        """ Writes the instruction taking a register and base only (with reg), SIB form. """
        # altsibxN forces the SIB encoding with the given scale.
        oGen.write(' altsibx%u %s %s, [%s'
                   % (iScale, self.sInstr, oGen.gregNameBytes(iOp1, cbEffOp), oGen.gregNameBits(iBaseReg, cAddrBits),));
        if offDisp is not None:
            oGen.write(' + %#x' % (offDisp,));
        oGen.write(']\n');
        _ = iIndexReg;  # Unused in this form (no index register is emitted).
        return True;
    def writeInstrGregSibBaseAndScaledReg(self, cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen):
        """ Writes the instruction taking a register and full featured SIB form address. """
        # Note! From the looks of things, yasm will encode the following instructions the same way:
        #           mov eax, [rsi*1 + rbx]
        #           mov eax, [rbx + rsi*1]
        #       So, when there are two registers involved, the '*1' selects
        #       which is index and which is base.
        oGen.write(' %s %s, [%s + %s * %u'
                   % ( self.sInstr, oGen.gregNameBytes(iOp1, cbEffOp),
                       oGen.gregNameBits(iBaseReg, cAddrBits), oGen.gregNameBits(iIndexReg, cAddrBits), iScale,));
        if offDisp is not None:
            oGen.write(' + %#x' % (offDisp,));
        oGen.write(']\n');
        return True;
## @}
## @name Memory setups
## @{
def generateMemSetupReadByLabel(self, oGen, cbEffOp, uInput):
""" Sets up memory for a memory read. """
oGen.pushConst(uInput);
oGen.write(' call VBINSTST_NAME(Common_SetupMemReadU%u)\n' % (cbEffOp*8,));
return True;
    def generateMemSetupReadByReg(self, oGen, cAddrBits, cbEffOp, iReg1, uInput, offDisp = None):
        """ Sets up memory for a memory read indirectly addressed thru one register and optional displacement. """
        # The input value is pushed for the setup helper, then the base
        # register is pushed (presumably for the checker - verify).
        oGen.pushConst(uInput);
        oGen.write(' call VBINSTST_NAME(%s)\n'
                   % (oGen.needGRegMemSetup(cAddrBits, cbEffOp, iBaseReg = iReg1, offDisp = offDisp),));
        oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iReg1],));
        return True;
    def generateMemSetupReadByScaledReg(self, oGen, cAddrBits, cbEffOp, iIndexReg, iScale, uInput, offDisp = None):
        """ Sets up memory for a memory read indirectly addressed thru one scaled index register and optional displacement. """
        oGen.pushConst(uInput);
        oGen.write(' call VBINSTST_NAME(%s)\n'
                   % (oGen.needGRegMemSetup(cAddrBits, cbEffOp, offDisp = offDisp, iIndexReg = iIndexReg, iScale = iScale),));
        # Push the index register value (presumably for the checker - verify).
        oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iIndexReg],));
        return True;
    def generateMemSetupReadByBaseAndScaledReg(self, oGen, cAddrBits, cbEffOp, iBaseReg, iIndexReg, iScale, uInput, offDisp):
        """ Sets up memory for a memory read indirectly addressed thru two registers with optional displacement. """
        oGen.pushConst(uInput);
        oGen.write(' call VBINSTST_NAME(%s)\n'
                   % (oGen.needGRegMemSetup(cAddrBits, cbEffOp, iBaseReg = iBaseReg, offDisp = offDisp,
                                            iIndexReg = iIndexReg, iScale = iScale),));
        # Both address registers are pushed, index first then base.
        oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iIndexReg],));
        oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iBaseReg],));
        return True;
    def generateMemSetupPureRM(self, oGen, cAddrBits, cbEffOp, iOp2, iMod, uInput, offDisp = None):
        """ Sets up memory for a pure R/M addressed read, iOp2 being the R/M value. """
        oGen.pushConst(uInput);
        assert offDisp is None or iMod != 0;
        if (iOp2 != 5 and iOp2 != 13) or iMod != 0:
            # Register-based addressing: use the per-combination setup helper.
            oGen.write(' call VBINSTST_NAME(%s)\n'
                       % (oGen.needGRegMemSetup(cAddrBits, cbEffOp, iOp2, offDisp),));
        else:
            # r/m 5/13 with mod 0 addresses the data label (disp32/rip-relative).
            oGen.write(' call VBINSTST_NAME(Common_SetupMemReadU%u)\n' % (cbEffOp*8,));
        oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iOp2],));
        return True;
## @}
    def generateOneStdTestGregGreg(self, oGen, cbEffOp, cbMaxOp, iOp1, iOp1X, iOp2, iOp2X, uInput, uResult):
        """ Generate one standard instr greg,greg test. """
        oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
        # Load the input value into the source register (max-op-sized index).
        oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[iOp2X], uInput,));
        if iOp1X != iOp2X:
            # Save the source register so the checker can verify it was preserved.
            oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iOp2X],));
        self.writeInstrGregGreg(cbEffOp, iOp1, iOp2, oGen);
        oGen.pushConst(uResult);
        # The checker only gets iOp2X when it differs from iOp1X.
        oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(iOp1X, iOp2X if iOp1X != iOp2X else None),));
        _ = cbMaxOp;
        return True;
    def generateOneStdTestGregGreg8BitHighPain(self, oGen, cbEffOp, cbMaxOp, iOp1, iOp2, uInput):
        """ High 8-bit registers are a real pain! """
        assert oGen.oTarget.is8BitHighGReg(cbEffOp, iOp1) or oGen.oTarget.is8BitHighGReg(cbEffOp, iOp2);
        # Figure out the register indexes of the max op sized regs involved
        # (AH..BH alias the low dwords of xAX..xBX, hence the & 3).
        iOp1X = iOp1 & 3;
        iOp2X = iOp2 & 3;
        oGen.write(' ; iOp1=%u iOp1X=%u iOp2=%u iOp2X=%u\n' % (iOp1, iOp1X, iOp2, iOp2X,));

        # Calculate unshifted result.
        if iOp1X != iOp2X:
            uCur = oGen.auRegValues[iOp1X];
            if oGen.oTarget.is8BitHighGReg(cbEffOp, iOp1):
                # Rotate the high byte down so fnCalcResult sees it in the low byte.
                uCur = rotateRightUxx(cbMaxOp * 8, uCur, 8);
        else:
            uCur = uInput;
            if oGen.oTarget.is8BitHighGReg(cbEffOp, iOp1) != oGen.oTarget.is8BitHighGReg(cbEffOp, iOp2):
                if oGen.oTarget.is8BitHighGReg(cbEffOp, iOp1):
                    uCur = rotateRightUxx(cbMaxOp * 8, uCur, 8);
                else:
                    uCur = rotateLeftUxx(cbMaxOp * 8, uCur, 8);
        uResult = self.fnCalcResult(cbEffOp, uInput, uCur, oGen);

        # Rotate the input and/or result to match their max-op-sized registers.
        if oGen.oTarget.is8BitHighGReg(cbEffOp, iOp2):
            uInput = rotateLeftUxx(cbMaxOp * 8, uInput, 8);
        if oGen.oTarget.is8BitHighGReg(cbEffOp, iOp1):
            uResult = rotateLeftUxx(cbMaxOp * 8, uResult, 8);

        # Hand it over to an overridable worker method.
        return self.generateOneStdTestGregGreg(oGen, cbEffOp, cbMaxOp, iOp1, iOp1X, iOp2, iOp2X, uInput, uResult);
    def generateOneStdTestGregMemNoSib(self, oGen, cAddrBits, cbEffOp, cbMaxOp, iOp1, iOp2, uInput, uResult):
        """ Generate mode 0, 1 and 2 test for the R/M=iOp2. """
        if cAddrBits == 16:
            _ = cbMaxOp;    # 16-bit addressing not implemented yet.
        else:
            iMod = 0; # No disp, except for i=5.
            oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
            self.generateMemSetupPureRM(oGen, cAddrBits, cbEffOp, iOp2, iMod, uInput);
            self.writeInstrGregPureRM(cbEffOp, iOp1, cAddrBits, iOp2, iMod, None, oGen);
            oGen.pushConst(uResult);
            oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(iOp1, iOp2),));

            # r/m 5/13 is disp32/rip-relative with mod 0, so only the other
            # registers get the mod 1 (disp8) and mod 2 (disp32) variations.
            if iOp2 != 5 and iOp2 != 13:
                iMod = 1;
                for offDisp in oGen.getDispForMod(iMod):
                    oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
                    self.generateMemSetupPureRM(oGen, cAddrBits, cbEffOp, iOp2, iMod, uInput, offDisp);
                    self.writeInstrGregPureRM(cbEffOp, iOp1, cAddrBits, iOp2, iMod, offDisp, oGen);
                    oGen.pushConst(uResult);
                    oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(iOp1, iOp2),));

                iMod = 2;
                for offDisp in oGen.getDispForMod(iMod):
                    oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
                    self.generateMemSetupPureRM(oGen, cAddrBits, cbEffOp, iOp2, iMod, uInput, offDisp);
                    self.writeInstrGregPureRM(cbEffOp, iOp1, cAddrBits, iOp2, iMod, offDisp, oGen);
                    oGen.pushConst(uResult);
                    oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(iOp1, iOp2),));
        return True;
    def generateOneStdTestGregMemSib(self, oGen, cAddrBits, cbEffOp, cbMaxOp, iOp1, iMod, # pylint: disable=R0913
                                     iBaseReg, iIndexReg, iScale, uInput, uResult):
        """ Generate one SIB variations. """
        for offDisp in oGen.getDispForMod(iMod, cbEffOp):
            if ((iBaseReg == 5 or iBaseReg == 13) and iMod == 0):
                # Base 5/13 with mod 0 means no base register, just disp32.
                if iIndexReg == 4:
                    # No index either: plain label addressing.
                    if cAddrBits == 64:
                        continue; # skipping.
                    oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
                    self.generateMemSetupReadByLabel(oGen, cbEffOp, uInput);
                    self.writeInstrGregSibLabel(cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen);
                    sChecker = oGen.needGRegChecker(iOp1);
                else:
                    # Scaled index without base register.
                    oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
                    self.generateMemSetupReadByScaledReg(oGen, cAddrBits, cbEffOp, iIndexReg, iScale, uInput, offDisp);
                    self.writeInstrGregSibScaledReg(cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen);
                    sChecker = oGen.needGRegChecker(iOp1, iIndexReg);
            else:
                oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
                if iIndexReg == 4:
                    # Index 4 means no index register: base only.
                    self.generateMemSetupReadByReg(oGen, cAddrBits, cbEffOp, iBaseReg, uInput, offDisp);
                    self.writeInstrGregSibBase(cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen);
                    sChecker = oGen.needGRegChecker(iOp1, iBaseReg);
                else:
                    # Full SIB: base + scaled index (+ disp).  When base and
                    # index are the same register with scale 1, an odd
                    # displacement is made even - presumably to keep the setup
                    # helper's address arithmetic unambiguous (verify).
                    if iIndexReg == iBaseReg and iScale == 1 and offDisp is not None and (offDisp & 1):
                        if offDisp < 0: offDisp += 1;
                        else: offDisp -= 1;
                    self.generateMemSetupReadByBaseAndScaledReg(oGen, cAddrBits, cbEffOp, iBaseReg,
                                                                iIndexReg, iScale, uInput, offDisp);
                    self.writeInstrGregSibBaseAndScaledReg(cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen);
                    sChecker = oGen.needGRegChecker(iOp1, iBaseReg, iIndexReg);
            oGen.pushConst(uResult);
            oGen.write(' call VBINSTST_NAME(%s)\n' % (sChecker,));
        _ = cbMaxOp;
        return True;
def generateStdTestGregMemSib(self, oGen, cAddrBits, cbEffOp, cbMaxOp, iOp1, auInputs):
""" Generate all SIB variations for the given iOp1 (reg) value. """
assert cAddrBits in [32, 64];
i = oGen.cSibBasePerRun;
while i > 0:
oGen.iSibBaseReg = (oGen.iSibBaseReg + 1) % oGen.oTarget.getGRegCount(cAddrBits / 8);
if oGen.iSibBaseReg == X86_GREG_xSP: # no RSP testing atm.
continue;
j = oGen.getSibIndexPerRun();
while j > 0:
oGen.iSibIndexReg = (oGen.iSibIndexReg + 1) % oGen.oTarget.getGRegCount(cAddrBits / 8);
if oGen.iSibIndexReg == iOp1 and oGen.iSibIndexReg != 4 and cAddrBits != cbMaxOp:
continue; # Don't know the high bit of the address ending up the result - skip it for now.
for iMod in [0, 1, 2]:
if oGen.iSibBaseReg == iOp1 \
and ((oGen.iSibBaseReg != 5 and oGen.iSibBaseReg != 13) or iMod != 0) \
and cAddrBits != cbMaxOp:
continue; # Don't know the high bit of the address ending up the result - skip it for now.
for _ in oGen.oSibScaleRange:
oGen.iSibScale *= 2;
if oGen.iSibScale > 8:
oGen.iSibScale = 1;
for uInput in auInputs:
oGen.newSubTest();
uResult = self.fnCalcResult(cbEffOp, uInput, oGen.auRegValues[iOp1], oGen);
self.generateOneStdTestGregMemSib(oGen, cAddrBits, cbEffOp, cbMaxOp, iOp1, iMod,
oGen.iSibBaseReg, oGen.iSibIndexReg, oGen.iSibScale,
uInput, uResult);
j -= 1;
i -= 1;
return True;
    def generateStandardTests(self, oGen):
        """ Generate standard tests. """
        # Parameters.
        cbDefOp = oGen.oTarget.getDefOpBytes();
        cbMaxOp = oGen.oTarget.getMaxOpBytes();
        auShortInputs = self.generateInputs(cbDefOp, cbMaxOp, oGen);
        auLongInputs = self.generateInputs(cbDefOp, cbMaxOp, oGen, fLong = True);
        # One random register pair gets the long input list; all others the short one.
        iLongOp1 = oGen.oTarget.randGRegNoSp();
        iLongOp2 = oGen.oTarget.randGRegNoSp();

        # Register tests
        if self.fTestRegForm:
            for cbEffOp in self.acbOpVars:
                if cbEffOp > cbMaxOp:
                    continue;
                oOp2Range = range(oGen.oTarget.getGRegCount(cbEffOp));
                if oGen.oOptions.sTestSize == InstructionTestGen.ksTestSize_Tiny:
                    oOp2Range = [iLongOp2,];
                oGen.write('; cbEffOp=%u\n' % (cbEffOp,));
                for iOp1 in range(oGen.oTarget.getGRegCount(cbEffOp)):
                    if iOp1 == X86_GREG_xSP:
                        continue; # Cannot test xSP atm.
                    for iOp2 in oOp2Range:
                        if (iOp2 >= 16 and iOp1 in range(4, 16)) \
                          or (iOp1 >= 16 and iOp2 in range(4, 16)):
                            continue; # Any REX encoding turns AH,CH,DH,BH regs into SPL,BPL,SIL,DIL.
                        if iOp2 == X86_GREG_xSP:
                            continue; # Cannot test xSP atm.
                        oGen.write('; iOp2=%u cbEffOp=%u\n' % (iOp2, cbEffOp));
                        for uInput in (auLongInputs if iOp1 == iLongOp1 and iOp2 == iLongOp2 else auShortInputs):
                            oGen.newSubTest();
                            if not oGen.oTarget.is8BitHighGReg(cbEffOp, iOp1) and not oGen.oTarget.is8BitHighGReg(cbEffOp, iOp2):
                                uCur = oGen.auRegValues[iOp1 & 15] if iOp1 != iOp2 else uInput;
                                uResult = self.fnCalcResult(cbEffOp, uInput, uCur, oGen);
                                self.generateOneStdTestGregGreg(oGen, cbEffOp, cbMaxOp, iOp1, iOp1 & 15, iOp2, iOp2 & 15,
                                                                uInput, uResult);
                            else:
                                # AH/CH/DH/BH involved - needs rotate gymnastics.
                                self.generateOneStdTestGregGreg8BitHighPain(oGen, cbEffOp, cbMaxOp, iOp1, iOp2, uInput);

        # Memory test.
        if self.fTestMemForm:
            for cAddrBits in oGen.oTarget.getAddrModes():
                for cbEffOp in self.acbOpVars:
                    if cbEffOp > cbMaxOp:
                        continue;
                    for _ in oGen.getModRegRange(cbEffOp):
                        oGen.iModReg = (oGen.iModReg + 1) % oGen.oTarget.getGRegCount(cbEffOp);
                        if oGen.iModReg == X86_GREG_xSP:
                            continue; # Cannot test xSP atm.
                        if oGen.iModReg > 15:
                            continue; ## TODO AH,CH,DH,BH
                        auInputs = auLongInputs if oGen.iModReg == iLongOp1 else auShortInputs;
                        for _ in oGen.oModRmRange:
                            # NOTE(review): 'cAddrBits * 8' looks odd next to the
                            # 'cAddrBits / 8' byte count used elsewhere for
                            # getGRegCount - possibly a typo.  Verify before changing.
                            oGen.iModRm = (oGen.iModRm + 1) % oGen.oTarget.getGRegCount(cAddrBits * 8);
                            if oGen.iModRm != 4 or cAddrBits == 16:
                                for uInput in auInputs:
                                    oGen.newSubTest();
                                    if oGen.iModReg == oGen.iModRm and oGen.iModRm != 5 \
                                      and oGen.iModRm != 13 and cbEffOp != cbMaxOp:
                                        continue; # Don't know the high bit of the address ending up the result - skip it for now.
                                    uResult = self.fnCalcResult(cbEffOp, uInput, oGen.auRegValues[oGen.iModReg & 15], oGen);
                                    self.generateOneStdTestGregMemNoSib(oGen, cAddrBits, cbEffOp, cbMaxOp,
                                                                        oGen.iModReg, oGen.iModRm, uInput, uResult);
                            else:
                                # SIB - currently only short list of inputs or things may get seriously out of hand.
                                self.generateStdTestGregMemSib(oGen, cAddrBits, cbEffOp, cbMaxOp, oGen.iModReg, auShortInputs);
        return True;
def generateTest(self, oGen, sTestFnName):
oGen.write('VBINSTST_BEGINPROC %s\n' % (sTestFnName,));
self.generateStandardTests(oGen);
oGen.write(' ret\n');
oGen.write('VBINSTST_ENDPROC %s\n' % (sTestFnName,));
return True;
class InstrTest_Mov_Gv_Ev(InstrTest_MemOrGreg_2_Greg):
    """
    Tests MOV Gv,Ev.
    """
    def __init__(self):
        InstrTest_MemOrGreg_2_Greg.__init__(self, 'mov Gv,Ev', self.calc_mov);

    @staticmethod
    def calc_mov(cbEffOp, uInput, uCur, oGen):
        """ Calculates the result of a mov instruction."""
        _ = oGen;
        if cbEffOp >= 4:
            # 64-bit mov replaces the whole register; 32-bit mov zero-extends.
            assert cbEffOp in (4, 8);
            uKeptBits = 0;
            fValueMask = UINT64_MAX if cbEffOp == 8 else UINT32_MAX;
        else:
            # 8- and 16-bit movs leave the upper destination bits untouched.
            assert cbEffOp in (1, 2);
            uKeptBits = uCur & (0xffffffffffffff00 if cbEffOp == 1 else 0xffffffffffff0000);
            fValueMask = UINT8_MAX if cbEffOp == 1 else UINT16_MAX;
        return uKeptBits | (uInput & fValueMask);
class InstrTest_MovSxD_Gv_Ev(InstrTest_MemOrGreg_2_Greg):
    """
    Tests MOVSXD Gv,Ev.
    """
    def __init__(self):
        InstrTest_MemOrGreg_2_Greg.__init__(self, 'movsxd Gv,Ev', self.calc_movsxd, acbOpVars = [ 8, 4, 2, ]);
        self.fTestMemForm = False; # drop this...

    def writeInstrGregGreg(self, cbEffOp, iOp1, iOp2, oGen):
        """ Writes the instruction with two general registers as operands. """
        if cbEffOp == 8:
            # The source of a 64-bit movsxd is the 32-bit register half.
            # Note: floor division ('//') so gregNameBytes gets an integer on
            # Python 3 ('/' would hand it the float 4.0); same as '/' on Python 2.
            oGen.write(' movsxd %s, %s\n'
                       % ( oGen.gregNameBytes(iOp1, cbEffOp), oGen.gregNameBytes(iOp2, cbEffOp // 2),));
        else:
            oGen.write(' oddmovsxd %s, %s\n'
                       % ( oGen.gregNameBytes(iOp1, cbEffOp), oGen.gregNameBytes(iOp2, cbEffOp),));
        return True;

    def isApplicable(self, oGen):
        """ MOVSXD only exists on 64-bit capable targets. """
        return oGen.oTarget.is64Bit();

    @staticmethod
    def calc_movsxd(cbEffOp, uInput, uCur, oGen):
        """
        Calculates the result of a movxsd instruction.
        Returns the result value (cbMaxOp sized).
        """
        _ = oGen;
        if cbEffOp == 8 and (uInput & RT_BIT_32(31)):
            # Negative 32-bit input: sign-extend into the high half.
            return (UINT32_MAX << 32) | (uInput & UINT32_MAX);
        if cbEffOp == 2:
            # 16-bit form only touches the low word of the destination.
            return (uCur & 0xffffffffffff0000) | (uInput & 0xffff);
        return uInput & UINT32_MAX;
class InstrTest_DivIDiv(InstrTestBase):
"""
Tests IDIV and DIV instructions.
"""
def __init__(self, fIsIDiv):
if not fIsIDiv:
InstrTestBase.__init__(self, 'div Gv,Ev', 'div');
else:
InstrTestBase.__init__(self, 'idiv Gv,Ev', 'idiv');
self.fIsIDiv = fIsIDiv;
def generateInputEdgeCases(self, cbEffOp, fLong, fXcpt):
""" Generate edge case inputs for cbEffOp. Returns a list of pairs, dividen + divisor. """
# Test params.
uStep = 1 << (cbEffOp * 8);
if self.fIsIDiv:
uStep /= 2;
# edge tests
auRet = [];
uDivisor = 1 if fLong else 3;
uDividend = uStep * uDivisor - 1;
for i in range(5 if fLong else 3):
auRet.append([uDividend + fXcpt, uDivisor]);
if self.fIsIDiv:
auRet.append([-uDividend - fXcpt, -uDivisor]);
auRet.append([-(uDividend + uDivisor + fXcpt), uDivisor]);
auRet.append([ (uDividend + uDivisor + fXcpt), -uDivisor]);
if i <= 3 and fLong:
auRet.append([uDividend - 1 + fXcpt*3, uDivisor]);
if self.fIsIDiv:
auRet.append([-(uDividend - 1 + fXcpt*3), -uDivisor]);
uDivisor += 1;
uDividend += uStep;
uDivisor = uStep - 1;
uDividend = uStep * uDivisor - 1;
for _ in range(3 if fLong else 1):
auRet.append([uDividend + fXcpt, uDivisor]);
if self.fIsIDiv:
auRet.append([-uDividend - fXcpt, -uDivisor]);
uDivisor -= 1;
uDividend -= uStep;
if self.fIsIDiv:
uDivisor = -uStep;
for _ in range(3 if fLong else 1):
auRet.append([uDivisor * (-uStep - 1) - (not fXcpt), uDivisor]);
uDivisor += 1
uDivisor = uStep - 1;
for _ in range(3 if fLong else 1):
auRet.append([-(uDivisor * (uStep + 1) - (not fXcpt)), uDivisor]);
uDivisor -= 1
return auRet;
    def generateInputsNoXcpt(self, cbEffOp, fLong = False):
        """ Generate inputs for cbEffOp. Returns a list of pairs, dividend + divisor. """
        # Test params.
        uStep = 1 << (cbEffOp * 8);
        if self.fIsIDiv:
            uStep /= 2;   # Signed range is half the unsigned one.  NOTE: Python 2 integer '/'.
        # edge tests
        auRet = self.generateInputEdgeCases(cbEffOp, fLong, False)
        # random tests.
        if self.fIsIDiv:
            for _ in range(6 if fLong else 2):
                # Rejection-sample until both divisor and quotient are in range.
                while True:
                    uDivisor = randSxx(cbEffOp * 8);
                    if uDivisor == 0 or uDivisor >= uStep or uDivisor < -uStep:
                        continue;
                    uDividend = randSxx(cbEffOp * 16);
                    uResult = uDividend / uDivisor;
                    if uResult >= uStep or uResult <= -uStep: # exclude difficulties
                        continue;
                    break;
                auRet.append([uDividend, uDivisor]);
        else:
            for _ in range(6 if fLong else 2):
                while True:
                    uDivisor = randUxx(cbEffOp * 8);
                    if uDivisor == 0 or uDivisor >= uStep:
                        continue;
                    uDividend = randUxx(cbEffOp * 16);
                    uResult = uDividend / uDivisor;
                    if uResult >= uStep:
                        continue;
                    break;
                auRet.append([uDividend, uDivisor]);
        return auRet;
    def generateOneStdTestGreg(self, oGen, cbEffOp, iOp2, iDividend, iDivisor):
        """
        Generate code of one '[I]DIV rDX:rAX,<GREG>' test.
        Computes the expected quotient/remainder in Python, loads rDX:rAX and the
        divisor register, executes the instruction and calls a checker function.
        """
        cbMaxOp = oGen.oTarget.getMaxOpBytes();
        fEffOp = ((1 << (cbEffOp *8) ) - 1);
        fMaxOp = UINT64_MAX if cbMaxOp == 8 else UINT32_MAX; assert cbMaxOp in [8, 4];
        fTopOp = fMaxOp - fEffOp;
        fFullOp1 = ((1 << (cbEffOp*16)) - 1);
        uAX = iDividend & fFullOp1; # full with unsigned
        uDX = uAX >> (cbEffOp*8);
        uAX &= fEffOp;
        uOp2Val = iDivisor & fEffOp;
        iQuotient = iDividend / iDivisor;   # NOTE: Python 2 integer division.
        iReminder = iDividend % iDivisor;
        if iReminder != 0 and iQuotient < 0: # python has different rounding rules for negative division.
            iQuotient += 1;
            iReminder -= iDivisor;
        uAXResult = iQuotient & fEffOp;
        uDXResult = iReminder & fEffOp;
        if cbEffOp < cbMaxOp:
            # Stuff random garbage into the unused upper register bits.
            uAX |= randUxx(cbMaxOp * 8) & fTopOp;
            uDX |= randUxx(cbMaxOp * 8) & fTopOp;
            uOp2Val |= randUxx(cbMaxOp * 8) & fTopOp;
            if cbEffOp < 4:
                # 8/16-bit ops leave the upper bits untouched; expect the garbage back.
                uAXResult |= uAX & fTopOp;
                uDXResult |= uDX & fTopOp;
        oGen.write('        ; iDividend=%#x (%d) iDivisor=%#x (%d)\n'
                   '        ; iQuotient=%#x (%d) iReminder=%#x (%d)\n'
                   % ( iDividend & fFullOp1, iDividend, iDivisor & fEffOp, iDivisor,
                       iQuotient & fEffOp, iQuotient, iReminder & fEffOp, iReminder, ));
        oGen.write('        call VBINSTST_NAME(Common_LoadKnownValues)\n');
        oGen.write('        mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[X86_GREG_xDX], uDX,));
        oGen.write('        mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[X86_GREG_xAX], uAX,));
        oGen.write('        mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[iOp2], uOp2Val,));
        oGen.write('        push %s\n' % (oGen.oTarget.asGRegs[iOp2],));
        oGen.pushConst(uDXResult);
        oGen.pushConst(uAXResult);
        oGen.write('        %-4s %s\n' % (self.sInstr, oGen.gregNameBytes(iOp2, cbEffOp),));
        oGen.write('        call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(X86_GREG_xAX, X86_GREG_xDX, iOp2),));
        return True;
    def generateOneStdTestGreg8Bit(self, oGen, cbEffOp, iOp2, iDividend, iDivisor):
        """
        Generate code of one '[I]DIV AX,<GREG>' test (8-bit).
        The 8-bit form divides AX and returns AL=quotient, AH=remainder.
        """
        cbMaxOp = oGen.oTarget.getMaxOpBytes();
        fMaxOp = UINT64_MAX if cbMaxOp == 8 else UINT32_MAX; assert cbMaxOp in [8, 4];
        # High byte registers (AH..BH) alias the low dword regs; map to the base index.
        iOp2X = (iOp2 & 3) if oGen.oTarget.is8BitHighGReg(cbEffOp, iOp2) else iOp2;
        assert iOp2X != X86_GREG_xAX;
        uAX = iDividend & UINT16_MAX; # full with unsigned
        uOp2Val = iDivisor & UINT8_MAX;
        iQuotient = iDividend / iDivisor;   # NOTE: Python 2 integer division.
        iReminder = iDividend % iDivisor;
        if iReminder != 0 and iQuotient < 0: # python has different rounding rules for negative division.
            iQuotient += 1;
            iReminder -= iDivisor;
        uAXResult = (iQuotient & UINT8_MAX) | ((iReminder & UINT8_MAX) << 8);
        # Garbage in the unused upper bits; expect it back unchanged.
        uAX |= randUxx(cbMaxOp * 8) & (fMaxOp - UINT16_MAX);
        uAXResult |= uAX & (fMaxOp - UINT16_MAX);
        uOp2Val |= randUxx(cbMaxOp * 8) & (fMaxOp - UINT8_MAX);
        if iOp2X != iOp2:
            # Place the divisor value in the high byte for AH/CH/DH/BH operands.
            uOp2Val = rotateLeftUxx(cbMaxOp * 8, uOp2Val, 8);
        oGen.write('        ; iDividend=%#x (%d) iDivisor=%#x (%d)\n'
                   '        ; iQuotient=%#x (%d) iReminder=%#x (%d)\n'
                   % ( iDividend & UINT16_MAX, iDividend, iDivisor & UINT8_MAX, iDivisor,
                       iQuotient & UINT8_MAX, iQuotient, iReminder & UINT8_MAX, iReminder, ));
        oGen.write('        call VBINSTST_NAME(Common_LoadKnownValues)\n');
        oGen.write('        mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[X86_GREG_xAX], uAX,));
        oGen.write('        mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[iOp2X], uOp2Val,));
        oGen.write('        push %s\n' % (oGen.oTarget.asGRegs[iOp2X],));
        oGen.pushConst(uAXResult);
        oGen.write('        %-4s %s\n' % (self.sInstr, oGen.gregNameBytes(iOp2, cbEffOp),));
        oGen.write('        call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(X86_GREG_xAX, iOp2X),));
        return;
    def generateStandardTests(self, oGen):
        """
        Generates test that causes no exceptions.
        Iterates all operand sizes and (size permitting) all divisor registers,
        skipping the operands that cannot work (xSP, xAX, xDX, AH).
        """
        # Parameters.
        iLongOp2 = oGen.oTarget.randGRegNoSp();
        # Register tests
        if True:
            for cbEffOp in ( 8, 4, 2, 1 ):
                if cbEffOp > oGen.oTarget.getMaxOpBytes():
                    continue;
                oGen.write('; cbEffOp=%u\n' % (cbEffOp,));
                oOp2Range = range(oGen.oTarget.getGRegCount(cbEffOp));
                if oGen.oOptions.sTestSize == InstructionTestGen.ksTestSize_Tiny:
                    oOp2Range = [iLongOp2,];   # Tiny mode: one randomly chosen register only.
                for iOp2 in oOp2Range:
                    if iOp2 == X86_GREG_xSP:
                        continue; # Cannot test xSP atm.
                    if iOp2 == X86_GREG_xAX or (cbEffOp > 1 and iOp2 == X86_GREG_xDX):
                        continue; # Will overflow or be too complicated to get right.
                    if cbEffOp == 1 and iOp2 == (16 if oGen.oTarget.is64Bit() else 4):
                        continue; # Avoid dividing by AH, same reasons as above.
                    for iDividend, iDivisor in self.generateInputsNoXcpt(cbEffOp, iOp2 == iLongOp2):
                        oGen.newSubTest();
                        if cbEffOp > 1:
                            self.generateOneStdTestGreg(oGen, cbEffOp, iOp2, iDividend, iDivisor);
                        else:
                            self.generateOneStdTestGreg8Bit(oGen, cbEffOp, iOp2, iDividend, iDivisor);
        ## Memory test.
        #if False:
        #    for cAddrBits in oGen.oTarget.getAddrModes():
        #        for cbEffOp in self.acbOpVars:
        #            if cbEffOp > cbMaxOp:
        #                continue;
        #
        #            auInputs = auLongInputs if oGen.iModReg == iLongOp1 else auShortInputs;
        #            for _ in oGen.oModRmRange:
        #                oGen.iModRm = (oGen.iModRm + 1) % oGen.oTarget.getGRegCount(cAddrBits * 8);
        #                if oGen.iModRm != 4 or cAddrBits == 16:
        #                    for uInput in auInputs:
        #                        oGen.newSubTest();
        #                        if oGen.iModReg == oGen.iModRm and oGen.iModRm != 5 and oGen.iModRm != 13 and cbEffOp != cbMaxOp:
        #                            continue; # Don't know the high bit of the address ending up the result - skip it for now.
        #                        uResult = self.fnCalcResult(cbEffOp, uInput, oGen.auRegValues[oGen.iModReg & 15], oGen);
        #                        self.generateOneStdTestGregMemNoSib(oGen, cAddrBits, cbEffOp, cbMaxOp,
        #                                                            oGen.iModReg, oGen.iModRm, uInput, uResult);
        #                else:
        #                    # SIB - currently only short list of inputs or things may get seriously out of hand.
        #                    self.generateStdTestGregMemSib(oGen, cAddrBits, cbEffOp, cbMaxOp, oGen.iModReg, auShortInputs);
        #
        return True;
    def generateInputsXcpt(self, cbEffOp, fLong = False):
        """
        Generate inputs for cbEffOp that will overflow or underflow.
        Returns a list of pairs, dividend + divisor.
        Includes divide-by-zero cases and rejection-sampled overflowing quotients.
        """
        # Test params.
        uStep = 1 << (cbEffOp * 8);
        if self.fIsIDiv:
            uStep /= 2;   # Signed range is half the unsigned one.  NOTE: Python 2 integer '/'.
        # edge tests
        auRet = self.generateInputEdgeCases(cbEffOp, fLong, True);
        auRet.extend([[0, 0], [1, 0], [ uStep * uStep / 2 - 1, 0]]);   # Divide-by-zero cases.
        # random tests.
        if self.fIsIDiv:
            for _ in range(6 if fLong else 2):
                while True:
                    uDivisor = randSxx(cbEffOp * 8);
                    uDividend = randSxx(cbEffOp * 16);
                    if uDivisor >= uStep or uDivisor < -uStep:
                        continue;
                    if uDivisor != 0:
                        uResult = uDividend / uDivisor;
                        if (uResult <= uStep and uResult >= 0) or (uResult >= -uStep and uResult < 0):
                            continue; # exclude difficulties
                    break;
                auRet.append([uDividend, uDivisor]);
        else:
            for _ in range(6 if fLong else 2):
                while True:
                    uDivisor = randUxx(cbEffOp * 8);
                    uDividend = randUxx(cbEffOp * 16);
                    if uDivisor >= uStep:
                        continue;
                    if uDivisor != 0:
                        uResult = uDividend / uDivisor;
                        if uResult < uStep:
                            continue;
                    break;
                auRet.append([uDividend, uDivisor]);
        return auRet;
    def generateOneDivideErrorTestGreg(self, oGen, cbEffOp, iOp2, iDividend, iDivisor):
        """
        Generate code of one '[I]DIV rDX:rAX,<GREG>' test that causes #DE.
        The input registers are pushed as the expected values since #DE leaves them unchanged.
        """
        cbMaxOp = oGen.oTarget.getMaxOpBytes();
        fEffOp = ((1 << (cbEffOp *8) ) - 1);
        fMaxOp = UINT64_MAX if cbMaxOp == 8 else UINT32_MAX; assert cbMaxOp in [8, 4];
        fTopOp = fMaxOp - fEffOp;
        fFullOp1 = ((1 << (cbEffOp*16)) - 1);
        uAX = iDividend & fFullOp1; # full with unsigned
        uDX = uAX >> (cbEffOp*8);
        uAX &= fEffOp;
        uOp2Val = iDivisor & fEffOp;
        if cbEffOp < cbMaxOp:
            # Stuff random garbage into the unused upper register bits.
            uAX |= randUxx(cbMaxOp * 8) & fTopOp;
            uDX |= randUxx(cbMaxOp * 8) & fTopOp;
            uOp2Val |= randUxx(cbMaxOp * 8) & fTopOp;
        oGen.write('        ; iDividend=%#x (%d) iDivisor=%#x (%d)\n'
                   % ( iDividend & fFullOp1, iDividend, iDivisor & fEffOp, iDivisor,));
        oGen.write('        call VBINSTST_NAME(Common_LoadKnownValues)\n');
        oGen.write('        mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[X86_GREG_xDX], uDX,));
        oGen.write('        mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[X86_GREG_xAX], uAX,));
        oGen.write('        mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[iOp2], uOp2Val,));
        oGen.write('        push %s\n' % (oGen.oTarget.asGRegs[iOp2],));
        oGen.write('        push %s\n' % (oGen.oTarget.asGRegs[X86_GREG_xDX],));
        oGen.write('        push %s\n' % (oGen.oTarget.asGRegs[X86_GREG_xAX],));
        oGen.write('        VBINSTST_TRAP_INSTR X86_XCPT_DE, 0, %-4s %s\n'
                   % (self.sInstr, oGen.gregNameBytes(iOp2, cbEffOp),));
        oGen.write('        call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(X86_GREG_xAX, X86_GREG_xDX, iOp2),));
        return True;
    def generateOneDivideErrorTestGreg8Bit(self, oGen, cbEffOp, iOp2, iDividend, iDivisor):
        """
        Generate code of one '[I]DIV AX,<GREG>' test that causes #DE (8-bit).
        """
        if not oGen.oTarget.is64Bit() and iOp2 == 4: # Avoid AH.
            iOp2 = 5;
        cbMaxOp = oGen.oTarget.getMaxOpBytes();
        fMaxOp = UINT64_MAX if cbMaxOp == 8 else UINT32_MAX; assert cbMaxOp in [8, 4];
        # High byte registers (AH..BH) alias the low dword regs; map to the base index.
        iOp2X = (iOp2 & 3) if oGen.oTarget.is8BitHighGReg(cbEffOp, iOp2) else iOp2;
        assert iOp2X != X86_GREG_xAX;
        uAX = iDividend & UINT16_MAX; # full with unsigned
        uOp2Val = iDivisor & UINT8_MAX;
        # Garbage in the unused upper bits.
        uAX |= randUxx(cbMaxOp * 8) & (fMaxOp - UINT16_MAX);
        uOp2Val |= randUxx(cbMaxOp * 8) & (fMaxOp - UINT8_MAX);
        if iOp2X != iOp2:
            # Place the divisor value in the high byte for AH/CH/DH/BH operands.
            uOp2Val = rotateLeftUxx(cbMaxOp * 8, uOp2Val, 8);
        oGen.write('        ; iDividend=%#x (%d) iDivisor=%#x (%d)\n'
                   % ( iDividend & UINT16_MAX, iDividend, iDivisor & UINT8_MAX, iDivisor,));
        oGen.write('        call VBINSTST_NAME(Common_LoadKnownValues)\n');
        oGen.write('        mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[X86_GREG_xAX], uAX,));
        oGen.write('        mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[iOp2X], uOp2Val,));
        oGen.write('        push %s\n' % (oGen.oTarget.asGRegs[iOp2X],));
        oGen.write('        push sAX\n');
        oGen.write('        VBINSTST_TRAP_INSTR X86_XCPT_DE, 0, %-4s %s\n'
                   % (self.sInstr, oGen.gregNameBytes(iOp2, cbEffOp),));
        oGen.write('        call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(X86_GREG_xAX, iOp2X),));
        return;
def generateDivideErrorTests(self, oGen):
""" Generate divide error tests (raises X86_XCPT_DE). """
oGen.write('%ifdef VBINSTST_CAN_DO_TRAPS\n');
# We do one register variation here, assuming the standard test has got them covered.
# Register tests
if True:
iOp2 = oGen.oTarget.randGRegNoSp();
while iOp2 == X86_GREG_xAX or iOp2 == X86_GREG_xDX:
iOp2 = oGen.oTarget.randGRegNoSp();
for cbEffOp in ( 8, 4, 2, 1 ):
if cbEffOp > oGen.oTarget.getMaxOpBytes():
continue;
oGen.write('; cbEffOp=%u iOp2=%u\n' % (cbEffOp, iOp2,));
for iDividend, iDivisor in self.generateInputsXcpt(cbEffOp, fLong = not oGen.isTiny()):
oGen.newSubTest();
if cbEffOp > 1:
self.generateOneDivideErrorTestGreg(oGen, cbEffOp, iOp2, iDividend, iDivisor);
else:
self.generateOneDivideErrorTestGreg8Bit(oGen, cbEffOp, iOp2, iDividend, iDivisor);
oGen.write('%endif ; VBINSTST_CAN_DO_TRAPS\n');
return True;
def generateTest(self, oGen, sTestFnName):
oGen.write('VBINSTST_BEGINPROC %s\n' % (sTestFnName,));
#oGen.write(' int3\n');
self.generateStandardTests(oGen);
self.generateDivideErrorTests(oGen);
#oGen.write(' int3\n');
oGen.write(' ret\n');
oGen.write('VBINSTST_ENDPROC %s\n' % (sTestFnName,));
return True;
class InstrTest_DaaDas(InstrTestBase):
    """ Tests the DAA and DAS instructions. """
    def __init__(self, fIsDas):
        # fIsDas selects DAS; otherwise DAA is tested.
        InstrTestBase.__init__(self, 'das' if fIsDas else 'daa');
        self.fIsDas = fIsDas;
    def isApplicable(self, oGen):
        # DAA/DAS are invalid in 64-bit mode.
        return not oGen.oTarget.is64Bit();
    def generateTest(self, oGen, sTestFnName):
        """
        Emits a loop over precalculated result tables (itgTableDaa/itgTableDas),
        reconstructing the AL/CF/AF inputs from the loop counter ebx.
        """
        if self.fIsDas: from itgTableDas import g_aItgDasResults as aItgResults;
        else: from itgTableDaa import g_aItgDaaResults as aItgResults;
        cMax = len(aItgResults);
        if oGen.isTiny():
            cMax = 64;   # Tiny mode: only check the first 64 table entries.
        oGen.write('VBINSTST_BEGINPROC %s\n' % (sTestFnName,));
        oGen.write('        xor ebx, ebx\n');
        oGen.write('.das_loop:\n');
        # Save the loop variable so we can load known values.
        oGen.write('        push ebx\n');
        oGen.newSubTestEx('ebx');
        # Push the results.
        oGen.write('        movzx eax, byte [.abAlResults + ebx]\n');
        oGen.write('        or eax, %#x\n' % (oGen.au32Regs[X86_GREG_xAX] & ~0xff,));
        oGen.write('        push eax\n');
        oGen.write('        movzx eax, byte [.aFlagsResults + ebx]\n');
        oGen.write('        push eax\n');
        # Calc and push the inputs.
        oGen.write('        mov eax, ebx\n');
        oGen.write('        shr eax, 2\n');
        oGen.write('        and eax, 0ffh\n');
        oGen.write('        or eax, %#x\n' % (oGen.au32Regs[X86_GREG_xAX] & ~0xff,));
        oGen.write('        push eax\n');
        oGen.write('        pushfd\n')
        oGen.write('        and dword [xSP], ~(X86_EFL_CF | X86_EFL_AF)\n');
        oGen.write('        mov al, bl\n');
        oGen.write('        and al, 2\n');
        oGen.write('        shl al, X86_EFL_AF_BIT - 1\n');
        oGen.write('        or [xSP], al\n');
        oGen.write('        mov al, bl\n');
        oGen.write('        and al, X86_EFL_CF\n');
        oGen.write('        or [xSP], al\n');
        # Load register values and do the test.
        oGen.write('        call VBINSTST_NAME(Common_LoadKnownValues)\n');
        oGen.write('        popfd\n');
        oGen.write('        pop eax\n');
        if self.fIsDas:
            oGen.write('        das\n');
        else:
            oGen.write('        daa\n');
        # Verify the results.
        fFlagsToCheck = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_ZF;
        oGen.write('        call VBINSTST_NAME(%s)\n' % (oGen.needFlagsGRegChecker(fFlagsToCheck, X86_GREG_xAX),));
        # Restore the loop variable and advance.
        oGen.write('        pop ebx\n');
        oGen.write('        inc ebx\n');
        oGen.write('        cmp ebx, %#x\n' % (cMax,));
        oGen.write('        jb .das_loop\n');
        oGen.write('        ret\n');
        oGen.write('.abAlResults:\n');
        for i in range(cMax):
            oGen.write('        db %#x\n' % (aItgResults[i][0],));
        oGen.write('.aFlagsResults:\n');
        for i in range(cMax):
            oGen.write('        db %#x\n' % (aItgResults[i][1],));
        oGen.write('VBINSTST_ENDPROC %s\n' % (sTestFnName,));
        return True;
##
# Instruction Tests.
#
# The instruction test objects the generator runs, in output order.
g_aoInstructionTests = [
    InstrTest_Mov_Gv_Ev(),
    InstrTest_MovSxD_Gv_Ev(),
    InstrTest_DivIDiv(fIsIDiv = False),
    InstrTest_DivIDiv(fIsIDiv = True),
    InstrTest_DaaDas(fIsDas = False),
    InstrTest_DaaDas(fIsDas = True),
];
class InstructionTestGen(object): # pylint: disable=R0902
    """
    Instruction Test Generator.

    Drives the InstrTest* objects and emits the assembly test files, pooling
    the common checker and memory setup functions along the way.
    """
    ## @name Test size
    ## @{
    ksTestSize_Large = 'large';
    ksTestSize_Medium = 'medium';
    ksTestSize_Tiny = 'tiny';
    ## @}
    ## All valid test size values, used for option validation.
    kasTestSizes = ( ksTestSize_Large, ksTestSize_Medium, ksTestSize_Tiny );
    ## The prefix for the checker functions.
    ksCheckerPrefix = 'Common_Check_'
def __init__(self, oOptions):
self.oOptions = oOptions;
self.oTarget = g_dTargetEnvs[oOptions.sTargetEnv];
# Calculate the number of output files.
self.cFiles = 1;
if len(g_aoInstructionTests) > self.oOptions.cInstrPerFile:
self.cFiles = len(g_aoInstructionTests) / self.oOptions.cInstrPerFile;
if self.cFiles * self.oOptions.cInstrPerFile < len(g_aoInstructionTests):
self.cFiles += 1;
# Fix the known register values.
self.au64Regs = randUxxList(64, 16);
self.au32Regs = [(self.au64Regs[i] & UINT32_MAX) for i in range(8)];
self.au16Regs = [(self.au64Regs[i] & UINT16_MAX) for i in range(8)];
self.auRegValues = self.au64Regs if self.oTarget.is64Bit() else self.au32Regs;
# Declare state variables used while generating.
self.oFile = sys.stderr;
self.iFile = -1;
self.sFile = '';
self._dCheckFns = dict();
self._dMemSetupFns = dict();
self._d64BitConsts = dict();
# State variables used while generating test convenientely placed here (lazy bird)...
self.iModReg = 0;
self.iModRm = 0;
self.iSibBaseReg = 0;
self.iSibIndexReg = 0;
self.iSibScale = 1;
if self.oOptions.sTestSize == InstructionTestGen.ksTestSize_Tiny:
self._oModRegRange = range(2);
self._oModRegRange8 = range(2);
self.oModRmRange = range(2);
self.cSibBasePerRun = 1;
self._cSibIndexPerRun = 2;
self.oSibScaleRange = range(1);
elif self.oOptions.sTestSize == InstructionTestGen.ksTestSize_Medium:
self._oModRegRange = range( 5 if self.oTarget.is64Bit() else 4);
self._oModRegRange8 = range( 6 if self.oTarget.is64Bit() else 4);
self.oModRmRange = range(5);
self.cSibBasePerRun = 5;
self._cSibIndexPerRun = 4
self.oSibScaleRange = range(2);
else:
self._oModRegRange = range(16 if self.oTarget.is64Bit() else 8);
self._oModRegRange8 = range(20 if self.oTarget.is64Bit() else 8);
self.oModRmRange = range(16 if self.oTarget.is64Bit() else 8);
self.cSibBasePerRun = 8;
self._cSibIndexPerRun = 9;
self.oSibScaleRange = range(4);
self.iSibIndexRange = 0;
#
# Methods used by instruction tests.
#
    def write(self, sText):
        """ Writes to the current output file. """
        # NOTE(review): unicode() is Python 2 only; this file predates Python 3.
        return self.oFile.write(unicode(sText));
def writeln(self, sText):
""" Writes a line to the current output file. """
self.write(sText);
return self.write('\n');
def writeInstrBytes(self, abInstr):
"""
Emits an instruction given as a sequence of bytes values.
"""
self.write(' db %#04x' % (abInstr[0],));
for i in range(1, len(abInstr)):
self.write(', %#04x' % (abInstr[i],));
return self.write('\n');
def newSubTest(self):
"""
Indicates that a new subtest has started.
"""
self.write(' mov dword [VBINSTST_NAME(g_uVBInsTstSubTestIndicator) xWrtRIP], __LINE__\n');
return True;
def newSubTestEx(self, sIndicator):
"""
Indicates that a new subtest has started.
"""
self.write(' mov dword [VBINSTST_NAME(g_uVBInsTstSubTestIndicator) xWrtRIP], %s\n' % (sIndicator, ));
return True;
def needGRegChecker(self, iReg1, iReg2 = None, iReg3 = None):
"""
Records the need for a given register checker function, returning its label.
"""
if iReg2 is not None:
if iReg3 is not None:
sName = '%s_%s_%s' % (self.oTarget.asGRegs[iReg1], self.oTarget.asGRegs[iReg2], self.oTarget.asGRegs[iReg3],);
else:
sName = '%s_%s' % (self.oTarget.asGRegs[iReg1], self.oTarget.asGRegs[iReg2],);
else:
sName = '%s' % (self.oTarget.asGRegs[iReg1],);
assert iReg3 is None;
if sName in self._dCheckFns:
self._dCheckFns[sName] += 1;
else:
self._dCheckFns[sName] = 1;
return self.ksCheckerPrefix + sName;
def needFlagsGRegChecker(self, fFlagsToCheck, iReg1, iReg2 = None, iReg3 = None):
"""
Records the need for a given rFLAGS + register checker function, returning its label.
"""
sWorkerName = self.needGRegChecker(iReg1, iReg2, iReg3);
sName = 'eflags_%#x_%s' % (fFlagsToCheck, sWorkerName[len(self.ksCheckerPrefix):]);
if sName in self._dCheckFns:
self._dCheckFns[sName] += 1;
else:
self._dCheckFns[sName] = 1;
return self.ksCheckerPrefix + sName;
    def needGRegMemSetup(self, cAddrBits, cbEffOp, iBaseReg = None, offDisp = None, iIndexReg = None, iScale = 1):
        """
        Records the need for a given memory setup function, returning its label.
        The name encodes all parameters and is parsed back apart by
        _generateMemSetupFunctions, so the grammar here must match that parser.
        """
        assert cAddrBits in [64, 32, 16];
        assert cbEffOp in [8, 4, 2, 1];
        assert iScale in [1, 2, 4, 8];
        sName = '%ubit_U%u' % (cAddrBits, cbEffOp * 8,);
        if iBaseReg is not None:
            sName += '_%s' % (gregName(iBaseReg, cAddrBits),);
            sName += '_x%u' % (iScale,);
        if iIndexReg is not None:
            sName += '_%s' % (gregName(iIndexReg, cAddrBits),);
        if offDisp is not None:
            sName += '_%#010x' % (offDisp & UINT32_MAX, );   # Fixed-width so the parser can spot it by length.
        # Reference-count the setup function so it is emitted exactly once.
        if sName in self._dMemSetupFns:
            self._dMemSetupFns[sName] += 1;
        else:
            self._dMemSetupFns[sName] = 1;
        return 'Common_MemSetup_' + sName;
def need64BitConstant(self, uVal):
"""
Records the need for a 64-bit constant, returning its label.
These constants are pooled to attempt reduce the size of the whole thing.
"""
assert uVal >= 0 and uVal <= UINT64_MAX;
if uVal in self._d64BitConsts:
self._d64BitConsts[uVal] += 1;
else:
self._d64BitConsts[uVal] = 1;
return 'g_u64Const_0x%016x' % (uVal, );
def pushConst(self, uResult):
"""
Emits a push constant value, taking care of high values on 64-bit hosts.
"""
if self.oTarget.is64Bit() and uResult >= 0x80000000:
self.write(' push qword [%s wrt rip]\n' % (self.need64BitConstant(uResult),));
else:
self.write(' push dword 0x%x\n' % (uResult,));
return True;
def getDispForMod(self, iMod, cbAlignment = 1):
"""
Get a set of address dispositions for a given addressing mode.
The alignment restriction is for SIB scaling.
"""
assert cbAlignment in [1, 2, 4, 8];
if iMod == 0:
aoffDisp = [ None, ];
elif iMod == 1:
aoffDisp = [ 127 & ~(cbAlignment - 1), -128 ];
elif iMod == 2:
aoffDisp = [ 2147483647 & ~(cbAlignment - 1), -2147483648 ];
else: assert False;
return aoffDisp;
def getModRegRange(self, cbEffOp):
"""
The Mod R/M register range varies with the effective operand size, for
8-bit registers we have 4 more.
"""
if cbEffOp == 1:
return self._oModRegRange8;
return self._oModRegRange;
def getSibIndexPerRun(self):
"""
We vary the SIB index test range a little to try cover more operand
combinations and avoid repeating the same ones.
"""
self.iSibIndexRange += 1;
self.iSibIndexRange %= 3;
if self.iSibIndexRange == 0:
return self._cSibIndexPerRun - 1;
return self._cSibIndexPerRun;
def isTiny(self):
""" Checks if we're in tiny mode."""
return self.oOptions.sTestSize == InstructionTestGen.ksTestSize_Tiny;
def isMedium(self):
""" Checks if we're in medium mode."""
return self.oOptions.sTestSize == InstructionTestGen.ksTestSize_Medium;
    #
    # Forwarding calls for oTarget to shorted typing and lessen the attacks
    # on the right margin.
    #
    # (Pure pass-throughs; nothing on self besides oTarget is touched.)
    def gregNameBits(self, iReg, cBitsWide):
        """ Target: Get the name of a general register for the given size (in bits). """
        return self.oTarget.gregNameBits(iReg, cBitsWide);
    def gregNameBytes(self, iReg, cbWide):
        """ Target: Get the name of a general register for the given size (in bytes). """
        return self.oTarget.gregNameBytes(iReg, cbWide);
    def is64Bit(self):
        """ Target: Is the target 64-bit? """
        return self.oTarget.is64Bit();
    #
    # Internal machinery.
    #
    def _randInitIndexes(self):
        """
        Initializes the Mod R/M and SIB state index with random numbers prior
        to generating a test.
        Note! As with all other randomness and variations we do, we cannot
        test all combinations for each and every instruction so we try
        get coverage over time.
        """
        # NOTE: the randU8() call order matters for reproducing a given run; do not reorder.
        self.iModReg = randU8();
        self.iModRm = randU8();
        self.iSibBaseReg = randU8();
        self.iSibIndexReg = randU8();
        self.iSibScale = 1 << (randU8() & 3);   # Power of two in {1, 2, 4, 8}.
        self.iSibIndexRange = randU8();
        return True;
def _calcTestFunctionName(self, oInstrTest, iInstrTest):
"""
Calc a test function name for the given instruction test.
"""
sName = 'TestInstr%03u_%s' % (iInstrTest, oInstrTest.sName);
return sName.replace(',', '_').replace(' ', '_').replace('%', '_');
    def _generateFileHeader(self, ):
        """
        Writes the file header.
        Emits the env include, the global data block, the known-value constants
        and the Common_LoadKnownValues / Common_CheckKnownValues procedures.
        Raises exception on trouble.
        """
        self.write('; $Id: InstructionTestGen.py $\n'
                   ';; @file %s\n'
                   '; Autogenerate by %s %s. DO NOT EDIT\n'
                   ';\n'
                   '\n'
                   ';\n'
                   '; Headers\n'
                   ';\n'
                   '%%include "env-%s.mac"\n'
                   % ( os.path.basename(self.sFile),
                       os.path.basename(__file__), __version__[11:-1],
                       self.oTarget.sName,
                   ) );
        # Target environment specific init stuff.
        #
        # Global variables.
        #
        self.write('\n\n'
                   ';\n'
                   '; Globals\n'
                   ';\n');
        self.write('VBINSTST_BEGINDATA\n'
                   'VBINSTST_GLOBALNAME_EX g_pvLow16Mem4K, data hidden\n'
                   '        dq      0\n'
                   'VBINSTST_GLOBALNAME_EX g_pvLow32Mem4K, data hidden\n'
                   '        dq      0\n'
                   'VBINSTST_GLOBALNAME_EX g_pvMem4K, data hidden\n'
                   '        dq      0\n'
                   'VBINSTST_GLOBALNAME_EX g_uVBInsTstSubTestIndicator, data hidden\n'
                   '        dd      0\n'
                   '%ifdef VBINSTST_CAN_DO_TRAPS\n'
                   'VBINSTST_TRAP_RECS_BEGIN\n'
                   '%endif\n'
                   'VBINSTST_BEGINCODE\n'
                   );
        # 64-bit only: the known register values as memory constants for cmp.
        self.write('%ifdef RT_ARCH_AMD64\n');
        for i in range(len(g_asGRegs64)):
            self.write('g_u64KnownValue_%s: dq 0x%x\n' % (g_asGRegs64[i], self.au64Regs[i]));
        self.write('%endif\n\n')
        #
        # Common functions.
        #
        # Loading common values.
        self.write('\n\n'
                   'VBINSTST_BEGINPROC Common_LoadKnownValues\n'
                   '%ifdef RT_ARCH_AMD64\n');
        for i in range(len(g_asGRegs64NoSp)):
            if g_asGRegs64NoSp[i]:
                self.write('        mov     %s, 0x%x\n' % (g_asGRegs64NoSp[i], self.au64Regs[i],));
        self.write('%else\n');
        for i in range(8):
            if g_asGRegs32NoSp[i]:
                self.write('        mov     %s, 0x%x\n' % (g_asGRegs32NoSp[i], self.au32Regs[i],));
        self.write('%endif\n'
                   '        ret\n'
                   'VBINSTST_ENDPROC   Common_LoadKnownValues\n'
                   '\n');
        # Verifying that registers not involved in a test kept their known values.
        self.write('VBINSTST_BEGINPROC Common_CheckKnownValues\n'
                   '%ifdef RT_ARCH_AMD64\n');
        for i in range(len(g_asGRegs64NoSp)):
            if g_asGRegs64NoSp[i]:
                self.write('        cmp     %s, [g_u64KnownValue_%s wrt rip]\n'
                           '        je      .ok_%u\n'
                           '        push    %u         ; register number\n'
                           '        push    %s         ; actual\n'
                           '        push    qword [g_u64KnownValue_%s wrt rip] ; expected\n'
                           '        call    VBINSTST_NAME(Common_BadValue)\n'
                           '.ok_%u:\n'
                           % ( g_asGRegs64NoSp[i], g_asGRegs64NoSp[i], i, i, g_asGRegs64NoSp[i], g_asGRegs64NoSp[i], i,));
        self.write('%else\n');
        for i in range(8):
            if g_asGRegs32NoSp[i]:
                self.write('        cmp     %s, 0x%x\n'
                           '        je      .ok_%u\n'
                           '        push    %u         ; register number\n'
                           '        push    %s         ; actual\n'
                           '        push    dword 0x%x ; expected\n'
                           '        call    VBINSTST_NAME(Common_BadValue)\n'
                           '.ok_%u:\n'
                           % ( g_asGRegs32NoSp[i], self.au32Regs[i], i, i, g_asGRegs32NoSp[i], self.au32Regs[i], i,));
        self.write('%endif\n'
                   '        ret\n'
                   'VBINSTST_ENDPROC   Common_CheckKnownValues\n'
                   '\n');
        return True;
    def _generateMemSetupFunctions(self): # pylint: disable=R0915
        """
        Generates the memory setup functions.
        Each function name recorded by needGRegMemSetup is parsed back into its
        components (addr bits, operand bits, base reg, scale, index reg, disp)
        and a matching Common_MemSetup_* procedure is emitted.
        """
        cDefAddrBits = self.oTarget.getDefAddrBits();
        for sName in self._dMemSetupFns:
            # Unpack it.
            asParams = sName.split('_');
            cAddrBits = int(asParams[0][:-3]); assert asParams[0][-3:] == 'bit';
            cEffOpBits = int(asParams[1][1:]); assert asParams[1][0] == 'U';
            if cAddrBits == 64: asAddrGRegs = g_asGRegs64;
            elif cAddrBits == 32: asAddrGRegs = g_asGRegs32;
            else: asAddrGRegs = g_asGRegs16;
            i = 2;
            iBaseReg = None;
            sBaseReg = None;
            if i < len(asParams) and asParams[i] in asAddrGRegs:
                sBaseReg = asParams[i];
                iBaseReg = asAddrGRegs.index(sBaseReg);
                i += 1
            assert i < len(asParams); assert asParams[i][0] == 'x';
            iScale = iScale = int(asParams[i][1:]); assert iScale in [1, 2, 4, 8], '%u %s' % (iScale, sName);
            i += 1;
            sIndexReg = None;
            iIndexReg = None;
            if i < len(asParams) and asParams[i] in asAddrGRegs:
                sIndexReg = asParams[i];
                iIndexReg = asAddrGRegs.index(sIndexReg);
                i += 1;
            u32Disp = None;
            if i < len(asParams) and len(asParams[i]) == 10:
                # Displacements were formatted with %#010x, hence exactly 10 chars.
                u32Disp = long(asParams[i], 16);    # NOTE: long() is Python 2 only.
                i += 1;
            assert i == len(asParams), 'i=%d len=%d len[i]=%d (%s)' % (i, len(asParams), len(asParams[i]), asParams[i],);
            assert iScale == 1 or iIndexReg is not None;
            # Find a temporary register.
            iTmpReg1 = X86_GREG_xCX;
            while iTmpReg1 in [iBaseReg, iIndexReg]:
                iTmpReg1 += 1;
            # Prologue.
            self.write('\n\n'
                       '; cAddrBits=%s cEffOpBits=%s iBaseReg=%s u32Disp=%s iIndexReg=%s iScale=%s\n'
                       'VBINSTST_BEGINPROC Common_MemSetup_%s\n'
                       '        MY_PUSH_FLAGS\n'
                       '        push    %s\n'
                       % ( cAddrBits, cEffOpBits, iBaseReg, u32Disp, iIndexReg, iScale,
                           sName, self.oTarget.asGRegs[iTmpReg1], ));
            # Figure out what to use.
            if cEffOpBits == 64:
                sTmpReg1 = g_asGRegs64[iTmpReg1];
                sDataVar = 'VBINSTST_NAME(g_u64Data)';
            elif cEffOpBits == 32:
                sTmpReg1 = g_asGRegs32[iTmpReg1];
                sDataVar = 'VBINSTST_NAME(g_u32Data)';
            elif cEffOpBits == 16:
                sTmpReg1 = g_asGRegs16[iTmpReg1];
                sDataVar = 'VBINSTST_NAME(g_u16Data)';
            else:
                assert cEffOpBits == 8; assert iTmpReg1 < 4;
                sTmpReg1 = g_asGRegs8Rex[iTmpReg1];
                sDataVar = 'VBINSTST_NAME(g_u8Data)';
            # Special case: reg + reg * [2,4,8]
            if iBaseReg == iIndexReg and iBaseReg is not None and iScale != 1:
                iTmpReg2 = X86_GREG_xBP;
                while iTmpReg2 in [iBaseReg, iIndexReg, iTmpReg1]:
                    iTmpReg2 += 1;
                sTmpReg2 = self.gregNameBits(iTmpReg2, cAddrBits);
                self.write('        push    sAX\n'
                           '        push    %s\n'
                           '        push    sDX\n'
                           % (self.oTarget.asGRegs[iTmpReg2],));
                if cAddrBits == 16:
                    self.write('        mov     %s, [VBINSTST_NAME(g_pvLow16Mem4K) xWrtRIP]\n' % (sTmpReg2,));
                else:
                    self.write('        mov     %s, [VBINSTST_NAME(g_pvLow32Mem4K) xWrtRIP]\n' % (sTmpReg2,));
                self.write('        add     %s, 0x200\n' % (sTmpReg2,));
                self.write('        mov     %s, %s\n' % (self.gregNameBits(X86_GREG_xAX, cAddrBits), sTmpReg2,));
                if u32Disp is not None:
                    self.write('        sub     %s, %d\n'
                               % ( self.gregNameBits(X86_GREG_xAX, cAddrBits), convU32ToSigned(u32Disp), ));
                # Divide the address by (iScale + 1) to get the shared base/index value.
                self.write('        xor     edx, edx\n'
                           '%if xCB == 2\n'
                           '        push    0\n'
                           '%endif\n');
                self.write('        push    %u\n' % (iScale + 1,));
                self.write('        div     %s [xSP]\n' % ('qword' if cAddrBits == 64 else 'dword',));
                self.write('        sub     %s, %s\n' % (sTmpReg2, self.gregNameBits(X86_GREG_xDX, cAddrBits),));
                self.write('        pop     sDX\n'
                           '        pop     sDX\n'); # sTmpReg2 is eff address; sAX is sIndexReg value.
                # Note! sTmpReg1 can be xDX and that's no problem now.
                self.write('        mov     %s, [xSP + sCB*3 + MY_PUSH_FLAGS_SIZE + xCB]\n' % (sTmpReg1,));
                self.write('        mov     [%s], %s\n' % (sTmpReg2, sTmpReg1,)); # Value in place.
                self.write('        pop     %s\n' % (self.oTarget.asGRegs[iTmpReg2],));
                if iBaseReg == X86_GREG_xAX:
                    self.write('        pop     %s\n' % (self.oTarget.asGRegs[iTmpReg1],));
                else:
                    self.write('        mov     %s, %s\n' % (sBaseReg, self.gregNameBits(X86_GREG_xAX, cAddrBits),));
                    self.write('        pop     sAX\n');
            else:
                # Load the value and mem address, storing the value there.
                # Note! ASSUMES that the scale and disposition works fine together.
                sAddrReg = sBaseReg if sBaseReg is not None else sIndexReg;
                self.write('        mov     %s, [xSP + sCB + MY_PUSH_FLAGS_SIZE + xCB]\n' % (sTmpReg1,));
                if cAddrBits >= cDefAddrBits:
                    self.write('        mov     [%s xWrtRIP], %s\n' % (sDataVar, sTmpReg1,));
                    self.write('        lea     %s, [%s xWrtRIP]\n' % (sAddrReg, sDataVar,));
                else:
                    if cAddrBits == 16:
                        self.write('        mov     %s, [VBINSTST_NAME(g_pvLow16Mem4K) xWrtRIP]\n' % (sAddrReg,));
                    else:
                        self.write('        mov     %s, [VBINSTST_NAME(g_pvLow32Mem4K) xWrtRIP]\n' % (sAddrReg,));
                    self.write('        add     %s, %s\n' % (sAddrReg, (randU16() << cEffOpBits) & 0xfff, ));
                    self.write('        mov     [%s], %s\n' % (sAddrReg, sTmpReg1, ));
                # Adjust for disposition and scaling.
                if u32Disp is not None:
                    self.write('        sub     %s, %d\n' % ( sAddrReg, convU32ToSigned(u32Disp), ));
                if iIndexReg is not None:
                    if iBaseReg == iIndexReg:
                        assert iScale == 1;
                        assert u32Disp is None or (u32Disp & 1) == 0;
                        self.write('        shr     %s, 1\n' % (sIndexReg,));
                    elif sBaseReg is not None:
                        uIdxRegVal = randUxx(cAddrBits);
                        if cAddrBits == 64:
                            self.write('        mov     %s, %u\n'
                                       '        sub     %s, %s\n'
                                       '        mov     %s, %u\n'
                                       % ( sIndexReg, (uIdxRegVal * iScale) & UINT64_MAX,
                                           sBaseReg, sIndexReg,
                                           sIndexReg, uIdxRegVal, ));
                        else:
                            assert cAddrBits == 32;
                            self.write('        mov     %s, %u\n'
                                       '        sub     %s, %#06x\n'
                                       % ( sIndexReg, uIdxRegVal, sBaseReg, (uIdxRegVal * iScale) & UINT32_MAX, ));
                    elif iScale == 2:
                        assert u32Disp is None or (u32Disp & 1) == 0;
                        self.write('        shr     %s, 1\n' % (sIndexReg,));
                    elif iScale == 4:
                        assert u32Disp is None or (u32Disp & 3) == 0;
                        self.write('        shr     %s, 2\n' % (sIndexReg,));
                    elif iScale == 8:
                        assert u32Disp is None or (u32Disp & 7) == 0;
                        self.write('        shr     %s, 3\n' % (sIndexReg,));
                    else:
                        assert iScale == 1;
            # Set upper bits that's supposed to be unused.
            if cDefAddrBits > cAddrBits or cAddrBits == 16:
                if cDefAddrBits == 64:
                    assert cAddrBits == 32;
                    if iBaseReg is not None:
                        self.write('        mov     %s, %#018x\n'
                                   '        or      %s, %s\n'
                                   % ( g_asGRegs64[iTmpReg1], randU64() & 0xffffffff00000000,
                                       g_asGRegs64[iBaseReg], g_asGRegs64[iTmpReg1],));
                    if iIndexReg is not None and iIndexReg != iBaseReg:
                        self.write('        mov     %s, %#018x\n'
                                   '        or      %s, %s\n'
                                   % ( g_asGRegs64[iTmpReg1], randU64() & 0xffffffff00000000,
                                       g_asGRegs64[iIndexReg], g_asGRegs64[iTmpReg1],));
                else:
                    assert cDefAddrBits == 32; assert cAddrBits == 16; assert iIndexReg is None;
                    if iBaseReg is not None:
                        self.write('        or      %s, %#010x\n'
                                   % ( g_asGRegs32[iBaseReg], randU32() & 0xffff0000, ));
            # Epilogue.
            self.write('        pop     %s\n'
                       '        MY_POP_FLAGS\n'
                       '        ret     sCB\n'
                       'VBINSTST_ENDPROC   Common_MemSetup_%s\n'
                       % ( self.oTarget.asGRegs[iTmpReg1], sName,));
def _generateFileFooter(self):
"""
Generates file footer.
"""
# Terminate the trap records.
self.write('\n\n'
';\n'
'; Terminate the trap records\n'
';\n'
'VBINSTST_BEGINDATA\n'
'%ifdef VBINSTST_CAN_DO_TRAPS\n'
'VBINSTST_TRAP_RECS_END\n'
'%endif\n'
'VBINSTST_BEGINCODE\n');
# Register checking functions.
for sName in self._dCheckFns:
asRegs = sName.split('_');
sPushSize = 'dword';
# Do we check eflags first.
if asRegs[0] == 'eflags':
asRegs.pop(0);
sFlagsToCheck = asRegs.pop(0);
self.write('\n\n'
'; Check flags and then defers to the register-only checker\n'
'; To save space, the callee cleans up the stack.'
'; Ref count: %u\n'
'VBINSTST_BEGINPROC %s%s\n'
' MY_PUSH_FLAGS\n'
' push sAX\n'
' mov sAX, [xSP + sCB]\n'
' and sAX, %s\n'
' cmp sAX, [xSP + xCB + sCB*2]\n'
' je .equal\n'
% ( self._dCheckFns[sName], self.ksCheckerPrefix, sName,
sFlagsToCheck,));
self.write(' push dword 0xef ; register number\n'
' push sAX ; actual\n'
' mov sAX, [xSP + xCB + sCB*4]\n'
' push sAX ; expected\n'
' call VBINSTST_NAME(Common_BadValue)\n');
self.write('.equal:\n'
' mov xAX, [xSP + sCB*2]\n' # Remove the expected eflags value from the stack frame.
' mov [xSP + sCB*2 + xCB + sCB - xCB], xAX\n'
' pop sAX\n'
' MY_POP_FLAGS\n'
' lea xSP, [xSP + sCB]\n'
' jmp VBINSTST_NAME(Common_Check_%s)\n'
'VBINSTST_ENDPROC %s%s\n'
% ( '_'.join(asRegs),
self.ksCheckerPrefix, sName,) );
else:
# Prologue
self.write('\n\n'
'; Checks 1 or more register values, expected values pushed on the stack.\n'
'; To save space, the callee cleans up the stack.'
'; Ref count: %u\n'
'VBINSTST_BEGINPROC %s%s\n'
' MY_PUSH_FLAGS\n'
% ( self._dCheckFns[sName], self.ksCheckerPrefix, sName, ) );
# Register checks.
for i in range(len(asRegs)):
sReg = asRegs[i];
iReg = self.oTarget.asGRegs.index(sReg);
if i == asRegs.index(sReg): # Only check once, i.e. input = output reg.
self.write(' cmp %s, [xSP + MY_PUSH_FLAGS_SIZE + xCB + sCB * %u]\n'
' je .equal%u\n'
' push %s %u ; register number\n'
' push %s ; actual\n'
' mov %s, [xSP + sCB*2 + MY_PUSH_FLAGS_SIZE + xCB + sCB * %u]\n'
' push %s ; expected\n'
' call VBINSTST_NAME(Common_BadValue)\n'
'.equal%u:\n'
% ( sReg, i, i, sPushSize, iReg, sReg, sReg, i, sReg, i, ) );
# Restore known register values and check the other registers.
for sReg in asRegs:
if self.oTarget.is64Bit():
self.write(' mov %s, [g_u64KnownValue_%s wrt rip]\n' % (sReg, sReg,));
else:
iReg = self.oTarget.asGRegs.index(sReg)
self.write(' mov %s, 0x%x\n' % (sReg, self.au32Regs[iReg],));
self.write(' MY_POP_FLAGS\n'
' call VBINSTST_NAME(Common_CheckKnownValues)\n'
' ret sCB*%u\n'
'VBINSTST_ENDPROC %s%s\n'
% (len(asRegs), self.ksCheckerPrefix, sName,));
# memory setup functions
self._generateMemSetupFunctions();
# 64-bit constants.
if len(self._d64BitConsts) > 0:
self.write('\n\n'
';\n'
'; 64-bit constants\n'
';\n');
for uVal in self._d64BitConsts:
self.write('g_u64Const_0x%016x: dq 0x%016x ; Ref count: %d\n' % (uVal, uVal, self._d64BitConsts[uVal], ) );
return True;
    def _generateTests(self):
        """
        Generate the test cases, writing one assembly file per batch of
        self.oOptions.cInstrPerFile instruction tests.

        An output base of '-' writes to stdout instead of a file.
        Returns RTEXITCODE_SUCCESS.
        """
        for self.iFile in range(self.cFiles):
            # Pick the per-file name; a single file gets no numeric suffix.
            if self.cFiles == 1:
                self.sFile = '%s.asm' % (self.oOptions.sOutputBase,)
            else:
                self.sFile = '%s-%u.asm' % (self.oOptions.sOutputBase, self.iFile)
            self.oFile = sys.stdout;
            if self.oOptions.sOutputBase != '-':
                self.oFile = io.open(self.sFile, 'w', buffering = 65536, encoding = 'utf-8');
            self._generateFileHeader();
            # Calc the range.
            iInstrTestStart = self.iFile * self.oOptions.cInstrPerFile;
            iInstrTestEnd = iInstrTestStart + self.oOptions.cInstrPerFile;
            if iInstrTestEnd > len(g_aoInstructionTests):
                iInstrTestEnd = len(g_aoInstructionTests);
            # Generate the instruction tests.
            for iInstrTest in range(iInstrTestStart, iInstrTestEnd):
                oInstrTest = g_aoInstructionTests[iInstrTest];
                if oInstrTest.isApplicable(self):
                    self.write('\n'
                               '\n'
                               ';\n'
                               '; %s\n'
                               ';\n'
                               % (oInstrTest.sName,));
                    # Reseed the per-test index tables so each test is reproducible.
                    self._randInitIndexes();
                    oInstrTest.generateTest(self, self._calcTestFunctionName(oInstrTest, iInstrTest));
            # Generate the main function.
            self.write('\n\n'
                       'VBINSTST_BEGINPROC TestInstrMain\n'
                       ' MY_PUSH_ALL\n'
                       ' sub xSP, 40h\n'
                       '%ifdef VBINSTST_CAN_DO_TRAPS\n'
                       ' VBINSTST_TRAP_RECS_INSTALL\n'
                       '%endif\n'
                       '\n');
            # Emit one announce-and-call stanza per applicable test.
            for iInstrTest in range(iInstrTestStart, iInstrTestEnd):
                oInstrTest = g_aoInstructionTests[iInstrTest];
                if oInstrTest.isApplicable(self):
                    self.write('%%ifdef ASM_CALL64_GCC\n'
                               ' lea rdi, [.szInstr%03u wrt rip]\n'
                               '%%elifdef ASM_CALL64_MSC\n'
                               ' lea rcx, [.szInstr%03u wrt rip]\n'
                               '%%else\n'
                               ' mov xAX, .szInstr%03u\n'
                               ' mov [xSP], xAX\n'
                               '%%endif\n'
                               ' VBINSTST_CALL_FN_SUB_TEST\n'
                               ' call VBINSTST_NAME(%s)\n'
                               % ( iInstrTest, iInstrTest, iInstrTest, self._calcTestFunctionName(oInstrTest, iInstrTest)));
            self.write('\n'
                       '%ifdef VBINSTST_CAN_DO_TRAPS\n'
                       ' VBINSTST_TRAP_RECS_UNINSTALL\n'
                       '%endif\n'
                       ' add xSP, 40h\n'
                       ' MY_POP_ALL\n'
                       ' ret\n\n');
            # Local string labels with the instruction names for the announcements.
            for iInstrTest in range(iInstrTestStart, iInstrTestEnd):
                self.write('.szInstr%03u: db \'%s\', 0\n' % (iInstrTest, g_aoInstructionTests[iInstrTest].sName,));
            self.write('VBINSTST_ENDPROC TestInstrMain\n\n');
            self._generateFileFooter();
            # Close real files only; never close sys.stdout.
            if self.oOptions.sOutputBase != '-':
                self.oFile.close();
            self.oFile = None;
            self.sFile = '';
        return RTEXITCODE_SUCCESS;
def _runMakefileMode(self):
"""
Generate a list of output files on standard output.
"""
if self.cFiles == 1:
print('%s.asm' % (self.oOptions.sOutputBase,));
else:
print(' '.join('%s-%s.asm' % (self.oOptions.sOutputBase, i) for i in range(self.cFiles)));
return RTEXITCODE_SUCCESS;
def run(self):
"""
Generates the tests or whatever is required.
"""
if self.oOptions.fMakefileMode:
return self._runMakefileMode();
sys.stderr.write('InstructionTestGen.py: Seed = %s\n' % (g_iMyRandSeed,));
return self._generateTests();
    @staticmethod
    def main():
        """
        Main function a la C/C++. Returns exit code.
        Parses the command line, instantiates InstructionTestGen and runs it.
        """
        #
        # Parse the command line.
        #
        # NOTE(review): the [11:-1] slice presumably strips an SVN
        # '$Revision: ... $' keyword wrapper from __version__ -- confirm.
        oParser = OptionParser(version = __version__[11:-1].strip());
        oParser.add_option('--makefile-mode', dest = 'fMakefileMode', action = 'store_true', default = False,
                           help = 'Special mode for use to output a list of output files for the benefit of '
                                  'the make program (kmk).');
        oParser.add_option('--split', dest = 'cInstrPerFile', metavar = '<instr-per-file>', type = 'int', default = 9999999,
                           help = 'Number of instruction to test per output file.');
        oParser.add_option('--output-base', dest = 'sOutputBase', metavar = '<file>', default = None,
                           help = 'The output file base name, no suffix please. Required.');
        oParser.add_option('--target', dest = 'sTargetEnv', metavar = '<target>',
                           default = 'iprt-r3-32',
                           choices = g_dTargetEnvs.keys(),
                           help = 'The target environment. Choices: %s'
                           % (', '.join(sorted(g_dTargetEnvs.keys())),));
        oParser.add_option('--test-size', dest = 'sTestSize', default = InstructionTestGen.ksTestSize_Medium,
                           choices = InstructionTestGen.kasTestSizes,
                           help = 'Selects the test size.');
        (oOptions, asArgs) = oParser.parse_args();
        # Positional arguments are not accepted.
        if len(asArgs) > 0:
            oParser.print_help();
            return RTEXITCODE_SYNTAX
        # --output-base is mandatory (no sensible default exists).
        if oOptions.sOutputBase is None:
            print('syntax error: Missing required option --output-base.', file = sys.stderr);
            return RTEXITCODE_SYNTAX
        #
        # Instantiate the program class and run it.
        #
        oProgram = InstructionTestGen(oOptions);
        return oProgram.run();
# Script entry point: run the generator and propagate its status code.
if __name__ == '__main__':
    sys.exit(InstructionTestGen.main());
| [
"random.Random",
"os.urandom",
"io.open",
"sys.stderr.write",
"os.path.basename"
] | [((5119, 5147), 'random.Random', 'random.Random', (['g_iMyRandSeed'], {}), '(g_iMyRandSeed)\n', (5132, 5147), False, 'import random\n'), ((94670, 94743), 'sys.stderr.write', 'sys.stderr.write', (["('InstructionTestGen.py: Seed = %s\\n' % (g_iMyRandSeed,))"], {}), "('InstructionTestGen.py: Seed = %s\\n' % (g_iMyRandSeed,))\n", (94686, 94743), False, 'import sys\n'), ((5015, 5028), 'os.urandom', 'os.urandom', (['(4)'], {}), '(4)\n', (5025, 5028), False, 'import os\n'), ((90983, 91042), 'io.open', 'io.open', (['self.sFile', '"""w"""'], {'buffering': '(65536)', 'encoding': '"""utf-8"""'}), "(self.sFile, 'w', buffering=65536, encoding='utf-8')\n", (90990, 91042), False, 'import io\n'), ((71279, 71307), 'os.path.basename', 'os.path.basename', (['self.sFile'], {}), '(self.sFile)\n', (71295, 71307), False, 'import os\n'), ((71332, 71358), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (71348, 71358), False, 'import os\n')] |
# -*- coding: utf-8 -*-
""".. moduleauthor:: <NAME>"""
import abc
from copy import copy
from dataclasses import dataclass
from multiprocessing.managers import SharedMemoryManager
from multiprocessing.shared_memory import SharedMemory
from typing import Tuple, List, Optional, final, TypeVar, Generic
from torch.utils.data import Dataset
import numpy as np # type: ignore
from bann.b_data_functions.errors.custom_erors import KnownErrorBannData
@final
@dataclass
class TypeShapeCon:
    """Container describing one numpy array: its dtype and shape plus either
    the in-process data or a handle to a shared-memory copy of it.

    NOTE(review): ``data`` and ``shared_data`` appear to be mutually
    exclusive in practice -- the helpers below null one when filling the
    other -- but nothing enforces it here.
    """
    # Element dtype of the described array.
    type: np.dtype = np.dtype('float')
    # Shape of the described array.
    shape: Tuple[int, ...] = (4,)
    # In-process copy of the array (None while the data lives in shared memory).
    data: Optional[np.ndarray] = None
    # Shared-memory segment holding the raw array buffer, if any.
    shared_data: Optional[SharedMemory] = None
@final
class SmmConManger:
    """Lifecycle guard around a :class:`SharedMemoryManager`.

    The wrapped manager is started at most once and shut down at most once;
    a manager that has been shut down cannot be started again.
    """

    def __init__(self) -> None:
        self.__smm: SharedMemoryManager = SharedMemoryManager()
        self.__started: bool = False
        self.__stopped: bool = False

    @property
    def smm(self) -> SharedMemoryManager:
        """The wrapped manager instance."""
        return self.__smm

    def smm_start(self) -> None:
        """Start the manager process unless it was already started or stopped."""
        if self.__started or self.__stopped:
            return
        self.__smm.start()
        self.__started = True

    def smm_shutdown(self) -> None:
        """Shut the manager down, but only after a start and only once."""
        if not self.__started or self.__stopped:
            return
        self.__smm.shutdown()
        self.__stopped = True
# Item type produced by DataSetSharedMemoryA.__getitem__ implementations.
_TypD = TypeVar('_TypD')
class DataSetSharedMemoryA(abc.ABC, Dataset, Generic[_TypD]):
    """Torch ``Dataset`` base class whose payload can live in shared memory.

    Subclasses implement ``_getitem`` plus the shared-memory hooks; this base
    class tracks the dataset length, optional subset views created via
    :meth:`create_subsets`, and the :class:`SharedMemoryManager` handle.
    """
    def __init__(self, data_len: int, /) -> None:
        """:param data_len: number of samples in the full dataset."""
        super().__init__()
        self.__subset: List[int] = []
        self.__subsets_locked: bool = False
        self.__smm: Optional[SharedMemoryManager] = None
        self.__data_len: int = data_len
    @final
    def __len__(self) -> int:
        """Return the sample count (of the subset, once one is set)."""
        return self.__data_len
    @final
    @property
    def subset(self) -> List[int]:
        """Indices of the parent dataset this instance exposes ([] = all)."""
        return self.__subset
    @final
    def _set_subset(self, indices: List[int], /) -> None:
        # An empty index list keeps the original length (full dataset view).
        self.__subset = indices
        if indices:
            self.__data_len = len(indices)
    @final
    def _lock_subsets(self) -> None:
        # Set on shallow copies so a subset cannot be subset again.
        self.__subsets_locked = True
    @final
    def create_subsets(self, indices: List[int], /) -> 'DataSetSharedMemoryA':
        """Return a shallow copy of this dataset restricted to *indices*.

        :raises KnownErrorBannData: when called on an already created subset.
        """
        if self.__subsets_locked:
            raise KnownErrorBannData("subset of subset is prohibited")
        shallow_copy = copy(self)
        shallow_copy._set_subset(indices)
        shallow_copy._lock_subsets()
        shallow_copy._trim_shallow_copy(indices)
        return shallow_copy
    @abc.abstractmethod
    def _getitem(self, item: int, /) -> _TypD:
        """Subclass hook: return the sample at *item*."""
        raise NotImplementedError("Abstract method!")
    @final
    def __getitem__(self, item: int) -> _TypD:
        # Re-attach shared memory first: the dataset may have been sent to a
        # worker process with its local buffers detached (see pre_send_empty).
        self.remap_shared_memory()
        return self._getitem(item)
    @final
    @property
    def used_smm(self) -> Optional[SharedMemoryManager]:
        """Manager the data was moved to, or None if not in shared memory."""
        return self.__smm
    @abc.abstractmethod
    def _trim_shallow_copy(self, indices: List[int], /) -> None:
        """Subclass hook: restrict the copied payload to *indices*."""
        raise NotImplementedError("Abstract method!")
    @abc.abstractmethod
    def remap_shared_memory(self) -> None:
        """Subclass hook: rebuild local views onto shared-memory buffers."""
        raise NotImplementedError("Abstract method!")
    @abc.abstractmethod
    def _pre_send_empty(self) -> None:
        """Subclass hook: drop local (non-shared) payload before pickling."""
        raise NotImplementedError("Abstract method!")
    @final
    def pre_send_empty(self) -> None:
        # Drop the manager handle too; it cannot travel across processes.
        self.__smm = None
        self._pre_send_empty()
    @abc.abstractmethod
    def _move_data_to_shared_memory(self) -> None:
        """Subclass hook: copy the payload into buffers owned by ``used_smm``."""
        raise NotImplementedError("Abstract method!")
    @final
    def move_data_to_shared_memory(self, smm: SharedMemoryManager, /) -> None:
        """Move the payload into shared memory managed by *smm*.

        :raises KnownErrorBannData: when the data was already moved.
        """
        if self.__smm is not None:
            raise KnownErrorBannData("SharedMemoryManager already set")
        self.__smm = smm
        self._move_data_to_shared_memory()
def _generate_shared_mem_it(np_array: np.ndarray, cont: TypeShapeCon,
                            smm: SharedMemoryManager, /) -> SharedMemory:
    """Copy *np_array* into a fresh shared-memory segment owned by *smm*.

    The array layout (shape and dtype) is recorded on *cont* so the buffer
    can later be reconstructed; the new segment is returned.
    """
    cont.shape = np_array.shape
    cont.type = np_array.dtype
    segment = smm.SharedMemory(size=np_array.nbytes)
    shared_view = np.ndarray(np_array.shape, dtype=np_array.dtype, buffer=segment.buf)
    shared_view[:] = np_array[:]
    return segment
def remap_shared_mem(data: TypeShapeCon, indices: List[int], /) -> None:
    """Materialise *data* back into a local ndarray from its shared buffer.

    No-op when there is no shared segment or the layout is unknown.  After a
    successful remap the shared handle is dropped from the container.
    """
    # TODO (remove copy) at this point DataLoader doesn't work without copy
    if data.shared_data is None or data.shape is None or data.type is None:
        return
    segment = data.shared_data
    buffered_view = np.ndarray(data.shape, dtype=data.type, buffer=segment.buf)
    if indices:
        data.data = np.array(list(buffered_view[pos] for pos in indices))
    else:
        data.data = copy(buffered_view)
    data.shared_data = None
def generate_shared_mem(data_type_shape: TypeShapeCon, smm: SharedMemoryManager, /) -> None:
    """Move the container's local ndarray into shared memory, if present.

    Does nothing when the data already lives in shared memory.

    :raises KnownErrorBannData: when the container holds no data at all.
    """
    local_data = data_type_shape.data
    if local_data is None:
        if data_type_shape.shared_data is None:
            raise KnownErrorBannData("Both data types are empty!")
        return
    data_type_shape.shared_data = _generate_shared_mem_it(local_data, data_type_shape, smm)
    data_type_shape.data = None
def trim_shallow_copy(data_type_shape: TypeShapeCon, indices: List[int], /) -> TypeShapeCon:
    """Create a new container restricted to *indices* (empty list = keep all).

    :raises KnownErrorBannData: when the source container holds no data.
    """
    if data_type_shape.shared_data is None and data_type_shape.data is None:
        raise KnownErrorBannData("Both data types are empty!")
    trimmed = TypeShapeCon(type=data_type_shape.type, shape=data_type_shape.shape)
    source = data_type_shape.data
    if indices and source is not None:
        # Local data is present: keep only the selected rows.
        trimmed.data = np.array(list(source[pos] for pos in indices))
        trimmed.shared_data = data_type_shape.shared_data
        return trimmed
    # Data lives in shared memory (or no restriction applies): share it as-is.
    trimmed.shared_data = data_type_shape.shared_data
    trimmed.data = source
    return trimmed
def data_get_item(data: TypeShapeCon, index: int, /) -> np.ndarray:
    """Return a copy of the sample at *index* from the local data array.

    :raises KnownErrorBannData: when the data has not been remapped locally.
    """
    if data.data is None:
        raise KnownErrorBannData("Should never happen")
    return np.array(data.data[index])
def data_shallow_copy_shared_mem(data: TypeShapeCon, /) -> TypeShapeCon:
    """Return a new container referencing the same shared-memory segment.

    :raises KnownErrorBannData: when *data* has no shared segment attached.
    """
    if data.shared_data is None:
        raise KnownErrorBannData("Shared data is empty!")
    duplicate = TypeShapeCon(type=data.type, shape=data.shape)
    duplicate.shared_data = data.shared_data
    return duplicate
| [
"bann.b_data_functions.errors.custom_erors.KnownErrorBannData",
"copy.copy",
"numpy.array",
"multiprocessing.managers.SharedMemoryManager",
"numpy.ndarray",
"numpy.dtype",
"typing.TypeVar"
] | [((1243, 1259), 'typing.TypeVar', 'TypeVar', (['"""_TypD"""'], {}), "('_TypD')\n", (1250, 1259), False, 'from typing import Tuple, List, Optional, final, TypeVar, Generic\n'), ((509, 526), 'numpy.dtype', 'np.dtype', (['"""float"""'], {}), "('float')\n", (517, 526), True, 'import numpy as np\n'), ((3903, 3967), 'numpy.ndarray', 'np.ndarray', (['np_array.shape'], {'dtype': 'np_array.dtype', 'buffer': 'shm.buf'}), '(np_array.shape, dtype=np_array.dtype, buffer=shm.buf)\n', (3913, 3967), True, 'import numpy as np\n'), ((5832, 5873), 'bann.b_data_functions.errors.custom_erors.KnownErrorBannData', 'KnownErrorBannData', (['"""Should never happen"""'], {}), "('Should never happen')\n", (5850, 5873), False, 'from bann.b_data_functions.errors.custom_erors import KnownErrorBannData\n'), ((749, 770), 'multiprocessing.managers.SharedMemoryManager', 'SharedMemoryManager', ([], {}), '()\n', (768, 770), False, 'from multiprocessing.managers import SharedMemoryManager\n'), ((2209, 2219), 'copy.copy', 'copy', (['self'], {}), '(self)\n', (2213, 2219), False, 'from copy import copy\n'), ((4314, 4376), 'numpy.ndarray', 'np.ndarray', (['data.shape'], {'dtype': 'data.type', 'buffer': 'data_point.buf'}), '(data.shape, dtype=data.type, buffer=data_point.buf)\n', (4324, 4376), True, 'import numpy as np\n'), ((4785, 4833), 'bann.b_data_functions.errors.custom_erors.KnownErrorBannData', 'KnownErrorBannData', (['"""Both data types are empty!"""'], {}), "('Both data types are empty!')\n", (4803, 4833), False, 'from bann.b_data_functions.errors.custom_erors import KnownErrorBannData\n'), ((5175, 5223), 'bann.b_data_functions.errors.custom_erors.KnownErrorBannData', 'KnownErrorBannData', (['"""Both data types are empty!"""'], {}), "('Both data types are empty!')\n", (5193, 5223), False, 'from bann.b_data_functions.errors.custom_erors import KnownErrorBannData\n'), ((5795, 5821), 'numpy.array', 'np.array', (['data.data[index]'], {}), '(data.data[index])\n', (5803, 5821), True, 'import numpy as 
np\n'), ((5996, 6039), 'bann.b_data_functions.errors.custom_erors.KnownErrorBannData', 'KnownErrorBannData', (['"""Shared data is empty!"""'], {}), "('Shared data is empty!')\n", (6014, 6039), False, 'from bann.b_data_functions.errors.custom_erors import KnownErrorBannData\n'), ((2133, 2185), 'bann.b_data_functions.errors.custom_erors.KnownErrorBannData', 'KnownErrorBannData', (['"""subset of subset is prohibited"""'], {}), "('subset of subset is prohibited')\n", (2151, 2185), False, 'from bann.b_data_functions.errors.custom_erors import KnownErrorBannData\n'), ((3505, 3558), 'bann.b_data_functions.errors.custom_erors.KnownErrorBannData', 'KnownErrorBannData', (['"""SharedMemoryManager already set"""'], {}), "('SharedMemoryManager already set')\n", (3523, 3558), False, 'from bann.b_data_functions.errors.custom_erors import KnownErrorBannData\n'), ((4524, 4546), 'copy.copy', 'copy', (['np_buffered_data'], {}), '(np_buffered_data)\n', (4528, 4546), False, 'from copy import copy\n')] |
#!/bin/python3
import exploit
import ui_setup
from time import sleep
# Drive the checkra1n exploit through the GUI event loop.  The window offers
# PWN (run the exploit), Inject (open a USB shell and send the reverse-SSH
# payload) and Reset buttons, and mirrors the exploit log into '-OUTPUT-'.
checkrain = exploit.Checkrain()
checkrain.REMOTE_SSH_CC = '<EMAIL>'
window = ui_setup.UI.window
keep_printing = True
while True:
    # Cap the visible log at 14 lines by discarding the oldest one.
    if window['-OUTPUT-'].DisplayText.count('\n') >= 14:
        window['-OUTPUT-'].DisplayText = window['-OUTPUT-'].DisplayText.split(
            '\n', maxsplit=1)[1]
    event, values = window.read(timeout=500)
    if event == ui_setup.sg.WINDOW_CLOSED or event == 'Quit':
        # Tear down any running exploit/injection processes before leaving.
        checkrain.kill()
        checkrain.kill_inject()
        break
    # Start the exploit only when it is not already running or finished.
    if event == 'PWN' and checkrain.pid() is None and checkrain.isdone() is False:
        checkrain.pwn()
        window['-OUTPUT-'].update(window['-OUTPUT-'].get() +
                                  "[*] Exploiting IOS device!\n", text_color='#0ab3d1')
    # Injection needs the exploit running and no injector active yet.
    if event == 'Inject' and checkrain.pid() is not None and checkrain.inject_pid() is None and checkrain.isdone() is False:
        try:
            checkrain.inject()
            window['-OUTPUT-'].update(window['-OUTPUT-'].get() +
                                      "[***] Openning shell over USB to IOS device.\n")
            try:
                window['-OUTPUT-'].update(window['-OUTPUT-'].get() +
                                          "[*] Sending Reverse SSH payload.....\n",)
                if checkrain.reverse_ssh() != True:
                    raise ValueError("payload_not_sent")
                else:
                    window['-OUTPUT-'].update(window['-OUTPUT-'].get() +
                                              "[***] Payload sent!!!\n")
            except ValueError:
                window['-OUTPUT-'].update(window['-OUTPUT-'].get() +
                                          "[!] Failed to send payload!\n")
                checkrain.kill_inject()
        # FIX: this was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; only unexpected runtime errors belong here.
        except Exception:
            window['-OUTPUT-'].update(window['-OUTPUT-'].get() +
                                      "[!] Unable to open shell over USB on IOS device!\n")
            checkrain.kill_inject()
    if event == 'Reset':
        checkrain.kill()
        checkrain.kill_inject()
        window['-OUTPUT-'].update('', background_color='#2b2b2b')
    if keep_printing is True:
        if checkrain.isdone() is True:
            # Exploit finished: print the banner once, then stop polling the log.
            window['-OUTPUT-'].update(window['-OUTPUT-'].get() +
                                      "\n ($$$$=====WIN====$$$$)\n\n", text_color='#28d1b5')
            window['-OUTPUT-'].update(window['-OUTPUT-'].get() +
                                      " ͡° ͜ʖ ͡°\n\n", text_color='#28d1b5')
            keep_printing = False
        else:
            window['-OUTPUT-'].update(window['-OUTPUT-'].get() + checkrain.log.readline())
window.close()
| [
"exploit.Checkrain"
] | [((86, 105), 'exploit.Checkrain', 'exploit.Checkrain', ([], {}), '()\n', (103, 105), False, 'import exploit\n')] |
import os.path
import sys
import types
import typing
import unittest
from datetime import datetime, date
from functools import wraps
from io import BytesIO, StringIO
from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, \
AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, \
NoReturn, ClassVar
from enum import Enum, IntEnum
from pedantic import pedantic_class
from pedantic.exceptions import PedanticTypeCheckException, PedanticException, PedanticCallWithArgsException, \
PedanticTypeVarMismatchException
from pedantic.decorators.fn_deco_pedantic import pedantic
# Scratch file some test cases create; tearDown() removes it again.
TEST_FILE = 'test.txt'
class Parent:
    """Minimal base class used by the inheritance test cases."""
class Child(Parent):
    """Subclass of Parent carrying one type-annotated no-op method."""

    def method(self, a: int):
        """Do nothing; only the ``a: int`` annotation matters for the tests."""
class TestDecoratorRequireKwargsAndTypeCheck(unittest.TestCase):
def tearDown(self) -> None:
if os.path.isfile(TEST_FILE):
os.remove(TEST_FILE)
def test_no_kwargs(self):
@pedantic
def calc(n: int, m: int, i: int) -> int:
return n + m + i
with self.assertRaises(expected_exception=PedanticCallWithArgsException):
calc(42, 40, 38)
with self.assertRaises(expected_exception=PedanticCallWithArgsException):
calc(42, m=40, i=38)
calc(n=42, m=40, i=38)
def test_nested_type_hints_1(self):
@pedantic
def calc(n: int) -> List[List[float]]:
return [0.0 * n]
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42)
def test_nested_type_hints_1_corrected(self):
@pedantic
def calc(n: int) -> List[List[float]]:
return [[0.0 * n]]
calc(n=42)
def test_nested_type_hints_2(self):
"""Problem here: int != float"""
@pedantic
def calc(n: int) -> List[Tuple[float, str]]:
return [(n, str(n))]
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42)
def test_nested_type_hints_2_corrected(self):
@pedantic
def calc(n: int) -> List[Tuple[int, str]]:
return [(n, str(n))]
@pedantic
def calc_2(n: float) -> List[Tuple[float, str]]:
return [(n, str(n))]
calc(n=42)
calc_2(n=42.0)
def test_nested_type_hints_3(self):
"""Problem here: inner function actually returns Tuple[int, str]"""
@pedantic
def calc(n: int) -> Callable[[int, float], Tuple[float, str]]:
@pedantic
def f(x: int, y: float) -> Tuple[float, str]:
return n * x, str(y)
return f
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42)(x=3, y=3.14)
def test_nested_type_hints_3_corrected(self):
@pedantic
def calc(n: int) -> Callable[[int, float], Tuple[int, str]]:
@pedantic
def f(x: int, y: float) -> Tuple[int, str]:
return n * x, str(y)
return f
calc(n=42)(x=3, y=3.14)
def test_nested_type_hints_4(self):
"""Problem here: return type is actually float"""
@pedantic
def calc(n: List[List[float]]) -> int:
return n[0][0]
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=[[42.0]])
def test_nested_type_hints_corrected(self):
@pedantic
def calc(n: List[List[float]]) -> int:
return int(n[0][0])
calc(n=[[42.0]])
def test_nested_type_hints_5(self):
"""Problem here: Tuple[float, str] != Tuple[float, float]"""
@pedantic
def calc(n: int) -> Callable[[int, float], Tuple[float, str]]:
@pedantic
def f(x: int, y: float) -> Tuple[float, float]:
return n * float(x), y
return f
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42)
def test_nested_type_hints_5_corrected(self):
@pedantic
def calc(n: int) -> Callable[[int, float], Tuple[float, float]]:
@pedantic
def f(x: int, y: float) -> Tuple[float, float]:
return n * float(x), y
return f
calc(n=42)
def test_missing_type_hint_1(self):
"""Problem here: type hint for n missed"""
@pedantic
def calc(n) -> float:
return 42.0 * n
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42)
def test_missing_type_hint_1_corrected(self):
@pedantic
def calc(n: int) -> float:
return 42.0 * n
calc(n=42)
def test_missing_type_hint_2(self):
"""Problem here: Return type annotation missed"""
@pedantic
def calc(n: int):
return 'Hi' + str(n)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42)
def test_missing_type_hint_2_corrected(self):
@pedantic
def calc(n: int) -> str:
return 'Hi' + str(n)
calc(n=42)
def test_missing_type_hint_3(self):
"""Problem here: type hint for i missed"""
@pedantic
def calc(n: int, m: int, i) -> int:
return n + m + i
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42, m=40, i=38)
def test_missing_type_hint_3_corrected(self):
@pedantic
def calc(n: int, m: int, i: int) -> int:
return n + m + i
calc(n=42, m=40, i=38)
def test_all_ok_2(self):
@pedantic
def calc(n: int, m: int, i: int) -> str:
return str(n + m + i)
calc(n=42, m=40, i=38)
def test_all_ok_3(self):
@pedantic
def calc(n: int, m: int, i: int) -> None:
str(n + m + i)
calc(n=42, m=40, i=38)
def test_all_ok_4(self):
@pedantic
def calc(n: int) -> List[List[int]]:
return [[n]]
calc(n=42)
def test_all_ok_5(self):
@pedantic
def calc(n: int) -> List[Tuple[float, str]]:
return [(float(n), str(n))]
calc(n=42)
def test_all_ok_6(self):
@pedantic
def calc(n: int) -> Callable[[int, float], Tuple[float, str]]:
@pedantic
def f(x: int, y: float) -> Tuple[float, str]:
return n * float(x), str(y)
return f
calc(n=42)(x=72, y=3.14)
def test_all_ok_7(self):
@pedantic
def calc(n: List[List[float]]) -> Any:
return n[0][0]
calc(n=[[42.0]])
def test_all_ok_8(self):
@pedantic
def calc(n: int) -> Callable[[int, float], Tuple[float, str]]:
@pedantic
def f(x: int, y: float) -> Tuple[float, str]:
return n * float(x), str(y)
return f
calc(n=42)(x=3, y=3.14)
def test_wrong_type_hint_1(self):
"""Problem here: str != int"""
@pedantic
def calc(n: int, m: int, i: int) -> str:
return n + m + i
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42, m=40, i=38)
def test_wrong_type_hint_1_corrected(self):
@pedantic
def calc(n: int, m: int, i: int) -> str:
return str(n + m + i)
calc(n=42, m=40, i=38)
def test_wrong_type_hint_2(self):
"""Problem here: str != int"""
@pedantic
def calc(n: int, m: int, i: str) -> int:
return n + m + i
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42, m=40, i=38)
def test_wrong_type_hint_2_corrected(self):
@pedantic
def calc(n: int, m: int, i: str) -> int:
return n + m + int(i)
calc(n=42, m=40, i='38')
def test_wrong_type_hint_3(self):
"""Problem here: None != int"""
@pedantic
def calc(n: int, m: int, i: int) -> None:
return n + m + i
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42, m=40, i=38)
def test_wrong_type_hint_corrected(self):
@pedantic
def calc(n: int, m: int, i: int) -> None:
print(n + m + i)
calc(n=42, m=40, i=38)
def test_wrong_type_hint_4(self):
"""Problem here: None != int"""
@pedantic
def calc(n: int, m: int, i: int) -> int:
print(n + m + i)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42, m=40, i=38)
def test_wrong_type_hint_4_corrected(self):
@pedantic
def calc(n: int, m: int, i: int) -> int:
return n + m + i
calc(n=42, m=40, i=38)
def test_none_1(self):
"""Problem here: None is not accepted"""
@pedantic
def calc(n: int, m: int, i: int) -> int:
return n + m + i
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42, m=40, i=None)
def test_none_2(self):
@pedantic
def calc(n: int, m: int, i: Optional[int]) -> int:
return n + m + i if i is not None else n + m
calc(n=42, m=40, i=None)
def test_none_3(self):
@pedantic
def calc(n: int, m: int, i: Union[int, None]) -> int:
return n + m + i if i is not None else n + m
calc(n=42, m=40, i=None)
def test_none_4(self):
"""Problem here: function may return None"""
@pedantic
def calc(n: int, m: int, i: Union[int, None]) -> int:
return n + m + i if i is not None else None
calc(n=42, m=40, i=42)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(n=42, m=40, i=None)
def test_none_5(self):
@pedantic
def calc(n: int, m: int, i: Union[int, None]) -> Optional[int]:
return n + m + i if i is not None else None
calc(n=42, m=40, i=None)
def test_inheritance_1(self):
class MyClassA:
pass
class MyClassB(MyClassA):
pass
@pedantic
def calc(a: MyClassA) -> str:
return str(a)
calc(a=MyClassA())
calc(a=MyClassB())
def test_inheritance_2(self):
"""Problem here: A is not a subtype of B"""
class MyClassA:
pass
class MyClassB(MyClassA):
pass
@pedantic
def calc(a: MyClassB) -> str:
return str(a)
calc(a=MyClassB())
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(a=MyClassA())
def test_instance_method_1(self):
class MyClassA:
@pedantic
def calc(self, i: int) -> str:
return str(i)
a = MyClassA()
a.calc(i=42)
def test_instance_method_2(self):
"""Problem here: 'i' has no type annotation"""
class MyClassA:
@pedantic
def calc(self, i) -> str:
return str(i)
a = MyClassA()
with self.assertRaises(expected_exception=PedanticTypeCheckException):
a.calc(i=42)
def test_instance_method_2_corrected(self):
class MyClassA:
@pedantic
def calc(self, i: int) -> str:
return str(i)
a = MyClassA()
a.calc(i=42)
def test_instance_method_int_is_not_float(self):
class MyClassA:
@pedantic
def calc(self, i: float) -> str:
return str(i)
a = MyClassA()
with self.assertRaises(expected_exception=PedanticTypeCheckException):
a.calc(i=42)
def test_instance_method_3_corrected(self):
class MyClassA:
@pedantic
def calc(self, i: float) -> str:
return str(i)
a = MyClassA()
a.calc(i=42.0)
def test_instance_method_no_kwargs(self):
class MyClassA:
@pedantic
def calc(self, i: int) -> str:
return str(i)
a = MyClassA()
with self.assertRaises(expected_exception=PedanticCallWithArgsException):
a.calc(42)
def test_instance_method_5(self):
"""Problem here: instance methods is not called with kwargs"""
class MyClassA:
@pedantic
def calc(self, i: int) -> str:
return str(i)
a = MyClassA()
a.calc(i=42)
def test_lambda_1(self):
@pedantic
def calc(i: float) -> Callable[[float], str]:
return lambda x: str(x * i)
calc(i=42.0)(10.0)
def test_lambda_3(self):
@pedantic
def calc(i: float) -> Callable[[float], str]:
def res(x: float) -> str:
return str(x * i)
return res
calc(i=42.0)(10.0)
def test_lambda_int_is_not_float(self):
@pedantic
def calc(i: float) -> Callable[[float], str]:
def res(x: int) -> str:
return str(x * i)
return res
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(i=42.0)(x=10)
def test_lambda_4_almost_corrected(self):
"""Problem here: float != str"""
@pedantic
def calc(i: float) -> Callable[[float], str]:
@pedantic
def res(x: int) -> str:
return str(x * i)
return res
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(i=42.0)(x=10)
def test_lambda_4_almost_corrected_2(self):
@pedantic
def calc(i: float) -> Callable[[int], str]:
@pedantic
def res(x: int) -> str:
return str(x * i)
return res
calc(i=42.0)(x=10)
def test_lambda_5(self):
"""Problem here: float != int"""
@pedantic
def calc(i: float) -> Callable[[float], str]:
@pedantic
def res(x: float) -> str:
return str(x * i)
return res
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(i=42.0)(x=10)
def test_lambda_corrected(self):
@pedantic
def calc(i: float) -> Callable[[float], str]:
@pedantic
def res(x: float) -> str:
return str(x * i)
return res
calc(i=42.0)(x=10.0)
def test_tuple_without_type_args(self):
@pedantic
def calc(i: Tuple) -> str:
return str(i)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(i=(42.0, 43, 'hi'))
def test_tuple_without_args_corrected(self):
@pedantic
def calc(i: Tuple[Any, ...]) -> str:
return str(i)
calc(i=(42.0, 43, 'hi'))
def test_callable_without_type_args(self):
@pedantic
def calc(i: Callable) -> str:
return str(i(' you'))
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(i=lambda x: (42.0, 43, 'hi', x))
def test_callable_without_args_correct_with_lambdas(self):
@pedantic
def calc(i: Callable[[Any], Tuple[Any, ...]]) -> str:
return str(i(x=' you'))
calc(i=lambda x: (42.0, 43, 'hi', x))
def test_callable_without_args_corrected(self):
@pedantic
def calc(i: Callable[[Any], Tuple[Any, ...]]) -> str:
return str(i(x=' you'))
@pedantic
def arg(x: Any) -> Tuple[Any, ...]:
return 42.0, 43, 'hi', x
calc(i=arg)
def test_list_without_args(self):
@pedantic
def calc(i: List) -> Any:
return [i]
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(i=[42.0, 43, 'hi'])
def test_list_without_args_corrected(self):
@pedantic
def calc(i: List[Any]) -> List[List[Any]]:
return [i]
calc(i=[42.0, 43, 'hi'])
def test_ellipsis_in_callable_1(self):
@pedantic
def calc(i: Callable[..., int]) -> int:
return i()
@pedantic
def call() -> int:
return 42
calc(i=call)
def test_ellipsis_in_callable_2(self):
@pedantic
def calc(i: Callable[..., int]) -> int:
return i(x=3.14, y=5)
@pedantic
def call(x: float, y: int) -> int:
return 42
calc(i=call)
def test_ellipsis_in_callable_3(self):
"""Problem here: call to "call" misses one argument"""
@pedantic
def calc(i: Callable[..., int]) -> int:
return i(x=3.14)
@pedantic
def call(x: float, y: int) -> int:
return 42
with self.assertRaises(expected_exception=PedanticException):
calc(i=call)
def test_optional_args_1(self):
@pedantic
def calc(a: int, b: int = 42) -> int:
return a + b
calc(a=2)
def test_optional_args_2(self):
@pedantic
def calc(a: int = 3, b: int = 42, c: float = 5.0) -> float:
return a + b + c
calc()
calc(a=1)
calc(b=1)
calc(c=1.0)
calc(a=1, b=1)
calc(a=1, c=1.0)
calc(b=1, c=1.0)
calc(a=1, b=1, c=1.0)
def test_optional_args_3(self):
"""Problem here: optional argument c: 5 is not a float"""
@pedantic
def calc(a: int = 3, b: int = 42, c: float = 5) -> float:
return a + b + c
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc()
def test_optional_args_3_corrected(self):
@pedantic
def calc(a: int = 3, b: int = 42, c: float = 5.0) -> float:
return a + b + c
calc()
def test_optional_args_4(self):
class MyClass:
@pedantic
def foo(self, a: int, b: Optional[int] = 1) -> int:
return a + b
my_class = MyClass()
my_class.foo(a=10)
def test_optional_args_5(self):
@pedantic
def calc(d: Optional[Dict[int, int]] = None) -> Optional[int]:
if d is None:
return None
return sum(d.keys())
calc(d=None)
calc()
calc(d={42: 3})
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(d={42: 3.14})
def test_optional_args_6(self):
    """Problem here: str != int -- an int-typed default parameter rejects a str argument."""
    # NOTE: the original docstring opened with four quote characters (""""Problem...),
    # accidentally embedding a stray '"' at the start of the docstring text.
    @pedantic
    def calc(d: int = 42) -> int:
        return int(d)
    calc(d=99999)
    with self.assertRaises(expected_exception=PedanticTypeCheckException):
        calc(d='999999')
def test_enum_1(self):
"""Problem here: Type hint for 'a' should be MyEnum instead of MyEnum.GAMMA"""
class MyEnum(Enum):
ALPHA = 'startEvent'
BETA = 'task'
GAMMA = 'sequenceFlow'
class MyClass:
@pedantic
def operation(self, a: MyEnum.GAMMA) -> None:
print(a)
m = MyClass()
with self.assertRaises(expected_exception=PedanticTypeCheckException):
m.operation(a=MyEnum.GAMMA)
def test_enum_1_corrected(self):
class MyEnum(Enum):
ALPHA = 'startEvent'
BETA = 'task'
GAMMA = 'sequenceFlow'
@pedantic
def operation(a: MyEnum) -> None:
print(a)
operation(a=MyEnum.GAMMA)
def test_sloppy_types_dict(self):
@pedantic
def operation(d: dict) -> int:
return len(d.keys())
with self.assertRaises(expected_exception=PedanticTypeCheckException):
operation(d={1: 1, 2: 2})
def test_sloppy_types_dict_almost_corrected_no_type_args(self):
@pedantic
def operation(d: Dict) -> int:
return len(d.keys())
with self.assertRaises(expected_exception=PedanticTypeCheckException):
operation(d={1: 1, 2: 2})
def test_sloppy_types_dict_corrected(self):
@pedantic
def operation(d: Dict[int, int]) -> int:
return len(d.keys())
operation(d={1: 1, 2: 2})
def test_sloppy_types_list(self):
@pedantic
def operation(d: list) -> int:
return len(d)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
operation(d=[1, 2, 3, 4])
def test_sloppy_types_list_almost_corrected_no_type_args(self):
@pedantic
def operation(d: List) -> int:
return len(d)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
operation(d=[1, 2, 3, 4])
def test_sloppy_types_list_corrected(self):
@pedantic
def operation(d: List[int]) -> int:
return len(d)
operation(d=[1, 2, 3, 4])
def test_sloppy_types_tuple(self):
@pedantic
def operation(d: tuple) -> int:
return len(d)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
operation(d=(1, 2, 3))
def test_sloppy_types_tuple_almost_corrected_no_type_args(self):
@pedantic
def operation(d: Tuple) -> int:
return len(d)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
operation(d=(1, 2, 3))
def test_sloppy_types_tuple_corrected(self):
@pedantic
def operation(d: Tuple[int, int, int]) -> int:
return len(d)
operation(d=(1, 2, 3))
def test_sloppy_types_set(self):
@pedantic
def operation(d: set) -> int:
return len(d)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
operation(d={1, 2, 3})
def test_sloppy_types_set_almost_corrected_to_type_args(self):
@pedantic
def operation(d: Set) -> int:
return len(d)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
operation(d={1, 2, 3})
def test_sloppy_types_set_corrected(self):
@pedantic
def operation(d: Set[int]) -> int:
return len(d)
operation(d={1, 2, 3})
def test_sloppy_types_frozenset(self):
@pedantic
def operation(d: frozenset) -> int:
return len(d)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
operation(d=frozenset({1, 2, 3}))
def test_sloppy_types_frozenset_almost_corrected_no_type_args(self):
@pedantic
def operation(d: FrozenSet) -> int:
return len(d)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
operation(d=frozenset({1, 2, 3}))
def test_sloppy_types_frozenset_corrected(self):
@pedantic
def operation(d: FrozenSet[int]) -> int:
return len(d)
operation(d=frozenset({1, 2, 3}))
def test_type_list_but_got_tuple(self):
@pedantic
def calc(ls: List[Any]) -> int:
return len(ls)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
calc(ls=(1, 2, 3))
def test_type_list_corrected(self):
@pedantic
def calc(ls: Tuple[Any, ...]) -> int:
return len(ls)
calc(ls=(1, 2, 3))
def test_any(self):
    """``Any`` admits heterogeneous element types in both the list argument and the dict values."""
    @pedantic
    def calc(ls: List[Any]) -> Dict[int, Any]:
        # dict(enumerate(...)) builds the same index->element mapping as the
        # original range-based comprehension.
        return dict(enumerate(ls))
    for sample in ([1, 2, 3], [1.11, 2.0, 3.0], ['1', '2', '3'], [10.5, '2', (3, 4, 5)]):
        calc(ls=sample)
def test_aliases(self):
    """A type alias (``Vector = List[float]``) behaves exactly like the aliased type."""
    Vector = List[float]
    @pedantic
    def scale(scalar: float, vector: Vector) -> Vector:
        scaled = []
        for component in vector:
            scaled.append(scalar * component)
        return scaled
    scale(scalar=2.0, vector=[1.0, -4.2, 5.4])
def test_new_type(self):
UserId = NewType('UserId', int)
@pedantic
def get_user_name(user_id: UserId) -> str:
return str(user_id)
some_id = UserId(524313)
get_user_name(user_id=some_id)
# the following would be desirable but impossible to check at runtime:
# with self.assertRaises(expected_exception=AssertionError):
# get_user_name(user_id=-1)
def test_list_of_new_type(self):
UserId = NewType('UserId', int)
@pedantic
def get_user_name(user_ids: List[UserId]) -> str:
return str(user_ids)
get_user_name(user_ids=[UserId(524313), UserId(42)])
with self.assertRaises(expected_exception=PedanticTypeCheckException):
get_user_name(user_ids=[UserId(524313), UserId(42), 430.0])
def test_callable_no_args(self):
@pedantic
def f(g: Callable[[], str]) -> str:
return g()
@pedantic
def greetings() -> str:
return 'hello world'
f(g=greetings)
def test_type_var(self):
T = TypeVar('T')
@pedantic
def first(ls: List[T]) -> T:
return ls[0]
first(ls=[1, 2, 3])
def test_type_var_wrong(self):
T = TypeVar('T')
@pedantic
def first(ls: List[T]) -> T:
return str(ls[0])
with self.assertRaises(expected_exception=PedanticTypeVarMismatchException):
first(ls=[1, 2, 3])
def test_type_var_wrong_sequence(self):
T = TypeVar('T')
@pedantic
def first(ls: Sequence[T]) -> T:
return str(ls[0])
with self.assertRaises(expected_exception=PedanticTypeVarMismatchException):
first(ls=[1, 2, 3])
def test_double_pedantic(self):
@pedantic
@pedantic
def f(x: int, y: float) -> Tuple[float, str]:
return float(x), str(y)
f(x=5, y=3.14)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
f(x=5.0, y=3.14)
with self.assertRaises(expected_exception=PedanticCallWithArgsException):
f(5, 3.14)
def test_args_kwargs(self):
    """``*args``/``**kwargs`` annotated with ``Union[int, float]`` forward correctly.

    Also checks that calling an ``@pedantic`` function with positional
    arguments raises ``PedanticCallWithArgsException`` -- keyword calls only.
    """
    @pedantic
    def some_method(a: int = 0, b: float = 0.0) -> float:
        return a * b
    @pedantic
    def wrapper_method(*args: Union[int, float], **kwargs: Union[int, float]) -> float:
        # Forwards whatever it received; the wrapped call is re-checked by @pedantic.
        return some_method(*args, **kwargs)
    some_method()
    # Positional call is rejected even though the types would match.
    with self.assertRaises(expected_exception=PedanticCallWithArgsException):
        some_method(3, 3.0)
    some_method(a=3, b=3.0)
    wrapper_method()
    with self.assertRaises(expected_exception=PedanticCallWithArgsException):
        wrapper_method(3, 3.0)
    wrapper_method(a=3, b=3.0)
def test_args_kwargs_no_type_hint(self):
@pedantic
def method_no_type_hint(*args, **kwargs) -> None:
print(args)
print(kwargs)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
method_no_type_hint(a=3, b=3.0)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
method_no_type_hint()
def test_args_kwargs_wrong_type_hint(self):
"""See: https://www.python.org/dev/peps/pep-0484/#arbitrary-argument-lists-and-default-argument-values"""
@pedantic
def wrapper_method(*args: str, **kwargs: str) -> None:
print(args)
print(kwargs)
wrapper_method()
wrapper_method('hi', 'you', ':)')
wrapper_method(a='hi', b='you', c=':)')
with self.assertRaises(expected_exception=PedanticTypeCheckException):
wrapper_method('hi', 'you', ':)', 7)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
wrapper_method(3, 3.0)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
wrapper_method(a=3, b=3.0)
def test_additional_kwargs(self):
@pedantic
def some_method(a: int, b: float = 0.0, **kwargs: int) -> float:
return sum([a, b])
some_method(a=5)
some_method(a=5, b=0.1)
some_method(a=5, b=0.1, c=4)
some_method(a=5, b=0.1, c=4, d=5, e=6)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
some_method(a=5, b=0.1, c=4, d=5.0, e=6)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
some_method(a=5.0, b=0.1, c=4, d=5, e=6)
with self.assertRaises(expected_exception=PedanticTypeCheckException):
some_method(a=5, b=0, c=4, d=5, e=6)
def test_args_kwargs_different_types(self):
@pedantic
def foo(*args: str, **kwds: int) -> None:
print(args)
print(kwds)
foo('a', 'b', 'c')
foo(x=1, y=2)
foo('', z=0)
def test_pedantic_on_class(self):
with self.assertRaises(expected_exception=PedanticTypeCheckException):
@pedantic
class MyClass:
pass
MyClass()
def test_is_subtype_tuple(self):
with self.assertRaises(expected_exception=PedanticTypeCheckException):
@pedantic
def foo() -> Callable[[Tuple[float, str]], Tuple[int]]:
def bar(a: Tuple[float]) -> Tuple[int]:
return len(a[1]) + int(a[0]),
return bar
foo()
def test_is_subtype_tuple_corrected(self):
@pedantic
def foo() -> Callable[[Tuple[float, str]], Tuple[int]]:
def bar(a: Tuple[float, str]) -> Tuple[int]:
return len(a[1]) + int(a[0]),
return bar
foo()
def test_forward_ref(self):
class Conversation:
pass
@pedantic
def get_conversations() -> List['Conversation']:
return [Conversation(), Conversation()]
get_conversations()
def test_alternative_list_type_hint(self):
@pedantic
def _is_digit_in_int(digit: [int], num: int) -> bool:
num_str = str(num)
for i in num_str:
if int(i) == digit:
return True
return False
with self.assertRaises(expected_exception=PedanticTypeCheckException):
_is_digit_in_int(digit=4, num=42)
def test_callable_with_union_return(self):
class MyClass:
pass
@pedantic
def admin_required(func: Callable[..., Union[str, MyClass]]) -> Callable[..., Union[str, MyClass]]:
@wraps(func)
def decorated_function(*args, **kwargs):
return func(*args, **kwargs)
return decorated_function
@admin_required
@pedantic
def get_server_info() -> str:
return 'info'
get_server_info()
def test_pedantic(self):
@pedantic
def foo(a: int, b: str) -> str:
return 'abc'
self.assertEqual('abc', foo(a=4, b='abc'))
def test_pedantic_always(self):
@pedantic
def foo(a: int, b: str) -> str:
return 'abc'
self.assertEqual('abc', foo(a=4, b='abc'))
def test_pedantic_arguments_fail(self):
@pedantic
def foo(a: int, b: str) -> str:
return 'abc'
with self.assertRaises(expected_exception=PedanticTypeCheckException):
foo(a=4, b=5)
def test_pedantic_return_type_fail(self):
@pedantic
def foo(a: int, b: str) -> str:
return 6
with self.assertRaises(expected_exception=PedanticTypeCheckException):
foo(a=4, b='abc')
def test_return_type_none(self):
@pedantic
def foo() -> None:
return 'a'
with self.assertRaises(expected_exception=PedanticTypeCheckException):
foo()
def test_marco(self):
    """Regression test: ``__eq__`` annotated with a forward reference to the
    base class must also accept instances of sibling subclasses.
    """
    @pedantic_class
    class A:
        def __init__(self, val: int) -> None:
            self.val = val
        def __eq__(self, other: 'A') -> bool: # other: A and all subclasses
            return self.val == other.val
    @pedantic_class
    class B(A):
        def __init__(self, val: int) -> None:
            super().__init__(val=val)
    @pedantic_class
    class C(A):
        def __init__(self, val: int) -> None:
            super().__init__(val=val)
    a = A(val=42)
    b = B(val=42)
    c = C(val=42)
    assert a == b # works
    assert a == c # works
    # NOTE(review): comment below says this case historically errored -- B inherits
    # __eq__ whose 'A' forward ref must resolve so that a C instance is accepted;
    # confirm against the library's forward-ref resolution.
    assert b == c # error
def test_date_datetime(self):
@pedantic
def foo(a: datetime, b: date) -> None:
pass
foo(a=datetime(1995, 2, 5), b=date(1987, 8, 7))
foo(a=datetime(1995, 2, 5), b=datetime(1987, 8, 7))
with self.assertRaises(expected_exception=PedanticTypeCheckException):
foo(a=date(1995, 2, 5), b=date(1987, 8, 7))
def test_any_type(self):
@pedantic
def foo(a: Any) -> None:
pass
foo(a='aa')
def test_callable_exact_arg_count(self):
@pedantic
def foo(a: Callable[[int, str], int]) -> None:
pass
def some_callable(x: int, y: str) -> int:
pass
foo(a=some_callable)
def test_callable_bad_type(self):
@pedantic
def foo(a: Callable[..., int]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=5)
def test_callable_too_few_arguments(self):
@pedantic
def foo(a: Callable[[int, str], int]) -> None:
pass
def some_callable(x: int) -> int:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=some_callable)
def test_callable_mandatory_kwonlyargs(self):
@pedantic
def foo(a: Callable[[int, str], int]) -> None:
pass
def some_callable(x: int, y: str, *, z: float, bar: str) -> int:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=some_callable)
def test_callable_class(self):
    """
    Test that passing a class as a callable does not count the "self" argument against the
    ones declared in the Callable specification.
    """
    @pedantic
    def foo(a: Callable[[int, str], Any]) -> None:
        pass
    class SomeClass:
        def __init__(self, x: int, y: str):
            pass
    # SomeClass itself is the callable; __init__'s implicit self must be ignored.
    foo(a=SomeClass)
def test_callable_plain(self):
@pedantic
def foo(a: Callable[..., Any]) -> None:
pass
def callback(a):
pass
foo(a=callback)
def test_callable_bound_method(self):
@pedantic
def foo(callback: Callable[[int], Any]) -> None:
pass
foo(callback=Child().method)
def test_callable_defaults(self):
"""
Test that a callable having "too many" arguments don't raise an error if the extra
arguments have default values.
"""
@pedantic
def foo(callback: Callable[[int, str], Any]) -> None:
pass
def some_callable(x: int, y: str, z: float = 1.2) -> int:
pass
foo(callback=some_callable)
def test_callable_builtin(self):
@pedantic
def foo(callback: types.BuiltinFunctionType) -> None:
pass
foo(callback=[].append)
def test_dict_bad_type(self):
@pedantic
def foo(a: Dict[str, int]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=5)
def test_dict_bad_key_type(self):
@pedantic
def foo(a: Dict[str, int]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a={1: 2})
def test_dict_bad_value_type(self):
@pedantic
def foo(a: Dict[str, int]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a={'x': 'a'})
def test_list_bad_type(self):
@pedantic
def foo(a: List[int]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=5)
def test_list_bad_element(self):
@pedantic
def foo(a: List[int]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=[1, 2, 'bb'])
def test_sequence_bad_type(self):
@pedantic
def foo(a: Sequence[int]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=5)
def test_sequence_bad_element(self):
@pedantic
def foo(a: Sequence[int]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=[1, 2, 'bb'])
def test_abstractset_custom_type(self):
T = TypeVar('T')
@pedantic_class
class DummySet(AbstractSet[T]):
def __contains__(self, x: object) -> bool:
return x == 1
def __len__(self) -> T:
return 1
def __iter__(self) -> Iterator[T]:
yield 1
@pedantic
def foo(a: AbstractSet[int]) -> None:
pass
foo(a=DummySet[int]())
def test_abstractset_bad_type(self):
@pedantic
def foo(a: AbstractSet[int]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=5)
def test_set_bad_type(self):
@pedantic
def foo(a: Set[int]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=5)
def test_abstractset_bad_element(self):
@pedantic
def foo(a: AbstractSet[int]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a={1, 2, 'bb'})
def test_set_bad_element(self):
@pedantic
def foo(a: Set[int]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a={1, 2, 'bb'})
def test_tuple_bad_type(self):
@pedantic
def foo(a: Tuple[int]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=5)
def test_tuple_too_many_elements(self):
@pedantic
def foo(a: Tuple[int, str]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=(1, 'aa', 2))
def test_tuple_too_few_elements(self):
@pedantic
def foo(a: Tuple[int, str]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=(1,))
def test_tuple_bad_element(self):
@pedantic
def foo(a: Tuple[int, str]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=(1, 2))
def test_tuple_ellipsis_bad_element(self):
@pedantic
def foo(a: Tuple[int, ...]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=(1, 2, 'blah'))
def test_namedtuple(self):
    """An instance of a ``NamedTuple`` matches a hint of that same NamedTuple type."""
    Employee = NamedTuple('Employee', [('name', str), ('id', int)])
    @pedantic
    def show(bar: Employee) -> None:
        print(bar)
    employee = Employee('bob', 1)
    show(bar=employee)
def test_namedtuple_key_mismatch(self):
Employee1 = NamedTuple('Employee', [('name', str), ('id', int)])
Employee2 = NamedTuple('Employee', [('firstname', str), ('id', int)])
@pedantic
def foo(bar: Employee1) -> None:
print(bar)
with self.assertRaises(PedanticTypeCheckException):
foo(bar=Employee2('bob', 1))
def test_namedtuple_type_mismatch(self):
Employee = NamedTuple('Employee', [('name', str), ('id', int)])
@pedantic
def foo(bar: Employee) -> None:
print(bar)
with self.assertRaises(PedanticTypeCheckException):
foo(bar=('bob', 1))
def test_namedtuple_huge_type_mismatch(self):
Employee = NamedTuple('Employee', [('name', str), ('id', int)])
@pedantic
def foo(bar: int) -> None:
print(bar)
with self.assertRaises(PedanticTypeCheckException):
foo(bar=foo(bar=Employee('bob', 1)))
def test_namedtuple_wrong_field_type(self):
Employee = NamedTuple('Employee', [('name', str), ('id', int)])
@pedantic
def foo(bar: Employee) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(bar=Employee(2, 1))
def test_union(self):
    """Both member types of ``Union[str, int]`` are accepted."""
    @pedantic
    def foo(a: Union[str, int]) -> None:
        pass
    foo(a=6)
    foo(a='xa')
def test_union_new_syntax(self):
if sys.version_info < (3, 10):
return
@pedantic
def foo(a: str | int) -> None:
pass
for value in [6, 'xa']:
foo(a=value)
with self.assertRaises(PedanticTypeCheckException):
foo(a=1.7)
def test_union_typing_type(self):
@pedantic
def foo(a: Union[str, Collection]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=1)
def test_union_fail(self):
@pedantic
def foo(a: Union[str, int]) -> None:
pass
for value in [5.6, b'xa']:
with self.assertRaises(PedanticTypeCheckException):
foo(a=value)
def test_type_var_constraints(self):
T = TypeVar('T', int, str)
@pedantic
def foo(a: T, b: T) -> None:
pass
for values in [
{'a': 6, 'b': 7},
{'a': 'aa', 'b': "bb"},
]:
foo(**values)
def test_type_var_constraints_fail_typing_type(self):
T = TypeVar('T', int, Collection)
@pedantic
def foo(a: T, b: T) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a='aa', b='bb')
def test_typevar_constraints_fail(self):
T = TypeVar('T', int, str)
@pedantic
def foo(a: T, b: T) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=2.5, b='aa')
def test_typevar_bound(self):
T = TypeVar('T', bound=Parent)
@pedantic
def foo(a: T, b: T) -> None:
pass
foo(a=Child(), b=Child())
def test_type_var_bound_fail(self):
T = TypeVar('T', bound=Child)
@pedantic
def foo(a: T, b: T) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=Parent(), b=Parent())
def test_type_var_invariant_fail(self):
T = TypeVar('T', int, str)
@pedantic
def foo(a: T, b: T) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=2, b=3.6)
def test_type_var_covariant(self):
T = TypeVar('T', covariant=True)
@pedantic
def foo(a: T, b: T) -> None:
pass
foo(a=Parent(), b=Child())
def test_type_var_covariant_fail(self):
T = TypeVar('T', covariant=True)
@pedantic
def foo(a: T, b: T) -> None:
pass
with self.assertRaises(PedanticTypeVarMismatchException):
foo(a=Child(), b=Parent())
def test_type_var_contravariant(self):
T = TypeVar('T', contravariant=True)
@pedantic
def foo(a: T, b: T) -> None:
pass
foo(a=Child(), b=Parent())
def test_type_var_contravariant_fail(self):
T = TypeVar('T', contravariant=True)
@pedantic
def foo(a: T, b: T) -> None:
pass
with self.assertRaises(PedanticTypeVarMismatchException):
foo(a=Parent(), b=Child())
def test_class_bad_subclass(self):
@pedantic
def foo(a: Type[Child]) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=Parent)
def test_class_any(self):
@pedantic
def foo(a: Type[Any]) -> None:
pass
foo(a=str)
def test_wrapped_function(self):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
@pedantic
@decorator
def foo(a: 'Child') -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=Parent())
def test_mismatching_default_type(self):
@pedantic
def foo(a: str = 1) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo()
def test_implicit_default_none(self):
"""
Test that if the default value is ``None``, a ``None`` argument can be passed.
"""
@pedantic
def foo(a: Optional[str] = None) -> None:
pass
foo()
def test_generator_simple(self):
"""Test that argument type checking works in a generator function too."""
@pedantic
def generate(a: int) -> Generator[int, int, None]:
yield a
yield a + 1
gen = generate(a=1)
next(gen)
def test_wrapped_generator_no_return_type_annotation(self):
"""Test that return type checking works in a generator function too."""
@pedantic
def generate(a: int) -> Generator[int, int, None]:
yield a
yield a + 1
gen = generate(a=1)
next(gen)
def test_varargs(self):
@pedantic
def foo(*args: int) -> None:
pass
foo(1, 2)
def test_varargs_fail(self):
@pedantic
def foo(*args: int) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(1, 'a')
def test_kwargs(self):
@pedantic
def foo(**kwargs: int) -> None:
pass
foo(a=1, b=2)
def test_kwargs_fail(self):
@pedantic
def foo(**kwargs: int) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=1, b='a')
def test_generic(self):
T_Foo = TypeVar('T_Foo')
class FooGeneric(Generic[T_Foo]):
pass
@pedantic
def foo(a: FooGeneric[str]) -> None:
print(a)
foo(a=FooGeneric[str]())
def test_newtype(self):
myint = NewType("myint", int)
@pedantic
def foo(a: myint) -> int:
return 42
assert foo(a=1) == 42
with self.assertRaises(PedanticTypeCheckException):
foo(a="a")
def test_collection(self):
@pedantic
def foo(a: Collection) -> None:
pass
with self.assertRaises(PedanticTypeCheckException):
foo(a=True)
def test_binary_io(self):
@pedantic
def foo(a: BinaryIO) -> None:
print(a)
foo(a=BytesIO())
def test_text_io(self):
@pedantic
def foo(a: TextIO) -> None:
print(a)
foo(a=StringIO())
def test_binary_io_fail(self):
@pedantic
def foo(a: TextIO) -> None:
print(a)
with self.assertRaises(PedanticTypeCheckException):
foo(a=BytesIO())
def test_text_io_fail(self):
@pedantic
def foo(a: BinaryIO) -> None:
print(a)
with self.assertRaises(PedanticTypeCheckException):
foo(a=StringIO())
def test_binary_io_real_file(self):
@pedantic
def foo(a: BinaryIO) -> None:
print(a)
with open(file=TEST_FILE, mode='wb') as f:
foo(a=f)
def test_text_io_real_file(self):
@pedantic
def foo(a: TextIO) -> None:
print(a)
with open(file=TEST_FILE, mode='w') as f:
foo(a=f)
def test_pedantic_return_type_var_fail(self):
T = TypeVar('T', int, float)
@pedantic
def foo(a: T, b: T) -> T:
return 'a'
with self.assertRaises(PedanticTypeCheckException):
foo(a=4, b=2)
def test_callable(self):
@pedantic
def foo_1(a: Callable[..., int]) -> None:
print(a)
@pedantic
def foo_2(a: Callable) -> None:
print(a)
def some_callable() -> int:
return 4
foo_1(a=some_callable)
with self.assertRaises(PedanticTypeCheckException):
foo_2(a=some_callable)
def test_list(self):
    """Only a fully parameterized ``List[int]`` hint is accepted; bare ``List``
    and the builtin ``list`` hint are both rejected.
    """
    @pedantic
    def foo_1(a: List[int]) -> None:
        print(a)
    @pedantic
    def foo_2(a: List) -> None:
        print(a)
    @pedantic
    def foo_3(a: list) -> None:
        print(a)
    sample = [1, 2]
    foo_1(a=sample)
    for sloppy in (foo_2, foo_3):
        with self.assertRaises(PedanticTypeCheckException):
            sloppy(a=sample)
def test_dict(self):
    """Only a fully parameterized ``Dict[str, int]`` hint is accepted; bare ``Dict``
    and the builtin ``dict`` hint are both rejected.
    """
    @pedantic
    def foo_1(a: Dict[str, int]) -> None:
        print(a)
    @pedantic
    def foo_2(a: Dict) -> None:
        print(a)
    @pedantic
    def foo_3(a: dict) -> None:
        print(a)
    foo_1(a={'x': 2})
    # BUGFIX: foo_3 was asserted twice and foo_2 (bare Dict) was never
    # exercised -- mirror the structure of test_list above.
    with self.assertRaises(PedanticTypeCheckException):
        foo_2(a={'x': 2})
    with self.assertRaises(PedanticTypeCheckException):
        foo_3(a={'x': 2})
def test_sequence(self):
    """Tuples, lists and plain strings all satisfy ``Sequence[str]``."""
    @pedantic
    def foo(a: Sequence[str]) -> None:
        print(a)
    foo(a=('a', 'b'))
    foo(a=['a', 'b'])
    foo(a='abc')
def test_sequence_no_type_args(self):
@pedantic
def foo(a: Sequence) -> None:
print(a)
for value in [('a', 'b'), ['a', 'b'], 'abc']:
with self.assertRaises(PedanticTypeCheckException):
foo(a=value)
def test_iterable(self):
@pedantic
def foo(a: Iterable[str]) -> None:
print(a)
for value in [('a', 'b'), ['a', 'b'], 'abc']:
foo(a=value)
def test_iterable_no_type_args(self):
@pedantic
def foo(a: Iterable) -> None:
print(a)
for value in [('a', 'b'), ['a', 'b'], 'abc']:
with self.assertRaises(PedanticTypeCheckException):
foo(a=value)
def test_container(self):
@pedantic
def foo(a: Container[str]) -> None:
print(a)
for value in [('a', 'b'), ['a', 'b'], 'abc']:
foo(a=value)
def test_container_no_type_args(self):
@pedantic
def foo(a: Container) -> None:
print(a)
for value in [('a', 'b'), ['a', 'b'], 'abc']:
with self.assertRaises(PedanticTypeCheckException):
foo(a=value)
def test_set(self):
@pedantic
def foo_1(a: AbstractSet[int]) -> None:
print(a)
@pedantic
def foo_2(a: Set[int]) -> None:
print(a)
for value in [set(), {6}]:
foo_1(a=value)
foo_2(a=value)
def test_set_no_type_args(self):
@pedantic
def foo_1(a: AbstractSet) -> None:
print(a)
@pedantic
def foo_2(a: Set) -> None:
print(a)
@pedantic
def foo_3(a: set) -> None:
print(a)
for value in [set(), {6}]:
with self.assertRaises(PedanticTypeCheckException):
foo_1(a=value)
with self.assertRaises(PedanticTypeCheckException):
foo_2(a=value)
with self.assertRaises(PedanticTypeCheckException):
foo_3(a=value)
def test_tuple(self):
@pedantic
def foo_1(a: Tuple[int, int]) -> None:
print(a)
@pedantic
def foo_2(a: Tuple[int, ...]) -> None:
print(a)
foo_1(a=(1, 2))
foo_2(a=(1, 2))
def test_tuple_no_type_args(self):
@pedantic
def foo_1(a: Tuple) -> None:
print(a)
@pedantic
def foo_2(a: tuple) -> None:
print(a)
with self.assertRaises(PedanticTypeCheckException):
foo_1(a=(1, 2))
with self.assertRaises(PedanticTypeCheckException):
foo_2(a=(1, 2))
def test_empty_tuple(self):
@pedantic
def foo(a: Tuple[()]) -> None:
print(a)
foo(a=())
def test_class(self):
@pedantic
def foo_1(a: Type[Parent]) -> None:
print(a)
@pedantic
def foo_2(a: Type[TypeVar('UnboundType')]) -> None:
print(a)
@pedantic
def foo_3(a: Type[TypeVar('BoundType', bound=Parent)]) -> None:
print(a)
foo_1(a=Child)
foo_2(a=Child)
foo_3(a=Child)
def test_class_no_type_vars(self):
@pedantic
def foo_1(a: Type) -> None:
print(a)
@pedantic
def foo_2(a: type) -> None:
print(a)
with self.assertRaises(PedanticTypeCheckException):
foo_1(a=Child)
with self.assertRaises(PedanticTypeCheckException):
foo_2(a=Child)
def test_class_not_a_class(self):
@pedantic
def foo(a: Type[Parent]) -> None:
print(a)
with self.assertRaises(PedanticTypeCheckException):
foo(a=1)
def test_complex(self):
@pedantic
def foo(a: complex) -> None:
print(a)
foo(a=complex(1, 5))
with self.assertRaises(PedanticTypeCheckException):
foo(a=1.0)
def test_float(self):
    """A ``float`` hint accepts a float but rejects an int (no implicit coercion)."""
    @pedantic
    def takes_float(a: float) -> None:
        print(a)
    takes_float(a=1.5)
    with self.assertRaises(PedanticTypeCheckException):
        takes_float(a=1)
def test_coroutine_correct_return_type(self):
@pedantic
async def foo() -> str:
return 'foo'
coro = foo()
with self.assertRaises(StopIteration):
coro.send(None)
def test_coroutine_wrong_return_type(self):
@pedantic
async def foo() -> str:
return 1
coro = foo()
with self.assertRaises(PedanticTypeCheckException):
coro.send(None)
def test_bytearray_bytes(self):
@pedantic
def foo(x: bytearray) -> None:
pass
foo(x=bytearray([1]))
def test_class_decorator(self):
@pedantic_class
class Foo:
@staticmethod
def staticmethod() -> int:
return 'foo'
@classmethod
def classmethod(cls) -> int:
return 'foo'
def method(self) -> int:
return 'foo'
with self.assertRaises(PedanticTypeCheckException):
Foo.staticmethod()
with self.assertRaises(PedanticTypeCheckException):
Foo.classmethod()
with self.assertRaises(PedanticTypeCheckException):
Foo().method()
def test_generator(self):
    """``Generator[int, str, List[str]]``: yields ints, accepts str via send(),
    and its ``return`` value surfaces inside ``StopIteration``.
    """
    @pedantic
    def genfunc() -> Generator[int, str, List[str]]:
        val1 = yield 2
        val2 = yield 3
        val3 = yield 4
        return [val1, val2, val3]
    gen = genfunc()
    # Drive the generator to exhaustion; the final `return` raises StopIteration,
    # which exits the while loop and is absorbed by assertRaises.
    with self.assertRaises(StopIteration):
        value = next(gen)
        while True:
            value = gen.send(str(value))
    # `value` holds the last yielded item (an int) when the loop was interrupted.
    assert isinstance(value, int)
def test_generator_no_type_args(self):
@pedantic
def genfunc() -> Generator:
val1 = yield 2
val2 = yield 3
val3 = yield 4
return [val1, val2, val3]
with self.assertRaises(PedanticTypeCheckException):
genfunc()
def test_iterator(self):
    """``Iterator[int]`` declares no send type, so driving the generator with
    ``send(str(...))`` is expected to raise ``PedanticTypeCheckException``.
    """
    @pedantic
    def genfunc() -> Iterator[int]:
        val1 = yield 2
        val2 = yield 3
        val3 = yield 4
        return [val1, val2, val3]
    gen = genfunc()
    with self.assertRaises(PedanticTypeCheckException):
        value = next(gen)
        while True:
            value = gen.send(str(value))
    # `value` holds the last successfully yielded item (an int).
    assert isinstance(value, int)
def test_iterator_no_type_args(self):
@pedantic
def genfunc() -> Iterator:
val1 = yield 2
val2 = yield 3
val3 = yield 4
return [val1, val2, val3]
with self.assertRaises(PedanticTypeCheckException):
genfunc()
def test_iterable_advanced(self):
@pedantic
def genfunc() -> Iterable[int]:
val1 = yield 2
val2 = yield 3
val3 = yield 4
return [val1, val2, val3]
gen = genfunc()
with self.assertRaises(PedanticTypeCheckException):
value = next(gen)
while True:
value = gen.send(str(value))
assert isinstance(value, int)
def test_iterable_advanced_no_type_args(self):
@pedantic
def genfunc() -> Iterable:
val1 = yield 2
val2 = yield 3
val3 = yield 4
return [val1, val2, val3]
with self.assertRaises(PedanticTypeCheckException):
genfunc()
def test_generator_bad_yield(self):
@pedantic
def genfunc_1() -> Generator[int, str, None]:
yield 'foo'
@pedantic
def genfunc_2() -> Iterable[int]:
yield 'foo'
@pedantic
def genfunc_3() -> Iterator[int]:
yield 'foo'
gen = genfunc_1()
with self.assertRaises(PedanticTypeCheckException):
next(gen)
gen = genfunc_2()
with self.assertRaises(PedanticTypeCheckException):
next(gen)
gen = genfunc_3()
with self.assertRaises(PedanticTypeCheckException):
next(gen)
def test_generator_bad_send(self):
@pedantic
def genfunc() -> Generator[int, str, None]:
yield 1
yield 2
gen = genfunc()
next(gen)
with self.assertRaises(PedanticTypeCheckException):
gen.send(2)
def test_generator_bad_return(self):
@pedantic
def genfunc() -> Generator[int, str, str]:
yield 1
return 6
gen = genfunc()
next(gen)
with self.assertRaises(PedanticTypeCheckException):
gen.send('foo')
def test_return_generator(self):
@pedantic
def genfunc() -> Generator[int, None, None]:
yield 1
@pedantic
def foo() -> Generator[int, None, None]:
return genfunc()
foo()
def test_local_class(self):
@pedantic_class
class LocalClass:
class Inner:
pass
def create_inner(self) -> 'Inner':
return self.Inner()
retval = LocalClass().create_inner()
assert isinstance(retval, LocalClass.Inner)
def test_local_class_async(self):
@pedantic_class
class LocalClass:
class Inner:
pass
async def create_inner(self) -> 'Inner':
return self.Inner()
coro = LocalClass().create_inner()
with self.assertRaises(StopIteration):
coro.send(None)
def test_callable_nonmember(self):
class CallableClass:
def __call__(self):
pass
@pedantic_class
class LocalClass:
some_callable = CallableClass()
def test_inherited_class_method(self):
@pedantic_class
class Parent:
@classmethod
def foo(cls, x: str) -> str:
return cls.__name__
@pedantic_class
class Child(Parent):
pass
self.assertEqual('Parent', Child.foo(x='bar'))
with self.assertRaises(PedanticTypeCheckException):
Child.foo(x=1)
    def test_type_var_forward_ref_bound(self):
        """A TypeVar bound given as a forward-reference string ('Parent') is
        resolved and enforced on call."""
        TBound = TypeVar('TBound', bound='Parent')
        @pedantic
        def func(x: TBound) -> None:
            pass
        func(x=Parent())
        with self.assertRaises(PedanticTypeCheckException):
            func(x='foo')
    def test_noreturn(self):
        """Calling a NoReturn-annotated function that returns normally must
        raise PedanticTypeCheckException."""
        @pedantic
        def foo() -> NoReturn:
            pass
        with self.assertRaises(PedanticTypeCheckException):
            foo()
    def test_literal(self):
        """Literal[...] accepts exactly the listed values and rejects others.

        Skipped on interpreters older than 3.8 where typing.Literal does not
        exist.
        """
        if sys.version_info < (3, 8):
            return
        from typing import Literal
        @pedantic
        def foo(a: Literal[1, True, 'x', b'y', 404]) -> None:
            print(a)
        foo(a=404)
        foo(a=True)
        foo(a='x')
        with self.assertRaises(PedanticTypeCheckException):
            foo(a=4)
    def test_literal_union(self):
        """A Literal nested inside a Union is honoured (Python 3.8+ only)."""
        if sys.version_info < (3, 8):
            return
        from typing import Literal
        @pedantic
        def foo(a: Union[str, Literal[1, 6, 8]]) -> None:
            print(a)
        foo(a=6)
        with self.assertRaises(PedanticTypeCheckException):
            foo(a=4)
    def test_literal_illegal_value(self):
        """Calling a function whose Literal annotation contains an illegal
        literal value (the float 1.1) raises PedanticTypeCheckException
        (Python 3.8+ only)."""
        if sys.version_info < (3, 8):
            return
        from typing import Literal
        @pedantic
        def foo(a: Literal[1, 1.1]) -> None:
            print(a)
        with self.assertRaises(PedanticTypeCheckException):
            foo(a=4)
    def test_enum(self):
        """Applying @pedantic_class to an Enum subclass must be rejected at
        class-decoration time."""
        with self.assertRaises(PedanticTypeCheckException):
            @pedantic_class
            class MyEnum(Enum):
                A = 'a'
    def test_enum_aggregate(self):
        """End-to-end check: a Generic @pedantic_class aggregate over an
        IntEnum accepts valid constructor values and raises for arguments of
        a wrong type (here an unrelated Child instance)."""
        T = TypeVar('T', bound=IntEnum)
        @pedantic_class
        class EnumAggregate(Generic[T]):
            # Concrete subclasses must assign the aggregated IntEnum type here.
            enum: ClassVar[Type[T]]
            def __init__(self, value: Union[int, str, List[T]]) -> None:
                # Single-digit encoding below only works for <10 members.
                assert len(self.enum) < 10
                if value == '':
                    raise ValueError(f'Parameter "value" cannot be empty!')
                if isinstance(value, list):
                    self._value = ''.join([str(x.value) for x in value])
                else:
                    self._value = str(value)
                self._value = ''.join(sorted(self._value)) # sort characters in string
                self.to_list() # check if is valid
            def __contains__(self, item: T) -> bool:
                return item in self.to_list()
            def __eq__(self, other: Union['EnumAggregate', str]) -> bool:
                if isinstance(other, str):
                    return self._value == other
                return self._value == other._value
            def __str__(self) -> str:
                return self._value
            def to_list(self) -> List[T]:
                # Each character of the encoded string maps back to one member.
                return [self.enum(int(character)) for character in self._value]
            @property
            def value(self) -> str:
                return self._value
            @classmethod
            def all(cls) -> str:
                return ''.join([str(x.value) for x in cls.enum])
        class Gender(IntEnum):
            MALE = 1
            FEMALE = 2
            DIVERS = 3
        @pedantic_class
        class Genders(EnumAggregate[Gender]):
            enum = Gender
        Genders(value=12)
        with self.assertRaises(PedanticTypeCheckException):
            Genders(value=Child())
"datetime.datetime",
"io.BytesIO",
"functools.wraps",
"typing.NewType",
"datetime.date",
"io.StringIO",
"typing.NamedTuple",
"typing.TypeVar"
] | [((24263, 24285), 'typing.NewType', 'NewType', (['"""UserId"""', 'int'], {}), "('UserId', int)\n", (24270, 24285), False, 'from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, NoReturn, ClassVar\n'), ((24705, 24727), 'typing.NewType', 'NewType', (['"""UserId"""', 'int'], {}), "('UserId', int)\n", (24712, 24727), False, 'from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, NoReturn, ClassVar\n'), ((25324, 25336), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (25331, 25336), False, 'from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, NoReturn, ClassVar\n'), ((25495, 25507), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (25502, 25507), False, 'from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, NoReturn, ClassVar\n'), ((25769, 25781), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (25776, 25781), False, 'from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, NoReturn, ClassVar\n'), ((37239, 37251), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (37246, 37251), False, 'from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, 
TypeVar, Sequence, AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, NoReturn, ClassVar\n'), ((39566, 39618), 'typing.NamedTuple', 'NamedTuple', (['"""Employee"""', "[('name', str), ('id', int)]"], {}), "('Employee', [('name', str), ('id', int)])\n", (39576, 39618), False, 'from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, NoReturn, ClassVar\n'), ((39803, 39855), 'typing.NamedTuple', 'NamedTuple', (['"""Employee"""', "[('name', str), ('id', int)]"], {}), "('Employee', [('name', str), ('id', int)])\n", (39813, 39855), False, 'from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, NoReturn, ClassVar\n'), ((39876, 39933), 'typing.NamedTuple', 'NamedTuple', (['"""Employee"""', "[('firstname', str), ('id', int)]"], {}), "('Employee', [('firstname', str), ('id', int)])\n", (39886, 39933), False, 'from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, NoReturn, ClassVar\n'), ((40184, 40236), 'typing.NamedTuple', 'NamedTuple', (['"""Employee"""', "[('name', str), ('id', int)]"], {}), "('Employee', [('name', str), ('id', int)])\n", (40194, 40236), False, 'from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, NoReturn, ClassVar\n'), ((40482, 40534), 'typing.NamedTuple', 'NamedTuple', (['"""Employee"""', "[('name', str), ('id', int)]"], 
{}), "('Employee', [('name', str), ('id', int)])\n", (40492, 40534), False, 'from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, NoReturn, ClassVar\n'), ((40790, 40842), 'typing.NamedTuple', 'NamedTuple', (['"""Employee"""', "[('name', str), ('id', int)]"], {}), "('Employee', [('name', str), ('id', int)])\n", (40800, 40842), False, 'from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, NoReturn, ClassVar\n'), ((41997, 42019), 'typing.TypeVar', 'TypeVar', (['"""T"""', 'int', 'str'], {}), "('T', int, str)\n", (42004, 42019), False, 'from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, NoReturn, ClassVar\n'), ((42292, 42321), 'typing.TypeVar', 'TypeVar', (['"""T"""', 'int', 'Collection'], {}), "('T', int, Collection)\n", (42299, 42321), False, 'from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, NoReturn, ClassVar\n'), ((42546, 42568), 'typing.TypeVar', 'TypeVar', (['"""T"""', 'int', 'str'], {}), "('T', int, str)\n", (42553, 42568), False, 'from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, NoReturn, ClassVar\n'), ((42781, 42807), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {'bound': 'Parent'}), "('T', 
bound=Parent)\n", (42788, 42807), False, 'from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, NoReturn, ClassVar\n'), ((42969, 42994), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {'bound': 'Child'}), "('T', bound=Child)\n", (42976, 42994), False, 'from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, NoReturn, ClassVar\n'), ((43226, 43248), 'typing.TypeVar', 'TypeVar', (['"""T"""', 'int', 'str'], {}), "('T', int, str)\n", (43233, 43248), False, 'from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, NoReturn, ClassVar\n'), ((43463, 43491), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {'covariant': '(True)'}), "('T', covariant=True)\n", (43470, 43491), False, 'from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, NoReturn, ClassVar\n'), ((43658, 43686), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {'covariant': '(True)'}), "('T', covariant=True)\n", (43665, 43686), False, 'from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, NoReturn, ClassVar\n'), ((43922, 43954), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {'contravariant': '(True)'}), "('T', contravariant=True)\n", (43929, 43954), False, 'from typing import List, Tuple, 
Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, NoReturn, ClassVar\n'), ((44125, 44157), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {'contravariant': '(True)'}), "('T', contravariant=True)\n", (44132, 44157), False, 'from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, NoReturn, ClassVar\n'), ((46784, 46800), 'typing.TypeVar', 'TypeVar', (['"""T_Foo"""'], {}), "('T_Foo')\n", (46791, 46800), False, 'from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, NoReturn, ClassVar\n'), ((47025, 47046), 'typing.NewType', 'NewType', (['"""myint"""', 'int'], {}), "('myint', int)\n", (47032, 47046), False, 'from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, NoReturn, ClassVar\n'), ((48537, 48561), 'typing.TypeVar', 'TypeVar', (['"""T"""', 'int', 'float'], {}), "('T', int, float)\n", (48544, 48561), False, 'from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, NoReturn, ClassVar\n'), ((60658, 60691), 'typing.TypeVar', 'TypeVar', (['"""TBound"""'], {'bound': '"""Parent"""'}), "('TBound', bound='Parent')\n", (60665, 60691), False, 'from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, AbstractSet, 
Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, NoReturn, ClassVar\n'), ((62265, 62292), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {'bound': 'IntEnum'}), "('T', bound=IntEnum)\n", (62272, 62292), False, 'from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, NoReturn, ClassVar\n'), ((30852, 30863), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (30857, 30863), False, 'from functools import wraps\n'), ((44745, 44756), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (44750, 44756), False, 'from functools import wraps\n'), ((32987, 33007), 'datetime.datetime', 'datetime', (['(1995)', '(2)', '(5)'], {}), '(1995, 2, 5)\n', (32995, 33007), False, 'from datetime import datetime, date\n'), ((33011, 33027), 'datetime.date', 'date', (['(1987)', '(8)', '(7)'], {}), '(1987, 8, 7)\n', (33015, 33027), False, 'from datetime import datetime, date\n'), ((33043, 33063), 'datetime.datetime', 'datetime', (['(1995)', '(2)', '(5)'], {}), '(1995, 2, 5)\n', (33051, 33063), False, 'from datetime import datetime, date\n'), ((33067, 33087), 'datetime.datetime', 'datetime', (['(1987)', '(8)', '(7)'], {}), '(1987, 8, 7)\n', (33075, 33087), False, 'from datetime import datetime, date\n'), ((47552, 47561), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (47559, 47561), False, 'from io import BytesIO, StringIO\n'), ((47682, 47692), 'io.StringIO', 'StringIO', ([], {}), '()\n', (47690, 47692), False, 'from io import BytesIO, StringIO\n'), ((33187, 33203), 'datetime.date', 'date', (['(1995)', '(2)', '(5)'], {}), '(1995, 2, 5)\n', (33191, 33203), False, 'from datetime import datetime, date\n'), ((33207, 33223), 'datetime.date', 'date', (['(1987)', '(8)', '(7)'], {}), '(1987, 8, 7)\n', (33211, 33223), False, 'from datetime import datetime, date\n'), ((47884, 47893), 
'io.BytesIO', 'BytesIO', ([], {}), '()\n', (47891, 47893), False, 'from io import BytesIO, StringIO\n'), ((48085, 48095), 'io.StringIO', 'StringIO', ([], {}), '()\n', (48093, 48095), False, 'from io import BytesIO, StringIO\n'), ((53207, 53229), 'typing.TypeVar', 'TypeVar', (['"""UnboundType"""'], {}), "('UnboundType')\n", (53214, 53229), False, 'from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, NoReturn, ClassVar\n'), ((53307, 53341), 'typing.TypeVar', 'TypeVar', (['"""BoundType"""'], {'bound': 'Parent'}), "('BoundType', bound=Parent)\n", (53314, 53341), False, 'from typing import List, Tuple, Callable, Any, Optional, Union, Dict, Set, FrozenSet, NewType, TypeVar, Sequence, AbstractSet, Iterator, NamedTuple, Collection, Type, Generator, Generic, BinaryIO, TextIO, Iterable, Container, NoReturn, ClassVar\n')] |
# Generated by Django 2.1.7 on 2019-07-06 04:48
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration: adds a non-null ``date`` field to ``Job``.

    ``preserve_default=False`` means ``timezone.now`` was supplied only as a
    one-off default for existing rows during this migration.
    """
    dependencies = [
        ('jobs', '0004_auto_20190706_0012'),
    ]
    operations = [
        migrations.AddField(
            model_name='job',
            name='date',
            field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='Date'),
            preserve_default=False,
        ),
    ]
| [
"django.db.models.DateTimeField"
] | [((355, 431), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'django.utils.timezone.now', 'verbose_name': '"""Date"""'}), "(default=django.utils.timezone.now, verbose_name='Date')\n", (375, 431), False, 'from django.db import migrations, models\n')] |
import os
from app import create_app
from dotenv import load_dotenv
# Pull environment overrides from a ".env" file next to this script, if present.
env_file = os.path.join(os.path.dirname(__file__), ".env")
if os.path.exists(env_file):
    load_dotenv(env_file)

# Build the application from the configuration named in FLASK_CONFIG,
# falling back to the "default" configuration.
app = create_app(os.environ.get("FLASK_CONFIG") or "default")

if __name__ == "__main__":
    app.run()
"os.path.dirname",
"os.path.exists",
"os.environ.get",
"dotenv.load_dotenv"
] | [((142, 169), 'os.path.exists', 'os.path.exists', (['dotenv_path'], {}), '(dotenv_path)\n', (156, 169), False, 'import os\n'), ((104, 129), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (119, 129), False, 'import os\n'), ((175, 199), 'dotenv.load_dotenv', 'load_dotenv', (['dotenv_path'], {}), '(dotenv_path)\n', (186, 199), False, 'from dotenv import load_dotenv\n'), ((218, 248), 'os.environ.get', 'os.environ.get', (['"""FLASK_CONFIG"""'], {}), "('FLASK_CONFIG')\n", (232, 248), False, 'import os\n')] |
import os
import tensorflow as tf
import numpy as np
import mcubes
from ops import *
class ZGenerator:
    """Implicit-field decoder (TensorFlow 1.x): maps a latent z-vector plus
    3D point coordinates to occupancy values in [0, 1].

    Dense dim^3 grids are evaluated in sub-cubes of ``test_size**3`` points
    and assembled for marching cubes.
    """
    def __init__(self, sess, z_dim=128, ef_dim=32, gf_dim=128, dataset_name=None):
        """Store hyper-parameters and build the TF graph.

        Args:
            sess: open tf.Session used for all evaluations.
            z_dim: dimensionality of the latent code.
            ef_dim: encoder feature width (stored but unused in this class).
            gf_dim: generator (decoder) feature width.
            dataset_name: optional dataset identifier (stored but unused here).
        """
        self.sess = sess
        self.input_size = 64
        self.z_dim = z_dim
        self.ef_dim = ef_dim
        self.gf_dim = gf_dim
        self.dataset_name = dataset_name
        self.real_size = 64
        self.test_size = 32
        # One network evaluation processes one test_size^3 chunk of points.
        self.batch_size = self.test_size*self.test_size*self.test_size
        self.build_model()
    def build_model(self):
        """Create placeholders, the decoder sub-graph, an L2 loss and a saver."""
        self.z_vector = tf.placeholder(shape=[1,self.z_dim], dtype=tf.float32)
        self.point_coord = tf.placeholder(shape=[self.batch_size,3], dtype=tf.float32)
        self.point_value = tf.placeholder(shape=[self.batch_size,1], dtype=tf.float32)
        self.zG = self.generator(self.point_coord, self.z_vector, phase_train=True, reuse=False)
        # Mean squared error between predicted and target occupancy values.
        self.loss = tf.reduce_mean(tf.square(self.point_value - self.zG))
        self.saver = tf.train.Saver(max_to_keep=10)
    def generator(self, points, z, phase_train=True, reuse=False):
        """MLP decoder: each point is concatenated with the (tiled) latent code
        and pushed through lrelu/linear layers, re-concatenating the
        point+code input after each of the first four layers (skip
        connections).  Returns a [batch_size, 1] tensor of sigmoid
        occupancies.
        """
        with tf.variable_scope('simple_net') as scope:
            if reuse:
                scope.reuse_variables()
            # Broadcast the single latent code to every point in the batch.
            zs = tf.tile(z, [self.batch_size,1])
            pointz = tf.concat([points,zs],1)
            h1 = lrelu(linear(pointz, self.gf_dim*16, 'h1_lin'))
            h1 = tf.concat([h1,pointz],1)
            h2 = lrelu(linear(h1, self.gf_dim*8, 'h4_lin'))
            h2 = tf.concat([h2,pointz],1)
            h3 = lrelu(linear(h2, self.gf_dim*4, 'h5_lin'))
            h3 = tf.concat([h3,pointz],1)
            h4 = lrelu(linear(h3, self.gf_dim*2, 'h6_lin'))
            h4 = tf.concat([h4,pointz],1)
            h5 = lrelu(linear(h4, self.gf_dim, 'h7_lin'))
            h6 = tf.nn.sigmoid(linear(h5, 1, 'h8_lin'))
            return tf.reshape(h6, [self.batch_size,1])
    def test(self, checkpoint_dir, batch_z, dim=64):
        """Restore the latest checkpoint and decode latent codes into a mesh.

        The dim^3 grid is split into ``multiplier``^3 chunks of test_size^3
        points each; aux_x/y/z hold the within-chunk voxel indices and
        ``coords`` the normalised ([-1, 1]) coordinates of every chunk.

        NOTE(review): the marching-cubes call and the ``return`` appear to sit
        inside the loop over ``batch_z``, so only the first latent code is
        meshed -- confirm this is intended.
        """
        could_load, checkpoint_counter = self.load(checkpoint_dir)
        if could_load:
            print(' [*] Load SUCCESS')
        else:
            print(' [!] Load failed...')
            return
        dima = self.test_size
        multiplier = int(dim/dima)
        multiplier2 = multiplier*multiplier
        multiplier3 = multiplier*multiplier*multiplier
        aux_x = np.zeros([dima,dima,dima],np.int32)
        aux_y = np.zeros([dima,dima,dima],np.int32)
        aux_z = np.zeros([dima,dima,dima],np.int32)
        for i in range(dima):
            for j in range(dima):
                for k in range(dima):
                    aux_x[i,j,k] = i*multiplier
                    aux_y[i,j,k] = j*multiplier
                    aux_z[i,j,k] = k*multiplier
        coords = np.zeros([multiplier3,dima,dima,dima,3],np.float32)
        for i in range(multiplier):
            for j in range(multiplier):
                for k in range(multiplier):
                    coords[i*multiplier2+j*multiplier+k,:,:,:,0] = aux_x+i
                    coords[i*multiplier2+j*multiplier+k,:,:,:,1] = aux_y+j
                    coords[i*multiplier2+j*multiplier+k,:,:,:,2] = aux_z+k
        # Map voxel-centre indices to normalised coordinates in [-1, 1].
        coords = (coords+0.5)/dim*2.0-1.0
        coords = np.reshape(coords,[multiplier3,self.batch_size,3])
        for t in range(batch_z.shape[0]):
            # +2 padding gives marching cubes a closed boundary shell.
            model_float = np.zeros([dim+2,dim+2,dim+2],np.float32)
            for i in range(multiplier):
                for j in range(multiplier):
                    for k in range(multiplier):
                        minib = i*multiplier2+j*multiplier+k
                        model_out = self.sess.run(self.zG,
                            feed_dict={
                                self.z_vector: batch_z[t:t+1],
                                self.point_coord: coords[minib],
                            })
                        model_float[aux_x+i+1,aux_y+j+1,aux_z+k+1] = np.reshape(model_out, [dima,dima,dima])
            # Iso-surface threshold on the predicted occupancy field.
            thres = 0.2
            vertices, triangles = mcubes.marching_cubes(model_float, thres)
            return vertices, triangles
    def load(self, checkpoint_dir):
        """Restore the newest checkpoint in ``checkpoint_dir``.

        Returns:
            (True, counter) on success, where ``counter`` is the step number
            parsed from the checkpoint file name, or (False, 0) on failure.
        """
        import re
        print(' [*] Reading checkpoints...')
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
            # NOTE(review): pattern is not a raw string; prefer r'(\d+)(?!.*\d)'
            # to avoid invalid-escape DeprecationWarnings.
            counter = int(next(re.finditer('(\d+)(?!.*\d)',ckpt_name)).group(0))
            print(' [*] Success to read {}'.format(ckpt_name))
            return True, counter
        else:
            print(' [*] Failed to find a checkpoint')
            return False, 0
| [
"tensorflow.tile",
"numpy.reshape",
"tensorflow.variable_scope",
"tensorflow.reshape",
"tensorflow.placeholder",
"tensorflow.train.Saver",
"os.path.join",
"mcubes.marching_cubes",
"tensorflow.train.get_checkpoint_state",
"tensorflow.concat",
"numpy.zeros",
"os.path.basename",
"re.finditer",
... | [((587, 642), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[1, self.z_dim]', 'dtype': 'tf.float32'}), '(shape=[1, self.z_dim], dtype=tf.float32)\n', (601, 642), True, 'import tensorflow as tf\n'), ((669, 729), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[self.batch_size, 3]', 'dtype': 'tf.float32'}), '(shape=[self.batch_size, 3], dtype=tf.float32)\n', (683, 729), True, 'import tensorflow as tf\n'), ((756, 816), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[self.batch_size, 1]', 'dtype': 'tf.float32'}), '(shape=[self.batch_size, 1], dtype=tf.float32)\n', (770, 816), True, 'import tensorflow as tf\n'), ((1035, 1065), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(10)'}), '(max_to_keep=10)\n', (1049, 1065), True, 'import tensorflow as tf\n'), ((2374, 2412), 'numpy.zeros', 'np.zeros', (['[dima, dima, dima]', 'np.int32'], {}), '([dima, dima, dima], np.int32)\n', (2382, 2412), True, 'import numpy as np\n'), ((2426, 2464), 'numpy.zeros', 'np.zeros', (['[dima, dima, dima]', 'np.int32'], {}), '([dima, dima, dima], np.int32)\n', (2434, 2464), True, 'import numpy as np\n'), ((2478, 2516), 'numpy.zeros', 'np.zeros', (['[dima, dima, dima]', 'np.int32'], {}), '([dima, dima, dima], np.int32)\n', (2486, 2516), True, 'import numpy as np\n'), ((2777, 2833), 'numpy.zeros', 'np.zeros', (['[multiplier3, dima, dima, dima, 3]', 'np.float32'], {}), '([multiplier3, dima, dima, dima, 3], np.float32)\n', (2785, 2833), True, 'import numpy as np\n'), ((3233, 3286), 'numpy.reshape', 'np.reshape', (['coords', '[multiplier3, self.batch_size, 3]'], {}), '(coords, [multiplier3, self.batch_size, 3])\n', (3243, 3286), True, 'import numpy as np\n'), ((4211, 4256), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (4240, 4256), True, 'import tensorflow as tf\n'), ((966, 1003), 'tensorflow.square', 'tf.square', (['(self.point_value - self.zG)'], {}), 
'(self.point_value - self.zG)\n', (975, 1003), True, 'import tensorflow as tf\n'), ((1147, 1178), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""simple_net"""'], {}), "('simple_net')\n", (1164, 1178), True, 'import tensorflow as tf\n'), ((1269, 1301), 'tensorflow.tile', 'tf.tile', (['z', '[self.batch_size, 1]'], {}), '(z, [self.batch_size, 1])\n', (1276, 1301), True, 'import tensorflow as tf\n'), ((1322, 1348), 'tensorflow.concat', 'tf.concat', (['[points, zs]', '(1)'], {}), '([points, zs], 1)\n', (1331, 1348), True, 'import tensorflow as tf\n'), ((1430, 1456), 'tensorflow.concat', 'tf.concat', (['[h1, pointz]', '(1)'], {}), '([h1, pointz], 1)\n', (1439, 1456), True, 'import tensorflow as tf\n'), ((1533, 1559), 'tensorflow.concat', 'tf.concat', (['[h2, pointz]', '(1)'], {}), '([h2, pointz], 1)\n', (1542, 1559), True, 'import tensorflow as tf\n'), ((1636, 1662), 'tensorflow.concat', 'tf.concat', (['[h3, pointz]', '(1)'], {}), '([h3, pointz], 1)\n', (1645, 1662), True, 'import tensorflow as tf\n'), ((1739, 1765), 'tensorflow.concat', 'tf.concat', (['[h4, pointz]', '(1)'], {}), '([h4, pointz], 1)\n', (1748, 1765), True, 'import tensorflow as tf\n'), ((1899, 1935), 'tensorflow.reshape', 'tf.reshape', (['h6', '[self.batch_size, 1]'], {}), '(h6, [self.batch_size, 1])\n', (1909, 1935), True, 'import tensorflow as tf\n'), ((3353, 3402), 'numpy.zeros', 'np.zeros', (['[dim + 2, dim + 2, dim + 2]', 'np.float32'], {}), '([dim + 2, dim + 2, dim + 2], np.float32)\n', (3361, 3402), True, 'import numpy as np\n'), ((4013, 4054), 'mcubes.marching_cubes', 'mcubes.marching_cubes', (['model_float', 'thres'], {}), '(model_float, thres)\n', (4034, 4054), False, 'import mcubes\n'), ((4329, 4373), 'os.path.basename', 'os.path.basename', (['ckpt.model_checkpoint_path'], {}), '(ckpt.model_checkpoint_path)\n', (4345, 4373), False, 'import os\n'), ((4416, 4455), 'os.path.join', 'os.path.join', (['checkpoint_dir', 'ckpt_name'], {}), '(checkpoint_dir, ckpt_name)\n', (4428, 4455), False, 
'import os\n'), ((3914, 3955), 'numpy.reshape', 'np.reshape', (['model_out', '[dima, dima, dima]'], {}), '(model_out, [dima, dima, dima])\n', (3924, 3955), True, 'import numpy as np\n'), ((4488, 4529), 're.finditer', 're.finditer', (['"""(\\\\d+)(?!.*\\\\d)"""', 'ckpt_name'], {}), "('(\\\\d+)(?!.*\\\\d)', ckpt_name)\n", (4499, 4529), False, 'import re\n')] |
import pytest
from faker import Faker
from fastapi.encoders import jsonable_encoder
from pydantic.types import SecretStr
from sqlalchemy.orm import Session
from app import crud, schemas
from app.core import security
def test_create_user(db: Session) -> None:
    """Creating a user stores email/username and a verifiable password hash."""
    fake_profile = Faker().profile()
    email = fake_profile.get("mail", None)
    username = fake_profile.get("username", None)
    password = "<PASSWORD>"
    created = crud.user.create(
        db=db,
        obj_in=schemas.UserCreate(
            username=username, email=email, password=SecretStr(password)
        ),
    )
    assert created.email == email
    assert created.username == username
    assert hasattr(created, "hashed_password")
    assert security.verify_password(SecretStr(password), created.hashed_password)
def test_authenticate_user_success(db: Session) -> None:
    """authenticate() rejects bad credentials and accepts the right ones."""
    fake_profile = Faker().profile()
    email = fake_profile.get("mail", None)
    username = fake_profile.get("username", None)
    password = "<PASSWORD>"
    user = crud.user.create(
        db=db,
        obj_in=schemas.UserCreate(
            username=username, email=email, password=SecretStr(password)
        ),
    )
    # An unknown e-mail address must not authenticate.
    assert not crud.user.authenticate(
        db, email=email + "xxx", password=SecretStr(password)
    )
    # A wrong password must not authenticate.
    assert not crud.user.authenticate(
        db, email=email, password=SecretStr(password + "<PASSWORD>")
    )
    # Correct credentials return the matching user record.
    authenticated_user = crud.user.authenticate(
        db, email=email, password=SecretStr(password)
    )
    assert authenticated_user
    assert user.email == authenticated_user.email
@pytest.mark.parametrize("search_by", ("email", "username", "id"))
def test_get_user_by(db: Session, search_by: str) -> None:
faker = Faker()
profile = faker.profile()
email = profile.get("mail", None)
username = profile.get("username", None)
password = "<PASSWORD>"
user_in = schemas.UserCreate(
username=username, email=email, password=SecretStr(password)
)
user = crud.user.create(db=db, obj_in=user_in)
func_name = f"get_user_by_{search_by}"
func = getattr(crud.user, func_name)
user_2 = func(db, getattr(user, search_by))
assert user_2
assert user.email == user_2.email
assert jsonable_encoder(user) == jsonable_encoder(user_2)
| [
"app.crud.user.create",
"pydantic.types.SecretStr",
"faker.Faker",
"pytest.mark.parametrize",
"fastapi.encoders.jsonable_encoder"
] | [((1726, 1791), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""search_by"""', "('email', 'username', 'id')"], {}), "('search_by', ('email', 'username', 'id'))\n", (1749, 1791), False, 'import pytest\n'), ((274, 281), 'faker.Faker', 'Faker', ([], {}), '()\n', (279, 281), False, 'from faker import Faker\n'), ((544, 583), 'app.crud.user.create', 'crud.user.create', ([], {'db': 'db', 'obj_in': 'user_in'}), '(db=db, obj_in=user_in)\n', (560, 583), False, 'from app import crud, schemas\n'), ((846, 853), 'faker.Faker', 'Faker', ([], {}), '()\n', (851, 853), False, 'from faker import Faker\n'), ((1116, 1155), 'app.crud.user.create', 'crud.user.create', ([], {'db': 'db', 'obj_in': 'user_in'}), '(db=db, obj_in=user_in)\n', (1132, 1155), False, 'from app import crud, schemas\n'), ((1863, 1870), 'faker.Faker', 'Faker', ([], {}), '()\n', (1868, 1870), False, 'from faker import Faker\n'), ((2133, 2172), 'app.crud.user.create', 'crud.user.create', ([], {'db': 'db', 'obj_in': 'user_in'}), '(db=db, obj_in=user_in)\n', (2149, 2172), False, 'from app import crud, schemas\n'), ((732, 751), 'pydantic.types.SecretStr', 'SecretStr', (['password'], {}), '(password)\n', (741, 751), False, 'from pydantic.types import SecretStr\n'), ((2372, 2394), 'fastapi.encoders.jsonable_encoder', 'jsonable_encoder', (['user'], {}), '(user)\n', (2388, 2394), False, 'from fastapi.encoders import jsonable_encoder\n'), ((2398, 2422), 'fastapi.encoders.jsonable_encoder', 'jsonable_encoder', (['user_2'], {}), '(user_2)\n', (2414, 2422), False, 'from fastapi.encoders import jsonable_encoder\n'), ((507, 526), 'pydantic.types.SecretStr', 'SecretStr', (['password'], {}), '(password)\n', (516, 526), False, 'from pydantic.types import SecretStr\n'), ((1079, 1098), 'pydantic.types.SecretStr', 'SecretStr', (['password'], {}), '(password)\n', (1088, 1098), False, 'from pydantic.types import SecretStr\n'), ((1278, 1297), 'pydantic.types.SecretStr', 'SecretStr', (['password'], {}), '(password)\n', (1287, 
1297), False, 'from pydantic.types import SecretStr\n'), ((1467, 1492), 'pydantic.types.SecretStr', 'SecretStr', (['wrong_password'], {}), '(wrong_password)\n', (1476, 1492), False, 'from pydantic.types import SecretStr\n'), ((1617, 1636), 'pydantic.types.SecretStr', 'SecretStr', (['password'], {}), '(password)\n', (1626, 1636), False, 'from pydantic.types import SecretStr\n'), ((2096, 2115), 'pydantic.types.SecretStr', 'SecretStr', (['password'], {}), '(password)\n', (2105, 2115), False, 'from pydantic.types import SecretStr\n')] |
import os
os.environ["PYRO_LOGFILE"] = "pyro.log"
os.environ["PYRO_LOGLEVEL"] = "DEBUG"
import Pyro4
import Pyro4.util
import Pyro4.naming
import sys
import pprint
"""
Front end controller for the 2017/18 Networks and Distributed Systems
Summative Assignment.
Author: Z0954757
"""
# Route Pyro4 remote tracebacks through the local excepthook so server-side
# errors print readably on this process's console.
sys.excepthook = Pyro4.util.excepthook
# Module-level pretty-printer; not referenced elsewhere in this view.
pp = pprint.PrettyPrinter()
def main():
    """Start the front end: discover file servers, register the controller
    with the Pyro name server, and serve client requests until interrupted.
    """
    controller = FrontEnd()
    controller.find_servers()
    with Pyro4.Daemon() as daemon:
        uri = daemon.register(controller)
        with Pyro4.locateNS() as name_server:
            name_server.register("filesystem.frontend", uri)
        print("Frontend available.")
        daemon.requestLoop()
@Pyro4.expose
class FrontEnd(object):
"""Class to represent the front end controller. This class accepts
connections from a client application, decides the appropriate action to
perform, dispatches commands to the servers on the file system.
"""
    def __init__(self):
        # Proxies for all file servers discovered via find_servers().
        self.active_servers = []
def find_servers(self):
"""Method to find any servers existing on the network using the Pryo
Naming Server to lookup servers.
"""
with Pyro4.locateNS() as ns:
for server, server_uri in ns.list(prefix="filesystem.fileserver.").items():
print("Found server at: {0}".format(server))
self.active_servers.append(Pyro4.Proxy(server_uri))
if not self.active_servers:
raise ValueError("No servers found! (Have you started the servers first?)")
def connect_client(self,client_name):
"""Method called by the client to initiate a connection between the two.
"""
print("Client {0} connected.".format(client_name))
return "Hello {0}, you are now connected to the file system.".format(client_name)
def list_all(self):
"""Method called by the client list all the files on the file system.
Queries all currently connected servers for files and returns them as a
single list to the client, removing duplicate instances where a file is
sotred on multiple servers.
"""
raw_file_list = []
for server in self.active_servers:
server_contents = server.list_contents()
raw_file_list.append(server_contents)
flat_file_list = [item for sublist in raw_file_list for item in sublist]
#remove duplicates
file_list = list(set(flat_file_list))
return file_list
def delete_file(self, file_name):
"""Method called by the client to delete a file stored on the system.
Queries all currently connected servers and deletes the file if it
exists there, ensuringthat the file is removed on all servers.
"""
print("Deleting file: {0}".format(file_name))
deleted = False
print("Searching for file on servers...")
for server in self.active_servers:
server_contents = server.list_contents()
if file_name in server_contents:
print("Found file on server.")
response = server.delete_file(file_name)
if response == "File deleted.":
deleted = True
elif response == "File not found on server.":
continue
if deleted == True:
return "File deleted."
else:
return "File not found on file system."
def upload_file_low(self, file_name):
"""Method called by the client to upload a file in the low reliability
mode whereby the file is uploaded to the server with the fewest files.
"""
print("Starting upload sequence.")
print("Checking if file exists on system.")
file_list = self.list_all()
if file_name in file_list:
return "File already exists on system."
else:
print("No matching file on system")
print("Low reliability upload.")
print("Looking for least full server.")
server_least_files = (self.active_servers[0], len(self.active_servers[0].list_contents()))
for i in range(1, len(self.active_servers)):
server = self.active_servers[i]
server_no_files = len(server.list_contents())
if server_least_files[1] > server_no_files:
server_least_files = (server, server_no_files)
print("Preparing server for upload process: server_{0}".format(server_least_files[0].get_name()))
response = server_least_files[0].init_upload(file_name)
if response == "Failed to initate server, see server log for details.":
print(response)
return response
else:
print(response)
return response
def upload_file_high(self, file_name, status):
"""Method called by the client to upload a file in high reliability
mode whereby the file is uploaded to all servers attached to the system.
"""
if status == 'start':
print("High reliability upload process started.")
no_servers = len(self.active_servers)
return no_servers
else:
response = self.active_servers[status].init_upload_high(file_name)
return response
def download_file(self, file_name):
"""Method called by the client to download a file from the system.
Searches the active servers and initiates the download from the first
it finds containing the specified file.
"""
print("Starting download process.")
print("Checking if file exists on system.")
file_list = self.list_all()
if file_name not in file_list:
return "File not on system. Use LIST to check available files."
else:
print("Looking for server containing file.")
for server in self.active_servers:
if file_name in server.list_contents():
print("Found file, readying server.")
response = server.init_download()
return response
if __name__ == "__main__":
main()
| [
"Pyro4.Daemon",
"Pyro4.locateNS",
"Pyro4.Proxy",
"pprint.PrettyPrinter"
] | [((329, 351), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {}), '()\n', (349, 351), False, 'import pprint\n'), ((538, 552), 'Pyro4.Daemon', 'Pyro4.Daemon', ([], {}), '()\n', (550, 552), False, 'import Pyro4\n'), ((626, 642), 'Pyro4.locateNS', 'Pyro4.locateNS', ([], {}), '()\n', (640, 642), False, 'import Pyro4\n'), ((1274, 1290), 'Pyro4.locateNS', 'Pyro4.locateNS', ([], {}), '()\n', (1288, 1290), False, 'import Pyro4\n'), ((1490, 1513), 'Pyro4.Proxy', 'Pyro4.Proxy', (['server_uri'], {}), '(server_uri)\n', (1501, 1513), False, 'import Pyro4\n')] |
import unittest
from kleat.hexamer.search import plus_search, minus_search, search
from kleat.hexamer.hexamer import extract_seq
class TestSearchHexamer(unittest.TestCase):
    """Unit tests for the strand-aware poly(A)-signal hexamer search helpers.

    Expected results are ``(hexamer, strength-score, 1-based coordinate)``
    tuples, or ``None`` when no known hexamer is found in the sequence.
    """
    def test_plus_search(self):
        self.assertEqual(plus_search('GGGAATAAAG', 9), ('AATAAA', 16, 3))
        self.assertEqual(plus_search('GGGAATAAA', 9), ('AATAAA', 16, 4))
        self.assertEqual(plus_search('GGGAATAAAGG', 9), ('AATAAA', 16, 2))
        self.assertEqual(plus_search('GGGATTAAAGG', 9), ('ATTAAA', 15, 2))
        self.assertEqual(plus_search('GGGAATAA', 9), None)
        self.assertEqual(plus_search('GAATAAAC', 10), ('AATAAA', 16, 4))
        self.assertEqual(plus_search('GGGGCTAC', 20), ('GGGGCT', 1, 13))
        self.assertEqual(plus_search('GTTTATTC', 6), None)
    def test_plus_search_lowercase(self):
        # Search must be case-insensitive.
        seq = 'GAATaaaC'
        #      4567890
        #           1
        self.assertEqual(plus_search(seq, 10), ('AATAAA', 16, 4))
    def test_plus_search_take_right_most_hexamer(self):
        self.assertEqual(plus_search('CAATAAANAATAAAC', 200), ('AATAAA', 16, 194))
    def test_plus_search_take_right_most_hexamer_with_Ns(self):
        self.assertEqual(plus_search('GCATTAAAAATNAAC', 200), ('ATTAAA', 15, 188))
    def test_plus_search_take_the_strongest_hexamer(self):
        # AATAAA (16) outranks ATTAAA (15) even when ATTAAA is further right.
        self.assertEqual(plus_search('GCAATAAAATTAAAC', 200), ('AATAAA', 16, 188))
    def test_minus_search(self):
        seq = 'ATTTATTCCC'
        #      90123456789  <- one coord
        #      1            <- ten coord
        self.assertEqual(minus_search(seq, 9), ('AATAAA', 16, 15))
        seq = 'ATTTAATCCC'
        #      90123456789  <- one coord
        #      1            <- ten coord
        self.assertEqual(minus_search(seq, 9), ('ATTAAA', 15, 15))
        self.assertEqual(minus_search('GTTTATTC', 1), ('AATAAA', 16, 7))
        self.assertEqual(minus_search('ATCGTATATTGC', 5), ('AATATA', 10, 14))
    def test_minus_search_lowercase(self):
        self.assertEqual(minus_search('GTttattc', 1), ('AATAAA', 16, 7))
    def test_minus_search_take_left_most_hexamer(self):
        self.assertEqual(minus_search('GTTTATTTTTATTCG', 10), ('AATAAA', 16, 16))
    def test_minus_search_take_left_most_hexamer_with_Ns(self):
        self.assertEqual(minus_search('GTTTATTNTTTATTNNNTGTATTCG', 10), ('AATAAA', 16, 16))
    def test_minus_search_take_the_strongest_hexamer(self):
        self.assertEqual(minus_search('GTTTAATNTTTATTNNNTGTATTCG', 20), ('AATAAA', 16, 33))
    def test_minus_search_take_the_strongest_hexamer_in_lower_case(self):
        self.assertEqual(minus_search('gtttaatntttattnnntgtattcg', 20), ('AATAAA', 16, 33))
class TestSearch(unittest.TestCase):
    """Tests for the strand-dispatching ``search`` wrapper."""
    def test_plus_strand(self):
        """
        CaataaaGT
        0123456789 <-genome coord
         |      |
        PAS    clv
        """
        seq = 'CaataaaGT'
        clv = 9
        self.assertEqual(search('+', clv, seq, 50), ('AATAAA', 16, 2))
    def test_minus_strand(self):
        """
        GGTTTATT
        0123456789 <-genome coord
         |  |
        clv PAS
        """
        seq = 'GGTTTATT'
        clv = 1
        self.assertEqual(search('-', clv, seq, 50), ('AATAAA', 16, 8))
# Good drawing example, utilize them later
# def test_extract_seq_where_for_plus_strand_clv_supported_by_suffix():
# """
# AATAAA AA <-tail of suffix contig
# ACGG┘||||└CGGCC┘ <-suffix contig
# 0123456789012345 <-contig coord
# 1 |
# ...7890123456789012... <-genome coord
# 1 2|
# ^ref_clv
# """
# clv = 11
# strand = '+'
# contig = MagicMock()
# contig.query_sequence = 'ACGGAATAAACGGCCAA'
# contig.cigartuples = ((S.BAM_CMATCH, 15), (S.BAM_CSOFT_CLIP, 2))
# ref_fa = MagicMock()
# assert extract_seq(contig, strand, clv, ref_fa) == 'ACGGAATAAACGGCC'
# def test_extract_seq_where_for_minus_strand_clv_supported_by_suffix():
# """
# TTT TTTATT <-tail of suffix contig
# └AC┘||||└CGGC <-suffix contig
# 012345678901 <-contig coord
# | 1
# ...890123456789... <-genome coord
# | 1
# ^ref_clv
# """
# clv = 11
# strand = '+'
# contig = MagicMock()
# contig.query_sequence = 'TTACTTTATTCGC'
# contig.cigartuples = ((S.BAM_CMATCH, 15), (S.BAM_CSOFT_CLIP, 2))
# ref_fa = MagicMock()
# assert extract_seq(contig, strand, clv, ref_fa) == 'ACTTTATTCGC'
| [
"kleat.hexamer.search.search",
"kleat.hexamer.search.minus_search",
"kleat.hexamer.search.plus_search"
] | [((233, 261), 'kleat.hexamer.search.plus_search', 'plus_search', (['"""GGGAATAAAG"""', '(9)'], {}), "('GGGAATAAAG', 9)\n", (244, 261), False, 'from kleat.hexamer.search import plus_search, minus_search, search\n'), ((307, 334), 'kleat.hexamer.search.plus_search', 'plus_search', (['"""GGGAATAAA"""', '(9)'], {}), "('GGGAATAAA', 9)\n", (318, 334), False, 'from kleat.hexamer.search import plus_search, minus_search, search\n'), ((380, 409), 'kleat.hexamer.search.plus_search', 'plus_search', (['"""GGGAATAAAGG"""', '(9)'], {}), "('GGGAATAAAGG', 9)\n", (391, 409), False, 'from kleat.hexamer.search import plus_search, minus_search, search\n'), ((455, 484), 'kleat.hexamer.search.plus_search', 'plus_search', (['"""GGGATTAAAGG"""', '(9)'], {}), "('GGGATTAAAGG', 9)\n", (466, 484), False, 'from kleat.hexamer.search import plus_search, minus_search, search\n'), ((530, 556), 'kleat.hexamer.search.plus_search', 'plus_search', (['"""GGGAATAA"""', '(9)'], {}), "('GGGAATAA', 9)\n", (541, 556), False, 'from kleat.hexamer.search import plus_search, minus_search, search\n'), ((590, 617), 'kleat.hexamer.search.plus_search', 'plus_search', (['"""GAATAAAC"""', '(10)'], {}), "('GAATAAAC', 10)\n", (601, 617), False, 'from kleat.hexamer.search import plus_search, minus_search, search\n'), ((663, 690), 'kleat.hexamer.search.plus_search', 'plus_search', (['"""GGGGCTAC"""', '(20)'], {}), "('GGGGCTAC', 20)\n", (674, 690), False, 'from kleat.hexamer.search import plus_search, minus_search, search\n'), ((736, 762), 'kleat.hexamer.search.plus_search', 'plus_search', (['"""GTTTATTC"""', '(6)'], {}), "('GTTTATTC', 6)\n", (747, 762), False, 'from kleat.hexamer.search import plus_search, minus_search, search\n'), ((911, 931), 'kleat.hexamer.search.plus_search', 'plus_search', (['seq', '(10)'], {}), '(seq, 10)\n', (922, 931), False, 'from kleat.hexamer.search import plus_search, minus_search, search\n'), ((1034, 1069), 'kleat.hexamer.search.plus_search', 'plus_search', (['"""CAATAAANAATAAAC"""', 
'(200)'], {}), "('CAATAAANAATAAAC', 200)\n", (1045, 1069), False, 'from kleat.hexamer.search import plus_search, minus_search, search\n'), ((1182, 1217), 'kleat.hexamer.search.plus_search', 'plus_search', (['"""GCATTAAAAATNAAC"""', '(200)'], {}), "('GCATTAAAAATNAAC', 200)\n", (1193, 1217), False, 'from kleat.hexamer.search import plus_search, minus_search, search\n'), ((1325, 1360), 'kleat.hexamer.search.plus_search', 'plus_search', (['"""GCAATAAAATTAAAC"""', '(200)'], {}), "('GCAATAAAATTAAAC', 200)\n", (1336, 1360), False, 'from kleat.hexamer.search import plus_search, minus_search, search\n'), ((1549, 1569), 'kleat.hexamer.search.minus_search', 'minus_search', (['seq', '(9)'], {}), '(seq, 9)\n', (1561, 1569), False, 'from kleat.hexamer.search import plus_search, minus_search, search\n'), ((1723, 1743), 'kleat.hexamer.search.minus_search', 'minus_search', (['seq', '(9)'], {}), '(seq, 9)\n', (1735, 1743), False, 'from kleat.hexamer.search import plus_search, minus_search, search\n'), ((1790, 1817), 'kleat.hexamer.search.minus_search', 'minus_search', (['"""GTTTATTC"""', '(1)'], {}), "('GTTTATTC', 1)\n", (1802, 1817), False, 'from kleat.hexamer.search import plus_search, minus_search, search\n'), ((1863, 1894), 'kleat.hexamer.search.minus_search', 'minus_search', (['"""ATCGTATATTGC"""', '(5)'], {}), "('ATCGTATATTGC', 5)\n", (1875, 1894), False, 'from kleat.hexamer.search import plus_search, minus_search, search\n'), ((1985, 2012), 'kleat.hexamer.search.minus_search', 'minus_search', (['"""GTttattc"""', '(1)'], {}), "('GTttattc', 1)\n", (1997, 2012), False, 'from kleat.hexamer.search import plus_search, minus_search, search\n'), ((2115, 2150), 'kleat.hexamer.search.minus_search', 'minus_search', (['"""GTTTATTTTTATTCG"""', '(10)'], {}), "('GTTTATTTTTATTCG', 10)\n", (2127, 2150), False, 'from kleat.hexamer.search import plus_search, minus_search, search\n'), ((2262, 2307), 'kleat.hexamer.search.minus_search', 'minus_search', (['"""GTTTATTNTTTATTNNNTGTATTCG"""', 
'(10)'], {}), "('GTTTATTNTTTATTNNNTGTATTCG', 10)\n", (2274, 2307), False, 'from kleat.hexamer.search import plus_search, minus_search, search\n'), ((2415, 2460), 'kleat.hexamer.search.minus_search', 'minus_search', (['"""GTTTAATNTTTATTNNNTGTATTCG"""', '(20)'], {}), "('GTTTAATNTTTATTNNNTGTATTCG', 20)\n", (2427, 2460), False, 'from kleat.hexamer.search import plus_search, minus_search, search\n'), ((2582, 2627), 'kleat.hexamer.search.minus_search', 'minus_search', (['"""gtttaatntttattnnntgtattcg"""', '(20)'], {}), "('gtttaatntttattnnntgtattcg', 20)\n", (2594, 2627), False, 'from kleat.hexamer.search import plus_search, minus_search, search\n'), ((2904, 2929), 'kleat.hexamer.search.search', 'search', (['"""+"""', 'clv', 'seq', '(50)'], {}), "('+', clv, seq, 50)\n", (2910, 2929), False, 'from kleat.hexamer.search import plus_search, minus_search, search\n'), ((3164, 3189), 'kleat.hexamer.search.search', 'search', (['"""-"""', 'clv', 'seq', '(50)'], {}), "('-', clv, seq, 50)\n", (3170, 3189), False, 'from kleat.hexamer.search import plus_search, minus_search, search\n')] |
import nuke
def delete_pt():
    """Knob callback for a per-point "Delete" button: remove the adjacent point.

    Shifts every later point's value down one slot, zeroes the freed last
    slot, hides the trailing row of knobs and decrements the 'Max PTS'
    counter. Refuses to drop below 2 points.
    """
    max_pts = int(nuke.thisNode().knob('Max PTS').value()) - 1
    if max_pts < 2:
        nuke.message('Minimum 2 points')
        return
    # Knob names are 'delete<N>'; strip the 6-char prefix to get the index.
    pt_num = int(nuke.thisKnob().name()[6:])
    node = nuke.thisNode()
    # Shuffle later point values down one position.
    # range (not Python-2-only xrange): works on both Python 2 and the
    # Python 3 interpreter shipped with modern Nuke releases.
    for pt in range(pt_num, max_pts):
        knob_name = 'pt' + str(pt)
        next_knob = 'pt' + str(pt + 1)
        next_value = node.knob(next_knob).value()
        node.knob(knob_name).setValue(next_value)
    node.knob('pt' + str(max_pts)).setValue([0, 0])
    # Hide the now-unused trailing row of knobs.
    for name in ('pt', 'delete', 'insert'):
        node.knobs()[name + str(max_pts)].setVisible(False)
    node.knob('Max PTS').setValue(max_pts)
def insert_pt():
    """Knob callback for a per-point "Insert" button: add a point before the
    adjacent one, placed at the midpoint of its two neighbours.

    Shifts every value from the insertion index upwards by one slot, writes
    the midpoint into the freed slot, reveals the next hidden row of knobs
    and increments 'Max PTS'. Refuses to exceed the 'Max Limit' knob value.
    """
    max_pts = int(nuke.thisNode().knob('Max PTS').value())
    MAX_POINTS = int(nuke.thisNode().knob('Max Limit').value())
    if max_pts >= MAX_POINTS:
        nuke.message('Maximum %i points' % (MAX_POINTS))
        return
    # Knob names are 'insert<N>'; strip the 6-char prefix to get the index.
    pt_num = int(nuke.thisKnob().name()[6:])
    node = nuke.thisNode()
    # Shuffle values upwards.
    # range (not Python-2-only xrange): works on both Python 2 and the
    # Python 3 interpreter shipped with modern Nuke releases.
    for pt in range(max_pts, pt_num, -1):
        knob_name = 'pt' + str(pt)
        prev_knob = 'pt' + str(pt - 1)
        prev_value = node.knob(prev_knob).value()
        node.knob(knob_name).setValue(prev_value)
    # Set new position to midpoint of adjacent points; the first point's
    # predecessor is the 'Start' knob.
    if pt_num > 1:
        ptA = node.knob('pt' + str(pt_num - 1)).value()
    else:
        ptA = node.knob('Start').value()
    ptB = node.knob('pt' + str(pt_num + 1)).value()
    midpoint = [sum(x) / 2 for x in zip(ptA, ptB)]
    node.knob('pt' + str(pt_num)).setValue(midpoint)
    # Reveal next row of knobs.
    for name in ('pt', 'delete', 'insert'):
        node.knobs()[name + str(max_pts)].setVisible(True)
    node.knob('Max PTS').setValue(max_pts + 1)
def add_pt():
    """Knob callback for the "Add" button: reveal one more point row at the
    end and bump the 'Max PTS' counter, refusing to exceed 'Max Limit'.
    """
    current = int(nuke.thisNode().knob('Max PTS').value())
    limit = int(nuke.thisNode().knob('Max Limit').value())
    if current >= limit:
        nuke.message('Maximum %i points' % (limit))
        return
    node = nuke.thisNode()
    for prefix in ('pt', 'delete', 'insert'):
        hidden_knob = node.knobs()[prefix + str(current)]
        hidden_knob.setVisible(True)
    node.knob('Max PTS').setValue(current + 1)
def initialiseNode(node, max_num=4):
    """One-off setup: rebuild the node's point knobs into a 'Points' tab.

    Moves the 'Start' knob and every 'pt<N>' knob onto a new tab, pairs each
    point with Delete/Insert buttons wired to the callbacks in this module,
    hides rows beyond ``max_num``, and appends a trailing 'Add' button.

    Args:
        node: the Nuke node to initialise — assumed to already carry
            'Start', 'pt<N>', 'Max PTS' and 'Max Limit' knobs (TODO confirm
            against the gizmo/group that calls this).
        max_num (int): number of point rows initially visible.
    """
    node.knob(node.name()).setLabel('Appearance')
    # Collect the point knobs in numeric order ('pt2' before 'pt10').
    knob_names = [x for x in node.knobs().keys() if x.startswith('pt')]
    knob_names.sort(key=lambda x: int(x[2:]))
    # Add new Tab for points
    start_knob = node.knobs()['Start']
    node.removeKnob(start_knob)
    node.addKnob(nuke.Tab_Knob('Points'))
    text = "Insert adds a point between its adjacent and previous point\nDelete removes the adjacent point\nAdd adds a point at the end"
    node.addKnob(nuke.Text_Knob('info', '', text))
    node.addKnob(nuke.Text_Knob('', ''))
    node.addKnob(start_knob)
    # Remove and store all pt knobs (re-adding them later places them on the
    # new tab, after the 'Start' knob).
    knobs = []
    for name in knob_names:
        knob = node.knobs()[name]
        knobs.append(knob)
        node.removeKnob(knob)
    # Add each back along with their delete and insert buttons
    for knob in knobs:
        num = knob.name()[2:]
        delete = nuke.PyScript_Knob('delete' + num, 'Delete', "Lines_Callbacks.delete_pt()")
        insert = nuke.PyScript_Knob('insert' + num, 'Insert', "Lines_Callbacks.insert_pt()")
        # Hide knobs greater than the max value
        if int(num) >= max_num:
            knob.setVisible(False)
            delete.setVisible(False)
            insert.setVisible(False)
        node.addKnob(knob)
        node.addKnob(insert)
        node.addKnob(delete)
    # Add the Add knob
    add_knob = nuke.PyScript_Knob('add_pt', 'Add', "Lines_Callbacks.add_pt()")
    add_knob.setFlag(nuke.STARTLINE)
    node.addKnob(add_knob)
    node.knob('Max PTS').setValue(max_num)
    # Bookkeeping knobs are internal state only; keep them out of the UI.
    node.knobs()['Max PTS'].setVisible(False)
    node.knobs()['Max Limit'].setVisible(False)
| [
"nuke.message",
"nuke.Text_Knob",
"nuke.PyScript_Knob",
"nuke.thisNode",
"nuke.thisKnob",
"nuke.Tab_Knob"
] | [((228, 243), 'nuke.thisNode', 'nuke.thisNode', ([], {}), '()\n', (241, 243), False, 'import nuke\n'), ((962, 977), 'nuke.thisNode', 'nuke.thisNode', ([], {}), '()\n', (975, 977), False, 'import nuke\n'), ((1991, 2006), 'nuke.thisNode', 'nuke.thisNode', ([], {}), '()\n', (2004, 2006), False, 'import nuke\n'), ((3559, 3622), 'nuke.PyScript_Knob', 'nuke.PyScript_Knob', (['"""add_pt"""', '"""Add"""', '"""Lines_Callbacks.add_pt()"""'], {}), "('add_pt', 'Add', 'Lines_Callbacks.add_pt()')\n", (3577, 3622), False, 'import nuke\n'), ((123, 155), 'nuke.message', 'nuke.message', (['"""Minimum 2 points"""'], {}), "('Minimum 2 points')\n", (135, 155), False, 'import nuke\n'), ((841, 887), 'nuke.message', 'nuke.message', (["('Maximum %i points' % MAX_POINTS)"], {}), "('Maximum %i points' % MAX_POINTS)\n", (853, 887), False, 'import nuke\n'), ((1915, 1961), 'nuke.message', 'nuke.message', (["('Maximum %i points' % MAX_POINTS)"], {}), "('Maximum %i points' % MAX_POINTS)\n", (1927, 1961), False, 'import nuke\n'), ((2486, 2509), 'nuke.Tab_Knob', 'nuke.Tab_Knob', (['"""Points"""'], {}), "('Points')\n", (2499, 2509), False, 'import nuke\n'), ((2665, 2697), 'nuke.Text_Knob', 'nuke.Text_Knob', (['"""info"""', '""""""', 'text'], {}), "('info', '', text)\n", (2679, 2697), False, 'import nuke\n'), ((2716, 2738), 'nuke.Text_Knob', 'nuke.Text_Knob', (['""""""', '""""""'], {}), "('', '')\n", (2730, 2738), False, 'import nuke\n'), ((3075, 3150), 'nuke.PyScript_Knob', 'nuke.PyScript_Knob', (["('delete' + num)", '"""Delete"""', '"""Lines_Callbacks.delete_pt()"""'], {}), "('delete' + num, 'Delete', 'Lines_Callbacks.delete_pt()')\n", (3093, 3150), False, 'import nuke\n'), ((3168, 3243), 'nuke.PyScript_Knob', 'nuke.PyScript_Knob', (["('insert' + num)", '"""Insert"""', '"""Lines_Callbacks.insert_pt()"""'], {}), "('insert' + num, 'Insert', 'Lines_Callbacks.insert_pt()')\n", (3186, 3243), False, 'import nuke\n'), ((189, 204), 'nuke.thisKnob', 'nuke.thisKnob', ([], {}), '()\n', (202, 204), False, 
'import nuke\n'), ((923, 938), 'nuke.thisKnob', 'nuke.thisKnob', ([], {}), '()\n', (936, 938), False, 'import nuke\n'), ((697, 712), 'nuke.thisNode', 'nuke.thisNode', ([], {}), '()\n', (710, 712), False, 'import nuke\n'), ((759, 774), 'nuke.thisNode', 'nuke.thisNode', ([], {}), '()\n', (772, 774), False, 'import nuke\n'), ((1771, 1786), 'nuke.thisNode', 'nuke.thisNode', ([], {}), '()\n', (1784, 1786), False, 'import nuke\n'), ((1833, 1848), 'nuke.thisNode', 'nuke.thisNode', ([], {}), '()\n', (1846, 1848), False, 'import nuke\n'), ((49, 64), 'nuke.thisNode', 'nuke.thisNode', ([], {}), '()\n', (62, 64), False, 'import nuke\n')] |
import json
from django.core.management.base import BaseCommand
from 臺灣言語平臺.正規化團隊模型 import 正規化sheet表
from django.conf import settings
class Command(BaseCommand):
    # Management command that registers a Google Sheet with the
    # normalisation-team table (正規化sheet表).
    # help text (Chinese): "add the sheet's JSON".
    help = '加sheet的json'
    def add_arguments(self, parser):
        """Declare the command's two positional CLI arguments."""
        # '服務帳戶json' = path to the service-account JSON key file
        # downloaded from the Google developers console.
        parser.add_argument(
            '服務帳戶json',
            type=str,
            help='google developers console下載的服務帳戶json'
        )
        # '網址' = URL of the Google Sheet to register.
        parser.add_argument(
            '網址',
            type=str,
            help='google sheet的網址'
        )
    def handle(self, *args, **參數):
        """Read the service-account key, register the sheet for the
        configured mother tongue, then remind the operator to share the
        sheet ("Can edit") with the service account's client_email.
        """
        with open(參數['服務帳戶json']) as 檔案:
            服務帳戶資料 = json.load(檔案)
        正規化sheet表.加sheet(
            語言腔口=settings.MOTHER_TONGUE,
            key_file_name=參數['服務帳戶json'],
            url=參數['網址'],
        )
        self.stdout.write(
            '愛記得到「Google Sheets右上角的Share」裡分享「Can edit」的權限予 {} 喲!!'.format(
                服務帳戶資料['client_email']
            )
        )
| [
"json.load",
"臺灣言語平臺.正規化團隊模型.正規化sheet表.加sheet"
] | [((605, 698), '臺灣言語平臺.正規化團隊模型.正規化sheet表.加sheet', '正規化sheet表.加sheet', ([], {'語言腔口': 'settings.MOTHER_TONGUE', 'key_file_name': "參數['服務帳戶json']", 'url': "參數['網址']"}), "(語言腔口=settings.MOTHER_TONGUE, key_file_name=參數['服務帳戶json'],\n url=參數['網址'])\n", (621, 698), False, 'from 臺灣言語平臺.正規化團隊模型 import 正規化sheet表\n'), ((595, 608), 'json.load', 'json.load', (['檔案'], {}), '(檔案)\n', (604, 608), False, 'import json\n')] |
"""
Flask-GoogleLogin
"""
from base64 import (urlsafe_b64encode as b64encode,
urlsafe_b64decode as b64decode)
from urllib import urlencode
from urlparse import parse_qsl
from functools import wraps
from flask import request, redirect, abort, current_app, url_for
from flask_login import LoginManager, make_secure_token
import requests
GOOGLE_OAUTH2_AUTH_URL = 'https://accounts.google.com/o/oauth2/auth'
GOOGLE_OAUTH2_TOKEN_URL = 'https://accounts.google.com/o/oauth2/token'
GOOGLE_OAUTH2_USERINFO_URL = 'https://www.googleapis.com/oauth2/v2/userinfo'
USERINFO_EMAIL_SCOPE = 'https://www.googleapis.com/auth/userinfo.email'
USERINFO_PROFILE_SCOPE = 'https://www.googleapis.com/auth/userinfo.profile'
class GoogleLogin(object):
    """
    Main extension class.

    Wires Flask-Login to Google's OAuth2 endpoints: builds the
    authorization URL, verifies the signed ``state`` parameter on the
    callback, exchanges the code for tokens and fetches the user's profile.
    """
    def __init__(self, app=None, login_manager=None):
        # Reuse an externally managed LoginManager when provided,
        # otherwise own a fresh one.
        if login_manager:
            self.login_manager = login_manager
        else:
            self.login_manager = LoginManager()
        if app:
            self._app = app
            self.init_app(app)
    def init_app(self, app, add_context_processor=True):
        """
        Initialize with app configuration
        """
        # Check if login manager has been initialized
        if not hasattr(app, 'login_manager'):
            self.login_manager.init_app(
                app,
                add_context_processor=add_context_processor)
        # Clear flashed messages since we redirect to auth immediately
        self.login_manager.login_message = None
        self.login_manager.needs_refresh_message = None
        # Set default unauthorized callback
        self.login_manager.unauthorized_handler(self.unauthorized_callback)
    @property
    def app(self):
        # Fall back to Flask's current_app when no app was bound at init time.
        return getattr(self, '_app', current_app)
    @property
    def scopes(self):
        # Comma-separated OAuth scopes from config (may be empty).
        return self.app.config.get('GOOGLE_LOGIN_SCOPES', '')
    @property
    def client_id(self):
        return self.app.config['GOOGLE_LOGIN_CLIENT_ID']
    @property
    def client_secret(self):
        return self.app.config['GOOGLE_LOGIN_CLIENT_SECRET']
    @property
    def redirect_uri(self):
        return self.app.config.get('GOOGLE_LOGIN_REDIRECT_URI')
    @property
    def redirect_scheme(self):
        return self.app.config.get('GOOGLE_LOGIN_REDIRECT_SCHEME', 'http')
    def sign_params(self, params):
        # Tamper-proof the params by embedding a keyed token ('sig') before
        # base64-encoding them into the OAuth 'state' value.
        return b64encode(urlencode(dict(sig=make_secure_token(**params),
                                        **params)))
    def parse_state(self, state):
        # Inverse of sign_params: decode 'state' back into a dict
        # (including the 'sig' entry, which the callback verifies).
        return dict(parse_qsl(b64decode(str(state))))
    def login_url(self, params=None, **kwargs):
        """
        Return login url with params encoded in state

        Available Google auth server params:
        response_type: code, token
        prompt: none, select_account, consent
        approval_prompt: force, auto
        access_type: online, offline
        scopes: string (separated with commas) or list
        redirect_uri: string
        login_hint: string
        """
        kwargs.setdefault('response_type', 'code')
        kwargs.setdefault('access_type', 'online')
        # 'prompt' supersedes the legacy 'approval_prompt'; never send both.
        if 'prompt' not in kwargs:
            kwargs.setdefault('approval_prompt', 'auto')
        # The profile scope is always required so get_userinfo() works later.
        scopes = kwargs.pop('scopes', self.scopes.split(','))
        if USERINFO_PROFILE_SCOPE not in scopes:
            scopes.append(USERINFO_PROFILE_SCOPE)
        redirect_uri = kwargs.pop('redirect_uri', self.redirect_uri)
        state = self.sign_params(params or {})
        return GOOGLE_OAUTH2_AUTH_URL + '?' + urlencode(
            dict(client_id=self.client_id,
                 scope=' '.join(scopes),
                 redirect_uri=redirect_uri,
                 state=state,
                 **kwargs))
    def unauthorized_callback(self):
        """
        Redirect to login url with next param set as request.url
        """
        return redirect(self.login_url(params=dict(next=request.url)))
    def exchange_code(self, code, redirect_uri):
        """
        Exchanges code for token/s

        Aborts with HTTP 400 when Google rejects the exchange.
        """
        token = requests.post(GOOGLE_OAUTH2_TOKEN_URL, data=dict(
            code=code,
            redirect_uri=redirect_uri,
            grant_type='authorization_code',
            client_id=self.client_id,
            client_secret=self.client_secret,
            )).json()
        if not token or token.get('error'):
            abort(400)
        return token
    def get_userinfo(self, access_token):
        # Fetch the Google profile for the token; 400 on any API error.
        userinfo = requests.get(GOOGLE_OAUTH2_USERINFO_URL, params=dict(
            access_token=access_token,
            )).json()
        if not userinfo or userinfo.get('error'):
            abort(400)
        return userinfo
    def get_access_token(self, refresh_token):
        """
        Use a refresh token to obtain a new access token

        Returns None (rather than aborting) when the refresh fails.
        """
        token = requests.post(GOOGLE_OAUTH2_TOKEN_URL, data=dict(
            refresh_token=refresh_token,
            grant_type='refresh_token',
            client_id=self.client_id,
            client_secret=self.client_secret,
            )).json()
        if not token or token.get('error'):
            return
        return token
    def oauth2callback(self, view_func):
        """
        Decorator for OAuth2 callback. Calls `GoogleLogin.login` then
        passes results to `view_func`.
        """
        @wraps(view_func)
        def decorated(*args, **kwargs):
            params = {}
            # Check sig: reject any state whose embedded token does not
            # match a re-computation over the remaining params.
            if 'state' in request.args:
                params.update(**self.parse_state(request.args.get('state')))
                if params.pop('sig', None) != make_secure_token(**params):
                    return self.login_manager.unauthorized()
            code = request.args.get('code')
            # Web server flow
            if code:
                token = self.exchange_code(
                    code,
                    url_for(
                        request.endpoint,
                        _external=True,
                        _scheme=self.redirect_scheme,
                    ),
                )
                userinfo = self.get_userinfo(token['access_token'])
                params.update(token=token, userinfo=userinfo)
            # Browser flow
            else:
                if params:
                    params.update(dict(request.args.items()))
                else:
                    # Token arrived in the URL fragment, which the server
                    # never sees; this snippet reloads with it as a query
                    # string so the branch above can pick it up.
                    return '''
                    <script>
                      window.onload = function() {
                        location.href = '?' + window.location.hash.substr(1);
                      };
                    </script>
                    '''
            return view_func(**params)
        return decorated
    def user_loader(self, func):
        """
        Shortcut for `login_manager`'s `flask_login.LoginManager.user_loader`
        """
        self.login_manager.user_loader(func)
| [
"flask_login.LoginManager",
"flask.request.args.get",
"flask_login.make_secure_token",
"flask.request.args.items",
"functools.wraps",
"flask.url_for",
"flask.abort"
] | [((5317, 5333), 'functools.wraps', 'wraps', (['view_func'], {}), '(view_func)\n', (5322, 5333), False, 'from functools import wraps\n'), ((970, 984), 'flask_login.LoginManager', 'LoginManager', ([], {}), '()\n', (982, 984), False, 'from flask_login import LoginManager, make_secure_token\n'), ((4365, 4375), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (4370, 4375), False, 'from flask import request, redirect, abort, current_app, url_for\n'), ((4632, 4642), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (4637, 4642), False, 'from flask import request, redirect, abort, current_app, url_for\n'), ((5696, 5720), 'flask.request.args.get', 'request.args.get', (['"""code"""'], {}), "('code')\n", (5712, 5720), False, 'from flask import request, redirect, abort, current_app, url_for\n'), ((5586, 5613), 'flask_login.make_secure_token', 'make_secure_token', ([], {}), '(**params)\n', (5603, 5613), False, 'from flask_login import LoginManager, make_secure_token\n'), ((5863, 5934), 'flask.url_for', 'url_for', (['request.endpoint'], {'_external': '(True)', '_scheme': 'self.redirect_scheme'}), '(request.endpoint, _external=True, _scheme=self.redirect_scheme)\n', (5870, 5934), False, 'from flask import request, redirect, abort, current_app, url_for\n'), ((2399, 2426), 'flask_login.make_secure_token', 'make_secure_token', ([], {}), '(**params)\n', (2416, 2426), False, 'from flask_login import LoginManager, make_secure_token\n'), ((5512, 5537), 'flask.request.args.get', 'request.args.get', (['"""state"""'], {}), "('state')\n", (5528, 5537), False, 'from flask import request, redirect, abort, current_app, url_for\n'), ((6291, 6311), 'flask.request.args.items', 'request.args.items', ([], {}), '()\n', (6309, 6311), False, 'from flask import request, redirect, abort, current_app, url_for\n')] |
"""Test agrirouter/environments/environments.py"""
from agrirouter.environments.environments import ProductionEnvironment as PE
from agrirouter.environments.environments import QAEnvironment as QAE
from tests.constants import application_id
class TestPE:
    """Checks that the production environment's URL helpers agree with the
    class constants (and, for the authorization URL, with the literal
    production endpoint)."""
    def test_get_base_url(self):
        assert PE().get_base_url() == PE._ENV_BASE_URL
    def test_get_api_prefix(self):
        assert PE().get_api_prefix() == PE._API_PREFIX
    def test_get_registration_service_url(self):
        assert PE().get_registration_service_url() == PE._REGISTRATION_SERVICE_URL
    def test_get_onboard_url(self):
        onb_url = PE._REGISTRATION_SERVICE_URL + PE._API_PREFIX + "/registration/onboard"
        assert PE().get_onboard_url() == onb_url
    def test_get_secured_onboard_url(self):
        onb_url = PE._REGISTRATION_SERVICE_URL + PE._API_PREFIX + "/registration/onboard/request"
        assert PE().get_secured_onboard_url() == onb_url
    def test_get_verify_onboard_request_url(self):
        req_url = PE._REGISTRATION_SERVICE_URL + PE._API_PREFIX + "/registration/onboard/verify"
        assert PE().get_verify_onboard_request_url() == req_url
    def test_get_revoke_url(self):
        rev_url = PE._REGISTRATION_SERVICE_URL + PE._API_PREFIX + "/registration/onboard/revoke"
        assert PE().get_revoke_url() == rev_url
    def test_get_agrirouter_login_url(self):
        login_url = PE._ENV_BASE_URL + PE._AGRIROUTER_LOGIN_URL
        assert PE().get_agrirouter_login_url() == login_url
    def test_get_secured_onboarding_authorization_url(self):
        # Pins the *literal* production URL, not the class template, so a
        # template change that alters the public endpoint fails this test.
        redirect_uri = "www.my_redirect.com"
        response_type = "response_type"
        assert PE().get_secured_onboarding_authorization_url(
            application_id, response_type, "state", redirect_uri
        ) == "https://goto.my-agrirouter.com/application/{application_id}/authorize?response_type={response_type}&state={state}".format(  # noqa
            application_id=application_id,
            response_type=response_type,
            state="state") + f"&redirect_uri={redirect_uri}"
    def test_get_mqtt_server_url(self):
        assert PE().get_mqtt_server_url(
            "localhost", "5000"
        ) == PE._MQTT_URL_TEMPLATE.format(
            host="localhost", port="5000"
        )
    def test_get_env_public_key(self):
        assert PE().get_env_public_key() == PE.AR_PUBLIC_KEY
class TestQAE:
    """Checks that the QA environment's URL helpers agree with the class
    constants."""

    def test_get_base_url(self):
        env = QAE()
        assert env.get_base_url() == QAE._ENV_BASE_URL

    def test_get_api_prefix(self):
        env = QAE()
        assert env.get_api_prefix() == QAE._API_PREFIX

    def test_get_registration_service_url(self):
        env = QAE()
        assert env.get_registration_service_url() == QAE._REGISTRATION_SERVICE_URL

    def test_get_onboard_url(self):
        expected = QAE._REGISTRATION_SERVICE_URL + QAE._API_PREFIX + "/registration/onboard"
        assert QAE().get_onboard_url() == expected

    def test_get_secured_onboard_url(self):
        expected = QAE._REGISTRATION_SERVICE_URL + QAE._API_PREFIX + "/registration/onboard/request"
        assert QAE().get_secured_onboard_url() == expected

    def test_get_verify_onboard_request_url(self):
        expected = QAE._REGISTRATION_SERVICE_URL + QAE._API_PREFIX + "/registration/onboard/verify"
        assert QAE().get_verify_onboard_request_url() == expected

    def test_get_revoke_url(self):
        expected = QAE._REGISTRATION_SERVICE_URL + QAE._API_PREFIX + "/registration/onboard/revoke"
        assert QAE().get_revoke_url() == expected

    def test_get_agrirouter_login_url(self):
        expected = QAE._ENV_BASE_URL + QAE._AGRIROUTER_LOGIN_URL
        assert QAE().get_agrirouter_login_url() == expected

    def test_get_secured_onboarding_authorization_url(self):
        redirect_uri = "www.my_redirect.com"
        response_type = "response_type"
        link = QAE._SECURED_ONBOARDING_AUTHORIZATION_LINK_TEMPLATE.format(
            application_id=application_id,
            response_type=response_type,
            state="state",
        )
        expected = QAE._ENV_BASE_URL + link + "&redirect_uri=" + redirect_uri
        actual = QAE().get_secured_onboarding_authorization_url(
            application_id, response_type, "state", redirect_uri
        )
        assert actual == expected

    def test_get_mqtt_server_url(self):
        expected = QAE._MQTT_URL_TEMPLATE.format(host="localhost", port="5000")
        assert QAE().get_mqtt_server_url("localhost", "5000") == expected

    def test_get_env_public_key(self):
        assert QAE().get_env_public_key() == QAE.AR_PUBLIC_KEY
| [
"agrirouter.environments.environments.ProductionEnvironment._MQTT_URL_TEMPLATE.format",
"agrirouter.environments.environments.QAEnvironment",
"agrirouter.environments.environments.QAEnvironment._MQTT_URL_TEMPLATE.format",
"agrirouter.environments.environments.ProductionEnvironment",
"agrirouter.environments... | [((2200, 2259), 'agrirouter.environments.environments.ProductionEnvironment._MQTT_URL_TEMPLATE.format', 'PE._MQTT_URL_TEMPLATE.format', ([], {'host': '"""localhost"""', 'port': '"""5000"""'}), "(host='localhost', port='5000')\n", (2228, 2259), True, 'from agrirouter.environments.environments import ProductionEnvironment as PE\n'), ((4314, 4374), 'agrirouter.environments.environments.QAEnvironment._MQTT_URL_TEMPLATE.format', 'QAE._MQTT_URL_TEMPLATE.format', ([], {'host': '"""localhost"""', 'port': '"""5000"""'}), "(host='localhost', port='5000')\n", (4343, 4374), True, 'from agrirouter.environments.environments import QAEnvironment as QAE\n'), ((306, 310), 'agrirouter.environments.environments.ProductionEnvironment', 'PE', ([], {}), '()\n', (308, 310), True, 'from agrirouter.environments.environments import ProductionEnvironment as PE\n'), ((397, 401), 'agrirouter.environments.environments.ProductionEnvironment', 'PE', ([], {}), '()\n', (399, 401), True, 'from agrirouter.environments.environments import ProductionEnvironment as PE\n'), ((502, 506), 'agrirouter.environments.environments.ProductionEnvironment', 'PE', ([], {}), '()\n', (504, 506), True, 'from agrirouter.environments.environments import ProductionEnvironment as PE\n'), ((712, 716), 'agrirouter.environments.environments.ProductionEnvironment', 'PE', ([], {}), '()\n', (714, 716), True, 'from agrirouter.environments.environments import ProductionEnvironment as PE\n'), ((904, 908), 'agrirouter.environments.environments.ProductionEnvironment', 'PE', ([], {}), '()\n', (906, 908), True, 'from agrirouter.environments.environments import ProductionEnvironment as PE\n'), ((1110, 1114), 'agrirouter.environments.environments.ProductionEnvironment', 'PE', ([], {}), '()\n', (1112, 1114), True, 'from agrirouter.environments.environments import ProductionEnvironment as PE\n'), ((1307, 1311), 'agrirouter.environments.environments.ProductionEnvironment', 'PE', ([], {}), '()\n', (1309, 1311), 
True, 'from agrirouter.environments.environments import ProductionEnvironment as PE\n'), ((1465, 1469), 'agrirouter.environments.environments.ProductionEnvironment', 'PE', ([], {}), '()\n', (1467, 1469), True, 'from agrirouter.environments.environments import ProductionEnvironment as PE\n'), ((1672, 1676), 'agrirouter.environments.environments.ProductionEnvironment', 'PE', ([], {}), '()\n', (1674, 1676), True, 'from agrirouter.environments.environments import ProductionEnvironment as PE\n'), ((2129, 2133), 'agrirouter.environments.environments.ProductionEnvironment', 'PE', ([], {}), '()\n', (2131, 2133), True, 'from agrirouter.environments.environments import ProductionEnvironment as PE\n'), ((2337, 2341), 'agrirouter.environments.environments.ProductionEnvironment', 'PE', ([], {}), '()\n', (2339, 2341), True, 'from agrirouter.environments.environments import ProductionEnvironment as PE\n'), ((2448, 2453), 'agrirouter.environments.environments.QAEnvironment', 'QAE', ([], {}), '()\n', (2451, 2453), True, 'from agrirouter.environments.environments import QAEnvironment as QAE\n'), ((2541, 2546), 'agrirouter.environments.environments.QAEnvironment', 'QAE', ([], {}), '()\n', (2544, 2546), True, 'from agrirouter.environments.environments import QAEnvironment as QAE\n'), ((2648, 2653), 'agrirouter.environments.environments.QAEnvironment', 'QAE', ([], {}), '()\n', (2651, 2653), True, 'from agrirouter.environments.environments import QAEnvironment as QAE\n'), ((2862, 2867), 'agrirouter.environments.environments.QAEnvironment', 'QAE', ([], {}), '()\n', (2865, 2867), True, 'from agrirouter.environments.environments import QAEnvironment as QAE\n'), ((3057, 3062), 'agrirouter.environments.environments.QAEnvironment', 'QAE', ([], {}), '()\n', (3060, 3062), True, 'from agrirouter.environments.environments import QAEnvironment as QAE\n'), ((3266, 3271), 'agrirouter.environments.environments.QAEnvironment', 'QAE', ([], {}), '()\n', (3269, 3271), True, 'from 
agrirouter.environments.environments import QAEnvironment as QAE\n'), ((3466, 3471), 'agrirouter.environments.environments.QAEnvironment', 'QAE', ([], {}), '()\n', (3469, 3471), True, 'from agrirouter.environments.environments import QAEnvironment as QAE\n'), ((3627, 3632), 'agrirouter.environments.environments.QAEnvironment', 'QAE', ([], {}), '()\n', (3630, 3632), True, 'from agrirouter.environments.environments import QAEnvironment as QAE\n'), ((3835, 3840), 'agrirouter.environments.environments.QAEnvironment', 'QAE', ([], {}), '()\n', (3838, 3840), True, 'from agrirouter.environments.environments import QAEnvironment as QAE\n'), ((3981, 4119), 'agrirouter.environments.environments.QAEnvironment._SECURED_ONBOARDING_AUTHORIZATION_LINK_TEMPLATE.format', 'QAE._SECURED_ONBOARDING_AUTHORIZATION_LINK_TEMPLATE.format', ([], {'application_id': 'application_id', 'response_type': 'response_type', 'state': '"""state"""'}), "(application_id=\n application_id, response_type=response_type, state='state')\n", (4039, 4119), True, 'from agrirouter.environments.environments import QAEnvironment as QAE\n'), ((4242, 4247), 'agrirouter.environments.environments.QAEnvironment', 'QAE', ([], {}), '()\n', (4245, 4247), True, 'from agrirouter.environments.environments import QAEnvironment as QAE\n'), ((4430, 4435), 'agrirouter.environments.environments.QAEnvironment', 'QAE', ([], {}), '()\n', (4433, 4435), True, 'from agrirouter.environments.environments import QAEnvironment as QAE\n')] |
#/bin/python3
import numpy as np
from scipy import signal as sig
class pySparSDRCompress():
    '''
    Implementation of the SparSDR Compressor based on
    <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2019, June. Sparsdr: Sparsity-proportional backhaul and compute for sdrs. In Proceedings of the 17th Annual International Conference on Mobile Systems, Applications, and Services (pp. 391-403).

    The input stream is cut into half-overlapped, Hann-windowed FFT frames;
    only bins whose magnitude exceeds a per-bin threshold are emitted, so the
    output size is proportional to the spectral sparsity of the input.
    '''
    def __init__(self, nfft=1024, thresholdVec=None):
        '''
        Initialize SparSDR Compressor
        :input: nfft :shouldBeEven: Number of bins in fft
        :input: thresholdVec :shape==(1,nfft): optional real-valued per-bin
                magnitude thresholds; defaults to zeros (keep all non-zero bins)
        '''
        assert not nfft % 2
        self.nfft = nfft
        self.nover = int(self.nfft / 2)
        # Periodic (sym=False) Hann window, shaped (nfft, 1) so it broadcasts
        # over the frame axis of an (nfft, k) matrix of frames.
        self.windowVec = sig.windows.hann(self.nfft, sym=False)
        self.windowVec = np.expand_dims(self.windowVec, axis=1)
        if thresholdVec is None:
            self.setThreshold(np.zeros((1, self.nfft)))
        else:
            self.setThreshold(thresholdVec)
        # Trailing nover input samples carried over so that windows overlap
        # across successive work() calls.
        self.bufferState = np.zeros((self.nover,))
        # Total number of windows emitted so far (absolute window indexing).
        self.numWinProcessed = 0

    def reset(self):
        '''
        Resets internal memory if the compressor needs to be re-started
        (soft-reset)
        '''
        self.bufferState = 0 * self.bufferState
        self.numWinProcessed = 0

    def setThreshold(self, thresholdVec):
        '''
        Sets internal threshold vector
        :input: thresholdVec :shape==(1,nfft): real-valued thresholds as numpy array
        '''
        assert thresholdVec.shape == (1, self.nfft)
        self.thresholdVec = thresholdVec

    def work(self, xIn):
        '''
        Perform compression on input vector
        :input: xIn :numElements==k*nfft: input signal as a numpy array
        :output: (windowIdx, binIdx, binValue)
        :output: windowIdx : Index of window over all-time
        :output: binIdx : Index of bin in a particular window
        :output: binValue : Value of the binIdx at the windowIdx
        This function remembers past input and stores overlap in the bufferState
        variable
        '''
        assert not xIn.size % self.nfft
        # Prepend the overlap remembered from the previous call
        xIn = np.concatenate((self.bufferState, xIn))
        # Half-overlapped windowing.
        # Bug fix: order='F' makes every column a contiguous nfft-sample frame.
        # The previous default C-order reshape placed strided (non-contiguous)
        # samples into each column, corrupting the short-time FFT frames.
        evenWindows = self.windowVec * xIn[:-self.nover].reshape((self.nfft, -1), order='F')
        oddWindows = self.windowVec * xIn[self.nover:].reshape((self.nfft, -1), order='F')
        # Fourier Transform (one FFT per column / frame)
        evenWindows = np.fft.fft(evenWindows, axis=0)
        oddWindows = np.fft.fft(oddWindows, axis=0)
        # Interleave the two half-overlapped window streams in time order
        output = np.empty((self.nfft, 2 * evenWindows.shape[1]), dtype=evenWindows.dtype)
        output[:, 0::2] = evenWindows
        output[:, 1::2] = oddWindows
        output = output.transpose()
        # Threshold to find areas of activity
        thresholdFlag = np.abs(output) > self.thresholdVec
        thresholdFlag = np.transpose(thresholdFlag.nonzero())
        # Select only active bins
        output = output[thresholdFlag[:, 0], thresholdFlag[:, 1]]
        # Convert per-call window indices into absolute (all-time) indices
        thresholdFlag[:, 0] = self.numWinProcessed + thresholdFlag[:, 0]
        # Update internal states for the next call
        self.bufferState = xIn[-self.nover:]
        self.numWinProcessed = self.numWinProcessed + 2 * evenWindows.shape[1]
        return thresholdFlag[:, 0], thresholdFlag[:, 1], output
"numpy.abs",
"numpy.fft.fft",
"scipy.signal.windows.hann",
"numpy.zeros",
"numpy.empty",
"numpy.concatenate",
"numpy.expand_dims"
] | [((718, 756), 'scipy.signal.windows.hann', 'sig.windows.hann', (['self.nfft'], {'sym': '(False)'}), '(self.nfft, sym=False)\n', (734, 756), True, 'from scipy import signal as sig\n'), ((782, 820), 'numpy.expand_dims', 'np.expand_dims', (['self.windowVec'], {'axis': '(1)'}), '(self.windowVec, axis=1)\n', (796, 820), True, 'import numpy as np\n'), ((995, 1018), 'numpy.zeros', 'np.zeros', (['(self.nover,)'], {}), '((self.nover,))\n', (1003, 1018), True, 'import numpy as np\n'), ((2144, 2183), 'numpy.concatenate', 'np.concatenate', (['(self.bufferState, xIn)'], {}), '((self.bufferState, xIn))\n', (2158, 2183), True, 'import numpy as np\n'), ((2437, 2468), 'numpy.fft.fft', 'np.fft.fft', (['evenWindows'], {'axis': '(0)'}), '(evenWindows, axis=0)\n', (2447, 2468), True, 'import numpy as np\n'), ((2489, 2519), 'numpy.fft.fft', 'np.fft.fft', (['oddWindows'], {'axis': '(0)'}), '(oddWindows, axis=0)\n', (2499, 2519), True, 'import numpy as np\n'), ((2577, 2649), 'numpy.empty', 'np.empty', (['(self.nfft, 2 * evenWindows.shape[1])'], {'dtype': 'evenWindows.dtype'}), '((self.nfft, 2 * evenWindows.shape[1]), dtype=evenWindows.dtype)\n', (2585, 2649), True, 'import numpy as np\n'), ((2829, 2843), 'numpy.abs', 'np.abs', (['output'], {}), '(output)\n', (2835, 2843), True, 'import numpy as np\n'), ((883, 907), 'numpy.zeros', 'np.zeros', (['(1, self.nfft)'], {}), '((1, self.nfft))\n', (891, 907), True, 'import numpy as np\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Workload registration and serialization.
We use a json string to represent a workload (a computation graph).
The format of the string is `[func_name, [args...]]`.
The dag should be the return value of this `func_name(*args)`.
Rationale: The workload is actually a compute dag defined by tvm dsl. But serializing compute dags
and matching them efficiently is not easy. Therefore, we use the above string to encode a compute
dag.
These strings are efficient for serialization/matching and won't be too long.
When we need the dag, we decode the string and call the function, which will return the dag.
"""
import pickle
import json
import tvm._ffi
from .utils import serialize_args, deserialize_args, get_func_name
WORKLOAD_FUNC_REGISTRY = {}


def register_workload(func_name, f=None, override=False):
    """Register a workload generation function under a unique name.

    The registered function must take hashable and jsonable arguments
    (int, float, tuple of int, tvm.tensor.Tensor, ...) and return a list
    of tvm.tensor.Tensor describing the compute declaration.

    Parameters
    ----------
    func_name : Union[Function, str]
        The generation function itself, or the name to register it under.
    f : Optional[Function]
        The generation function, when *func_name* is given as a string.
    override : boolean = False
        Whether to replace an existing entry instead of raising.

    Examples
    --------
    @auto_scheduler.register_workload
    def matmul(N, M, K):
        A = te.placeholder((N, K), name='A')
        B = te.placeholder((K, M), name='B')
        k = te.reduce_axis((0, K), name='k')
        C = te.compute((N, M), lambda i, j: tvm.sum(A[i][k] * B[k][j], axis=[k]), name='C')
        return [A, B, C]
    """
    global WORKLOAD_FUNC_REGISTRY

    # Decorator form: the function itself was passed as the first argument.
    if callable(func_name):
        func_name, f = get_func_name(func_name), func_name
    if not isinstance(func_name, str):
        raise ValueError("expect string function name")

    def register(myf):
        """Store *myf* in the registry, honouring the override flag."""
        if func_name in WORKLOAD_FUNC_REGISTRY and not override:
            raise RuntimeError('%s has been registered already' % func_name)
        WORKLOAD_FUNC_REGISTRY[func_name] = myf
        return myf

    return register(f) if f else register
def make_workload_key(func, args):
    """ Make a workload key by function and arguments.

    Parameters
    ----------
    func : Union[Function, str]
        The function that returns the compute declaration Tensors.
        Can be the a function or the function name.
    args : Args
        The args of the function.

    Returns
    -------
    workload_key : Str
        The workload key of the function.
    """
    global WORKLOAD_FUNC_REGISTRY

    if callable(func):
        func_name = get_func_name(func)
    elif isinstance(func, str):
        func_name = func
    else:
        raise ValueError("Invalid function: " + str(func) +
                         " . `make_workload_key` expects a callable function or its function name")

    if func_name not in WORKLOAD_FUNC_REGISTRY:
        # Bug fix: the two message parts were previously passed as separate
        # arguments to ValueError (a tuple), so str(e) printed a tuple instead
        # of one sentence. Concatenate, as decode_workload_key_to_func_args does.
        raise ValueError("%s is not registered. " % func +
                         "Please register it with @auto_scheduler.register_workload")

    args = serialize_args(args)
    return json.dumps((func_name,) + args)
def decode_workload_key_to_func_args(workload_key):
    """Decode a workload key into the registered function name and its args.

    Parameters
    ----------
    workload_key : str
        The input workload key.

    Returns
    -------
    name : str
        The registered function name encoded in the key.
    args : List[Tensor]
        The deserialized args of the generation function.
    """
    global WORKLOAD_FUNC_REGISTRY

    workload = json.loads(workload_key)
    name, raw_args = workload[0], workload[1:]
    if name not in WORKLOAD_FUNC_REGISTRY:
        raise ValueError("%s is not registered. " % name +
                         "Please register it with @auto_scheduler.register_workload")
    return name, deserialize_args(raw_args)
@tvm._ffi.register_func("auto_scheduler.workload_key_to_tensors")
def workload_key_to_tensors(workload_key):
    """Recreate the input/output tensors encoded by a workload key.

    This method is usually used to create a ComputeDAG by workload key.

    Parameters
    ----------
    workload_key : str
        The input workload key.

    Returns
    -------
    tensors : List[Tensor]
        The tensors produced by the registered generation function.
    """
    global WORKLOAD_FUNC_REGISTRY

    func_name, func_args = decode_workload_key_to_func_args(workload_key)
    generator = WORKLOAD_FUNC_REGISTRY[func_name]
    assert callable(generator)
    return generator(*func_args)
def save_workload_func_registry(filename):
    """ Dump workload function registry to a pickle binary file.

    Parameters
    ----------
    filename : str
        The filename to dump workload function registry to.
    """
    global WORKLOAD_FUNC_REGISTRY

    # Use a context manager so the file handle is flushed and closed even if
    # pickling fails; the original `open(...)` leaked the handle.
    with open(filename, 'wb') as fout:
        pickle.dump(WORKLOAD_FUNC_REGISTRY, fout)
def load_workload_func_registry(filename):
    """ Load workload function registry from a pickle binary file.

    Parameters
    ----------
    filename : str
        The filename to load workload function registry from.
    """
    global WORKLOAD_FUNC_REGISTRY

    # Context manager closes the file promptly; the original left it open.
    with open(filename, 'rb') as fin:
        WORKLOAD_FUNC_REGISTRY = pickle.load(fin)
| [
"json.loads",
"json.dumps"
] | [((4021, 4052), 'json.dumps', 'json.dumps', (['((func_name,) + args)'], {}), '((func_name,) + args)\n', (4031, 4052), False, 'import json\n'), ((4497, 4521), 'json.loads', 'json.loads', (['workload_key'], {}), '(workload_key)\n', (4507, 4521), False, 'import json\n')] |
import pickle
import pandas as pd
import yaml
from sklearn.linear_model import ElasticNet, LogisticRegression
from sklearn.ensemble import RandomForestRegressor
from config import Config
# Ensure the models output directory exists before training.
Config.MODELS_PATH.mkdir(parents=True, exist_ok=True)

# Load hyper-parameters from params.yaml.
with open("params.yaml", "r") as fd:
    params = yaml.safe_load(fd)

model_type = params['model_type']
lr = params['lr']
random_state = params['random_state']
#epochs = params['train']['epochs']
alpha = params['train']['alpha']
l1_rate = params['train']['l1_rate']

X_train = pd.read_csv(str(Config.FEATURES_PATH / "train_features.csv"))
y_train = pd.read_csv(str(Config.FEATURES_PATH / "train_labels.csv"))

# Instantiate the estimator selected in params.yaml. Failing fast on an
# unknown name is clearer than the NameError the original raised at fit().
if model_type == "LogisticRegression":
    model = LogisticRegression(l1_ratio=l1_rate, random_state=random_state)
elif model_type == "RandomForestRegressor":
    model = RandomForestRegressor(
        n_estimators=150, max_depth=6, random_state=random_state
    )
elif model_type == "ElasticNet":
    model = ElasticNet(
        alpha=alpha, l1_ratio=l1_rate, random_state=random_state
    )
else:
    raise ValueError(f"Unknown model_type in params.yaml: {model_type!r}")

model.fit(X_train, y_train)

# Persist the fitted model; the context manager guarantees the handle
# is closed (the original pickle.dump(..., open(...)) leaked it).
with open(str(Config.MODELS_PATH / "model.pickle"), "wb") as fout:
    pickle.dump(model, fout)
"sklearn.ensemble.RandomForestRegressor",
"sklearn.linear_model.ElasticNet",
"config.Config.MODELS_PATH.mkdir",
"sklearn.linear_model.LogisticRegression",
"yaml.safe_load"
] | [((189, 242), 'config.Config.MODELS_PATH.mkdir', 'Config.MODELS_PATH.mkdir', ([], {'parents': '(True)', 'exist_ok': '(True)'}), '(parents=True, exist_ok=True)\n', (213, 242), False, 'from config import Config\n'), ((295, 313), 'yaml.safe_load', 'yaml.safe_load', (['fd'], {}), '(fd)\n', (309, 313), False, 'import yaml\n'), ((707, 770), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'l1_ratio': 'l1_rate', 'random_state': 'random_state'}), '(l1_ratio=l1_rate, random_state=random_state)\n', (725, 770), False, 'from sklearn.linear_model import ElasticNet, LogisticRegression\n'), ((826, 905), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(150)', 'max_depth': '(6)', 'random_state': 'random_state'}), '(n_estimators=150, max_depth=6, random_state=random_state)\n', (847, 905), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((964, 1032), 'sklearn.linear_model.ElasticNet', 'ElasticNet', ([], {'alpha': 'alpha', 'l1_ratio': 'l1_rate', 'random_state': 'random_state'}), '(alpha=alpha, l1_ratio=l1_rate, random_state=random_state)\n', (974, 1032), False, 'from sklearn.linear_model import ElasticNet, LogisticRegression\n')] |
# Copyright (c) 2016 The OpenTracing Authors.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
import time
import pytest
import opentracing
from opentracing import Format
class APICompatibilityCheckMixin(object):
    """
    A mixin class for validation that a given tracer implementation
    satisfies the requirements of the OpenTracing API.
    """
    def tracer(self):
        # Subclasses must return the tracer implementation under test.
        raise NotImplementedError('Subclass must implement tracer()')
    def check_baggage_values(self):
        """If true, the test will validate Baggage items by storing and
        retrieving them from the trace context. If false, it will only attempt
        to store and retrieve the Baggage items to check the API compliance,
        but not actually validate stored values. The latter mode is only
        useful for no-op tracer.
        """
        return True
    def test_start_span(self):
        """Spans can be started plainly and as context managers, with tags/logs."""
        tracer = self.tracer()
        span = tracer.start_span(operation_name='Fry')
        span.finish()
        with tracer.start_span(operation_name='Fry',
                               tags={'birthday': 'August 14 1974'}) as span:
            span.log_event('birthplace',
                           payload={'hospital': 'Brooklyn Pre-Med Hospital',
                                    'city': 'Old <NAME>'})
    def test_start_span_with_parent(self):
        """Child spans can reference a parent via child_of or follows_from."""
        tracer = self.tracer()
        parent_span = tracer.start_span(operation_name='parent')
        assert parent_span is not None
        span = tracer.start_span(
            operation_name='Leela',
            child_of=parent_span)
        span.finish()
        span = tracer.start_span(
            operation_name='Leela',
            references=[opentracing.follows_from(parent_span.context)],
            tags={'birthplace': 'sewers'})
        span.finish()
        parent_span.finish()
    def test_start_child_span(self):
        """The opentracing.start_child_span helper works with this tracer."""
        tracer = self.tracer()
        parent_span = tracer.start_span(operation_name='parent')
        assert parent_span is not None
        child_span = opentracing.start_child_span(
            parent_span, operation_name='Leela')
        child_span.finish()
        parent_span.finish()
    def test_set_operation_name(self):
        """set_operation_name returns the span (fluent API)."""
        span = self.tracer().start_span().set_operation_name('Farnsworth')
        span.finish()
    def test_span_as_context_manager(self):
        """The span context manager calls finish() on exit, even on exceptions."""
        finish = {'called': False}
        def mock_finish(*_):
            finish['called'] = True
        with self.tracer().start_span(operation_name='antiquing') as span:
            setattr(span, 'finish', mock_finish)
        assert finish['called'] is True
        # now try with exception
        finish['called'] = False
        try:
            with self.tracer().start_span(operation_name='antiquing') as span:
                setattr(span, 'finish', mock_finish)
                raise ValueError()
        except ValueError:
            assert finish['called'] is True
        else:
            raise AssertionError('Expected ValueError')  # pragma: no cover
    def test_span_tag_value_types(self):
        """Tag values may be int, bool, or string."""
        with self.tracer().start_span(operation_name='ManyTypes') as span:
            span. \
                set_tag('an_int', 9). \
                set_tag('a_bool', True). \
                set_tag('a_string', 'aoeuidhtns')
    def test_span_tags_with_chaining(self):
        """set_tag returns the span and accepts unicode keys/values."""
        span = self.tracer().start_span(operation_name='Farnsworth')
        span. \
            set_tag('birthday', '9 April, 2841'). \
            set_tag('loves', 'different lengths of wires')
        span. \
            set_tag('unicode_val', u'non-ascii: \u200b'). \
            set_tag(u'unicode_key_\u200b', 'ascii val')
        span.finish()
    def test_span_logs(self):
        """Both the newer log_kv API and the older log/log_event APIs work."""
        span = self.tracer().start_span(operation_name='Fry')
        # Newer API
        span.log_kv(
            {'frozen.year': 1999, 'frozen.place': 'Cryogenics Labs'})
        span.log_kv(
            {'defrosted.year': 2999, 'defrosted.place': 'Cryogenics Labs'},
            time.time())
        # Older API
        span.\
            log_event('frozen', {'year': 1999, 'place': 'Cryogenics Labs'}). \
            log_event('defrosted', {'year': 2999}). \
            log_event('became his own grandfather', 1947)
        span.\
            log(event='frozen'). \
            log(payload={'year': 1999}). \
            log(timestamp=time.time(),
                event='frozen',
                payload={'year': 1999}). \
            log(timestamp=time.time(),
                event='unfrozen',
                payload={'year': 2999})
    def test_span_baggage(self):
        """Baggage items can be set and read back through the span."""
        with self.tracer().start_span(operation_name='Fry') as span:
            assert span.context.baggage == {}
            span_ref = span.set_baggage_item('Kiff-loves', 'Amy')
            assert span_ref is span
            val = span.get_baggage_item('Kiff-loves')
            if self.check_baggage_values():
                assert 'Amy' == val
                pass
    def test_context_baggage(self):
        """Baggage items set on the span are visible via span.context.baggage."""
        with self.tracer().start_span(operation_name='Fry') as span:
            assert span.context.baggage == {}
            span.set_baggage_item('Kiff-loves', 'Amy')
            if self.check_baggage_values():
                assert span.context.baggage == {'Kiff-loves': 'Amy'}
                pass
    def test_text_propagation(self):
        """Span context survives a TEXT_MAP inject/extract round trip."""
        with self.tracer().start_span(operation_name='Bender') as span:
            text_carrier = {}
            self.tracer().inject(
                span_context=span.context,
                format=opentracing.Format.TEXT_MAP,
                carrier=text_carrier)
            extracted_ctx = self.tracer().extract(
                format=opentracing.Format.TEXT_MAP,
                carrier=text_carrier)
            assert extracted_ctx.baggage == {}
    def test_binary_propagation(self):
        """Span context survives a BINARY inject/extract round trip."""
        with self.tracer().start_span(operation_name='Bender') as span:
            bin_carrier = bytearray()
            self.tracer().inject(
                span_context=span.context,
                format=opentracing.Format.BINARY,
                carrier=bin_carrier)
            extracted_ctx = self.tracer().extract(
                format=opentracing.Format.BINARY,
                carrier=bin_carrier)
            assert extracted_ctx.baggage == {}
    def test_mandatory_formats(self):
        """All three mandatory carrier formats are accepted without raising."""
        formats = [
            (Format.TEXT_MAP, {}),
            (Format.HTTP_HEADERS, {}),
            (Format.BINARY, bytearray()),
        ]
        with self.tracer().start_span(operation_name='Bender') as span:
            for fmt, carrier in formats:
                # expecting no exceptions
                span.tracer.inject(span.context, fmt, carrier)
                span.tracer.extract(fmt, carrier)
    def test_unknown_format(self):
        """Unknown carrier formats raise UnsupportedFormatException."""
        custom_format = 'kiss my shiny metal ...'
        with self.tracer().start_span(operation_name='Bender') as span:
            with pytest.raises(opentracing.UnsupportedFormatException):
                span.tracer.inject(span.context, custom_format, {})
            with pytest.raises(opentracing.UnsupportedFormatException):
                span.tracer.extract(custom_format, {})
| [
"opentracing.follows_from",
"time.time",
"pytest.raises",
"opentracing.start_child_span"
] | [((3082, 3147), 'opentracing.start_child_span', 'opentracing.start_child_span', (['parent_span'], {'operation_name': '"""Leela"""'}), "(parent_span, operation_name='Leela')\n", (3110, 3147), False, 'import opentracing\n'), ((5053, 5064), 'time.time', 'time.time', ([], {}), '()\n', (5062, 5064), False, 'import time\n'), ((5526, 5537), 'time.time', 'time.time', ([], {}), '()\n', (5535, 5537), False, 'import time\n'), ((7974, 8027), 'pytest.raises', 'pytest.raises', (['opentracing.UnsupportedFormatException'], {}), '(opentracing.UnsupportedFormatException)\n', (7987, 8027), False, 'import pytest\n'), ((8114, 8167), 'pytest.raises', 'pytest.raises', (['opentracing.UnsupportedFormatException'], {}), '(opentracing.UnsupportedFormatException)\n', (8127, 8167), False, 'import pytest\n'), ((2746, 2791), 'opentracing.follows_from', 'opentracing.follows_from', (['parent_span.context'], {}), '(parent_span.context)\n', (2770, 2791), False, 'import opentracing\n'), ((5412, 5423), 'time.time', 'time.time', ([], {}), '()\n', (5421, 5423), False, 'import time\n')] |
from typing import TypeVar, Generic, Optional, Type, Any, Union, Dict, TYPE_CHECKING
from unipipeline.errors.uni_payload_error import UniPayloadParsingError, UniAnswerPayloadParsingError
from unipipeline.errors.uni_sending_to_worker_error import UniSendingToWorkerError
from unipipeline.answer.uni_answer_message import UniAnswerMessage
from unipipeline.brokers.uni_broker_message_manager import UniBrokerMessageManager
from unipipeline.errors.uni_work_flow_error import UniWorkFlowError
from unipipeline.message.uni_message import UniMessage
from unipipeline.message_meta.uni_message_meta import UniMessageMeta, UniMessageMetaErrTopic, UniAnswerParams
from unipipeline.worker.uni_worker import UniWorker
from unipipeline.worker.uni_worker_consumer_manager import UniWorkerConsumerManager
from unipipeline.worker.uni_worker_consumer_message import UniWorkerConsumerMessage
from unipipeline.definitions.uni_worker_definition import UniWorkerDefinition
if TYPE_CHECKING:
from unipipeline.modules.uni_mediator import UniMediator
# Payload type of the worker's input message.
TInputMsgPayload = TypeVar('TInputMsgPayload', bound=UniMessage)
# Payload type of the worker's answer message (None when no answer is defined).
TAnswerMsgPayload = TypeVar('TAnswerMsgPayload', bound=Optional[UniMessage])
class UniWorkerConsumer(Generic[TInputMsgPayload, TAnswerMsgPayload]):
    """Runtime wrapper that feeds broker messages into a user-defined UniWorker.

    It parses incoming payloads, invokes the worker's handler, forwards
    messages to other workers declared in ``output_workers``, and publishes
    answers when the worker definition requires one.
    """
    def __init__(self, definition: UniWorkerDefinition, mediator: 'UniMediator', worker_type: Type[UniWorker[TInputMsgPayload, TAnswerMsgPayload]]) -> None:
        """Bind a worker definition to its mediator and instantiate the worker class."""
        self._definition = definition
        self._mediator = mediator
        # The manager exposes send_to() to the user's worker code.
        self._worker_manager = UniWorkerConsumerManager(self.send_to)
        self._worker = worker_type(self._worker_manager)
        self._uni_echo = mediator.echo.mk_child(f'worker[{definition.name}]')
        self._input_message_type: Type[TInputMsgPayload] = mediator.get_message_type(self._definition.input_message.name)  # type: ignore
        self._answer_message_type: Optional[Type[TAnswerMsgPayload]] = mediator.get_message_type(self._definition.answer_message.name) if self._definition.answer_message is not None else None  # type: ignore
        # Meta of the message currently being processed; used as parent_meta
        # for messages sent from inside the handler. None outside processing.
        self._current_meta: Optional[UniMessageMeta] = None
    def send_to(self, worker: Union[Type['UniWorker[Any, Any]'], str], data: Union[Dict[str, Any], UniMessage], *, alone: bool = False, need_answer: bool = False) -> Optional[UniAnswerMessage[UniMessage]]:
        """Send *data* to another worker declared in this worker's output_workers.

        :param worker: target worker class or name.
        :param data: payload dict or UniMessage instance.
        :param alone: forwarded to the mediator — presumably "skip if already
            queued"; TODO confirm exact semantics against the mediator.
        :param need_answer: when True, request an answer and return the
            mediator's UniAnswerMessage result; otherwise return None.
        :raises UniSendingToWorkerError: if the target is not listed in output_workers.
        :raises UniWorkFlowError: if an answer is requested from a worker that never answers.
        """
        wd = self._mediator.config.get_worker_definition(worker)
        if wd.name not in self._definition.output_workers:
            raise UniSendingToWorkerError(f'worker {wd.name} is not defined in workers->{self._definition.name}->output_workers')
        if need_answer and not wd.need_answer:
            raise UniWorkFlowError(f'you will get no response form worker {wd.name}')
        if need_answer:
            # Answers come back on this worker's answer topic, keyed by manager id.
            answ_params = UniAnswerParams(topic=self._definition.answer_topic, id=self._worker_manager.id)
            return self._mediator.send_to(wd.name, data, parent_meta=self._current_meta, answer_params=answ_params, alone=alone)
        self._mediator.send_to(wd.name, data, parent_meta=self._current_meta, answer_params=None, alone=alone)
        return None
    def process_message(self, meta: UniMessageMeta, manager: UniBrokerMessageManager) -> None:
        """Handle one broker message: parse, run the worker, answer/ack as configured."""
        self._current_meta = meta
        msg = UniWorkerConsumerMessage[TInputMsgPayload](self._input_message_type, manager, meta)
        try:
            result: Optional[Union[TAnswerMsgPayload, Dict[str, Any]]] = self._worker.handle_message(msg)
        except UniAnswerPayloadParsingError as e:
            # Answer payload could not be parsed -> route to the error topic.
            self._mediator.move_to_error_topic(self._definition, meta, UniMessageMetaErrTopic.HANDLE_MESSAGE_ERR, e)
        except UniPayloadParsingError as e:
            # Input payload could not be parsed -> route to the error topic.
            self._mediator.move_to_error_topic(self._definition, meta, UniMessageMetaErrTopic.MESSAGE_PAYLOAD_ERR, e)
        # except Exception as e:  # TODO: correct error handling
        #     self._mediator.move_to_error_topic(self._definition, meta, UniMessageMetaErrTopic.HANDLE_MESSAGE_ERR, e)
        else:
            if self._definition.need_answer:
                try:
                    self._mediator.answer_to(self._definition.name, meta, result, unwrapped=self._definition.answer_unwrapped)
                except UniSendingToWorkerError:
                    pass
        # NOTE(review): the message is acked even when handling failed above —
        # presumably the error-topic move supersedes redelivery; confirm.
        if self._definition.ack_after_success:
            msg.ack()
        self._current_meta = None
| [
"unipipeline.errors.uni_work_flow_error.UniWorkFlowError",
"unipipeline.message_meta.uni_message_meta.UniAnswerParams",
"unipipeline.errors.uni_sending_to_worker_error.UniSendingToWorkerError",
"unipipeline.worker.uni_worker_consumer_manager.UniWorkerConsumerManager",
"typing.TypeVar"
] | [((1052, 1097), 'typing.TypeVar', 'TypeVar', (['"""TInputMsgPayload"""'], {'bound': 'UniMessage'}), "('TInputMsgPayload', bound=UniMessage)\n", (1059, 1097), False, 'from typing import TypeVar, Generic, Optional, Type, Any, Union, Dict, TYPE_CHECKING\n'), ((1118, 1174), 'typing.TypeVar', 'TypeVar', (['"""TAnswerMsgPayload"""'], {'bound': 'Optional[UniMessage]'}), "('TAnswerMsgPayload', bound=Optional[UniMessage])\n", (1125, 1174), False, 'from typing import TypeVar, Generic, Optional, Type, Any, Union, Dict, TYPE_CHECKING\n'), ((1509, 1547), 'unipipeline.worker.uni_worker_consumer_manager.UniWorkerConsumerManager', 'UniWorkerConsumerManager', (['self.send_to'], {}), '(self.send_to)\n', (1533, 1547), False, 'from unipipeline.worker.uni_worker_consumer_manager import UniWorkerConsumerManager\n'), ((2440, 2561), 'unipipeline.errors.uni_sending_to_worker_error.UniSendingToWorkerError', 'UniSendingToWorkerError', (['f"""worker {wd.name} is not defined in workers->{self._definition.name}->output_workers"""'], {}), "(\n f'worker {wd.name} is not defined in workers->{self._definition.name}->output_workers'\n )\n", (2463, 2561), False, 'from unipipeline.errors.uni_sending_to_worker_error import UniSendingToWorkerError\n'), ((2617, 2684), 'unipipeline.errors.uni_work_flow_error.UniWorkFlowError', 'UniWorkFlowError', (['f"""you will get no response form worker {wd.name}"""'], {}), "(f'you will get no response form worker {wd.name}')\n", (2633, 2684), False, 'from unipipeline.errors.uni_work_flow_error import UniWorkFlowError\n'), ((2735, 2820), 'unipipeline.message_meta.uni_message_meta.UniAnswerParams', 'UniAnswerParams', ([], {'topic': 'self._definition.answer_topic', 'id': 'self._worker_manager.id'}), '(topic=self._definition.answer_topic, id=self._worker_manager.id\n )\n', (2750, 2820), False, 'from unipipeline.message_meta.uni_message_meta import UniMessageMeta, UniMessageMetaErrTopic, UniAnswerParams\n')] |
import math
from oscontainer.constants import CGROUP_TYPE_V2, PER_CPU_SHARES, NO_LIMIT
from oscontainer.cgroup_subsystem import CgroupController, CgroupSubsystem
from oscontainer.utils import limit_from_str
# Names of the cgroup v2 interface files read by CgroupV2Subsystem below.
CPU_WEIGHT = "cpu.weight"
CPU_MAX = "cpu.max"
CPU_CPUSET_CPUS = "cpuset.cpus"
CPU_CPUSET_CPUS_EFFECTIVE = "cpuset.cpus.effective"
MEMORY_CURRENT = "memory.current"
MEMORY_MAX = "memory.max"
class CgroupV2Controller(CgroupController):
    """Controller bound to a single cgroup path in the cgroup v2 unified hierarchy."""

    def __init__(self, mount_path, cgroup_path):
        # type: (str, str) -> None
        """
        Creates new cgroup V2 controller.

        :param mount_path: the mount path of the cgroup v2 hierarchy
        :param cgroup_path: the cgroup path for the controller
        """
        super().__init__()
        self.mount_path = mount_path
        self.cgroup_path = cgroup_path
        # Absolute path of this controller's subsystem directory.
        self.subsystem_path = self._create_subsystem_path(mount_path, cgroup_path)

    @staticmethod
    def _create_subsystem_path(mount_path, cgroup_path):
        # type: (str, str) -> str
        """Join the hierarchy mount path and the cgroup path into one path string."""
        return "{}{}".format(mount_path, cgroup_path)
class CgroupV2Subsystem(CgroupSubsystem):
    """
    The implementation for cgroup V2
    """
    def __init__(self, unified):
        # type: (CgroupV2Controller) -> None
        """
        Creates new instance.
        :param unified: the unified cgroup controller
        """
        self.unified = unified
    def cpu_shares(self):
        # type: () -> int
        # Reads cpu.weight (cgroup v2) and maps it back to a CPU-shares value.
        shares = int(self.unified.read_container_param(CPU_WEIGHT))
        if shares == 100:
            # Convert default value of 100 to no shares setup
            return NO_LIMIT
        # CPU shares (OCI) value needs to get translated into
        # a proper Cgroups v2 value. See:
        # https://github.com/containers/crun/blob/master/crun.1.md#cpu-controller
        #
        # Use the inverse of (x == OCI value, y == cgroupsv2 value):
        # ((262142 * y - 1)/9999) + 2 = x
        x = 262142 * shares - 1
        frac = float(x) / 9999.0
        x = int(frac) + 2
        if x <= PER_CPU_SHARES:
            # will always map to 1 CPU
            return x
        # Since the scaled value is not precise, return the closest
        # multiple of PER_CPU_SHARES for a more conservative mapping
        f = x / PER_CPU_SHARES
        lower_multiple = math.floor(f) * PER_CPU_SHARES
        upper_multiple = math.ceil(f) * PER_CPU_SHARES
        # Pick whichever multiple is nearer to the scaled value (ties -> lower).
        distance_lower = max(lower_multiple, x) - min(lower_multiple, x)
        distance_upper = max(upper_multiple, x) - min(upper_multiple, x)
        if distance_lower <= distance_upper:
            return lower_multiple
        else:
            return upper_multiple
    def cpu_quota(self):
        # type: () -> int
        # First field of "cpu.max" is the quota ("max" or a number).
        cpu_quota_res = self.unified.read_container_params_with_format(CPU_MAX, scan_format="%s %*d")
        if len(cpu_quota_res) == 0:
            return NO_LIMIT
        return limit_from_str(cpu_quota_res[0])
    def cpu_period(self):
        # type: () -> int
        # Second field of "cpu.max" is the period in microseconds.
        cpu_period_res = self.unified.read_container_params_with_format(CPU_MAX, scan_format="%*s %d")
        if len(cpu_period_res) == 0:
            return NO_LIMIT
        return cpu_period_res[0]
    def cpu_cpuset_cpus(self):
        # type: () -> str
        # Fall back to the effective cpuset when no explicit one is configured.
        cpuset = self.unified.read_container_param(CPU_CPUSET_CPUS)
        if cpuset is None or cpuset == "":
            cpuset = self.unified.read_container_param(CPU_CPUSET_CPUS_EFFECTIVE)
        return cpuset
    def memory_usage_in_bytes(self):
        # type: () -> int
        return int(self.unified.read_container_param(MEMORY_CURRENT))
    def memory_limit_in_bytes(self):
        # type: () -> int
        # "memory.max" may contain "max" (no limit) or a byte count.
        memory_str = self.unified.read_container_param(MEMORY_MAX)
        return limit_from_str(memory_str)
    def container_type(self):
        # type: () -> str
        return CGROUP_TYPE_V2
| [
"oscontainer.utils.limit_from_str",
"math.ceil",
"math.floor"
] | [((2893, 2925), 'oscontainer.utils.limit_from_str', 'limit_from_str', (['cpu_quota_res[0]'], {}), '(cpu_quota_res[0])\n', (2907, 2925), False, 'from oscontainer.utils import limit_from_str\n'), ((3733, 3759), 'oscontainer.utils.limit_from_str', 'limit_from_str', (['memory_str'], {}), '(memory_str)\n', (3747, 3759), False, 'from oscontainer.utils import limit_from_str\n'), ((2301, 2314), 'math.floor', 'math.floor', (['f'], {}), '(f)\n', (2311, 2314), False, 'import math\n'), ((2357, 2369), 'math.ceil', 'math.ceil', (['f'], {}), '(f)\n', (2366, 2369), False, 'import math\n')] |
"""
Enum Assembler-Directives
"""
from enum import Enum, auto
class AssemblerDirectives(Enum):
START = auto()
END = auto()
ORG = auto()
DEFINE = auto()
@classmethod
def to_string(cls):
return "{START},{END},{ORG},{DEFINE}".format(
START=cls.START.name,
END=cls.END.name,
ORG=cls.ORG.name,
DEFINE=cls.DEFINE.name
)
| [
"enum.auto"
] | [((110, 116), 'enum.auto', 'auto', ([], {}), '()\n', (114, 116), False, 'from enum import Enum, auto\n'), ((127, 133), 'enum.auto', 'auto', ([], {}), '()\n', (131, 133), False, 'from enum import Enum, auto\n'), ((144, 150), 'enum.auto', 'auto', ([], {}), '()\n', (148, 150), False, 'from enum import Enum, auto\n'), ((164, 170), 'enum.auto', 'auto', ([], {}), '()\n', (168, 170), False, 'from enum import Enum, auto\n')] |
from dataclasses import dataclass
import pele_platform.Checker.main as ck
import pele_platform.Frag.simulation as fr
import pele_platform.Adaptive.simulation as ad
from pele_platform.Allosteric.main import run_allosteric
import pele_platform.gpcr.main as gpcr
import pele_platform.out_in.main as outin
from pele_platform.PPI.main import run_ppi
import pele_platform.Utilities.Parameters.pele_env as pv
import argparse
@dataclass
class Launcher:
_args: argparse.ArgumentParser
frag: str="frag"
ppi: str="PPI"
allosteric: str="allosteric"
gpcr_orth: str="gpcr_orth"
out_in: str="out_in"
adaptive: str="adaptive"
def launch(self) -> pv.EnviroBuilder:
# Launch package from input.yaml
self._define_package_to_run()
job_variables = self.launch_package(self._args.package, no_check=self._args.no_check)
return job_variables
def launch_package(self, package: str, no_check=False) -> pv.EnviroBuilder:
# Launch package from API
if not no_check:
ck.check_executable_and_env_variables(self._args)
if package == self.adaptive:
job_variables = ad.run_adaptive(self._args)
elif package == self.gpcr_orth:
job_variables = gpcr.GpcrLauncher(self._args).run_gpcr_simulation()
elif package == self.out_in:
job_variables = outin.OutInLauncher(self._args).run_gpcr_simulation()
elif package == self.allosteric:
job_variables = run_allosteric(self._args)
elif package == self.ppi:
job_variables = run_ppi(self._args)
elif package == self.frag:
# Set variables and input ready
job_variables = fr.FragRunner(self._args).run_simulation()
return job_variables
def _define_package_to_run(self) -> None:
# Define package being run from input.yaml flags
if self._args.frag_core:
self._args.package = self.frag
elif self._args.ppi:
self._args.package = self.ppi
elif self._args.allosteric:
self._args.package = self.allosteric
elif self._args.gpcr_orth:
self._args.package = self.gpcr_orth
elif self._args.out_in:
self._args.package = self.out_in
else:
self._args.package = self.adaptive
| [
"pele_platform.out_in.main.OutInLauncher",
"pele_platform.gpcr.main.GpcrLauncher",
"pele_platform.Checker.main.check_executable_and_env_variables",
"pele_platform.PPI.main.run_ppi",
"pele_platform.Allosteric.main.run_allosteric",
"pele_platform.Adaptive.simulation.run_adaptive",
"pele_platform.Frag.simu... | [((1038, 1087), 'pele_platform.Checker.main.check_executable_and_env_variables', 'ck.check_executable_and_env_variables', (['self._args'], {}), '(self._args)\n', (1075, 1087), True, 'import pele_platform.Checker.main as ck\n'), ((1153, 1180), 'pele_platform.Adaptive.simulation.run_adaptive', 'ad.run_adaptive', (['self._args'], {}), '(self._args)\n', (1168, 1180), True, 'import pele_platform.Adaptive.simulation as ad\n'), ((1249, 1278), 'pele_platform.gpcr.main.GpcrLauncher', 'gpcr.GpcrLauncher', (['self._args'], {}), '(self._args)\n', (1266, 1278), True, 'import pele_platform.gpcr.main as gpcr\n'), ((1489, 1515), 'pele_platform.Allosteric.main.run_allosteric', 'run_allosteric', (['self._args'], {}), '(self._args)\n', (1503, 1515), False, 'from pele_platform.Allosteric.main import run_allosteric\n'), ((1366, 1397), 'pele_platform.out_in.main.OutInLauncher', 'outin.OutInLauncher', (['self._args'], {}), '(self._args)\n', (1385, 1397), True, 'import pele_platform.out_in.main as outin\n'), ((1578, 1597), 'pele_platform.PPI.main.run_ppi', 'run_ppi', (['self._args'], {}), '(self._args)\n', (1585, 1597), False, 'from pele_platform.PPI.main import run_ppi\n'), ((1705, 1730), 'pele_platform.Frag.simulation.FragRunner', 'fr.FragRunner', (['self._args'], {}), '(self._args)\n', (1718, 1730), True, 'import pele_platform.Frag.simulation as fr\n')] |
"""
all subsets of given subset
"""
def subsets_of_subset(subset):
s = subset
superset = subset
while True:
yield s
s = (s - 1) & superset
if s == superset:
break
# --- end of library ---
def debugprint(g):
for x in g:
print(f"{x:06b}")
TEST_1 = """
>>> debugprint(subsets_of_subset(0b010101))
010101
010100
010001
010000
000101
000100
000001
000000
"""
def _test():
import doctest
doctest.testmod()
g = globals()
for k in sorted(g):
if k.startswith("TEST_"):
print(k)
doctest.run_docstring_examples(g[k], g, name=k)
if __name__ == "__main__":
import sys
input = sys.stdin.buffer.readline
read = sys.stdin.buffer.read
if sys.argv[-1] == "-t":
_test()
sys.exit()
| [
"doctest.testmod",
"doctest.run_docstring_examples",
"sys.exit"
] | [((458, 475), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (473, 475), False, 'import doctest\n'), ((801, 811), 'sys.exit', 'sys.exit', ([], {}), '()\n', (809, 811), False, 'import sys\n'), ((585, 632), 'doctest.run_docstring_examples', 'doctest.run_docstring_examples', (['g[k]', 'g'], {'name': 'k'}), '(g[k], g, name=k)\n', (615, 632), False, 'import doctest\n')] |
#from https://www.assemblyai.com/blog/end-to-end-speech-recognition-pytorch/
from torch import nn
import torch.nn.functional as F
from hw_asr.base import BaseModel
class CNNLayerNorm(nn.Module):
def __init__(self, n_feats):
super().__init__()
self.layer_norm = nn.LayerNorm(n_feats)
def forward(self, x):
# x (batch, channel, feature, time)
x = x.transpose(2, 3).contiguous() # (batch, channel, time, feature)
x = self.layer_norm(x)
return x.transpose(2, 3).contiguous() # (batch, channel, feature, time)
class ResidualCNN(nn.Module):
"""inspired by https://arxiv.org/pdf/1603.05027.pdf
"""
def __init__(self, in_channels, out_channels, kernel, stride, dropout, n_feats):
super().__init__()
self.do_residual = in_channels != out_channels
if self.do_residual:
self.residual = nn.Conv2d(in_channels, out_channels, kernel_size=1)
self.net = nn.Sequential(
CNNLayerNorm(n_feats),
nn.GELU(),
nn.Dropout(dropout),
nn.Conv2d(in_channels, out_channels, kernel_size=kernel, stride=stride, padding=kernel//2),
CNNLayerNorm(n_feats),
nn.GELU(),
nn.Dropout(dropout),
nn.Conv2d(out_channels, out_channels, kernel_size=kernel, stride=stride, padding=kernel // 2)
)
def forward(self, x):
if self.do_residual:
residual = self.residual(x)
else:
residual = x
x = self.net(x)
x += residual
return x # (batch, channel, feature, time)
class BidirectionalGRU(nn.Module):
def __init__(self, rnn_dim, hidden_size, dropout, batch_first=True):
super().__init__()
self.BiGRU = nn.GRU(
input_size=rnn_dim, hidden_size=hidden_size,
num_layers=1, batch_first=batch_first, bidirectional=True)
self.layer_norm = nn.LayerNorm(rnn_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
x = self.layer_norm(x)
x = F.gelu(x)
x, _ = self.BiGRU(x)
x = self.dropout(x)
return x
class DeepSpeechModel(BaseModel):
def __init__(self, n_cnn_layers, n_rnn_layers, rnn_dim, n_class, n_feats, stride=2, kernel_size=3, dropout=0.1):
super(DeepSpeechModel, self).__init__(n_feats, n_class)
n_feats = n_feats // 2
self.cnn = nn.Conv2d(1, 32, kernel_size=3, stride=stride, padding=kernel_size // 2)
layers = []
for _ in range(n_cnn_layers):
layers.append(ResidualCNN(32, 32, kernel=3, stride=1, dropout=dropout, n_feats=n_feats))
self.cnn_net = nn.Sequential(*layers)
self.fully_connected = nn.Linear(n_feats * 32, rnn_dim)
layers = [BidirectionalGRU(rnn_dim=rnn_dim, hidden_size=rnn_dim, dropout=dropout)]
for _ in range(n_rnn_layers - 1):
layers.append(BidirectionalGRU(rnn_dim=rnn_dim*2, hidden_size=rnn_dim, dropout=dropout))
self.rnn_net = nn.Sequential(*layers)
self.classifier = nn.Sequential(
nn.Linear(rnn_dim * 2, rnn_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(rnn_dim, n_class)
)
def forward(self, spectrogram, *args, **kwargs):
x = spectrogram.transpose(1, 2).unsqueeze(1)
x = self.cnn(x)
x = self.cnn_net(x)
sizes = x.size()
x = x.view(sizes[0], sizes[1] * sizes[2], sizes[3]) # (batch, feature, time)
x = x.transpose(1, 2) # (batch, time, feature)
x = self.fully_connected(x)
x = self.rnn_net(x)
x = self.classifier(x)
return x
def transform_input_lengths(self, input_lengths):
return input_lengths // 2
| [
"torch.nn.Dropout",
"torch.nn.GELU",
"torch.nn.Sequential",
"torch.nn.LayerNorm",
"torch.nn.functional.gelu",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.GRU"
] | [((285, 306), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['n_feats'], {}), '(n_feats)\n', (297, 306), False, 'from torch import nn\n'), ((1768, 1882), 'torch.nn.GRU', 'nn.GRU', ([], {'input_size': 'rnn_dim', 'hidden_size': 'hidden_size', 'num_layers': '(1)', 'batch_first': 'batch_first', 'bidirectional': '(True)'}), '(input_size=rnn_dim, hidden_size=hidden_size, num_layers=1,\n batch_first=batch_first, bidirectional=True)\n', (1774, 1882), False, 'from torch import nn\n'), ((1930, 1951), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['rnn_dim'], {}), '(rnn_dim)\n', (1942, 1951), False, 'from torch import nn\n'), ((1975, 1994), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (1985, 1994), False, 'from torch import nn\n'), ((2065, 2074), 'torch.nn.functional.gelu', 'F.gelu', (['x'], {}), '(x)\n', (2071, 2074), True, 'import torch.nn.functional as F\n'), ((2416, 2488), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(32)'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(kernel_size // 2)'}), '(1, 32, kernel_size=3, stride=stride, padding=kernel_size // 2)\n', (2425, 2488), False, 'from torch import nn\n'), ((2672, 2694), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (2685, 2694), False, 'from torch import nn\n'), ((2726, 2758), 'torch.nn.Linear', 'nn.Linear', (['(n_feats * 32)', 'rnn_dim'], {}), '(n_feats * 32, rnn_dim)\n', (2735, 2758), False, 'from torch import nn\n'), ((3017, 3039), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (3030, 3039), False, 'from torch import nn\n'), ((889, 940), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels'], {'kernel_size': '(1)'}), '(in_channels, out_channels, kernel_size=1)\n', (898, 940), False, 'from torch import nn\n'), ((1022, 1031), 'torch.nn.GELU', 'nn.GELU', ([], {}), '()\n', (1029, 1031), False, 'from torch import nn\n'), ((1045, 1064), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (1055, 1064), False, 'from torch 
import nn\n'), ((1078, 1174), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels'], {'kernel_size': 'kernel', 'stride': 'stride', 'padding': '(kernel // 2)'}), '(in_channels, out_channels, kernel_size=kernel, stride=stride,\n padding=kernel // 2)\n', (1087, 1174), False, 'from torch import nn\n'), ((1217, 1226), 'torch.nn.GELU', 'nn.GELU', ([], {}), '()\n', (1224, 1226), False, 'from torch import nn\n'), ((1240, 1259), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (1250, 1259), False, 'from torch import nn\n'), ((1273, 1370), 'torch.nn.Conv2d', 'nn.Conv2d', (['out_channels', 'out_channels'], {'kernel_size': 'kernel', 'stride': 'stride', 'padding': '(kernel // 2)'}), '(out_channels, out_channels, kernel_size=kernel, stride=stride,\n padding=kernel // 2)\n', (1282, 1370), False, 'from torch import nn\n'), ((3094, 3125), 'torch.nn.Linear', 'nn.Linear', (['(rnn_dim * 2)', 'rnn_dim'], {}), '(rnn_dim * 2, rnn_dim)\n', (3103, 3125), False, 'from torch import nn\n'), ((3139, 3148), 'torch.nn.GELU', 'nn.GELU', ([], {}), '()\n', (3146, 3148), False, 'from torch import nn\n'), ((3162, 3181), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (3172, 3181), False, 'from torch import nn\n'), ((3195, 3222), 'torch.nn.Linear', 'nn.Linear', (['rnn_dim', 'n_class'], {}), '(rnn_dim, n_class)\n', (3204, 3222), False, 'from torch import nn\n')] |
import datetime
from typing import List
from reminders.events import Buttons, Alerts
from reminders.screen import Screen
# highest level, things that can be in a list menu
class ListMenuItem:
def __init__(self, name):
self._name = str(name)
@property
def name(self):
return self._name
def set_name(self, name):
self._name = str(name)
def selected(self):
pass
# an item in a menu that does something other than going to another menu
class ActionItem(ListMenuItem):
def __init__(self, name, action):
super().__init__(name)
self.action = action
def selected(self):
self.action()
# an action item that is displayed on a menu with a checkbox
class ToggleableItem(ActionItem):
def __init__(self, name, is_selected, toggle, pad_width=9):
super().__init__(name.ljust(pad_width), toggle)
self.is_selected = is_selected
@property
def name(self):
return self._name + ("[×]" if self.is_selected() else "[ ]")
# parent for menus that can be displayed as their own screen
class Menu(ListMenuItem):
menu_stack = []
def __init__(self, name):
super().__init__(name)
def display(self):
Screen.text_screen(self.name + "\n" + "-" * len(self.name))
def handle_button_press(self, button):
pass
def handle_time(self):
pass
# returns current menu, ie top of stack
@staticmethod
def current():
return Menu.menu_stack[-1]
# adds the top level menu to the stack
@staticmethod
def initialise(menu):
Menu.menu_stack = [menu]
# when back button is pressed - go back to previous level of menu
@staticmethod
def back():
if len(Menu.menu_stack) > 1:
Menu.menu_stack.pop()
# menu for the home screen
# no back button available
class HomeMenu(Menu):
translation = Buttons.home_menu_buttons
def __init__(self, main_menu):
super().__init__("Home")
self.main_menu = main_menu
def handle_time(self):
self.display()
def handle_button_press(self, button):
button = HomeMenu.translation[button]
if button == "home":
# go to main menu
Menu.menu_stack.append(self.main_menu)
elif button == "backlight":
Menu.menu_stack.append(BacklightOffMenu())
def display(self):
now = datetime.datetime.now()
Screen.home_screen(self.name, now.strftime("%H:%M"), now.strftime("%a %d %b"))
# menu that stores and displays a list of ListMenuItem
class ListMenu(Menu):
translation = Buttons.list_menu_buttons
# initialise a MenuList
def __init__(self, name: str, items):
super().__init__(name)
self.unevaluated = items
self.items: List[ListMenuItem] = [ActionItem("..", Menu.back)]
self.position = 0
# decides what to do depending on which button was pressed
# a = select, b = up menu, y = down menu, x = home screen
def handle_button_press(self, button):
button = ListMenu.translation[button]
if button == "select":
# select
self.items[self.position].selected()
elif button == "up":
# up
self.position -= 1
self.position %= len(self.items)
elif button == "down":
# down
self.position += 1
self.position %= len(self.items)
elif button == "home":
# home/toplevel button
Menu.menu_stack = Menu.menu_stack[:1]
# displays menu on screen
def display(self, title=None):
if not title:
title = self.name
self.items = [ActionItem("..", Menu.back)] + self.unevaluated()
self.position = min(len(self.items) - 1, self.position)
text = ""
for i, item in enumerate(self.items):
if i == self.position:
text += "> {}\n".format(item.name)
else:
text += " {}\n".format(item.name)
print(title, "\n", text)
Screen.menu_screen(title, text)
# adds menu to the stack when selected
def selected(self):
Menu.menu_stack.append(self)
self.position = 0
# menu for reaching the task time editing menu, and to edit on and complete
class TaskMenu(ListMenu):
def __init__(self, task):
self.task = task
super().__init__(self.task.name, self.task_options)
def display(self, title=None):
title = "Edit " + self.name
super(TaskMenu, self).display(title)
def task_options(self):
options = [
TimeMenu(self.task),
ToggleableItem("On", lambda: self.task.on, self.task.on_toggle)
]
if self.task.on:
options.append(ToggleableItem("Complete", lambda: self.task.complete, self.task.complete_toggle))
return options
# menu for editing a task's time
class TimeMenu(ListMenu):
units_stages = [1, 5, 10]
menu_stages = ["Hours", "Minutes", "Save/Cancel"]
translation = Buttons.time_menu_buttons
def __init__(self, task):
super().__init__(task.get_task_time().strftime("Time %H:%M"), lambda: [])
self.task = task
self.time = task.get_task_time()
self.menu_stage = 0
self.units_stage = 0
def display(self, title="Edit Time"):
Screen.multi_line_text(
[Screen.TextLine(title, 1),
Screen.TextLine("Unit change: {}".format(TimeMenu.units_stages[self.units_stage]), 0),
Screen.TextLine(self.time.strftime("%H:%M"), 2, align="c"),
Screen.TextLine(TimeMenu.menu_stages[self.menu_stage], 1, align="c")])
def change_task_time(self):
self.menu_stage = 0
self.task.set_task_time(self.task.get_task_time().replace(hour=self.time.hour, minute=self.time.minute))
self.set_name(self.time.strftime("Time %H:%M"))
Alerts.sort_alerts()
def hour_change(self, difference):
self.time = self.time.replace(hour=(self.time.hour + difference) % 24)
def minute_change(self, difference):
self.time = self.time.replace(minute=(self.time.minute + difference) % 60)
def handle_button_press(self, button):
button = TimeMenu.translation[button]
if button == "next":
self.menu_stage += 1
self.menu_stage %= len(TimeMenu.menu_stages)
if button == "decrease":
if TimeMenu.menu_stages[self.menu_stage] == "Hours":
self.hour_change(-1)
elif TimeMenu.menu_stages[self.menu_stage] == "Minutes":
self.minute_change(0 - TimeMenu.units_stages[self.units_stage])
elif TimeMenu.menu_stages[self.menu_stage] == "Save/Cancel":
self.change_task_time()
super().handle_button_press("a")
if button == "units":
self.units_stage += 1
self.units_stage %= len(TimeMenu.units_stages)
if button == "increase":
if TimeMenu.menu_stages[self.menu_stage] == "Hours":
self.hour_change(1)
elif TimeMenu.menu_stages[self.menu_stage] == "Minutes":
self.minute_change(TimeMenu.units_stages[self.units_stage])
elif TimeMenu.menu_stages[self.menu_stage] == "Save/Cancel":
super().handle_button_press("a")
def selected(self):
super().selected()
self.menu_stage = 0
self.units_stage = 0
# menu which is put at top of stack when backlight is turned off
class BacklightOffMenu(Menu):
def __init__(self):
super().__init__("Backlight")
def display(self):
Screen.off()
def handle_button_press(self, button):
if button == "x":
Menu.menu_stack.pop()
Screen.toggle_backlight()
# menu to display alert and delay or mark complete
class AlertMenu(Menu):
translation = Buttons.alert_menu_buttons
def __init__(self, task, delay=datetime.timedelta(minutes=1)):
super().__init__(task.name)
self.task = task
self.delayed_for = 0
self.delay_period = delay
def display(self):
if self.delayed_for > 0:
Screen.multi_line_text(
[Screen.TextLine(self.name, 1), Screen.TextLine("Delaying until:", 0, uniform_y=True),
Screen.TextLine(self.task.get_task_time().strftime("%H:%M"), 1),
Screen.TextLine(" ", 0), Screen.TextLine("Delayed for", 0),
Screen.TextLine(str(self.delayed_for * self.delay_period), 0)])
else:
Screen.multi_line_text(
[Screen.TextLine(self.name, 1), Screen.TextLine("Alert time:", 0, uniform_y=True),
Screen.TextLine(self.task.get_task_time().strftime("%H:%M"), 1)])
def handle_button_press(self, button):
button = AlertMenu.translation[button]
if button == "dismiss":
Menu.menu_stack.pop()
elif button == "delay":
self.task.delay(self.delay_period)
self.delayed_for += 1
self.display()
elif button == "complete":
self.task.complete_toggle()
| [
"reminders.screen.Screen.menu_screen",
"reminders.screen.Screen.off",
"datetime.datetime.now",
"reminders.screen.Screen.toggle_backlight",
"reminders.screen.Screen.TextLine",
"datetime.timedelta",
"reminders.events.Alerts.sort_alerts"
] | [((2412, 2435), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2433, 2435), False, 'import datetime\n'), ((4075, 4106), 'reminders.screen.Screen.menu_screen', 'Screen.menu_screen', (['title', 'text'], {}), '(title, text)\n', (4093, 4106), False, 'from reminders.screen import Screen\n'), ((5944, 5964), 'reminders.events.Alerts.sort_alerts', 'Alerts.sort_alerts', ([], {}), '()\n', (5962, 5964), False, 'from reminders.events import Buttons, Alerts\n'), ((7689, 7701), 'reminders.screen.Screen.off', 'Screen.off', ([], {}), '()\n', (7699, 7701), False, 'from reminders.screen import Screen\n'), ((8001, 8030), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (8019, 8030), False, 'import datetime\n'), ((7818, 7843), 'reminders.screen.Screen.toggle_backlight', 'Screen.toggle_backlight', ([], {}), '()\n', (7841, 7843), False, 'from reminders.screen import Screen\n'), ((5418, 5443), 'reminders.screen.Screen.TextLine', 'Screen.TextLine', (['title', '(1)'], {}), '(title, 1)\n', (5433, 5443), False, 'from reminders.screen import Screen\n'), ((5631, 5699), 'reminders.screen.Screen.TextLine', 'Screen.TextLine', (['TimeMenu.menu_stages[self.menu_stage]', '(1)'], {'align': '"""c"""'}), "(TimeMenu.menu_stages[self.menu_stage], 1, align='c')\n", (5646, 5699), False, 'from reminders.screen import Screen\n'), ((8267, 8296), 'reminders.screen.Screen.TextLine', 'Screen.TextLine', (['self.name', '(1)'], {}), '(self.name, 1)\n', (8282, 8296), False, 'from reminders.screen import Screen\n'), ((8298, 8351), 'reminders.screen.Screen.TextLine', 'Screen.TextLine', (['"""Delaying until:"""', '(0)'], {'uniform_y': '(True)'}), "('Delaying until:', 0, uniform_y=True)\n", (8313, 8351), False, 'from reminders.screen import Screen\n'), ((8452, 8475), 'reminders.screen.Screen.TextLine', 'Screen.TextLine', (['""" """', '(0)'], {}), "(' ', 0)\n", (8467, 8475), False, 'from reminders.screen import Screen\n'), ((8477, 8510), 
'reminders.screen.Screen.TextLine', 'Screen.TextLine', (['"""Delayed for"""', '(0)'], {}), "('Delayed for', 0)\n", (8492, 8510), False, 'from reminders.screen import Screen\n'), ((8660, 8689), 'reminders.screen.Screen.TextLine', 'Screen.TextLine', (['self.name', '(1)'], {}), '(self.name, 1)\n', (8675, 8689), False, 'from reminders.screen import Screen\n'), ((8691, 8740), 'reminders.screen.Screen.TextLine', 'Screen.TextLine', (['"""Alert time:"""', '(0)'], {'uniform_y': '(True)'}), "('Alert time:', 0, uniform_y=True)\n", (8706, 8740), False, 'from reminders.screen import Screen\n')] |
import voluptuous as vol
from homeassistant.const import CONF_HOST, CONF_NAME
from .const import (
CONF_CHILD_LOCK,
CONF_CLIMATE,
CONF_DEVICE_ID,
CONF_DISPLAY_LIGHT,
CONF_LOCAL_KEY,
CONF_TYPE,
CONF_TYPE_AUTO,
CONF_TYPE_DEHUMIDIFIER,
CONF_TYPE_FAN,
CONF_TYPE_GECO_HEATER,
CONF_TYPE_GPCV_HEATER,
CONF_TYPE_GPPH_HEATER,
)
INDIVIDUAL_CONFIG_SCHEMA_TEMPLATE = [
{"key": CONF_NAME, "type": str, "required": True, "option": False},
{"key": CONF_HOST, "type": str, "required": True, "option": True},
{"key": CONF_DEVICE_ID, "type": str, "required": True, "option": False},
{"key": CONF_LOCAL_KEY, "type": str, "required": True, "option": True},
{
"key": CONF_TYPE,
"type": vol.In(
[
CONF_TYPE_AUTO,
CONF_TYPE_GPPH_HEATER,
CONF_TYPE_DEHUMIDIFIER,
CONF_TYPE_FAN,
CONF_TYPE_GECO_HEATER,
CONF_TYPE_GPCV_HEATER,
]
),
"required": False,
"default": CONF_TYPE_AUTO,
"option": True,
},
{
"key": CONF_CLIMATE,
"type": bool,
"required": False,
"default": True,
"option": True,
},
{
"key": CONF_DISPLAY_LIGHT,
"type": bool,
"required": False,
"default": False,
"option": True,
},
{
"key": CONF_CHILD_LOCK,
"type": bool,
"required": False,
"default": False,
"option": True,
},
]
def individual_config_schema(defaults={}, options_only=False):
output = {}
for prop in INDIVIDUAL_CONFIG_SCHEMA_TEMPLATE:
if options_only and not prop.get("option"):
continue
options = {}
default = defaults.get(prop["key"], prop.get("default"))
if default is not None:
options["default"] = default
key = (
vol.Required(prop["key"], **options)
if prop["required"]
else vol.Optional(prop["key"], **options)
)
output[key] = prop["type"]
return output
| [
"voluptuous.Required",
"voluptuous.Optional",
"voluptuous.In"
] | [((751, 887), 'voluptuous.In', 'vol.In', (['[CONF_TYPE_AUTO, CONF_TYPE_GPPH_HEATER, CONF_TYPE_DEHUMIDIFIER,\n CONF_TYPE_FAN, CONF_TYPE_GECO_HEATER, CONF_TYPE_GPCV_HEATER]'], {}), '([CONF_TYPE_AUTO, CONF_TYPE_GPPH_HEATER, CONF_TYPE_DEHUMIDIFIER,\n CONF_TYPE_FAN, CONF_TYPE_GECO_HEATER, CONF_TYPE_GPCV_HEATER])\n', (757, 887), True, 'import voluptuous as vol\n'), ((1940, 1976), 'voluptuous.Required', 'vol.Required', (["prop['key']"], {}), "(prop['key'], **options)\n", (1952, 1976), True, 'import voluptuous as vol\n'), ((2026, 2062), 'voluptuous.Optional', 'vol.Optional', (["prop['key']"], {}), "(prop['key'], **options)\n", (2038, 2062), True, 'import voluptuous as vol\n')] |
from nlp20 import get_england
import re
str = get_england()
lines = str.split('\n')
p = re.compile(r'^(=+)\s*(.+?)\s*=+')
for l in lines:
m = re.search(p, l)
if m is not None:
level = len(m.group(1)) - 1
print(m.group(2), level)
| [
"re.search",
"nlp20.get_england",
"re.compile"
] | [((47, 60), 'nlp20.get_england', 'get_england', ([], {}), '()\n', (58, 60), False, 'from nlp20 import get_england\n'), ((89, 123), 're.compile', 're.compile', (['"""^(=+)\\\\s*(.+?)\\\\s*=+"""'], {}), "('^(=+)\\\\s*(.+?)\\\\s*=+')\n", (99, 123), False, 'import re\n'), ((148, 163), 're.search', 're.search', (['p', 'l'], {}), '(p, l)\n', (157, 163), False, 'import re\n')] |
"""This file contain the model for the usermanagement app."""
from django.contrib.auth.models import AbstractUser, Group, Permission
from django.db import models
class UserProfile(AbstractUser):
"""
Define a user.
Here, we use heritage of abstract user and addition of the field nb_tries
to detect if the user use a false password to login.
"""
nb_tries = models.IntegerField(default=0)
USERNAME_FIELD = 'username'
class Meta:
"""Add metadata on the class."""
ordering = ('pk',)
def deactivate_user(self):
"""Deactivate a user."""
self.is_active = False
def reactivate_user(self):
"""Reactivate a user if it was deactivated, else, do nothing."""
if not self.is_active:
self.is_active = True
def __repr__(self):
"""Define formal representation of a user."""
return "<User: id={id}, username='{name}'>".format(id=self.id, name=self.username)
class TeamType(models.Model):
"""
Define a team type.
It inherits of Model class and redefine _apply_ and __str__ methods.
"""
name = models.CharField(max_length=200)
perms = models.ManyToManyField(
Permission,
verbose_name='Team Type permissions',
blank=True,
help_text='Specific permissions for this team type.',
related_name="teamType_set",
related_query_name="teamType"
)
def __str__(self):
"""Return the name of the teamtype."""
return self.name
def __repr__(self):
"""Define formal representation of a team type."""
return "<TeamType: id={id}, name='{name}', permissions={perms}>".format(
id=self.id, name=self.name, perms=self.perms
)
def _apply_(self):
teams_with_this_teamtype = self.team_set.all()
for team in teams_with_this_teamtype:
# team.permissions.set()
team.permissions.set(list(self.perms.all()))
class Team(Group):
"""
Define a team.
It inherits of Group class and define set_team_type.
"""
team_type = models.ForeignKey(
TeamType,
verbose_name="Team Type",
on_delete=models.CASCADE,
help_text='Group of users, extends the auth.models.Group model',
related_name="team_set",
related_query_name="team",
blank=False,
null=True
)
def set_team_type(self, new_team_type):
"""Assign the team type to the team."""
self.team_type = new_team_type
self.save()
new_team_type._apply_()
def __repr__(self):
"""Define formal representation of a team."""
return "<Team: id={id}, team_type='{name}'>".format(id=self.id, name=self.team_type)
| [
"django.db.models.ForeignKey",
"django.db.models.ManyToManyField",
"django.db.models.CharField",
"django.db.models.IntegerField"
] | [((384, 414), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (403, 414), False, 'from django.db import models\n'), ((1127, 1159), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1143, 1159), False, 'from django.db import models\n'), ((1172, 1378), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Permission'], {'verbose_name': '"""Team Type permissions"""', 'blank': '(True)', 'help_text': '"""Specific permissions for this team type."""', 'related_name': '"""teamType_set"""', 'related_query_name': '"""teamType"""'}), "(Permission, verbose_name='Team Type permissions',\n blank=True, help_text='Specific permissions for this team type.',\n related_name='teamType_set', related_query_name='teamType')\n", (1194, 1378), False, 'from django.db import models\n'), ((2103, 2338), 'django.db.models.ForeignKey', 'models.ForeignKey', (['TeamType'], {'verbose_name': '"""Team Type"""', 'on_delete': 'models.CASCADE', 'help_text': '"""Group of users, extends the auth.models.Group model"""', 'related_name': '"""team_set"""', 'related_query_name': '"""team"""', 'blank': '(False)', 'null': '(True)'}), "(TeamType, verbose_name='Team Type', on_delete=models.\n CASCADE, help_text=\n 'Group of users, extends the auth.models.Group model', related_name=\n 'team_set', related_query_name='team', blank=False, null=True)\n", (2120, 2338), False, 'from django.db import models\n')] |
# -*- coding: utf-8 -*-:
from django import template
import urllib
import hashlib
register = template.Library()
def gravatar(email, size=80, username=None):
gravatar_url = "http://www.gravatar.com/avatar.php?"
gravatar_url += urllib.urlencode({
'gravatar_id': hashlib.md5(email).hexdigest(),
'size': str(size)
})
if username is not None:
return """<img src="%s" alt="gravatar for %s" />""" % (gravatar_url, username)
else:
return """<img src="%s" alt="gravatar" />""" % (gravatar_url)
register.simple_tag(gravatar)
| [
"hashlib.md5",
"django.template.Library"
] | [((95, 113), 'django.template.Library', 'template.Library', ([], {}), '()\n', (111, 113), False, 'from django import template\n'), ((280, 298), 'hashlib.md5', 'hashlib.md5', (['email'], {}), '(email)\n', (291, 298), False, 'import hashlib\n')] |
# encoding=utf-8
"""
Misc PyTorch utils
Author: <EMAIL>
update 12.7
Usage:
`from torch_utils import *`
`func_name()` # to call functions in this file
"""
from datetime import datetime
import math
import os
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
##############################
# Functional utils
##############################
from utils.misc_utils import format_num
def clamp(x, min=0.01, max=0.99):
"""
value > max will be set to max
value < min will be set to min
:param x: input tensor
:param min:
:param max:
:return:
"""
return torch.clamp(x, min, max)
def repeat(x: torch.Tensor, *sizes):
"""
Example:
>>> t = repeat(t, 1, 3, 1, 1)
# t = t.repeat(1, 3, 1, 1) or t = torch.cat([t, t, t], dim=1)
:param x:
:param sizes:
:return:
"""
return x.repeat(*sizes)
def tensor2im(x: torch.Tensor, norm=False, dtype='float32'):
"""
:param x: [n, c, h, w] float32 type
:param dtype:
:return:
"""
if norm:
x = (x + 1) / 2
x[x > 1] = 1
x[x < 0] = 0
return x.detach().cpu().data[0]
##############################
# Network utils
##############################
def print_network(net: nn.Module, print_size=False):
num_params = 0
print(net)
for name, param in net.named_parameters():
num_params += param.numel()
size = list(param.size())
if len(size) > 1:
if print_size:
print(name, size[1:2]+size[:1]+size[2:], format_num(param.numel()))
else:
print(name, size[1:2] + size[:1] + size[2:])
print('Total number of parameters: %s' % format_num(num_params))
print('The size of receptive field: %s' % format_num(receptive_field(net)))
def receptive_field(net):
def _f(output_size, ksize, stride, dilation):
return (output_size - 1) * stride + ksize * dilation - dilation + 1
stats = []
for m in net.modules():
if isinstance(m, torch.nn.Conv2d):
stats.append((m.kernel_size, m.stride, m.dilation))
rsize = 1
for (ksize, stride, dilation) in reversed(stats):
if type(ksize) == tuple: ksize = ksize[0]
if type(stride) == tuple: stride = stride[0]
if type(dilation) == tuple: dilation = dilation[0]
rsize = _f(rsize, ksize, stride, dilation)
return rsize
##############################
# Abstract Meters class
##############################
class Meters(object):
def __init__(self):
pass
def update(self, new_dic):
raise NotImplementedError
def __getitem__(self, key):
raise NotImplementedError
def keys(self):
raise NotImplementedError
def items(self):
return self.dic.items()
class AverageMeters(Meters):
"""
Example:
avg_meters = AverageMeters()
for i in range(100):
avg_meters.update({'f': i})
print(str(avg_meters))
"""
def __init__(self, dic=None, total_num=None):
self.dic = dic or {}
# self.total_num = total_num
self.total_num = total_num or {}
def update(self, new_dic):
for key in new_dic:
if not key in self.dic:
self.dic[key] = new_dic[key]
self.total_num[key] = 1
else:
self.dic[key] += new_dic[key]
self.total_num[key] += 1
# self.total_num += 1
def __getitem__(self, key):
return self.dic[key] / self.total_num[key]
def __str__(self):
keys = sorted(self.keys())
res = ''
for key in keys:
res += (key + ': %.4f' % self[key] + ' | ')
return res
def keys(self):
return self.dic.keys()
class ExponentialMovingAverage(Meters):
"""
Example:
ema_meters = ExponentialMovingAverage(0.98)
for i in range(100):
ema_meters.update({'f': i})
print(str(ema_meters))
"""
def __init__(self, decay=0.9, dic=None, total_num=None):
self.decay = decay
self.dic = dic or {}
# self.total_num = total_num
self.total_num = total_num or {}
def update(self, new_dic):
decay = self.decay
for key in new_dic:
if not key in self.dic:
self.dic[key] = (1 - decay) * new_dic[key]
self.total_num[key] = 1
else:
self.dic[key] = decay * self.dic[key] + (1 - decay) * new_dic[key]
self.total_num[key] += 1
# self.total_num += 1
def __getitem__(self, key):
return self.dic[key] # / self.total_num[key]
def __str__(self):
keys = sorted(self.keys())
res = ''
for key in keys:
res += (key + ': %.4f' % self[key] + ' | ')
return res
def keys(self):
return self.dic.keys()
##############################
# Checkpoint helper
##############################
def load_ckpt(model, ckpt_path):
"""
Example:
class Model(nn.Module):
....
model = Model().cuda()
load_ckpt(model, 'model.pt')
:param model: object of a subclass of nn.Module
:param ckpt_path: *.pt file to load
:return:
"""
model.load_state_dict(torch.load(ckpt_path))
def save_ckpt(model, ckpt_path):
"""
Example:
class Model(nn.Module):
....
model = Model().cuda()
save_ckpt(model, 'model.pt')
:param model: object of a subclass of nn.Module
:param ckpt_path: *.pt file to save
:return:
"""
torch.save(model.state_dict(), ckpt_path)
##############################
# LR_Scheduler
##############################
class LR_Scheduler(object):
"""Learning Rate Scheduler
Example:
>>> scheduler = LR_Scheduler('cosine', opt.lr, opt.epochs, len(dataloader), warmup_epochs=20)
>>> for i, data in enumerate(dataloader)
>>> scheduler(self.g_optimizer, i, epoch)
Step mode: ``lr = baselr * 0.1 ^ {floor(epoch-1 / lr_step)}`` 每到达lr_step, lr就乘以0.1
Cosine mode: ``lr = baselr * 0.5 * (1 + cos(iter/maxiter))``
Poly mode: ``lr = baselr * (1 - iter/maxiter) ^ 0.9``
iters_per_epoch: number of iterations per epoch
"""
def __init__(self, mode, base_lr, num_epochs, iters_per_epoch=0,
lr_step=0, warmup_epochs=0, logger=None):
"""
:param mode: `step` `cos` or `poly`
:param base_lr:
:param num_epochs:
:param iters_per_epoch:
:param lr_step: lr step to change lr/ for `step` mode
:param warmup_epochs:
:param logger:
"""
self.mode = mode
print('Using {} LR Scheduler!'.format(self.mode))
self.lr = base_lr
if mode == 'step':
assert lr_step
self.lr_step = lr_step
self.iters_per_epoch = iters_per_epoch
self.N = num_epochs * iters_per_epoch
self.epoch = -1
self.warmup_iters = warmup_epochs * iters_per_epoch
self.logger = logger
if logger:
self.logger.info('Using {} LR Scheduler!'.format(self.mode))
def __call__(self, optimizer, i, epoch):
T = epoch * self.iters_per_epoch + i
if self.mode == 'cos':
lr = 0.5 * self.lr * (1 + math.cos(1.0 * T / self.N * math.pi))
elif self.mode == 'poly':
lr = self.lr * pow((1 - 1.0 * T / self.N), 0.9)
elif self.mode == 'step':
lr = self.lr * (0.1 ** (epoch // self.lr_step))
else:
raise NotImplemented
# warm up lr schedule
if self.warmup_iters > 0 and T < self.warmup_iters:
lr = lr * 1.0 * T / self.warmup_iters
if epoch > self.epoch:
if self.logger:
self.logger.info('\n=>Epoches %i, learning rate = %.4f' % (epoch, lr))
else:
print('\nepoch: %d lr: %.6f' % (epoch, lr))
self.epoch = epoch
assert lr >= 0
self._adjust_learning_rate(optimizer, lr)
def _adjust_learning_rate(self, optimizer, lr):
if len(optimizer.param_groups) == 1:
optimizer.param_groups[0]['lr'] = lr
else:
# enlarge the lr at the head
optimizer.param_groups[0]['lr'] = lr
for i in range(1, len(optimizer.param_groups)):
optimizer.param_groups[i]['lr'] = lr * 10
"""
TensorBoard
Example:
writer = create_summary_writer(os.path.join(self.basedir, 'logs'))
write_meters_loss(writer, 'train', avg_meters, iteration)
write_loss(writer, 'train', 'F1', 0.78, iteration)
write_image(writer, 'train', 'input', img, iteration)
# shell
tensorboard --logdir {base_path}/logs
"""
def create_summary_writer(log_dir):
if not os.path.exists(log_dir):
os.makedirs(log_dir)
log_dir = os.path.join(log_dir, datetime.now().strftime('%m-%d_%H-%M-%S'))
if not os.path.exists(log_dir):
os.mkdir(log_dir)
writer = SummaryWriter(log_dir, max_queue=0, flush_secs=10)
return writer
def write_loss(writer: SummaryWriter, prefix, loss_name: str, value: float, iteration):
"""
Example:
write_loss(writer, 'train', 'F1', 0.78, iteration)
:param writer: writer created by create_summary_writer()
:param prefix: e.g. for '/train/loss1' is 'train'
:param loss_name:
:param value:
:param iteration:
:return:
"""
writer.add_scalar(
os.path.join(prefix, loss_name), value, iteration)
def write_image(writer: SummaryWriter, prefix, image_name: str, img, iteration, dataformats='CHW'):
"""
Example:
write_image(writer, 'train', 'input', img, iteration)
:param writer: writer created by create_summary_writer()
:param prefix:
:param image_name:
:param img: image Tensor, should be channel first. Specific size of [C, H, W].
:param iteration:
:param dataformats: 'CHW' or 'HWC' or 'NCHW'''
:return:
"""
writer.add_image(
os.path.join(prefix, image_name), img, iteration, dataformats=dataformats)
def write_meters_loss(writer: SummaryWriter, prefix, avg_meters: Meters, iteration):
"""
Example:
writer = create_summary_writer(os.path.join(self.basedir, 'logs'))
ema_meters = ExponentialMovingAverage(0.98)
for i in range(100):
ema_meters.update({'f1': i, 'f2': i*0.5})
write_meters_loss(writer, 'train', ema_meters, i)
:param writer:
:param prefix:
:param avg_meters: avg_meters param should be a Meters subclass
:param iteration:
:return:
"""
for key in avg_meters.keys():
meter = avg_meters[key]
writer.add_scalar(
os.path.join(prefix, key), meter, iteration)
| [
"os.path.exists",
"tensorboardX.SummaryWriter",
"os.makedirs",
"torch.load",
"os.path.join",
"math.cos",
"datetime.datetime.now",
"os.mkdir",
"utils.misc_utils.format_num",
"torch.clamp"
] | [((648, 672), 'torch.clamp', 'torch.clamp', (['x', 'min', 'max'], {}), '(x, min, max)\n', (659, 672), False, 'import torch\n'), ((9263, 9313), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['log_dir'], {'max_queue': '(0)', 'flush_secs': '(10)'}), '(log_dir, max_queue=0, flush_secs=10)\n', (9276, 9313), False, 'from tensorboardX import SummaryWriter\n'), ((5428, 5449), 'torch.load', 'torch.load', (['ckpt_path'], {}), '(ckpt_path)\n', (5438, 5449), False, 'import torch\n'), ((9055, 9078), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (9069, 9078), False, 'import os\n'), ((9088, 9108), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (9099, 9108), False, 'import os\n'), ((9199, 9222), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (9213, 9222), False, 'import os\n'), ((9232, 9249), 'os.mkdir', 'os.mkdir', (['log_dir'], {}), '(log_dir)\n', (9240, 9249), False, 'import os\n'), ((9760, 9791), 'os.path.join', 'os.path.join', (['prefix', 'loss_name'], {}), '(prefix, loss_name)\n', (9772, 9791), False, 'import os\n'), ((10338, 10370), 'os.path.join', 'os.path.join', (['prefix', 'image_name'], {}), '(prefix, image_name)\n', (10350, 10370), False, 'import os\n'), ((1767, 1789), 'utils.misc_utils.format_num', 'format_num', (['num_params'], {}), '(num_params)\n', (1777, 1789), False, 'from utils.misc_utils import format_num\n'), ((11091, 11116), 'os.path.join', 'os.path.join', (['prefix', 'key'], {}), '(prefix, key)\n', (11103, 11116), False, 'import os\n'), ((9145, 9159), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (9157, 9159), False, 'from datetime import datetime\n'), ((7533, 7569), 'math.cos', 'math.cos', (['(1.0 * T / self.N * math.pi)'], {}), '(1.0 * T / self.N * math.pi)\n', (7541, 7569), False, 'import math\n')] |
#!/usr/bin/python
# Copyright (c) 2017 <NAME>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: route_vpn
short_description: Create a route based VPN
description:
- Create a route based VPN. Route VPN's are typically created between a managed
Stonesoft FW and a 3rd party device (AWS, Azure, etc). You must pre-create
the internal FW prior to running this module. If doing an IPSEC wrapped VPN,
you must also specify a tunnel interface for which to bind (must be pre-created)
and specify an IP address/interface id to specify the ISAKMP listener.
version_added: '2.5'
options:
name:
description:
- The name for this route VPN.
required: true
type: str
type:
description:
- The type of IPSEC vpn to create
type: str
choices: ['ipsec', 'gre']
default: ipsec
enabled:
description:
- Whether the VPN is enabled or disabled
type: bool
local_gw:
description:
- Represents the locally managed Stonesoft FW gateway. If the remote_gw is also
a Stonesoft managed device, use the same parameters to define
type: str
suboptions:
name:
description:
- The name of the Stonesoft FW gateway
type: str
required: true
tunnel_interface:
description:
- The ID for the tunnel interface
type: str
required: true
interface_id:
description:
- The interface ID to enable IPSEC. If multiple IP addresses exist
on the interface, IPSEC will be enabled on all. Use I(interface_ip) as
an alternative.
type: str
required: true
address:
description:
- An interface IP addresses to enable IPSEC. If there are multiple IP addresses
on a single interface specified with I(interface_id) and you want to bind to
only that address
type: str
required: false
remote_gw:
description:
- The name of the remote GW. If the remote gateway is an Stonesoft FW, it must
pre-exist. Use the local_gw documentation for settings. If it is an External Gateway,
this module will create the gateway based on the gateway settings provided if it
doesn't already exist. This documents an External Gateway configuration. See also
the external_gateway module for additional external endpoint settings.
type: str
suboptions:
name:
description:
- The name of the External Gateway. If the gateway does not exist, it will be created
if you provide the I(address) and I(networks) parameters.
type: str
required: true
preshared_key:
description:
- If this is an External Gateway, you must provide a pre-shared key to be used between
the gateways. If the gateway is another Stonesoft FW, a key will be auto-generated.
type: str
type:
description:
- Set to external_gateway if this is an external gateway element type
type: str
vpn_site:
description:
- Defines the VPN site for the protected networks on other end of external gateway
type: dict
suboptions:
name:
description:
- Name of VPN site
type: str
required: true
network:
description:
- A valid element type from SMC. Typically this is network or host. List elements
should be valid names of the specified element
type: list
external_endpoint:
description:
- The external endpoint gateways where the RBVPN will terminate. Any options that are
supported by the smcpython ExternalEndpoint.create constructor are supported values
for this definition
type: list
required: true
suboptions:
name:
description:
- Name of the external endpoint
type: str
required: True
address:
description:
- A valid IP address of the external gateway
type: str
required: true
enabled:
description:
- Whether to enable the gateway.
type: bool
tags:
description:
- Provide an optional category tag to the engine. If the category does not
exist, it will be created
type: list
state:
description:
- Specify a create or delete operation
required: false
default: present
choices:
- present
- absent
extends_documentation_fragment: stonesoft
notes:
- Login credential information is either obtained by providing them directly
to the task/play, specifying an alt_filepath to read the credentials from to
the play, or from environment variables (in that order). See
U(http://smc-python.readthedocs.io/en/latest/pages/session.html) for more
information.
requirements:
- smc-python
author:
- <NAME> (@gabstopper)
'''
EXAMPLES = '''
- name: Route VPN between internal engine and 3rd party external gateway
register: result
route_vpn:
smc_logging:
level: 10
path: ansible-smc.log
enabled: true
local_gw:
address: 172.16.31.10
name: newcluster
tunnel_interface: '1001'
name: myrbvpn
remote_gw:
external_endpoint:
- address: 33.33.33.41
enabled: true
name: extgw3 (33.33.33.41)
connection_type: 'Active 1'
- address: 34.34.34.34
enabled: true
name: endpoint2 (34.34.34.34)
connection_type: 'Active 1'
- address: 44.44.44.44
enabled: false
name: extgw4 (44.44.44.44)
connection_type: 'Active 1'
- address: 33.33.33.50
enabled: false
name: endpoint1 (33.33.33.50)
connection_type: 'Active 1'
name: extgw3
preshared_key: '********'
type: external_gateway
vpn_site:
name: extgw3-site
network:
- network-172.18.15.0/24
- network-172.18.1.0/24
- network-172.18.2.0/24
- name: Create a new Route VPN with internal gateways
route_vpn:
smc_logging:
level: 10
path: ansible-smc.log
name: myrbvpn
type: ipsec
local_gw:
name: newcluster
tunnel_interface: 1001
interface_id: 1
#address: 192.168.127.12
remote_gw:
name: myfw
tunnel_interface: 1000
interface_id: 0
tags:
- footag
'''
RETURN = '''
changed:
description: Whether or not the change succeeded
returned: always
type: bool
state:
description: The current state of the element
return: always
type: dict
'''
import traceback
from ansible.module_utils.stonesoft_util import (
StonesoftModuleBase, Cache)
try:
from smc.vpn.route import RouteVPN, TunnelEndpoint
from smc.vpn.elements import ExternalGateway
from smc.core.engine import Engine
from smc.api.exceptions import SMCException
except ImportError:
pass
class StonesoftRouteVPN(StonesoftModuleBase):
def __init__(self):
self.module_args = dict(
name=dict(type='str', required=True),
type=dict(default='ipsec', type='str', choices=['ipsec', 'gre']),
local_gw=dict(type='dict'),
remote_gw=dict(type='dict'),
enabled=dict(type='bool'),
tags=dict(type='list'),
state=dict(default='present', type='str', choices=['present', 'absent'])
)
self.name = None
self.type = None
self.enabled = None
self.local_gw = None
self.remote_gw = None
self.tags = None
required_if=([
('state', 'present', ['local_gw', 'remote_gw'])
])
self.results = dict(
changed=False,
state=[]
)
super(StonesoftRouteVPN, self).__init__(self.module_args, supports_check_mode=True,
required_if=required_if)
def exec_module(self, **kwargs):
state = kwargs.pop('state', 'present')
for name, value in kwargs.items():
setattr(self, name, value)
rbvpn = self.fetch_element(RouteVPN)
changed = False
if state == 'present':
# Short circuit disable
if rbvpn and self.enabled is not None and (rbvpn.enabled and not self.enabled):
rbvpn.disable()
self.results['changed'] = True
return self.results
local_engine = self.get_managed_gateway(self.local_gw)
local_tunnel_interface = self.get_tunnel_interface(
local_engine, self.local_gw.get('tunnel_interface'))
local_internal_endpoint = self.get_ipsec_endpoint(
local_engine, self.local_gw.get('interface_id'),
address=self.local_gw.get('address'))
if self.remote_gw.get('type', None) != 'external_gateway':
remote_engine = self.get_managed_gateway(self.remote_gw)
remote_tunnel_interface = self.get_tunnel_interface(
remote_engine, self.remote_gw.get('tunnel_interface'))
remote_internal_endpoint = self.get_ipsec_endpoint(
remote_engine, self.remote_gw.get('interface_id'),
address=self.remote_gw.get('address'))
else:
# External Gateway
req = ('name', 'preshared_key', 'external_endpoint')
for required in req:
if required not in self.remote_gw:
self.fail(msg='Missing required field for the external endpoint '
'configuration: %s' % required)
cache = Cache()
external_gateway = dict(name=self.remote_gw['name'])
# External Endpoints are defined in the External Gateway.
# Build the data structures for a call to ExternalGateway.update_or_create
ctypes = [] # connection_type element
for endpoint in self.remote_gw['external_endpoint']:
if 'name' not in endpoint or 'address' not in endpoint:
self.fail(msg='An external endpoint must have at least a '
'name and an address definition.')
# SMC version 6.5 requires the connection type element to specify
# the role for the given external endpoint
if 'connection_type' not in endpoint:
self.fail(msg='You must provide the connection_type parameter '
'when creating an external endpoint')
ctypes.append(endpoint.get('connection_type'))
cache.add(dict(connection_type=ctypes))
if cache.missing:
self.fail(msg=cache.missing)
# Verify specified VPN Sites exist before continuing
if 'vpn_site' in self.remote_gw:
site_name = self.remote_gw.get('vpn_site', {}).pop('name', None)
if not site_name:
self.fail(msg='A VPN site requires a name to continue')
# Get the elements
cache.add(self.remote_gw.get('vpn_site', {}))
vpn_site_types = self.remote_gw.get('vpn_site', {}).keys() # Save the VPN site types for retrieval
if cache.missing:
self.fail(msg='Could not find the specified elements for the '
'VPN site configuration: %s' % cache.missing)
site_element = [element.href for element_type in vpn_site_types
for element in cache.get_type(element_type)]
external_gateway.update(
vpn_site=[dict(name=site_name, site_element=site_element)])
external_endpoint = []
for endpoint in self.remote_gw['external_endpoint']:
endpoint.update(connection_type_ref=\
cache.get('connection_type',endpoint.pop('connection_type')).href)
external_endpoint.append(endpoint)
external_gateway.update(external_endpoint=external_endpoint)
try:
if state == 'present':
if self.check_mode:
return self.results
# Create the tunnel endpoints
if not rbvpn:
local_gateway = TunnelEndpoint.create_ipsec_endpoint(
local_engine.vpn.internal_gateway, local_tunnel_interface)
# Enable the IPSEC listener on specified interface/s
if self.update_ipsec_listener(local_internal_endpoint):
changed = True
is_external = self.remote_gw.get('type', None) == 'external_gateway'
if not is_external:
remote_gateway = TunnelEndpoint.create_ipsec_endpoint(
remote_engine.vpn.internal_gateway, remote_tunnel_interface)
if self.update_ipsec_listener(remote_internal_endpoint):
changed = True
else: # Update or Create
gw, updated, created = ExternalGateway.update_or_create(
with_status=True, **external_gateway)
remote_gateway = TunnelEndpoint.create_ipsec_endpoint(gw)
if created or updated:
changed = True
vpn = dict(
name=self.name,
local_endpoint=local_gateway,
remote_endpoint=remote_gateway)
if is_external:
vpn.update(preshared_key=self.remote_gw['preshared_key'])
rbvpn = RouteVPN.create_ipsec_tunnel(**vpn)
changed = True
else:
#TODO: Update or create from top level RBVPN
#rbvpn.update_or_create()
if rbvpn and self.enabled is not None and (not rbvpn.enabled and self.enabled):
rbvpn.enable()
changed = True
if self.remote_gw.get('type') == 'external_gateway':
gw, updated, created = ExternalGateway.update_or_create(
with_status=True, **external_gateway)
if updated or created:
changed = True
self.results['state'] = rbvpn.data.data
self.results['changed'] = changed
elif state == 'absent':
if rbvpn:
rbvpn.delete()
changed = True
except SMCException as err:
self.fail(msg=str(err), exception=traceback.format_exc())
self.results['changed'] = changed
return self.results
def get_ipsec_endpoint(self, engine, interface_id, address=None):
"""
Get the internal endpoint for which to enable IPSEC on for the
internal FW. This is required for IPSEC based RBVPN.
:param engine Engine: engine reference, already obtained
:param str interface_id: interface ID specified for IPSEC listener
:rtype: list(InternalEndpoint)
"""
try:
interface = engine.interface.get(interface_id)
except SMCException as e:
self.fail(msg='Fetch IPSEC interface for endpoint failed: %s' % str(e))
internal_endpoint = engine.vpn.internal_endpoint # Collection
endpoints = []
if address:
ep = internal_endpoint.get_exact(address)
if ep:
endpoints.append(ep)
else: # Get all endpoints for the interface
for addr, network, nicid in interface.addresses: # @UnusedVariable
if internal_endpoint.get_exact(addr):
endpoints.append(
internal_endpoint.get_exact(addr))
if not endpoints:
self.fail(msg='No IPSEC endpoint interfaces found. The specified '
'interface ID was: %s and address: %s' % (interface_id, address))
return endpoints
def update_ipsec_listener(self, internal_endpoints):
"""
Update the internal endpoint to enable the IPSEC listener on
the specified interface/s.
:param list(InternalEndpoint) internal_endpoints: internal endpoints
:rtype: bool
"""
changed = False
for endpoint in internal_endpoints:
if not endpoint.enabled:
endpoint.update(enabled=True)
changed = True
return changed
def get_tunnel_interface(self, engine, interface_id):
"""
Get the specified Tunnel Interface for the gateway.
:param engine Engine: engine ref
:param str interface_id: pulled from gateway yaml
:rtype: TunnelInterface
"""
tunnel_interface = None
for interface in engine.tunnel_interface:
if interface.interface_id == str(interface_id):
tunnel_interface = interface
break
if not tunnel_interface:
self.fail(msg='Cannot find specified tunnel interface: %s for specified gateway '
'%s' % (interface_id, engine.name))
return tunnel_interface
def get_managed_gateway(self, gw):
"""
If the gateway is a locally managed SMC gateway, tunnel interface and
an IPSEC interface is required.
:param dict local_gw,remote_gw: yaml definition
:rtype: Engine
"""
for req in ('name', 'tunnel_interface', 'interface_id'):
if req not in gw:
self.fail(msg='Managed gateway requires name, interface_id and '
'tunnel_interface fields')
managed_gw = Engine.get(gw.get('name'), raise_exc=False)
if not managed_gw:
self.fail(msg='The specified managed gateway specified does not '
'exist: %s' % gw.get('name'))
return managed_gw
def main():
StonesoftRouteVPN()
if __name__ == '__main__':
main() | [
"ansible.module_utils.stonesoft_util.Cache",
"traceback.format_exc",
"smc.vpn.route.RouteVPN.create_ipsec_tunnel",
"smc.vpn.route.TunnelEndpoint.create_ipsec_endpoint",
"smc.vpn.elements.ExternalGateway.update_or_create"
] | [((10225, 10232), 'ansible.module_utils.stonesoft_util.Cache', 'Cache', ([], {}), '()\n', (10230, 10232), False, 'from ansible.module_utils.stonesoft_util import StonesoftModuleBase, Cache\n'), ((13173, 13272), 'smc.vpn.route.TunnelEndpoint.create_ipsec_endpoint', 'TunnelEndpoint.create_ipsec_endpoint', (['local_engine.vpn.internal_gateway', 'local_tunnel_interface'], {}), '(local_engine.vpn.internal_gateway,\n local_tunnel_interface)\n', (13209, 13272), False, 'from smc.vpn.route import RouteVPN, TunnelEndpoint\n'), ((14751, 14786), 'smc.vpn.route.RouteVPN.create_ipsec_tunnel', 'RouteVPN.create_ipsec_tunnel', ([], {}), '(**vpn)\n', (14779, 14786), False, 'from smc.vpn.route import RouteVPN, TunnelEndpoint\n'), ((13694, 13795), 'smc.vpn.route.TunnelEndpoint.create_ipsec_endpoint', 'TunnelEndpoint.create_ipsec_endpoint', (['remote_engine.vpn.internal_gateway', 'remote_tunnel_interface'], {}), '(remote_engine.vpn.internal_gateway,\n remote_tunnel_interface)\n', (13730, 13795), False, 'from smc.vpn.route import RouteVPN, TunnelEndpoint\n'), ((14087, 14157), 'smc.vpn.elements.ExternalGateway.update_or_create', 'ExternalGateway.update_or_create', ([], {'with_status': '(True)'}), '(with_status=True, **external_gateway)\n', (14119, 14157), False, 'from smc.vpn.elements import ExternalGateway\n'), ((14228, 14268), 'smc.vpn.route.TunnelEndpoint.create_ipsec_endpoint', 'TunnelEndpoint.create_ipsec_endpoint', (['gw'], {}), '(gw)\n', (14264, 14268), False, 'from smc.vpn.route import RouteVPN, TunnelEndpoint\n'), ((15312, 15382), 'smc.vpn.elements.ExternalGateway.update_or_create', 'ExternalGateway.update_or_create', ([], {'with_status': '(True)'}), '(with_status=True, **external_gateway)\n', (15344, 15382), False, 'from smc.vpn.elements import ExternalGateway\n'), ((15894, 15916), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (15914, 15916), False, 'import traceback\n')] |
# Lint as: python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Recording pipeline from MLMD metadata."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from typing import Iterable, List, Mapping, Optional, Text, Tuple
from absl import logging
import tensorflow as tf
from tfx.orchestration import metadata
from tfx.utils import io_utils
from ml_metadata.proto import metadata_store_pb2
def _get_paths(metadata_connection: metadata.Metadata, execution_ids: List[int],
output_dir: Text) -> Iterable[Tuple[Text, Text]]:
"""Yields tuple with source and destination artifact uris.
The destination artifact uris are located in the output_dir. The source
artifact uris are retrieved using execution ids.
Args:
metadata_connection: Instance of metadata.Metadata for I/O to MLMD.
execution_ids: List of execution ids of a pipeline run.
output_dir: Directory path where the pipeline outputs should be recorded.
Yields:
Iterable over tuples of source uri and destination uri.
"""
events = metadata_connection.store.get_events_by_execution_ids(execution_ids)
output_events = [
x for x in events if x.type == metadata_store_pb2.Event.OUTPUT
]
unique_artifact_ids = list({x.artifact_id for x in output_events})
for artifact in metadata_connection.store.get_artifacts_by_id(
unique_artifact_ids):
src_uri = artifact.uri
artifact_properties = artifact.custom_properties
component_id = artifact_properties['producer_component'].string_value
name = artifact_properties['name'].string_value
dest_uri = os.path.join(output_dir, component_id, name)
yield (src_uri, dest_uri)
def _get_execution_dict(
metadata_connection: metadata.Metadata
) -> Mapping[Text, List[metadata_store_pb2.Execution]]:
"""Returns a dictionary holding list of executions for all run_id in MLMD.
Args:
metadata_connection: Instance of metadata.Metadata for I/O to MLMD.
Returns:
A dictionary that holds list of executions for a run_id.
"""
execution_dict = collections.defaultdict(list)
for execution in metadata_connection.store.get_executions():
execution_run_id = execution.properties['run_id'].string_value
execution_dict[execution_run_id].append(execution)
return execution_dict
def _get_latest_executions(
metadata_connection: metadata.Metadata,
pipeline_name: Text) -> List[metadata_store_pb2.Execution]:
"""Fetches executions associated with the latest context.
Args:
metadata_connection: Instance of metadata.Metadata for I/O to MLMD.
pipeline_name: Name of the pipeline to rerieve the latest executions for.
Returns:
List of executions for the latest run of a pipeline with the given
pipeline_name.
"""
pipeline_run_contexts = [
c for c in metadata_connection.store.get_contexts_by_type(
metadata._CONTEXT_TYPE_PIPELINE_RUN) # pylint: disable=protected-access
if c.properties['pipeline_name'].string_value == pipeline_name
]
latest_context = max(
pipeline_run_contexts, key=lambda c: c.last_update_time_since_epoch)
return metadata_connection.store.get_executions_by_context(latest_context.id)
def record_pipeline(output_dir: Text, metadata_db_uri: Optional[Text],
host: Optional[Text], port: Optional[int],
pipeline_name: Optional[Text],
run_id: Optional[Text]) -> None:
"""Record pipeline run with run_id to output_dir.
For the beam pipeline, metadata_db_uri is required. For KFP pipeline,
host and port should be specified. If run_id is not specified, then
pipeline_name ought to be specified in order to fetch the latest execution
for the specified pipeline.
Args:
output_dir: Directory path where the pipeline outputs should be recorded.
metadata_db_uri: Uri to metadata db.
host: Hostname of the metadata grpc server
port: Port number of the metadata grpc server.
pipeline_name: Pipeline name, which is required if run_id isn't specified.
run_id: Pipeline execution run_id.
Raises:
ValueError: In cases of invalid arguments:
- metadata_db_uri is None or host and/or port is None.
- run_id is None and pipeline_name is None.
FileNotFoundError: if the source artifact uri does not already exist.
"""
if host is not None and port is not None:
metadata_config = metadata_store_pb2.MetadataStoreClientConfig(
host=host, port=port)
elif metadata_db_uri is not None:
metadata_config = metadata.sqlite_metadata_connection_config(
metadata_db_uri)
else:
raise ValueError('For KFP, host and port are required. '
'For beam pipeline, metadata_db_uri is required.')
with metadata.Metadata(metadata_config) as metadata_connection:
if run_id is None:
if pipeline_name is None:
raise ValueError('If the run_id is not specified,'
' pipeline_name should be specified')
# fetch executions of the most recently updated execution context.
executions = _get_latest_executions(metadata_connection, pipeline_name)
else:
execution_dict = _get_execution_dict(metadata_connection)
if run_id in execution_dict:
executions = execution_dict[run_id]
else:
raise ValueError(
'run_id {} is not recorded in the MLMD metadata'.format(run_id))
execution_ids = [e.id for e in executions]
for src_uri, dest_uri in _get_paths(metadata_connection, execution_ids,
output_dir):
if not tf.io.gfile.exists(src_uri):
raise FileNotFoundError('{} does not exist'.format(src_uri))
io_utils.copy_dir(src_uri, dest_uri)
logging.info('Pipeline Recorded at %s', output_dir)
| [
"tfx.orchestration.metadata.Metadata",
"tfx.utils.io_utils.copy_dir",
"os.path.join",
"absl.logging.info",
"tfx.orchestration.metadata.sqlite_metadata_connection_config",
"collections.defaultdict",
"ml_metadata.proto.metadata_store_pb2.MetadataStoreClientConfig",
"tensorflow.io.gfile.exists"
] | [((2681, 2710), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (2704, 2710), False, 'import collections\n'), ((2224, 2268), 'os.path.join', 'os.path.join', (['output_dir', 'component_id', 'name'], {}), '(output_dir, component_id, name)\n', (2236, 2268), False, 'import os\n'), ((5014, 5080), 'ml_metadata.proto.metadata_store_pb2.MetadataStoreClientConfig', 'metadata_store_pb2.MetadataStoreClientConfig', ([], {'host': 'host', 'port': 'port'}), '(host=host, port=port)\n', (5058, 5080), False, 'from ml_metadata.proto import metadata_store_pb2\n'), ((5366, 5400), 'tfx.orchestration.metadata.Metadata', 'metadata.Metadata', (['metadata_config'], {}), '(metadata_config)\n', (5383, 5400), False, 'from tfx.orchestration import metadata\n'), ((6355, 6406), 'absl.logging.info', 'logging.info', (['"""Pipeline Recorded at %s"""', 'output_dir'], {}), "('Pipeline Recorded at %s', output_dir)\n", (6367, 6406), False, 'from absl import logging\n'), ((5148, 5207), 'tfx.orchestration.metadata.sqlite_metadata_connection_config', 'metadata.sqlite_metadata_connection_config', (['metadata_db_uri'], {}), '(metadata_db_uri)\n', (5190, 5207), False, 'from tfx.orchestration import metadata\n'), ((6314, 6350), 'tfx.utils.io_utils.copy_dir', 'io_utils.copy_dir', (['src_uri', 'dest_uri'], {}), '(src_uri, dest_uri)\n', (6331, 6350), False, 'from tfx.utils import io_utils\n'), ((6210, 6237), 'tensorflow.io.gfile.exists', 'tf.io.gfile.exists', (['src_uri'], {}), '(src_uri)\n', (6228, 6237), True, 'import tensorflow as tf\n')] |
import argparse
from distutils.util import strtobool
def str2bool(x):
return bool(strtobool(x))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--num_epochs', type=int, default=1000)
parser.add_argument('--learning_rate', type=float, default=0.0005)
parser.add_argument('--batch_size', type=int, default=4096)
parser.add_argument('--num_workers', type=int, default=16)
parser.add_argument('--non_graph_embedding_dim', type=int, default=200)
parser.add_argument('--graph_embedding_dims',
type=int,
nargs='+',
default=[200, 128, 64])
parser.add_argument(
'--neighbors_sampling_quantile',
type=float,
default=0.9,
help=
'Set the number of sampled neighbors to the quantile of the numbers of neighbors'
)
parser.add_argument('--min_neighbors_sampled', type=int, default=4)
parser.add_argument('--max_neighbors_sampled', type=int, default=512)
parser.add_argument('--single_attribute_dim', type=int,
default=40) # TODO: support attributes
parser.add_argument('--attention_query_vector_dim', type=int, default=200)
parser.add_argument(
'--dnn_predictor_dims',
type=int,
nargs='+',
default=[-1, 128, 1],
help=
'You can set first dim as -1 to make it automatically fit the input vector'
)
parser.add_argument('--num_batches_show_loss', type=int, default=50)
parser.add_argument('--num_epochs_validate', type=int, default=5)
parser.add_argument('--early_stop_patience', type=int, default=20)
parser.add_argument('--num_attention_heads', type=int, default=8)
parser.add_argument('--save_checkpoint', type=str2bool, default=False)
parser.add_argument('--different_embeddings', type=str2bool, default=False)
parser.add_argument('--negative_sampling_ratio', type=int, default=4)
parser.add_argument(
'--model_name',
type=str,
default='GCN',
choices=[
# Non-graph
'NCF',
# Graph with single type of edge (we think it as homogeneous graph)
'GCN',
'GAT',
'LightGCN',
'NGCF',
# Graph with multiple types of edge (we think it as heterogeneous graph)
'HET-GCN',
'HET-GAT',
'HET-NGCF',
'HET-LightGCN',
# To be categorized
'GraphRec',
'DeepFM',
'DSSM',
'DiffNet',
'DiffNet++',
'DANSER'
])
parser.add_argument('--embedding_aggregator',
type=str,
default='concat',
choices=['concat', 'attn'])
parser.add_argument('--predictor',
type=str,
default='dnn',
choices=['dot', 'dnn'])
parser.add_argument('--dataset_path', type=str, required=True)
parser.add_argument('--metadata_path', type=str, required=True)
parser.add_argument('--log_path', type=str, default='./log/')
parser.add_argument('--tensorboard_runs_path', type=str, default='./runs/')
parser.add_argument('--checkpoint_path', type=str, default='./checkpoint/')
parser.add_argument('--edge_choice',
type=int,
nargs='+',
default=[],
help='Left empty to use all in metadata file')
parser.add_argument('--training_task_choice',
type=int,
nargs='+',
default=[],
help='Left empty to use all in metadata file')
parser.add_argument('--evaluation_task_choice',
type=int,
nargs='+',
default=[],
help='Left empty to use all in `training_task_choice`')
parser.add_argument('--task_loss_overwrite', type=str, nargs='+')
parser.add_argument('--task_weight_overwrite', type=float, nargs='+')
args, unknown = parser.parse_known_args()
if len(unknown) > 0:
print(
'Warning: if you are not in testing mode, you may have got some parameters wrong input'
)
return args
| [
"distutils.util.strtobool",
"argparse.ArgumentParser"
] | [((135, 160), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (158, 160), False, 'import argparse\n'), ((88, 100), 'distutils.util.strtobool', 'strtobool', (['x'], {}), '(x)\n', (97, 100), False, 'from distutils.util import strtobool\n')] |
# This a training script launched with py_config_runner
# It should obligatory contain `run(config, **kwargs)` method
import sys
from collections.abc import Mapping
from pathlib import Path
import torch
from apex import amp
from dataflow.datasets import VOCSegmentationOpencv
from py_config_runner.config_utils import TRAINVAL_CONFIG, assert_config, get_params
from py_config_runner.utils import set_seed
from utils import exp_tracking
from utils.handlers import predictions_gt_images_handler
import ignite
import ignite.distributed as idist
from ignite.contrib.engines import common
from ignite.engine import Engine, Events, create_supervised_evaluator
from ignite.handlers import DiskSaver
from ignite.metrics import ConfusionMatrix, IoU, mIoU
from ignite.utils import setup_logger
# Adds "code" folder to python path
sys.path.insert(0, Path(__file__).parent.parent.as_posix())
def initialize(config):
model = config.model.to(config.device)
optimizer = config.optimizer
# Setup Nvidia/Apex AMP
model, optimizer = amp.initialize(model, optimizer, opt_level=getattr(config, "fp16_opt_level", "O2"), num_losses=1)
# Adapt model to dist conf
model = idist.auto_model(model)
criterion = config.criterion.to(config.device)
return model, optimizer, criterion
def get_save_handler(config):
if exp_tracking.has_clearml:
from ignite.contrib.handlers.clearml_logger import ClearMLSaver
return ClearMLSaver(dirname=config.output_path.as_posix())
return DiskSaver(config.output_path.as_posix())
def create_trainer(model, optimizer, criterion, train_sampler, config, logger):
prepare_batch = config.prepare_batch
device = config.device
# Setup trainer
accumulation_steps = getattr(config, "accumulation_steps", 1)
model_output_transform = getattr(config, "model_output_transform", lambda x: x)
def train_update_function(engine, batch):
model.train()
x, y = prepare_batch(batch, device=device, non_blocking=True)
y_pred = model(x)
y_pred = model_output_transform(y_pred)
loss = criterion(y_pred, y)
if isinstance(loss, Mapping):
assert "supervised batch loss" in loss
loss_dict = loss
output = {k: v.item() for k, v in loss_dict.items()}
loss = loss_dict["supervised batch loss"] / accumulation_steps
else:
output = {"supervised batch loss": loss.item()}
with amp.scale_loss(loss, optimizer, loss_id=0) as scaled_loss:
scaled_loss.backward()
if engine.state.iteration % accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
return output
output_names = getattr(config, "output_names", ["supervised batch loss",])
lr_scheduler = config.lr_scheduler
trainer = Engine(train_update_function)
trainer.logger = logger
to_save = {"model": model, "optimizer": optimizer, "lr_scheduler": lr_scheduler, "trainer": trainer, "amp": amp}
save_every_iters = getattr(config, "save_every_iters", 1000)
common.setup_common_training_handlers(
trainer,
train_sampler,
to_save=to_save,
save_every_iters=save_every_iters,
save_handler=get_save_handler(config),
lr_scheduler=lr_scheduler,
with_gpu_stats=exp_tracking.has_mlflow,
output_names=output_names,
with_pbars=False,
)
if idist.get_rank() == 0:
common.ProgressBar(persist=False).attach(trainer, metric_names="all")
return trainer
def create_evaluators(model, metrics, config):
model_output_transform = getattr(config, "model_output_transform", lambda x: x)
evaluator_args = dict(
model=model,
metrics=metrics,
device=config.device,
non_blocking=True,
prepare_batch=config.prepare_batch,
output_transform=lambda x, y, y_pred: (model_output_transform(y_pred), y,),
)
train_evaluator = create_supervised_evaluator(**evaluator_args)
evaluator = create_supervised_evaluator(**evaluator_args)
if idist.get_rank() == 0:
common.ProgressBar(desc="Evaluation (train)", persist=False).attach(train_evaluator)
common.ProgressBar(desc="Evaluation (val)", persist=False).attach(evaluator)
return evaluator, train_evaluator
def log_metrics(logger, epoch, elapsed, tag, metrics):
metrics_output = "\n".join([f"\t{k}: {v}" for k, v in metrics.items()])
logger.info(f"\nEpoch {epoch} - Evaluation time (seconds): {int(elapsed)} - {tag} metrics:\n {metrics_output}")
def log_basic_info(logger, config):
msg = f"\n- PyTorch version: {torch.__version__}"
msg += f"\n- Ignite version: {ignite.__version__}"
msg += f"\n- Cuda device name: {torch.cuda.get_device_name(idist.get_local_rank())}"
logger.info(msg)
if idist.get_world_size() > 1:
msg = "\nDistributed setting:"
msg += f"\tbackend: {idist.backend()}"
msg += f"\trank: {idist.get_rank()}"
msg += f"\tworld size: {idist.get_world_size()}"
logger.info(msg)
def training(local_rank, config, logger=None):
if not getattr(config, "use_fp16", True):
raise RuntimeError("This training script uses by default fp16 AMP")
torch.backends.cudnn.benchmark = True
set_seed(config.seed + local_rank)
train_loader, val_loader, train_eval_loader = config.train_loader, config.val_loader, config.train_eval_loader
# Setup model, optimizer, criterion
model, optimizer, criterion = initialize(config)
# Setup trainer for this specific task
trainer = create_trainer(model, optimizer, criterion, train_loader.sampler, config, logger)
# Setup evaluators
num_classes = config.num_classes
cm_metric = ConfusionMatrix(num_classes=num_classes)
val_metrics = {
"IoU": IoU(cm_metric),
"mIoU_bg": mIoU(cm_metric),
}
if hasattr(config, "val_metrics") and isinstance(config.val_metrics, dict):
val_metrics.update(config.val_metrics)
evaluator, train_evaluator = create_evaluators(model, val_metrics, config)
val_interval = getattr(config, "val_interval", 1)
@trainer.on(Events.EPOCH_COMPLETED(every=val_interval))
def run_validation():
epoch = trainer.state.epoch
state = train_evaluator.run(train_eval_loader)
log_metrics(logger, epoch, state.times["COMPLETED"], "Train", state.metrics)
state = evaluator.run(val_loader)
log_metrics(logger, epoch, state.times["COMPLETED"], "Test", state.metrics)
if config.num_epochs % val_interval != 0:
trainer.add_event_handler(Events.COMPLETED, run_validation)
if getattr(config, "start_by_validation", False):
trainer.add_event_handler(Events.STARTED, run_validation)
score_metric_name = "mIoU_bg"
if hasattr(config, "es_patience"):
common.add_early_stopping_by_val_score(config.es_patience, evaluator, trainer, metric_name=score_metric_name)
# Store 3 best models by validation accuracy:
common.gen_save_best_models_by_val_score(
save_handler=get_save_handler(config),
evaluator=evaluator,
models=model,
metric_name=score_metric_name,
n_saved=3,
trainer=trainer,
tag="val",
)
if idist.get_rank() == 0:
tb_logger = common.setup_tb_logging(
config.output_path.as_posix(),
trainer,
optimizer,
evaluators={"training": train_evaluator, "validation": evaluator},
)
if not exp_tracking.has_clearml:
exp_tracking_logger = exp_tracking.setup_logging(
trainer, optimizer, evaluators={"training": train_evaluator, "validation": evaluator}
)
# Log validation predictions as images
# We define a custom event filter to log less frequently the images (to reduce storage size)
# - we plot images with masks of the middle validation batch
# - once every 3 validations and
# - at the end of the training
def custom_event_filter(_, val_iteration):
c1 = val_iteration == len(val_loader) // 2
c2 = trainer.state.epoch % (getattr(config, "val_interval", 1) * 3) == 0
c2 |= trainer.state.epoch == config.num_epochs
return c1 and c2
tb_logger.attach(
evaluator,
log_handler=predictions_gt_images_handler(
img_denormalize_fn=config.img_denormalize, n_images=15, another_engine=trainer, prefix_tag="validation"
),
event_name=Events.ITERATION_COMPLETED(event_filter=custom_event_filter),
)
# Log confusion matrix to ClearML:
if exp_tracking.has_clearml:
@trainer.on(Events.COMPLETED)
def compute_and_log_cm():
cm = cm_metric.compute()
# CM: values are normalized such that diagonal values represent class recalls
cm = ConfusionMatrix.normalize(cm, "recall").cpu().numpy()
if idist.get_rank() == 0:
try:
from clearml import Task
except ImportError:
# Backwards-compatibility for legacy Trains SDK
from trains import Task
clearml_logger = Task.current_task().get_logger()
clearml_logger.report_confusion_matrix(
title="Final Confusion Matrix",
series="cm-preds-gt",
matrix=cm,
iteration=trainer.state.iteration,
xlabels=VOCSegmentationOpencv.target_names,
ylabels=VOCSegmentationOpencv.target_names,
)
trainer.run(train_loader, max_epochs=config.num_epochs)
if idist.get_rank() == 0:
tb_logger.close()
if not exp_tracking.has_clearml:
exp_tracking_logger.close()
def run(config, **kwargs):
"""This is the main method to run the training. As this training script is launched with `py_config_runner`
it should obligatory contain `run(config, **kwargs)` method.
"""
assert torch.cuda.is_available(), torch.cuda.is_available()
assert torch.backends.cudnn.enabled, "Nvidia/Amp requires cudnn backend to be enabled."
with idist.Parallel(backend="nccl") as parallel:
logger = setup_logger(name="Pascal-VOC12 Training", distributed_rank=idist.get_rank())
assert_config(config, TRAINVAL_CONFIG)
# The following attributes are automatically added by py_config_runner
assert hasattr(config, "config_filepath") and isinstance(config.config_filepath, Path)
assert hasattr(config, "script_filepath") and isinstance(config.script_filepath, Path)
if idist.get_rank() == 0 and exp_tracking.has_clearml:
try:
from clearml import Task
except ImportError:
# Backwards-compatibility for legacy Trains SDK
from trains import Task
task = Task.init("Pascal-VOC12 Training", config.config_filepath.stem)
task.connect_configuration(config.config_filepath.as_posix())
log_basic_info(logger, config)
config.output_path = Path(exp_tracking.get_output_path())
# dump python files to reproduce the run
exp_tracking.log_artifact(config.config_filepath.as_posix())
exp_tracking.log_artifact(config.script_filepath.as_posix())
exp_tracking.log_params(get_params(config, TRAINVAL_CONFIG))
try:
parallel.run(training, config, logger=logger)
except KeyboardInterrupt:
logger.info("Catched KeyboardInterrupt -> exit")
except Exception as e: # noqa
logger.exception("")
raise e
| [
"ignite.engine.create_supervised_evaluator",
"apex.amp.scale_loss",
"ignite.contrib.engines.common.add_early_stopping_by_val_score",
"ignite.engine.Engine",
"torch.cuda.is_available",
"ignite.distributed.get_local_rank",
"ignite.contrib.engines.common.ProgressBar",
"utils.exp_tracking.get_output_path"... | [((1180, 1203), 'ignite.distributed.auto_model', 'idist.auto_model', (['model'], {}), '(model)\n', (1196, 1203), True, 'import ignite.distributed as idist\n'), ((2849, 2878), 'ignite.engine.Engine', 'Engine', (['train_update_function'], {}), '(train_update_function)\n', (2855, 2878), False, 'from ignite.engine import Engine, Events, create_supervised_evaluator\n'), ((3989, 4034), 'ignite.engine.create_supervised_evaluator', 'create_supervised_evaluator', ([], {}), '(**evaluator_args)\n', (4016, 4034), False, 'from ignite.engine import Engine, Events, create_supervised_evaluator\n'), ((4051, 4096), 'ignite.engine.create_supervised_evaluator', 'create_supervised_evaluator', ([], {}), '(**evaluator_args)\n', (4078, 4096), False, 'from ignite.engine import Engine, Events, create_supervised_evaluator\n'), ((5322, 5356), 'py_config_runner.utils.set_seed', 'set_seed', (['(config.seed + local_rank)'], {}), '(config.seed + local_rank)\n', (5330, 5356), False, 'from py_config_runner.utils import set_seed\n'), ((5784, 5824), 'ignite.metrics.ConfusionMatrix', 'ConfusionMatrix', ([], {'num_classes': 'num_classes'}), '(num_classes=num_classes)\n', (5799, 5824), False, 'from ignite.metrics import ConfusionMatrix, IoU, mIoU\n'), ((10160, 10185), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10183, 10185), False, 'import torch\n'), ((10187, 10212), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10210, 10212), False, 'import torch\n'), ((3448, 3464), 'ignite.distributed.get_rank', 'idist.get_rank', ([], {}), '()\n', (3462, 3464), True, 'import ignite.distributed as idist\n'), ((4105, 4121), 'ignite.distributed.get_rank', 'idist.get_rank', ([], {}), '()\n', (4119, 4121), True, 'import ignite.distributed as idist\n'), ((4861, 4883), 'ignite.distributed.get_world_size', 'idist.get_world_size', ([], {}), '()\n', (4881, 4883), True, 'import ignite.distributed as idist\n'), ((5861, 5875), 
'ignite.metrics.IoU', 'IoU', (['cm_metric'], {}), '(cm_metric)\n', (5864, 5875), False, 'from ignite.metrics import ConfusionMatrix, IoU, mIoU\n'), ((5896, 5911), 'ignite.metrics.mIoU', 'mIoU', (['cm_metric'], {}), '(cm_metric)\n', (5900, 5911), False, 'from ignite.metrics import ConfusionMatrix, IoU, mIoU\n'), ((6199, 6241), 'ignite.engine.Events.EPOCH_COMPLETED', 'Events.EPOCH_COMPLETED', ([], {'every': 'val_interval'}), '(every=val_interval)\n', (6221, 6241), False, 'from ignite.engine import Engine, Events, create_supervised_evaluator\n'), ((6890, 7003), 'ignite.contrib.engines.common.add_early_stopping_by_val_score', 'common.add_early_stopping_by_val_score', (['config.es_patience', 'evaluator', 'trainer'], {'metric_name': 'score_metric_name'}), '(config.es_patience, evaluator,\n trainer, metric_name=score_metric_name)\n', (6928, 7003), False, 'from ignite.contrib.engines import common\n'), ((7311, 7327), 'ignite.distributed.get_rank', 'idist.get_rank', ([], {}), '()\n', (7325, 7327), True, 'import ignite.distributed as idist\n'), ((9803, 9819), 'ignite.distributed.get_rank', 'idist.get_rank', ([], {}), '()\n', (9817, 9819), True, 'import ignite.distributed as idist\n'), ((10315, 10345), 'ignite.distributed.Parallel', 'idist.Parallel', ([], {'backend': '"""nccl"""'}), "(backend='nccl')\n", (10329, 10345), True, 'import ignite.distributed as idist\n'), ((10464, 10502), 'py_config_runner.config_utils.assert_config', 'assert_config', (['config', 'TRAINVAL_CONFIG'], {}), '(config, TRAINVAL_CONFIG)\n', (10477, 10502), False, 'from py_config_runner.config_utils import TRAINVAL_CONFIG, assert_config, get_params\n'), ((2473, 2515), 'apex.amp.scale_loss', 'amp.scale_loss', (['loss', 'optimizer'], {'loss_id': '(0)'}), '(loss, optimizer, loss_id=0)\n', (2487, 2515), False, 'from apex import amp\n'), ((7632, 7749), 'utils.exp_tracking.setup_logging', 'exp_tracking.setup_logging', (['trainer', 'optimizer'], {'evaluators': "{'training': train_evaluator, 'validation': 
evaluator}"}), "(trainer, optimizer, evaluators={'training':\n train_evaluator, 'validation': evaluator})\n", (7658, 7749), False, 'from utils import exp_tracking\n'), ((11050, 11113), 'trains.Task.init', 'Task.init', (['"""Pascal-VOC12 Training"""', 'config.config_filepath.stem'], {}), "('Pascal-VOC12 Training', config.config_filepath.stem)\n", (11059, 11113), False, 'from trains import Task\n'), ((11263, 11293), 'utils.exp_tracking.get_output_path', 'exp_tracking.get_output_path', ([], {}), '()\n', (11291, 11293), False, 'from utils import exp_tracking\n'), ((11514, 11549), 'py_config_runner.config_utils.get_params', 'get_params', (['config', 'TRAINVAL_CONFIG'], {}), '(config, TRAINVAL_CONFIG)\n', (11524, 11549), False, 'from py_config_runner.config_utils import TRAINVAL_CONFIG, assert_config, get_params\n'), ((3479, 3512), 'ignite.contrib.engines.common.ProgressBar', 'common.ProgressBar', ([], {'persist': '(False)'}), '(persist=False)\n', (3497, 3512), False, 'from ignite.contrib.engines import common\n'), ((4136, 4196), 'ignite.contrib.engines.common.ProgressBar', 'common.ProgressBar', ([], {'desc': '"""Evaluation (train)"""', 'persist': '(False)'}), "(desc='Evaluation (train)', persist=False)\n", (4154, 4196), False, 'from ignite.contrib.engines import common\n'), ((4229, 4287), 'ignite.contrib.engines.common.ProgressBar', 'common.ProgressBar', ([], {'desc': '"""Evaluation (val)"""', 'persist': '(False)'}), "(desc='Evaluation (val)', persist=False)\n", (4247, 4287), False, 'from ignite.contrib.engines import common\n'), ((4805, 4827), 'ignite.distributed.get_local_rank', 'idist.get_local_rank', ([], {}), '()\n', (4825, 4827), True, 'import ignite.distributed as idist\n'), ((4957, 4972), 'ignite.distributed.backend', 'idist.backend', ([], {}), '()\n', (4970, 4972), True, 'import ignite.distributed as idist\n'), ((5001, 5017), 'ignite.distributed.get_rank', 'idist.get_rank', ([], {}), '()\n', (5015, 5017), True, 'import ignite.distributed as idist\n'), ((5052, 
5074), 'ignite.distributed.get_world_size', 'idist.get_world_size', ([], {}), '()\n', (5072, 5074), True, 'import ignite.distributed as idist\n'), ((8427, 8565), 'utils.handlers.predictions_gt_images_handler', 'predictions_gt_images_handler', ([], {'img_denormalize_fn': 'config.img_denormalize', 'n_images': '(15)', 'another_engine': 'trainer', 'prefix_tag': '"""validation"""'}), "(img_denormalize_fn=config.img_denormalize,\n n_images=15, another_engine=trainer, prefix_tag='validation')\n", (8456, 8565), False, 'from utils.handlers import predictions_gt_images_handler\n'), ((8616, 8676), 'ignite.engine.Events.ITERATION_COMPLETED', 'Events.ITERATION_COMPLETED', ([], {'event_filter': 'custom_event_filter'}), '(event_filter=custom_event_filter)\n', (8642, 8676), False, 'from ignite.engine import Engine, Events, create_supervised_evaluator\n'), ((9048, 9064), 'ignite.distributed.get_rank', 'idist.get_rank', ([], {}), '()\n', (9062, 9064), True, 'import ignite.distributed as idist\n'), ((10437, 10453), 'ignite.distributed.get_rank', 'idist.get_rank', ([], {}), '()\n', (10451, 10453), True, 'import ignite.distributed as idist\n'), ((10784, 10800), 'ignite.distributed.get_rank', 'idist.get_rank', ([], {}), '()\n', (10798, 10800), True, 'import ignite.distributed as idist\n'), ((843, 857), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (847, 857), False, 'from pathlib import Path\n'), ((9319, 9338), 'trains.Task.current_task', 'Task.current_task', ([], {}), '()\n', (9336, 9338), False, 'from trains import Task\n'), ((8978, 9017), 'ignite.metrics.ConfusionMatrix.normalize', 'ConfusionMatrix.normalize', (['cm', '"""recall"""'], {}), "(cm, 'recall')\n", (9003, 9017), False, 'from ignite.metrics import ConfusionMatrix, IoU, mIoU\n')] |
#! usr/bin/dev python
from stages import Stages #Le as fases
from code import tanks #Responsável pelos tanques do player
from images import imagens #imagens do jogo
import pygame
import random
screen_Dimension=[32*20,32*20]
pygame.init()
screen = pygame.display.set_mode(screen_Dimension)
pygame.display.set_caption("My_Poor_NES_Batlle_City")
clock = pygame.time.Clock()
Fase_1 = Stages.Stages(screen)
Fase_1.readStage(1)
Tank = tanks.PlayerTank(imagens.blueTank, [64,64], screen)
while True:
screen.fill([0,0,0])
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
pygame.quit()
Tank.move(event)
Fase_1.plotStage()
Tank.plot()
pygame.display.update()
clock.tick(60)
| [
"pygame.display.set_caption",
"pygame.init",
"pygame.quit",
"pygame.event.get",
"pygame.display.set_mode",
"code.tanks.PlayerTank",
"pygame.time.Clock",
"stages.Stages.Stages",
"pygame.display.update"
] | [((238, 251), 'pygame.init', 'pygame.init', ([], {}), '()\n', (249, 251), False, 'import pygame\n'), ((262, 303), 'pygame.display.set_mode', 'pygame.display.set_mode', (['screen_Dimension'], {}), '(screen_Dimension)\n', (285, 303), False, 'import pygame\n'), ((305, 358), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""My_Poor_NES_Batlle_City"""'], {}), "('My_Poor_NES_Batlle_City')\n", (331, 358), False, 'import pygame\n'), ((368, 387), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (385, 387), False, 'import pygame\n'), ((400, 421), 'stages.Stages.Stages', 'Stages.Stages', (['screen'], {}), '(screen)\n', (413, 421), False, 'from stages import Stages\n'), ((450, 502), 'code.tanks.PlayerTank', 'tanks.PlayerTank', (['imagens.blueTank', '[64, 64]', 'screen'], {}), '(imagens.blueTank, [64, 64], screen)\n', (466, 502), False, 'from code import tanks\n'), ((553, 571), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (569, 571), False, 'import pygame\n'), ((770, 793), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (791, 793), False, 'import pygame\n'), ((608, 621), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (619, 621), False, 'import pygame\n'), ((697, 710), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (708, 710), False, 'import pygame\n')] |
from typing import Dict
from numba import njit
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['image.cmap'] = 'binary'
def read_parameters(filename: str) -> Dict[str, float]:
"""Read parameters from a file to a dictionary and return it."""
parameters = {}
with open(filename, "r") as file:
for line in file.readlines():
if line != '\n':
line_split = line.split()
try:
parameters[line_split[0]] = int(line_split[2])
except ValueError:
parameters[line_split[0]] = float(line_split[2])
if len(parameters) != 6:
raise RuntimeError("Incorrect list of parameters in " + filename)
return parameters
def random_population(population_size: int, board_size: int) -> np.ndarray:
"""Return a random population of solutions."""
return np.array([np.random.permutation(board_size)
for _ in range(population_size)], dtype=np.int32)
@njit
def fitness(population: np.ndarray) -> np.ndarray:
"""Return an array of fitnesses of a given population"""
fitness_arr = np.empty(population.shape[0], dtype=np.float32)
for i, genome in enumerate(population):
diags_1 = np.array([0 for n in range(2 * genome.size - 1)])
diags_2 = np.array([0 for n in range(2 * genome.size - 1)])
for j in range(genome.size):
diags_1[j - genome[j] + genome.size - 1] += 1
diags_2[j + genome[j]] += 1
colls_1 = diags_1 > 1
colls_2 = diags_2 > 1
diags_1[colls_1] = diags_1[colls_1] * (diags_1[colls_1] - 1) // 2
diags_1[~colls_1] = 0
diags_2[colls_2] = diags_2[colls_2] * (diags_2[colls_2] - 1) // 2
diags_2[~colls_2] = 0
fitness_arr[i] = 1 / (1 + np.sum(diags_1) + np.sum(diags_2))
return fitness_arr
@njit
def selection(population: np.ndarray, n_best: int) -> np.ndarray:
"""Return an array of indices of individuals selected to mate.
n_best is the number of best individuals who will always be selected.
"""
fitnesses = fitness(population)
winners = np.empty((population.shape[0] // 2,), dtype=np.int32)
winners[0:n_best] = np.argsort(fitnesses)[-n_best:]
for i in range(n_best, fitnesses.shape[0] // 2):
pair = np.random.randint(0, fitnesses.shape[0], size=(2,))
if fitnesses[pair[0]] > fitnesses[pair[1]]:
winners[i] = pair[0]
else:
winners[i] = pair[1]
return winners
@njit
def crossover(population: np.ndarray, selected: np.ndarray):
"""Return a new population that results from crossover."""
N = population.shape[1]
new_population = np.empty_like(population)
for k in range(0, selected.shape[0]):
parents_ids = np.random.choice(selected, replace=False, size=2)
child_1 = np.empty_like(population[parents_ids[0]])
child_2 = np.empty_like(population[parents_ids[1]])
points = np.random.randint(0, N + 1, 2)
if points[0] != points[1]:
points = (np.min(points), np.max(points))
else:
if points[0] == N:
points = (points[0] - 1, points[0])
else:
points = (points[0], points[0] + 1)
cut_out = population[parents_ids[0]][points[0]:points[1]]
child_1[points[0]:points[1]] = cut_out
j = 0
for i in range(N):
if j == points[0]:
j = points[1]
if not np.any(cut_out == population[parents_ids[1]][i]):
child_1[j] = population[parents_ids[1]][i]
j += 1
cut_out = population[parents_ids[1]][points[0]:points[1]]
child_2[points[0]:points[1]] = cut_out
j = 0
for i in range(N):
if j == points[0]:
j = points[1]
if not np.any(cut_out == population[parents_ids[0]][i]):
child_2[j] = population[parents_ids[0]][i]
j += 1
new_population[2 * k, :] = child_1
new_population[2 * k + 1, :] = child_2
return new_population
@njit
def mutation(population: np.ndarray):
"""Perform mutation on a population."""
for i in range(population.shape[0]):
if np.random.random() > 0.7:
for _ in range(3):
points = np.random.randint(0, population.shape[1], 2)
tmp = population[i, points[0]]
population[i, points[0]] = population[i, points[1]]
population[i, points[1]] = tmp
def plot_genome_expression(genome: np.ndarray) -> None:
"""Plot a solution represented by the given genome."""
points = np.zeros((genome.shape[0], genome.shape[0]))
for i, g in enumerate(genome):
points[i, g] = 1
_, ax = plt.subplots(figsize=(10, 10))
ax.imshow(points, cmap='Purples')
ax.grid(True)
ax.set_xlim(-0.5, genome.shape[0] - 0.5)
ax.set_ylim(-0.5, genome.shape[0] - 0.5)
ax.set_xticks([i + 0.5 for i in range(genome.shape[0])])
ax.set_yticks([i + 0.5 for i in range(genome.shape[0])])
ax.set_xticklabels([])
ax.set_yticklabels([])
plt.tick_params(axis='both', which='both', bottom=False, left=False)
plt.title("$N = {}$".format(genome.shape[0]), size=15)
plt.show()
def main() -> None:
    """Run the genetic algorithm until a perfect member or the generation limit."""
    parameters = read_parameters('parameters.txt')
    population = random_population(parameters['pop_size'], parameters['N'])
    generation_data = []
    best_member_id = 0
    winner_gen = parameters['generations']
    for generation in range(1, parameters['generations'] + 1):
        # selection -> crossover -> mutation, then score the new population
        chosen = selection(population, parameters['n_best'])
        population = crossover(population, chosen)
        mutation(population)
        scores = fitness(population)
        best_member_id = np.argmax(scores)
        generation_data.append([generation, scores.mean(), scores[best_member_id]])
        if scores[best_member_id] == 1.0:
            # perfect fitness: print the winning genome and stop early
            print("\nWinner (gen. {}):\n{}".format(
                generation, str(population[best_member_id])))
            winner_gen = generation
            break
        if generation % 50 == 0:
            print("Gen", generation)
    if parameters['plot_winner_genome']:
        plot_genome_expression(population[best_member_id])
# Run the genetic-algorithm experiment only when executed as a script.
if __name__ == "__main__":
    main()
| [
"numpy.random.choice",
"numpy.random.permutation",
"numpy.random.random",
"matplotlib.pyplot.tick_params",
"numpy.argmax",
"numpy.any",
"numpy.max",
"numpy.argsort",
"numpy.sum",
"numpy.zeros",
"numpy.random.randint",
"numpy.empty_like",
"numpy.empty",
"numpy.min",
"matplotlib.pyplot.sub... | [((1140, 1187), 'numpy.empty', 'np.empty', (['population.shape[0]'], {'dtype': 'np.float32'}), '(population.shape[0], dtype=np.float32)\n', (1148, 1187), True, 'import numpy as np\n'), ((2136, 2189), 'numpy.empty', 'np.empty', (['(population.shape[0] // 2,)'], {'dtype': 'np.int32'}), '((population.shape[0] // 2,), dtype=np.int32)\n', (2144, 2189), True, 'import numpy as np\n'), ((2698, 2723), 'numpy.empty_like', 'np.empty_like', (['population'], {}), '(population)\n', (2711, 2723), True, 'import numpy as np\n'), ((4671, 4715), 'numpy.zeros', 'np.zeros', (['(genome.shape[0], genome.shape[0])'], {}), '((genome.shape[0], genome.shape[0]))\n', (4679, 4715), True, 'import numpy as np\n'), ((4788, 4818), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (4800, 4818), True, 'import matplotlib.pyplot as plt\n'), ((5145, 5213), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""both"""', 'bottom': '(False)', 'left': '(False)'}), "(axis='both', which='both', bottom=False, left=False)\n", (5160, 5213), True, 'import matplotlib.pyplot as plt\n'), ((5277, 5287), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5285, 5287), True, 'import matplotlib.pyplot as plt\n'), ((2214, 2235), 'numpy.argsort', 'np.argsort', (['fitnesses'], {}), '(fitnesses)\n', (2224, 2235), True, 'import numpy as np\n'), ((2314, 2365), 'numpy.random.randint', 'np.random.randint', (['(0)', 'fitnesses.shape[0]'], {'size': '(2,)'}), '(0, fitnesses.shape[0], size=(2,))\n', (2331, 2365), True, 'import numpy as np\n'), ((2788, 2837), 'numpy.random.choice', 'np.random.choice', (['selected'], {'replace': '(False)', 'size': '(2)'}), '(selected, replace=False, size=2)\n', (2804, 2837), True, 'import numpy as np\n'), ((2856, 2897), 'numpy.empty_like', 'np.empty_like', (['population[parents_ids[0]]'], {}), '(population[parents_ids[0]])\n', (2869, 2897), True, 'import numpy as np\n'), ((2916, 
2957), 'numpy.empty_like', 'np.empty_like', (['population[parents_ids[1]]'], {}), '(population[parents_ids[1]])\n', (2929, 2957), True, 'import numpy as np\n'), ((2975, 3005), 'numpy.random.randint', 'np.random.randint', (['(0)', '(N + 1)', '(2)'], {}), '(0, N + 1, 2)\n', (2992, 3005), True, 'import numpy as np\n'), ((5790, 5808), 'numpy.argmax', 'np.argmax', (['gen_fit'], {}), '(gen_fit)\n', (5799, 5808), True, 'import numpy as np\n'), ((897, 930), 'numpy.random.permutation', 'np.random.permutation', (['board_size'], {}), '(board_size)\n', (918, 930), True, 'import numpy as np\n'), ((4252, 4270), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (4268, 4270), True, 'import numpy as np\n'), ((1823, 1838), 'numpy.sum', 'np.sum', (['diags_2'], {}), '(diags_2)\n', (1829, 1838), True, 'import numpy as np\n'), ((3063, 3077), 'numpy.min', 'np.min', (['points'], {}), '(points)\n', (3069, 3077), True, 'import numpy as np\n'), ((3079, 3093), 'numpy.max', 'np.max', (['points'], {}), '(points)\n', (3085, 3093), True, 'import numpy as np\n'), ((3496, 3544), 'numpy.any', 'np.any', (['(cut_out == population[parents_ids[1]][i])'], {}), '(cut_out == population[parents_ids[1]][i])\n', (3502, 3544), True, 'import numpy as np\n'), ((3862, 3910), 'numpy.any', 'np.any', (['(cut_out == population[parents_ids[0]][i])'], {}), '(cut_out == population[parents_ids[0]][i])\n', (3868, 3910), True, 'import numpy as np\n'), ((4334, 4378), 'numpy.random.randint', 'np.random.randint', (['(0)', 'population.shape[1]', '(2)'], {}), '(0, population.shape[1], 2)\n', (4351, 4378), True, 'import numpy as np\n'), ((1805, 1820), 'numpy.sum', 'np.sum', (['diags_1'], {}), '(diags_1)\n', (1811, 1820), True, 'import numpy as np\n')] |
#!/usr/bin/env python
u"""
radial_basis.py
Written by <NAME> (01/2022)
Interpolates data using radial basis functions
CALLING SEQUENCE:
ZI = radial_basis(xs, ys, zs, XI, YI, polynomial=0,
smooth=smooth, epsilon=epsilon, method='inverse')
INPUTS:
xs: scaled input X data
ys: scaled input Y data
zs: input data
XI: scaled grid X for output ZI
YI: scaled grid Y for output ZI
OUTPUTS:
ZI: interpolated data grid
OPTIONS:
smooth: smoothing weights
metric: distance metric to use (default euclidean)
epsilon: adjustable constant for distance functions
default is mean Euclidean distance
polynomial: polynomial order if augmenting radial basis functions
default None: no polynomials
method: radial basis function
multiquadric
inverse_multiquadric or inverse (default)
inverse_quadratic
gaussian
linear (first-order polyharmonic spline)
cubic (third-order polyharmonic spline)
quintic (fifth-order polyharmonic spline)
thin_plate: thin-plate spline
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python (https://numpy.org)
scipy: Scientific Tools for Python (https://docs.scipy.org/doc/)
REFERENCES:
<NAME>, Multiquadric equations of topography and other irregular
surfaces, J. Geophys. Res., 76(8), 1905-1915, 1971.
<NAME>, "Radial Basis Functions", Cambridge Monographs on Applied and
Computational Mathematics, 2003.
UPDATE HISTORY:
Updated 01/2022: added function docstrings
Updated 07/2021: using scipy spatial distance routines
Updated 09/2017: using rcond=-1 in numpy least-squares algorithms
Updated 01/2017: epsilon in polyharmonic splines (linear, cubic, quintic)
Updated 08/2016: using format text within ValueError, edit constant vector
added low-order polynomial option (previously used default constant)
Updated 01/2016: new hierarchical_radial_basis function
that first reduces to points within distance. added cutoff option
Updated 10/2014: added third dimension (spherical)
Written 08/2014
"""
from __future__ import print_function, division
import numpy as np
import scipy.spatial
def radial_basis(xs, ys, zs, XI, YI, smooth=0.0, metric='euclidean',
    epsilon=None, method='inverse', polynomial=None):
    """
    Interpolates data using radial basis functions
    Arguments
    ---------
    xs: scaled input x-coordinates
    ys: scaled input y-coordinates
    zs: input data
    XI: scaled output x-coordinates for data grid
    YI: scaled output y-coordinates for data grid
    Keyword arguments
    -----------------
    smooth: smoothing weights
    metric: distance metric to use (default euclidean)
    epsilon: adjustable constant for distance functions
        (default: mean Euclidean distance between the input points)
    method: radial basis function
        - multiquadric
        - inverse_multiquadric or inverse (default)
        - inverse_quadratic
        - gaussian
        - linear (first-order polyharmonic spline)
        - cubic (third-order polyharmonic spline)
        - quintic (fifth-order polyharmonic spline)
        - thin_plate: thin-plate spline
    polynomial: polynomial order if augmenting radial basis functions
    Returns
    -------
    ZI: interpolated data grid
    """
    #-- NOTE(review): xs/ys/zs are assumed 1-D after the squeeze below;
    #-- only equal lengths are verified explicitly
    #-- remove singleton dimensions
    xs = np.squeeze(xs)
    ys = np.squeeze(ys)
    zs = np.squeeze(zs)
    XI = np.squeeze(XI)
    YI = np.squeeze(YI)
    #-- size of new matrix
    if (np.ndim(XI) == 1):
        nx = len(XI)
    else:
        nx,ny = np.shape(XI)
    #-- Check to make sure sizes of input arguments are correct and consistent
    if (len(zs) != len(xs)) | (len(zs) != len(ys)):
        raise Exception('Length of X, Y, and Z must be equal')
    if (np.shape(XI) != np.shape(YI)):
        raise Exception('Size of XI and YI must be equal')
    #-- create python dictionary of radial basis function formulas
    radial_basis_functions = {}
    radial_basis_functions['multiquadric'] = multiquadric
    radial_basis_functions['inverse_multiquadric'] = inverse_multiquadric
    radial_basis_functions['inverse'] = inverse_multiquadric
    radial_basis_functions['inverse_quadratic'] = inverse_quadratic
    radial_basis_functions['gaussian'] = gaussian
    radial_basis_functions['linear'] = poly_spline1
    radial_basis_functions['cubic'] = poly_spline3
    radial_basis_functions['quintic'] = poly_spline5
    radial_basis_functions['thin_plate'] = thin_plate
    #-- check if formula name is listed
    if method in radial_basis_functions.keys():
        RBF = radial_basis_functions[method]
    else:
        raise ValueError("Method {0} not implemented".format(method))
    #-- Creation of data distance matrix
    #-- Data to Data
    if (metric == 'brute'):
        #-- use linear algebra to compute euclidean distances
        Rd = distance_matrix(
            np.array([xs, ys]),
            np.array([xs, ys])
        )
    else:
        #-- use scipy spatial distance routines
        Rd = scipy.spatial.distance.cdist(
            np.array([xs, ys]).T,
            np.array([xs, ys]).T,
            metric=metric)
    #-- shape of distance matrix
    N,M = np.shape(Rd)
    #-- if epsilon is not specified
    if epsilon is None:
        #-- calculate norm with mean euclidean distance
        #-- (mean over the strictly lower triangle: each pair counted once)
        uix,uiy = np.nonzero(np.tri(N,M=M,k=-1))
        epsilon = np.mean(Rd[uix,uiy])
    #-- possible augmentation of the PHI Matrix with polynomial Vectors
    if polynomial is None:
        #-- calculate radial basis function for data-to-data with smoothing
        PHI = RBF(epsilon, Rd) + np.eye(N,M=M)*smooth
        DMAT = zs.copy()
    else:
        #-- number of polynomial coefficients
        nt = (polynomial**2 + 3*polynomial)//2 + 1
        #-- calculate radial basis function for data-to-data with smoothing
        PHI = np.zeros((N+nt,M+nt))
        PHI[:N,:M] = RBF(epsilon, Rd) + np.eye(N,M=M)*smooth
        #-- augmentation of PHI matrix with polynomials
        POLY = polynomial_matrix(xs,ys,polynomial)
        DMAT = np.concatenate(([zs,np.zeros((nt))]),axis=0)
        #-- augment PHI matrix
        for t in range(nt):
            PHI[:N,M+t] = POLY[:,t]
            PHI[N+t,:M] = POLY[:,t]
    #-- Computation of the Weights
    w = np.linalg.lstsq(PHI,DMAT[:,np.newaxis],rcond=-1)[0]
    #-- Computation of distance Matrix
    #-- Computation of distance Matrix (data to mesh points)
    if (metric == 'brute'):
        #-- use linear algebra to compute euclidean distances
        Re = distance_matrix(
            np.array([XI.flatten(),YI.flatten()]),
            np.array([xs,ys])
        )
    else:
        #-- use scipy spatial distance routines
        Re = scipy.spatial.distance.cdist(
            np.array([XI.flatten(),YI.flatten()]).T,
            np.array([xs, ys]).T,
            metric=metric)
    #-- calculate radial basis function for data-to-mesh matrix
    E = RBF(epsilon,Re)
    #-- possible augmentation of the Evaluation Matrix with polynomial vectors
    if polynomial is not None:
        P = polynomial_matrix(XI.flatten(),YI.flatten(),polynomial)
        E = np.concatenate(([E, P]),axis=1)
    #-- calculate output interpolated array (or matrix)
    if (np.ndim(XI) == 1):
        ZI = np.squeeze(np.dot(E,w))
    else:
        ZI = np.zeros((nx,ny))
        ZI[:,:] = np.dot(E,w).reshape(nx,ny)
    #-- return the interpolated array (or matrix)
    return ZI
#-- define radial basis function formulas
def multiquadric(epsilon, r):
    """Multiquadric radial basis function: sqrt((epsilon*r)**2 + 1)."""
    scaled = epsilon * r
    return np.sqrt(scaled * scaled + 1.0)
def inverse_multiquadric(epsilon, r):
    """Inverse multiquadric radial basis function: 1/sqrt((epsilon*r)**2 + 1)."""
    scaled = epsilon * r
    return 1.0/np.sqrt(scaled * scaled + 1.0)
def inverse_quadratic(epsilon, r):
    """Inverse quadratic radial basis function: 1/(1 + (epsilon*r)**2)."""
    scaled = epsilon * r
    return 1.0/(1.0 + scaled * scaled)
def gaussian(epsilon, r):
    """Gaussian radial basis function: exp(-(epsilon*r)**2)."""
    scaled = epsilon * r
    return np.exp(-scaled * scaled)
def poly_spline1(epsilon, r):
    """First-order polyharmonic spline (linear): epsilon*r."""
    return epsilon * r
def poly_spline3(epsilon, r):
    """Third-order polyharmonic spline (cubic): (epsilon*r)**3."""
    scaled = epsilon * r
    return scaled ** 3
def poly_spline5(epsilon, r):
    """Fifth-order polyharmonic spline (quintic): (epsilon*r)**5."""
    scaled = epsilon * r
    return scaled ** 5
def thin_plate(epsilon, r):
    """Thin-plate spline radial basis function: r**2 * log(r).

    Parameters
    ----------
    epsilon: unused adjustable constant (kept for a uniform RBF signature)
    r: array of radial distances (assumed non-negative)

    Returns
    -------
    f: array of basis values; 0 where r == 0 (the spline's limit at zero)
    """
    #-- evaluate only where r is nonzero: the previous version computed
    #-- r**2 * log(r) over the full array, raising a divide-by-zero
    #-- RuntimeWarning and producing a 0*(-inf) = NaN intermediate at r == 0
    f = np.zeros_like(r, dtype=np.float64)
    nonzero = (r != 0)
    f[nonzero] = r[nonzero]**2 * np.log(r[nonzero])
    return f
#-- calculate Euclidean distances between points as matrices
def distance_matrix(x,cntrs):
    """Compute the pairwise Euclidean distance matrix between two point sets.

    Parameters
    ----------
    x: (s, M) array of M points in s dimensions
    cntrs: (s, N) array of N points in s dimensions

    Returns
    -------
    D: (M, N) array where D[i,j] is the distance between x[:,i] and cntrs[:,j]
    """
    s,M = np.shape(x)
    s,N = np.shape(cntrs)
    D = np.zeros((M,N))
    for d in range(s):
        #-- broadcast the d-th coordinate: (M,1) - (1,N) -> (M,N)
        #-- replaces the np.int-based index construction (np.int was
        #-- deprecated in NumPy 1.20 and removed in NumPy 1.24)
        dx = x[d,:][:,np.newaxis] - cntrs[d,:][np.newaxis,:]
        D += dx**2
    D = np.sqrt(D)
    return D
#-- calculate polynomial matrix to augment radial basis functions
def polynomial_matrix(x,y,order):
    """Build the matrix of monomials x**j * y**(i-j) up to total degree *order*.

    Columns enumerate all monomials of total degree 0..order, lowest
    degree first; there are (order**2 + 3*order)//2 + 1 of them.
    """
    n_points = len(x)
    n_terms = (order**2 + 3*order)//2 + 1
    POLY = np.zeros((n_points,n_terms))
    column = 0
    for degree in range(order + 1):
        for xpow in range(degree + 1):
            POLY[:,column] = (x**xpow)*(y**(degree-xpow))
            column += 1
    return POLY
| [
"numpy.mean",
"numpy.eye",
"numpy.sqrt",
"numpy.ones",
"numpy.log",
"numpy.ndim",
"numpy.squeeze",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.dot",
"numpy.linalg.lstsq",
"numpy.concatenate",
"numpy.shape",
"numpy.tri"
] | [((3345, 3359), 'numpy.squeeze', 'np.squeeze', (['xs'], {}), '(xs)\n', (3355, 3359), True, 'import numpy as np\n'), ((3369, 3383), 'numpy.squeeze', 'np.squeeze', (['ys'], {}), '(ys)\n', (3379, 3383), True, 'import numpy as np\n'), ((3393, 3407), 'numpy.squeeze', 'np.squeeze', (['zs'], {}), '(zs)\n', (3403, 3407), True, 'import numpy as np\n'), ((3417, 3431), 'numpy.squeeze', 'np.squeeze', (['XI'], {}), '(XI)\n', (3427, 3431), True, 'import numpy as np\n'), ((3441, 3455), 'numpy.squeeze', 'np.squeeze', (['YI'], {}), '(YI)\n', (3451, 3455), True, 'import numpy as np\n'), ((5196, 5208), 'numpy.shape', 'np.shape', (['Rd'], {}), '(Rd)\n', (5204, 5208), True, 'import numpy as np\n'), ((7558, 7591), 'numpy.sqrt', 'np.sqrt', (['((epsilon * r) ** 2 + 1.0)'], {}), '((epsilon * r) ** 2 + 1.0)\n', (7565, 7591), True, 'import numpy as np\n'), ((7886, 7913), 'numpy.exp', 'np.exp', (['(-(epsilon * r) ** 2)'], {}), '(-(epsilon * r) ** 2)\n', (7892, 7913), True, 'import numpy as np\n'), ((8491, 8502), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (8499, 8502), True, 'import numpy as np\n'), ((8513, 8528), 'numpy.shape', 'np.shape', (['cntrs'], {}), '(cntrs)\n', (8521, 8528), True, 'import numpy as np\n'), ((8537, 8553), 'numpy.zeros', 'np.zeros', (['(M, N)'], {}), '((M, N))\n', (8545, 8553), True, 'import numpy as np\n'), ((8758, 8768), 'numpy.sqrt', 'np.sqrt', (['D'], {}), '(D)\n', (8765, 8768), True, 'import numpy as np\n'), ((8955, 8971), 'numpy.zeros', 'np.zeros', (['(M, N)'], {}), '((M, N))\n', (8963, 8971), True, 'import numpy as np\n'), ((3491, 3502), 'numpy.ndim', 'np.ndim', (['XI'], {}), '(XI)\n', (3498, 3502), True, 'import numpy as np\n'), ((3557, 3569), 'numpy.shape', 'np.shape', (['XI'], {}), '(XI)\n', (3565, 3569), True, 'import numpy as np\n'), ((3773, 3785), 'numpy.shape', 'np.shape', (['XI'], {}), '(XI)\n', (3781, 3785), True, 'import numpy as np\n'), ((3789, 3801), 'numpy.shape', 'np.shape', (['YI'], {}), '(YI)\n', (3797, 3801), True, 'import numpy as 
np\n'), ((5392, 5413), 'numpy.mean', 'np.mean', (['Rd[uix, uiy]'], {}), '(Rd[uix, uiy])\n', (5399, 5413), True, 'import numpy as np\n'), ((5865, 5891), 'numpy.zeros', 'np.zeros', (['(N + nt, M + nt)'], {}), '((N + nt, M + nt))\n', (5873, 5891), True, 'import numpy as np\n'), ((6290, 6341), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['PHI', 'DMAT[:, np.newaxis]'], {'rcond': '(-1)'}), '(PHI, DMAT[:, np.newaxis], rcond=-1)\n', (6305, 6341), True, 'import numpy as np\n'), ((7152, 7182), 'numpy.concatenate', 'np.concatenate', (['[E, P]'], {'axis': '(1)'}), '([E, P], axis=1)\n', (7166, 7182), True, 'import numpy as np\n'), ((7248, 7259), 'numpy.ndim', 'np.ndim', (['XI'], {}), '(XI)\n', (7255, 7259), True, 'import numpy as np\n'), ((7327, 7345), 'numpy.zeros', 'np.zeros', (['(nx, ny)'], {}), '((nx, ny))\n', (7335, 7345), True, 'import numpy as np\n'), ((7683, 7716), 'numpy.sqrt', 'np.sqrt', (['((epsilon * r) ** 2 + 1.0)'], {}), '((epsilon * r) ** 2 + 1.0)\n', (7690, 7716), True, 'import numpy as np\n'), ((8311, 8320), 'numpy.log', 'np.log', (['r'], {}), '(r)\n', (8317, 8320), True, 'import numpy as np\n'), ((4892, 4910), 'numpy.array', 'np.array', (['[xs, ys]'], {}), '([xs, ys])\n', (4900, 4910), True, 'import numpy as np\n'), ((4924, 4942), 'numpy.array', 'np.array', (['[xs, ys]'], {}), '([xs, ys])\n', (4932, 4942), True, 'import numpy as np\n'), ((5354, 5374), 'numpy.tri', 'np.tri', (['N'], {'M': 'M', 'k': '(-1)'}), '(N, M=M, k=-1)\n', (5360, 5374), True, 'import numpy as np\n'), ((6626, 6644), 'numpy.array', 'np.array', (['[xs, ys]'], {}), '([xs, ys])\n', (6634, 6644), True, 'import numpy as np\n'), ((7291, 7303), 'numpy.dot', 'np.dot', (['E', 'w'], {}), '(E, w)\n', (7297, 7303), True, 'import numpy as np\n'), ((5070, 5088), 'numpy.array', 'np.array', (['[xs, ys]'], {}), '([xs, ys])\n', (5078, 5088), True, 'import numpy as np\n'), ((5104, 5122), 'numpy.array', 'np.array', (['[xs, ys]'], {}), '([xs, ys])\n', (5112, 5122), True, 'import numpy as np\n'), ((5622, 5636), 
'numpy.eye', 'np.eye', (['N'], {'M': 'M'}), '(N, M=M)\n', (5628, 5636), True, 'import numpy as np\n'), ((5927, 5941), 'numpy.eye', 'np.eye', (['N'], {'M': 'M'}), '(N, M=M)\n', (5933, 5941), True, 'import numpy as np\n'), ((6090, 6102), 'numpy.zeros', 'np.zeros', (['nt'], {}), '(nt)\n', (6098, 6102), True, 'import numpy as np\n'), ((6824, 6842), 'numpy.array', 'np.array', (['[xs, ys]'], {}), '([xs, ys])\n', (6832, 6842), True, 'import numpy as np\n'), ((7363, 7375), 'numpy.dot', 'np.dot', (['E', 'w'], {}), '(E, w)\n', (7369, 7375), True, 'import numpy as np\n'), ((8599, 8614), 'numpy.ones', 'np.ones', (['(1, N)'], {}), '((1, N))\n', (8606, 8614), True, 'import numpy as np\n'), ((8653, 8668), 'numpy.ones', 'np.ones', (['(1, M)'], {}), '((1, M))\n', (8660, 8668), True, 'import numpy as np\n')] |
from flask import Flask, jsonify, request
# WSGI application instance; the route decorators below register onto it.
app = Flask(__name__)
@app.route('/', methods =['GET', 'POST'])
def index():
    """Root endpoint: echo posted JSON with 201, or greet on GET."""
    if request.method != 'POST':
        return jsonify({"about" : "Hello World!"})
    payload = request.get_json()
    return jsonify({'you sent': payload}), 201
@app.route('/multi/<int:n1>', methods=['GET'])
def get_mul10(n1):
    """Return the path integer multiplied by ten, as JSON."""
    product = n1 * 10
    return jsonify({"result" : product})
# Start Flask's development server (debug mode) when run directly.
if __name__ == "__main__":
    app.run(debug=True)
| [
"flask.jsonify",
"flask.request.get_json",
"flask.Flask"
] | [((48, 63), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (53, 63), False, 'from flask import Flask, jsonify, request\n'), ((385, 413), 'flask.jsonify', 'jsonify', (["{'result': n1 * 10}"], {}), "({'result': n1 * 10})\n", (392, 413), False, 'from flask import Flask, jsonify, request\n'), ((175, 193), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (191, 193), False, 'from flask import Flask, jsonify, request\n'), ((271, 305), 'flask.jsonify', 'jsonify', (["{'about': 'Hello World!'}"], {}), "({'about': 'Hello World!'})\n", (278, 305), False, 'from flask import Flask, jsonify, request\n'), ((209, 241), 'flask.jsonify', 'jsonify', (["{'you sent': some_json}"], {}), "({'you sent': some_json})\n", (216, 241), False, 'from flask import Flask, jsonify, request\n')] |
from django.conf.urls import url
from django.urls import re_path

from rest_framework.urlpatterns import format_suffix_patterns

from . import views
# URL routes for the risks API; format_suffix_patterns adds optional
# format suffixes (e.g. .json/.api) to every route.
urlpatterns = [
    # re_path replaces django.conf.urls.url, which was deprecated in
    # Django 2.0 and removed in Django 4.0 (same regex semantics)
    re_path(r'^risks/$', views.RiskTypeList.as_view(), name='risks_list'),
    re_path(r'^risks/(?P<pk>[0-9]+)/$', views.RiskTypeDetail.as_view(), name='risk_details'),
    re_path(r'^fields/$', views.FieldTypes.as_view(), name='field_types'),
]

urlpatterns = format_suffix_patterns(urlpatterns)
| [
"rest_framework.urlpatterns.format_suffix_patterns"
] | [((381, 416), 'rest_framework.urlpatterns.format_suffix_patterns', 'format_suffix_patterns', (['urlpatterns'], {}), '(urlpatterns)\n', (403, 416), False, 'from rest_framework.urlpatterns import format_suffix_patterns\n')] |
"""Test for certbot_nginx.nginxparser."""
import copy
import operator
import tempfile
import unittest
from pyparsing import ParseException
from certbot_nginx.nginxparser import (
RawNginxParser, loads, load, dumps, dump, UnspacedList)
from certbot_nginx.tests import util
FIRST = operator.itemgetter(0)
class TestRawNginxParser(unittest.TestCase):
    """Test the raw low-level Nginx config parser."""
    def test_assignments(self):
        """Simple directive assignments parse into [name, spacing, value]."""
        parsed = RawNginxParser.assignment.parseString('root /test;').asList()
        self.assertEqual(parsed, ['root', ' ', '/test'])
        # NOTE(review): the third argument below is passed as assertEqual's
        # msg parameter -- it looks like a second expected list was intended
        parsed = RawNginxParser.assignment.parseString('root /test;foo bar;').asList()
        self.assertEqual(parsed, ['root', ' ', '/test'], ['foo', ' ', 'bar'])
    def test_blocks(self):
        """Blocks parse into a [header-tokens, body-tokens] pair."""
        parsed = RawNginxParser.block.parseString('foo {}').asList()
        self.assertEqual(parsed, [['foo', ' '], []])
        parsed = RawNginxParser.block.parseString('location /foo{}').asList()
        self.assertEqual(parsed, [['location', ' ', '/foo'], []])
        parsed = RawNginxParser.block.parseString('foo { bar foo ; }').asList()
        self.assertEqual(parsed, [['foo', ' '], [[' ', 'bar', ' ', 'foo', ' '], ' ']])
    def test_nested_blocks(self):
        """A block inside another block stays nested in the outer body."""
        parsed = RawNginxParser.block.parseString('foo { bar {} }').asList()
        block, content = parsed
        self.assertEqual(FIRST(content), [[' ', 'bar', ' '], []])
        self.assertEqual(FIRST(block), 'foo')
    def test_dump_as_string(self):
        """dumps() renders a spaced parse tree back to nginx config text."""
        dumped = dumps(UnspacedList([
            ['user', ' ', 'www-data'],
            [['\n', 'server', ' '], [
                ['\n    ', 'listen', ' ', '80'],
                ['\n    ', 'server_name', ' ', 'foo.com'],
                ['\n    ', 'root', ' ', '/home/ubuntu/sites/foo/'],
                [['\n\n    ', 'location', ' ', '/status', ' '], [
                    ['\n        ', 'check_status', ''],
                    [['\n\n        ', 'types', ' '],
                     [['\n            ', 'image/jpeg', ' ', 'jpg']]],
                ]]
            ]]]))
        self.assertEqual(dumped.split('\n'),
                         'user www-data;\n'
                         'server {\n'
                         '    listen 80;\n'
                         '    server_name foo.com;\n'
                         '    root /home/ubuntu/sites/foo/;\n'
                         '\n'
                         '    location /status {\n'
                         '        check_status;\n'
                         '\n'
                         '        types {\n'
                         '            image/jpeg jpg;}}}'.split('\n'))
    def test_parse_from_file(self):
        """foo.conf parses to the expected comment-/whitespace-free tree."""
        with open(util.get_data_filename('foo.conf')) as handle:
            parsed = util.filter_comments(load(handle))
        self.assertEqual(
            parsed,
            [['user', 'www-data'],
             [['http'],
              [[['server'], [
                  ['listen', '*:80', 'default_server', 'ssl'],
                  ['server_name', '*.www.foo.com', '*.www.example.com'],
                  ['root', '/home/ubuntu/sites/foo/'],
                  [['location', '/status'], [
                      [['types'], [['image/jpeg', 'jpg']]],
                  ]],
                  [['location', '~', r'case_sensitive\.php$'], [
                      ['index', 'index.php'],
                      ['root', '/var/root'],
                  ]],
                  [['location', '~*', r'case_insensitive\.php$'], []],
                  [['location', '=', r'exact_match\.php$'], []],
                  [['location', '^~', r'ignore_regex\.php$'], []]
              ]]]]]
        )
    def test_parse_from_file2(self):
        """edge_cases.conf: 'if' blocks and quoted header values parse."""
        with open(util.get_data_filename('edge_cases.conf')) as handle:
            parsed = util.filter_comments(load(handle))
        self.assertEqual(
            parsed,
            [[['server'], [['server_name', 'simple']]],
             [['server'],
              [['server_name', 'with.if'],
               [['location', '~', '^/services/.+$'],
                [[['if', '($request_filename', '~*', '\\.(ttf|woff)$)'],
                  [['add_header', 'Access-Control-Allow-Origin', '"*"']]]]]]],
             [['server'],
              [['server_name', 'with.complicated.headers'],
               [['location', '~*', '\\.(?:gif|jpe?g|png)$'],
                [['add_header', 'Pragma', 'public'],
                 ['add_header',
                  'Cache-Control', '\'public, must-revalidate, proxy-revalidate\'',
                  '"test,;{}"', 'foo'],
                 ['blah', '"hello;world"'],
                 ['try_files', '$uri', '@rewrites']]]]]])
    def test_parse_from_file3(self):
        """multiline_quotes.conf: a quoted value spanning lines is one token."""
        with open(util.get_data_filename('multiline_quotes.conf')) as handle:
            parsed = util.filter_comments(load(handle))
        self.assertEqual(
            parsed,
            [[['http'],
              [[['server'],
                [['listen', '*:443'],
                 [['location', '/'],
                  [['body_filter_by_lua',
                    '\'ngx.ctx.buffered = (ngx.ctx.buffered or "")'
                    ' .. string.sub(ngx.arg[1], 1, 1000)\n'
                    '            '
                    'if ngx.arg[2] then\n'
                    '              '
                    'ngx.var.resp_body = ngx.ctx.buffered\n'
                    '            end\'']]]]]]]])
    def test_abort_on_parse_failure(self):
        """Malformed configuration raises ParseException from load()."""
        with open(util.get_data_filename('broken.conf')) as handle:
            self.assertRaises(ParseException, load, handle)
    def test_dump_as_file(self):
        """A tree extended in memory survives a dump()/load() round trip."""
        with open(util.get_data_filename('nginx.conf')) as handle:
            parsed = load(handle)
        parsed[-1][-1].append(UnspacedList([['server'],
                               [['listen', ' ', '443', ' ', 'ssl'],
                                ['server_name', ' ', 'localhost'],
                                ['ssl_certificate', ' ', 'cert.pem'],
                                ['ssl_certificate_key', ' ', 'cert.key'],
                                ['ssl_session_cache', ' ', 'shared:SSL:1m'],
                                ['ssl_session_timeout', ' ', '5m'],
                                ['ssl_ciphers', ' ', 'HIGH:!aNULL:!MD5'],
                                [['location', ' ', '/'],
                                 [['root', ' ', 'html'],
                                  ['index', ' ', 'index.html', ' ', 'index.htm']]]]]))
        with tempfile.TemporaryFile(mode='w+t') as f:
            dump(parsed, f)
            f.seek(0)
            parsed_new = load(f)
        self.assertEqual(parsed, parsed_new)
    def test_comments(self):
        """Comments round-trip through dump()/load() and parse as '#' nodes."""
        with open(util.get_data_filename('minimalistic_comments.conf')) as handle:
            parsed = load(handle)
        with tempfile.TemporaryFile(mode='w+t') as f:
            dump(parsed, f)
            f.seek(0)
            parsed_new = load(f)
        self.assertEqual(parsed, parsed_new)
        self.assertEqual(parsed_new, [
            ['#', " Use bar.conf when it's a full moon!"],
            ['include', 'foo.conf'],
            ['#', ' Kilroy was here'],
            ['check_status'],
            [['server'],
             [['#', ''],
              ['#', " Don't forget to open up your firewall!"],
              ['#', ''],
              ['listen', '1234'],
              ['#', ' listen 80;']]],
        ])
    def test_issue_518(self):
        """Regression: quoted values inside 'if' conditions (issue #518)."""
        parsed = loads('if ($http_accept ~* "webp") { set $webp "true"; }')
        self.assertEqual(parsed, [
            [['if', '($http_accept', '~*', '"webp")'],
             [['set', '$webp', '"true"']]]
        ])
    def test_comment_in_block(self):
        """A comment on its own line inside a block is kept as a '#' node."""
        parsed = loads("""http {
          # server{
          }""")
        self.assertEqual(parsed, [
            [['http'],
             [['#', ' server{']]]
        ])
    def test_access_log(self):
        """Comma-separated directive parameters stay one token."""
        # see issue #3798
        parsed = loads('access_log syslog:server=unix:/dev/log,facility=auth,'
                       'tag=nginx_post,severity=info custom;')
        self.assertEqual(parsed, [
            ['access_log',
             'syslog:server=unix:/dev/log,facility=auth,tag=nginx_post,severity=info',
             'custom']
        ])
    def test_add_header(self):
        """add_header values containing commas parse as one token."""
        # see issue #3798
        parsed = loads('add_header Cache-Control no-cache,no-store,must-revalidate,max-age=0;')
        self.assertEqual(parsed, [
            ['add_header', 'Cache-Control', 'no-cache,no-store,must-revalidate,max-age=0']
        ])
    def test_map_then_assignment_in_block(self):
        """A map block followed by a bare directive parses in order."""
        # see issue #3798
        test_str = """http {
            map $http_upgrade $connection_upgrade {
              default upgrade;
              '' close;
              "~Opera Mini" 1;
              *.example.com 1;
            }
            one;
        }"""
        parsed = loads(test_str)
        self.assertEqual(parsed, [
            [['http'], [
                [['map', '$http_upgrade', '$connection_upgrade'], [
                    ['default', 'upgrade'],
                    ["''", 'close'],
                    ['"~Opera Mini"', '1'],
                    ['*.example.com', '1']
                ]],
                ['one']
            ]]
        ])
    def test_variable_name(self):
        """Variable interpolation like ${request_uri} stays inside one token."""
        parsed = loads('try_files /typo3temp/tx_ncstaticfilecache/'
                       '$host${request_uri}index.html @nocache;')
        self.assertEqual(parsed, [
            ['try_files',
             '/typo3temp/tx_ncstaticfilecache/$host${request_uri}index.html',
             '@nocache']
        ])
    def test_weird_blocks(self):
        """Assorted tricky constructs: if-blocks, regex locations, ${} vars."""
        test = r"""
            if ($http_user_agent ~ MSIE) {
                rewrite ^(.*)$ /msie/$1 break;
            }
            if ($http_cookie ~* "id=([^;]+)(?:;|$)") {
                set $id $1;
            }
            if ($request_method = POST) {
                return 405;
            }
            if ($request_method) {
                return 403;
            }
            if ($args ~ post=140){
                rewrite ^ http://example.com/;
            }
            location ~ ^/users/(.+\.(?:gif|jpe?g|png))$ {
                alias /data/w3/images/$1;
            }
            proxy_set_header X-Origin-URI ${scheme}://${http_host}/$request_uri;
        """
        parsed = loads(test)
        self.assertEqual(parsed, [[['if', '($http_user_agent', '~', 'MSIE)'],
                                   [['rewrite', '^(.*)$', '/msie/$1', 'break']]],
            [['if', '($http_cookie', '~*', '"id=([^;]+)(?:;|$)")'], [['set', '$id', '$1']]],
            [['if', '($request_method', '=', 'POST)'], [['return', '405']]],
            [['if', '($request_method)'],
            [['return', '403']]], [['if', '($args', '~', 'post=140)'],
            [['rewrite', '^', 'http://example.com/']]],
            [['location', '~', '^/users/(.+\\.(?:gif|jpe?g|png))$'],
            [['alias', '/data/w3/images/$1']]],
            ['proxy_set_header', 'X-Origin-URI', '${scheme}://${http_host}/$request_uri']]
        )
    def test_edge_cases(self):
        """Quoting corner cases, inline comments, continuation lines, ${} names."""
        # quotes
        parsed = loads(r'"hello\""; # blah "heh heh"')
        self.assertEqual(parsed, [['"hello\\""'], ['#', ' blah "heh heh"']])
        # if with comment
        parsed = loads("""if ($http_cookie ~* "id=([^;]+)(?:;|$)") { # blah )
        }""")
        self.assertEqual(parsed, [[['if', '($http_cookie', '~*', '"id=([^;]+)(?:;|$)")'],
                                   [['#', ' blah )']]]])
        # end paren
        test = """
            one"test";
            ("two");
            "test")red;
            "test")"blue";
            "test")"three;
            (one"test")one;
            one";
            one"test;
            one"test"one;
        """
        parsed = loads(test)
        self.assertEqual(parsed, [
            ['one"test"'],
            ['("two")'],
            ['"test")red'],
            ['"test")"blue"'],
            ['"test")"three'],
            ['(one"test")one'],
            ['one"'],
            ['one"test'],
            ['one"test"one']
        ])
        self.assertRaises(ParseException, loads, r'"test"one;') # fails
        self.assertRaises(ParseException, loads, r'"test;') # fails
        # newlines
        test = """
            server_name foo.example.com bar.example.com \
                        baz.example.com qux.example.com;
            server_name foo.example.com bar.example.com
                        baz.example.com qux.example.com;
        """
        parsed = loads(test)
        self.assertEqual(parsed, [
            ['server_name', 'foo.example.com', 'bar.example.com',
             'baz.example.com', 'qux.example.com'],
            ['server_name', 'foo.example.com', 'bar.example.com',
             'baz.example.com', 'qux.example.com']
        ])
        # variable weirdness
        parsed = loads("directive $var ${var} $ ${};")
        self.assertEqual(parsed, [['directive', '$var', '${var}', '$', '${}']])
        self.assertRaises(ParseException, loads, "server {server_name test.com};")
        self.assertEqual(loads("blag${dfgdfg};"), [['blag${dfgdfg}']])
        self.assertRaises(ParseException, loads, "blag${dfgdf{g};")
class TestUnspacedList(unittest.TestCase):
    """Test the UnspacedList data structure"""
    def setUp(self):
        """Build a spaced raw list and its UnspacedList view for each test."""
        self.a = ["\n    ", "things", " ", "quirk"]
        self.b = ["y", " "]
        self.l = self.a[:]
        self.l2 = self.b[:]
        self.ul = UnspacedList(self.l)
        self.ul2 = UnspacedList(self.l2)
    def test_construction(self):
        """Whitespace entries are hidden from the unspaced view."""
        self.assertEqual(self.ul, ["things", "quirk"])
        self.assertEqual(self.ul2, ["y"])
    def test_append(self):
        """append() adds to both the unspaced and the spaced representation."""
        ul3 = copy.deepcopy(self.ul)
        ul3.append("wise")
        self.assertEqual(ul3, ["things", "quirk", "wise"])
        self.assertEqual(ul3.spaced, self.a + ["wise"])
    def test_add(self):
        """+ concatenates with an UnspacedList or raw list without mutating."""
        ul3 = self.ul + self.ul2
        self.assertEqual(ul3, ["things", "quirk", "y"])
        self.assertEqual(ul3.spaced, self.a + self.b)
        self.assertEqual(self.ul.spaced, self.a)
        ul3 = self.ul + self.l2
        self.assertEqual(ul3, ["things", "quirk", "y"])
        self.assertEqual(ul3.spaced, self.a + self.b)
    def test_extend(self):
        """extend() merges the spaced forms and leaves the source untouched."""
        ul3 = copy.deepcopy(self.ul)
        ul3.extend(self.ul2)
        self.assertEqual(ul3, ["things", "quirk", "y"])
        self.assertEqual(ul3.spaced, self.a + self.b)
        self.assertEqual(self.ul.spaced, self.a)
    def test_set(self):
        """Item assignment updates the spaced form, preserving leading space."""
        ul3 = copy.deepcopy(self.ul)
        ul3[0] = "zither"
        l = ["\n ", "zather", "zest"]
        ul3[1] = UnspacedList(l)
        self.assertEqual(ul3, ["zither", ["zather", "zest"]])
        self.assertEqual(ul3.spaced, [self.a[0], "zither", " ", l])
    def test_get(self):
        """Out-of-range indices raise IndexError on the unspaced view."""
        self.assertRaises(IndexError, self.ul2.__getitem__, 2)
        self.assertRaises(IndexError, self.ul2.__getitem__, -3)
    def test_insert(self):
        """insert() places the item at the unspaced index in both views."""
        x = UnspacedList(
            [['\n    ', 'listen', ' ', '192.168.3.11:9000'],
             ['\n    ', 'listen', ' ', '127.0.0.1'],
             ['\n    ', 'server_name', ' ', '.example.com'],
             ['\n    ', 'server_name', ' ', 'example.*'], '\n',
             ['listen', ' ', '5001', ' ', 'ssl']])
        x.insert(5, "FROGZ")
        self.assertEqual(x,
            [['listen', '192.168.3.11:9000'], ['listen', '127.0.0.1'],
             ['server_name', '.example.com'], ['server_name', 'example.*'],
             ['listen', '5001', 'ssl'], 'FROGZ'])
        self.assertEqual(x.spaced,
            [['\n    ', 'listen', ' ', '192.168.3.11:9000'],
             ['\n    ', 'listen', ' ', '127.0.0.1'],
             ['\n    ', 'server_name', ' ', '.example.com'],
             ['\n    ', 'server_name', ' ', 'example.*'], '\n',
             ['listen', ' ', '5001', ' ', 'ssl'],
             'FROGZ'])
    def test_rawlists(self):
        """Mixed mutations (insert/append/extend/del) keep the view consistent."""
        ul3 = copy.deepcopy(self.ul)
        ul3.insert(0, "some")
        ul3.append("why")
        ul3.extend(["did", "whether"])
        del ul3[2]
        self.assertEqual(ul3, ["some", "things", "why", "did", "whether"])
    def test_is_dirty(self):
        """is_dirty() reports modification of the list or any nested child."""
        self.assertEqual(False, self.ul2.is_dirty())
        ul3 = UnspacedList([])
        ul3.append(self.ul)
        self.assertEqual(False, self.ul.is_dirty())
        self.assertEqual(True, ul3.is_dirty())
        ul4 = UnspacedList([[1], [2, 3, 4]])
        self.assertEqual(False, ul4.is_dirty())
        ul4[1][2] = 5
        self.assertEqual(True, ul4.is_dirty())
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main() # pragma: no cover
| [
"certbot_nginx.nginxparser.RawNginxParser.block.parseString",
"certbot_nginx.nginxparser.RawNginxParser.assignment.parseString",
"tempfile.TemporaryFile",
"certbot_nginx.nginxparser.dump",
"certbot_nginx.nginxparser.load",
"certbot_nginx.tests.util.get_data_filename",
"certbot_nginx.nginxparser.loads",
... | [((288, 310), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (307, 310), False, 'import operator\n'), ((16666, 16681), 'unittest.main', 'unittest.main', ([], {}), '()\n', (16679, 16681), False, 'import unittest\n'), ((7556, 7614), 'certbot_nginx.nginxparser.loads', 'loads', (['"""if ($http_accept ~* "webp") { set $webp "true"; }"""'], {}), '(\'if ($http_accept ~* "webp") { set $webp "true"; }\')\n', (7561, 7614), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((7815, 7866), 'certbot_nginx.nginxparser.loads', 'loads', (['"""http {\n # server{\n }"""'], {}), '("""http {\n # server{\n }""")\n', (7820, 7866), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((8046, 8154), 'certbot_nginx.nginxparser.loads', 'loads', (['"""access_log syslog:server=unix:/dev/log,facility=auth,tag=nginx_post,severity=info custom;"""'], {}), "(\n 'access_log syslog:server=unix:/dev/log,facility=auth,tag=nginx_post,severity=info custom;'\n )\n", (8051, 8154), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((8419, 8497), 'certbot_nginx.nginxparser.loads', 'loads', (['"""add_header Cache-Control no-cache,no-store,must-revalidate,max-age=0;"""'], {}), "('add_header Cache-Control no-cache,no-store,must-revalidate,max-age=0;')\n", (8424, 8497), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((8976, 8991), 'certbot_nginx.nginxparser.loads', 'loads', (['test_str'], {}), '(test_str)\n', (8981, 8991), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((9410, 9510), 'certbot_nginx.nginxparser.loads', 'loads', (['"""try_files /typo3temp/tx_ncstaticfilecache/$host${request_uri}index.html @nocache;"""'], {}), "(\n 'try_files /typo3temp/tx_ncstaticfilecache/$host${request_uri}index.html 
@nocache;'\n )\n", (9415, 9510), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((10427, 10438), 'certbot_nginx.nginxparser.loads', 'loads', (['test'], {}), '(test)\n', (10432, 10438), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((11199, 11236), 'certbot_nginx.nginxparser.loads', 'loads', (['""""hello\\\\""; # blah "heh heh\\""""'], {}), '(\'"hello\\\\""; # blah "heh heh"\')\n', (11204, 11236), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((11358, 11436), 'certbot_nginx.nginxparser.loads', 'loads', (['"""if ($http_cookie ~* "id=([^;]+)(?:;|$)") { # blah )\n }"""'], {}), '("""if ($http_cookie ~* "id=([^;]+)(?:;|$)") { # blah )\n }""")\n', (11363, 11436), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((11846, 11857), 'certbot_nginx.nginxparser.loads', 'loads', (['test'], {}), '(test)\n', (11851, 11857), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((12591, 12602), 'certbot_nginx.nginxparser.loads', 'loads', (['test'], {}), '(test)\n', (12596, 12602), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((12937, 12974), 'certbot_nginx.nginxparser.loads', 'loads', (['"""directive $var ${var} $ ${};"""'], {}), "('directive $var ${var} $ ${};')\n", (12942, 12974), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((13543, 13563), 'certbot_nginx.nginxparser.UnspacedList', 'UnspacedList', (['self.l'], {}), '(self.l)\n', (13555, 13563), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((13583, 13604), 'certbot_nginx.nginxparser.UnspacedList', 'UnspacedList', (['self.l2'], {}), '(self.l2)\n', (13595, 13604), False, 
'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((13778, 13800), 'copy.deepcopy', 'copy.deepcopy', (['self.ul'], {}), '(self.ul)\n', (13791, 13800), False, 'import copy\n'), ((14344, 14366), 'copy.deepcopy', 'copy.deepcopy', (['self.ul'], {}), '(self.ul)\n', (14357, 14366), False, 'import copy\n'), ((14594, 14616), 'copy.deepcopy', 'copy.deepcopy', (['self.ul'], {}), '(self.ul)\n', (14607, 14616), False, 'import copy\n'), ((14698, 14713), 'certbot_nginx.nginxparser.UnspacedList', 'UnspacedList', (['l'], {}), '(l)\n', (14710, 14713), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((15036, 15299), 'certbot_nginx.nginxparser.UnspacedList', 'UnspacedList', (["[['\\n ', 'listen', ' ', '192.168.3.11:9000'], ['\\n ', 'listen',\n ' ', '127.0.0.1'], ['\\n ', 'server_name', ' ', '.example.com'],\n ['\\n ', 'server_name', ' ', 'example.*'], '\\n', ['listen', ' ',\n '5001', ' ', 'ssl']]"], {}), "([['\\n ', 'listen', ' ', '192.168.3.11:9000'], [\n '\\n ', 'listen', ' ', '127.0.0.1'], ['\\n ', 'server_name',\n ' ', '.example.com'], ['\\n ', 'server_name', ' ', 'example.*'], '\\n',\n ['listen', ' ', '5001', ' ', 'ssl']])\n", (15048, 15299), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((16018, 16040), 'copy.deepcopy', 'copy.deepcopy', (['self.ul'], {}), '(self.ul)\n', (16031, 16040), False, 'import copy\n'), ((16327, 16343), 'certbot_nginx.nginxparser.UnspacedList', 'UnspacedList', (['[]'], {}), '([])\n', (16339, 16343), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((16485, 16515), 'certbot_nginx.nginxparser.UnspacedList', 'UnspacedList', (['[[1], [2, 3, 4]]'], {}), '([[1], [2, 3, 4]])\n', (16497, 16515), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((1522, 1908), 
'certbot_nginx.nginxparser.UnspacedList', 'UnspacedList', (["[['user', ' ', 'www-data'], [['\\n', 'server', ' '], [['\\n ', 'listen',\n ' ', '80'], ['\\n ', 'server_name', ' ', 'foo.com'], ['\\n ',\n 'root', ' ', '/home/ubuntu/sites/foo/'], [['\\n\\n ', 'location', ' ',\n '/status', ' '], [['\\n ', 'check_status', ''], [['\\n\\n ',\n 'types', ' '], [['\\n ', 'image/jpeg', ' ', 'jpg']]]]]]]]"], {}), "([['user', ' ', 'www-data'], [['\\n', 'server', ' '], [['\\n ',\n 'listen', ' ', '80'], ['\\n ', 'server_name', ' ', 'foo.com'], [\n '\\n ', 'root', ' ', '/home/ubuntu/sites/foo/'], [['\\n\\n ',\n 'location', ' ', '/status', ' '], [['\\n ', 'check_status', ''],\n [['\\n\\n ', 'types', ' '], [['\\n ', 'image/jpeg', ' ',\n 'jpg']]]]]]]])\n", (1534, 1908), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((5800, 5812), 'certbot_nginx.nginxparser.load', 'load', (['handle'], {}), '(handle)\n', (5804, 5812), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((5843, 6265), 'certbot_nginx.nginxparser.UnspacedList', 'UnspacedList', (["[['server'], [['listen', ' ', '443', ' ', 'ssl'], ['server_name', ' ',\n 'localhost'], ['ssl_certificate', ' ', 'cert.pem'], [\n 'ssl_certificate_key', ' ', 'cert.key'], ['ssl_session_cache', ' ',\n 'shared:SSL:1m'], ['ssl_session_timeout', ' ', '5m'], ['ssl_ciphers',\n ' ', 'HIGH:!aNULL:!MD5'], [['location', ' ', '/'], [['root', ' ',\n 'html'], ['index', ' ', 'index.html', ' ', 'index.htm']]]]]"], {}), "([['server'], [['listen', ' ', '443', ' ', 'ssl'], [\n 'server_name', ' ', 'localhost'], ['ssl_certificate', ' ', 'cert.pem'],\n ['ssl_certificate_key', ' ', 'cert.key'], ['ssl_session_cache', ' ',\n 'shared:SSL:1m'], ['ssl_session_timeout', ' ', '5m'], ['ssl_ciphers',\n ' ', 'HIGH:!aNULL:!MD5'], [['location', ' ', '/'], [['root', ' ',\n 'html'], ['index', ' ', 'index.html', ' ', 'index.htm']]]]])\n", (5855, 6265), False, 'from 
certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((6582, 6616), 'tempfile.TemporaryFile', 'tempfile.TemporaryFile', ([], {'mode': '"""w+t"""'}), "(mode='w+t')\n", (6604, 6616), False, 'import tempfile\n'), ((6635, 6650), 'certbot_nginx.nginxparser.dump', 'dump', (['parsed', 'f'], {}), '(parsed, f)\n', (6639, 6650), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((6698, 6705), 'certbot_nginx.nginxparser.load', 'load', (['f'], {}), '(f)\n', (6702, 6705), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((6885, 6897), 'certbot_nginx.nginxparser.load', 'load', (['handle'], {}), '(handle)\n', (6889, 6897), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((6912, 6946), 'tempfile.TemporaryFile', 'tempfile.TemporaryFile', ([], {'mode': '"""w+t"""'}), "(mode='w+t')\n", (6934, 6946), False, 'import tempfile\n'), ((6965, 6980), 'certbot_nginx.nginxparser.dump', 'dump', (['parsed', 'f'], {}), '(parsed, f)\n', (6969, 6980), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((7028, 7035), 'certbot_nginx.nginxparser.load', 'load', (['f'], {}), '(f)\n', (7032, 7035), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((13163, 13186), 'certbot_nginx.nginxparser.loads', 'loads', (['"""blag${dfgdfg};"""'], {}), "('blag${dfgdfg};')\n", (13168, 13186), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((462, 514), 'certbot_nginx.nginxparser.RawNginxParser.assignment.parseString', 'RawNginxParser.assignment.parseString', (['"""root /test;"""'], {}), "('root /test;')\n", (499, 514), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((598, 658), 
'certbot_nginx.nginxparser.RawNginxParser.assignment.parseString', 'RawNginxParser.assignment.parseString', (['"""root /test;foo bar;"""'], {}), "('root /test;foo bar;')\n", (635, 658), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((791, 833), 'certbot_nginx.nginxparser.RawNginxParser.block.parseString', 'RawNginxParser.block.parseString', (['"""foo {}"""'], {}), "('foo {}')\n", (823, 833), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((913, 964), 'certbot_nginx.nginxparser.RawNginxParser.block.parseString', 'RawNginxParser.block.parseString', (['"""location /foo{}"""'], {}), "('location /foo{}')\n", (945, 964), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((1057, 1110), 'certbot_nginx.nginxparser.RawNginxParser.block.parseString', 'RawNginxParser.block.parseString', (['"""foo { bar foo ; }"""'], {}), "('foo { bar foo ; }')\n", (1089, 1110), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((1259, 1309), 'certbot_nginx.nginxparser.RawNginxParser.block.parseString', 'RawNginxParser.block.parseString', (['"""foo { bar {} }"""'], {}), "('foo { bar {} }')\n", (1291, 1309), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((2694, 2728), 'certbot_nginx.tests.util.get_data_filename', 'util.get_data_filename', (['"""foo.conf"""'], {}), "('foo.conf')\n", (2716, 2728), False, 'from certbot_nginx.tests import util\n'), ((2783, 2795), 'certbot_nginx.nginxparser.load', 'load', (['handle'], {}), '(handle)\n', (2787, 2795), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((3717, 3758), 'certbot_nginx.tests.util.get_data_filename', 'util.get_data_filename', (['"""edge_cases.conf"""'], {}), "('edge_cases.conf')\n", (3739, 3758), 
False, 'from certbot_nginx.tests import util\n'), ((3813, 3825), 'certbot_nginx.nginxparser.load', 'load', (['handle'], {}), '(handle)\n', (3817, 3825), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((4717, 4764), 'certbot_nginx.tests.util.get_data_filename', 'util.get_data_filename', (['"""multiline_quotes.conf"""'], {}), "('multiline_quotes.conf')\n", (4739, 4764), False, 'from certbot_nginx.tests import util\n'), ((4819, 4831), 'certbot_nginx.nginxparser.load', 'load', (['handle'], {}), '(handle)\n', (4823, 4831), False, 'from certbot_nginx.nginxparser import RawNginxParser, loads, load, dumps, dump, UnspacedList\n'), ((5568, 5605), 'certbot_nginx.tests.util.get_data_filename', 'util.get_data_filename', (['"""broken.conf"""'], {}), "('broken.conf')\n", (5590, 5605), False, 'from certbot_nginx.tests import util\n'), ((5730, 5766), 'certbot_nginx.tests.util.get_data_filename', 'util.get_data_filename', (['"""nginx.conf"""'], {}), "('nginx.conf')\n", (5752, 5766), False, 'from certbot_nginx.tests import util\n'), ((6799, 6851), 'certbot_nginx.tests.util.get_data_filename', 'util.get_data_filename', (['"""minimalistic_comments.conf"""'], {}), "('minimalistic_comments.conf')\n", (6821, 6851), False, 'from certbot_nginx.tests import util\n')] |
import pytest
from ..model_base_test import ModelBaseTest
from tests.sampleresponse.cardless_credit import cardless_credit_payment_response
from xendit.models import CardlessCredit, CardlessCreditType
# fmt: off
class TestCreateCardlessCreditPayment(ModelBaseTest):
    """Tests for ``CardlessCredit.create_payment`` against a mocked API requestor.

    The fixtures build the tuples consumed by the shared ``ModelBaseTest``
    helpers: one describing the call and its expected (mocked) response, and
    one describing the exact HTTP request body the API requestor must receive.
    NOTE(review): "<NAME>"/"<EMAIL>" look like anonymization placeholders from
    the dataset, not meaningful values — confirm against the upstream repo.
    """
    @pytest.fixture
    def default_cardless_credit_data(self):
        """Build the standard test fixture for a Kredivo cardless-credit payment.

        Returns:
            Tuple of (tested_class, class_name, method_name, http_method_name,
            url, (args, kwargs), expected_correct_result) as expected by the
            ``ModelBaseTest`` run_* helpers.
        """
        tested_class = CardlessCredit
        class_name = "CardlessCredit"
        method_name = "create_payment"
        http_method_name = "post"
        # One sample line item built through the model's helper constructor.
        cardless_credit_items = []
        cardless_credit_items.append(
            CardlessCredit.helper_create_item(
                id="item-123",
                name="Phone Case",
                price=200000,
                type="Smartphone",
                url="http://example.com/phone/phone_case",
                quantity=2,
            )
        )
        shipping_address = CardlessCredit.helper_create_shipping_address(
            first_name="<NAME>",
            last_name="<NAME>",
            address="Jl Teknologi No. 12",
            city="Jakarta",
            postal_code="12345",
            phone="081513114262",
            country_code="IDN",
        )
        customer_details = CardlessCredit.helper_create_customer_details(
            first_name="customer first name",
            last_name="customer last name",
            email="<EMAIL>",
            phone="0812332145",
        )
        # Positional args are empty; everything is passed by keyword.
        args = ()
        kwargs = {
            "cardless_credit_type": CardlessCreditType.KREDIVO,
            "external_id": "mock-id-123",
            "amount": 10000,
            "payment_type": "3_months",
            "items": cardless_credit_items,
            "customer_details": customer_details,
            "shipping_address": shipping_address,
            "redirect_url": "https://mock-my-shop.com/home",
            "callback_url": "https://mock-my-shop.com/callback",
            "x_idempotency_key": "test_idemp_123",
        }
        params = (args, kwargs)
        url = "/cardless-credit"
        expected_correct_result = cardless_credit_payment_response()
        return (tested_class, class_name, method_name, http_method_name, url, params, expected_correct_result)
    @pytest.fixture
    def api_requestor_request_data(self, default_cardless_credit_data):
        """Extend the default fixture with the exact headers and JSON body
        the API requestor is expected to send for that call.

        Returns:
            Tuple of (tested_class, class_name, method_name, http_method_name,
            url, params, headers, body).
        """
        tested_class, class_name, method_name, http_method_name, url, params, _ = default_cardless_credit_data
        # x_idempotency_key from the kwargs must surface as this header.
        headers = {"X-IDEMPOTENCY-KEY": "test_idemp_123"}
        body = {
            "cardless_credit_type": "KREDIVO",
            "external_id": "mock-id-123",
            "amount": 10000,
            "payment_type": "3_months",
            "items": [
                {
                    "id": "item-123",
                    "name": "<NAME>",
                    "price": 200000,
                    "type": "Smartphone",
                    "url": "http://example.com/phone/phone_case",
                    "quantity": 2,
                }
            ],
            "customer_details": {
                "first_name": "customer <NAME>",
                "last_name": "<NAME>",
                "email": "<EMAIL>",
                "phone": "0812332145",
            },
            "shipping_address": {
                "first_name": "<NAME>",
                "last_name": "<NAME>",
                "address": "Jl Teknologi No. 12",
                "city": "Jakarta",
                "postal_code": "12345",
                "phone": "081513114262",
                "country_code": "IDN",
            },
            "redirect_url": "https://mock-my-shop.com/home",
            "callback_url": "https://mock-my-shop.com/callback",
        }
        return (tested_class, class_name, method_name, http_method_name, url, params, headers, body)
    @pytest.mark.parametrize("mock_correct_response", [cardless_credit_payment_response()], indirect=True)
    def test_return_cardless_credit_payment_on_correct_params(
        self, mocker, mock_correct_response, default_cardless_credit_data
    ):
        """Instance-configured client returns the mocked payment response."""
        self.run_success_return_test_on_xendit_instance(mocker, mock_correct_response, default_cardless_credit_data)
    def test_raise_xendit_error_on_response_error(
        self, mocker, mock_error_request_response, default_cardless_credit_data
    ):
        """Instance-configured client raises XenditError on an error response."""
        self.run_raises_error_test_on_xendit_instance(mocker, mock_error_request_response, default_cardless_credit_data)
    @pytest.mark.parametrize("mock_correct_response", [cardless_credit_payment_response()], indirect=True)
    def test_return_cardless_credit_payment_on_correct_params_and_global_xendit(
        self, mocker, mock_correct_response, default_cardless_credit_data
    ):
        """Globally-configured client returns the mocked payment response."""
        self.run_success_return_test_on_global_config(mocker, mock_correct_response, default_cardless_credit_data)
    def test_raise_xendit_error_on_response_error_and_global_xendit(
        self, mocker, mock_error_request_response, default_cardless_credit_data
    ):
        """Globally-configured client raises XenditError on an error response."""
        self.run_raises_error_test_on_global_config(mocker, mock_error_request_response, default_cardless_credit_data)
    @pytest.mark.parametrize("mock_correct_response", [cardless_credit_payment_response()], indirect=True)
    def test_send_correct_request_to_api_requestor(self, mocker, mock_correct_response, api_requestor_request_data):
        """The API requestor receives exactly the expected URL, headers and body."""
        self.run_send_correct_request_to_api_requestor(mocker, mock_correct_response, api_requestor_request_data)
# fmt: on
| [
"xendit.models.CardlessCredit.helper_create_shipping_address",
"xendit.models.CardlessCredit.helper_create_customer_details",
"xendit.models.CardlessCredit.helper_create_item",
"tests.sampleresponse.cardless_credit.cardless_credit_payment_response"
] | [((897, 1101), 'xendit.models.CardlessCredit.helper_create_shipping_address', 'CardlessCredit.helper_create_shipping_address', ([], {'first_name': '"""<NAME>"""', 'last_name': '"""<NAME>"""', 'address': '"""Jl Teknologi No. 12"""', 'city': '"""Jakarta"""', 'postal_code': '"""12345"""', 'phone': '"""081513114262"""', 'country_code': '"""IDN"""'}), "(first_name='<NAME>',\n last_name='<NAME>', address='Jl Teknologi No. 12', city='Jakarta',\n postal_code='12345', phone='081513114262', country_code='IDN')\n", (942, 1101), False, 'from xendit.models import CardlessCredit, CardlessCreditType\n'), ((1225, 1382), 'xendit.models.CardlessCredit.helper_create_customer_details', 'CardlessCredit.helper_create_customer_details', ([], {'first_name': '"""customer first name"""', 'last_name': '"""customer last name"""', 'email': '"""<EMAIL>"""', 'phone': '"""0812332145"""'}), "(first_name=\n 'customer first name', last_name='customer last name', email='<EMAIL>',\n phone='0812332145')\n", (1270, 1382), False, 'from xendit.models import CardlessCredit, CardlessCreditType\n'), ((2096, 2130), 'tests.sampleresponse.cardless_credit.cardless_credit_payment_response', 'cardless_credit_payment_response', ([], {}), '()\n', (2128, 2130), False, 'from tests.sampleresponse.cardless_credit import cardless_credit_payment_response\n'), ((584, 748), 'xendit.models.CardlessCredit.helper_create_item', 'CardlessCredit.helper_create_item', ([], {'id': '"""item-123"""', 'name': '"""Phone Case"""', 'price': '(200000)', 'type': '"""Smartphone"""', 'url': '"""http://example.com/phone/phone_case"""', 'quantity': '(2)'}), "(id='item-123', name='Phone Case', price=\n 200000, type='Smartphone', url='http://example.com/phone/phone_case',\n quantity=2)\n", (617, 748), False, 'from xendit.models import CardlessCredit, CardlessCreditType\n'), ((3889, 3923), 'tests.sampleresponse.cardless_credit.cardless_credit_payment_response', 'cardless_credit_payment_response', ([], {}), '()\n', (3921, 3923), False, 'from 
tests.sampleresponse.cardless_credit import cardless_credit_payment_response\n'), ((4529, 4563), 'tests.sampleresponse.cardless_credit.cardless_credit_payment_response', 'cardless_credit_payment_response', ([], {}), '()\n', (4561, 4563), False, 'from tests.sampleresponse.cardless_credit import cardless_credit_payment_response\n'), ((5201, 5235), 'tests.sampleresponse.cardless_credit.cardless_credit_payment_response', 'cardless_credit_payment_response', ([], {}), '()\n', (5233, 5235), False, 'from tests.sampleresponse.cardless_credit import cardless_credit_payment_response\n')] |
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver

# Unauthenticated HTTP proxy through which all driver requests are routed.
PROXY_URL_NO_AUTH_1 = 'http://<proxy hostname 1>:<proxy port 2>'

# Resolve the Rackspace driver class, then connect to the 'ord' region with
# the proxy configured for outgoing API calls.
cls = get_driver(Provider.RACKSPACE)
driver = cls(
    'username',
    'api key',
    region='ord',
    http_proxy=PROXY_URL_NO_AUTH_1,
)
| [
"libcloud.compute.providers.get_driver"
] | [((167, 197), 'libcloud.compute.providers.get_driver', 'get_driver', (['Provider.RACKSPACE'], {}), '(Provider.RACKSPACE)\n', (177, 197), False, 'from libcloud.compute.providers import get_driver\n')] |
import os
from cakechat.config import BASE_CORPUS_NAME, S3_MODELS_BUCKET_NAME, S3_TOKENS_IDX_REMOTE_DIR, \
S3_NN_MODEL_REMOTE_DIR, S3_CONDITIONS_IDX_REMOTE_DIR
from cakechat.dialog_model.model import get_nn_model
from cakechat.utils.s3 import S3FileResolver
from cakechat.utils.text_processing import get_index_to_token_path, load_index_to_item, get_index_to_condition_path
def _load_index(index_path, s3_remote_dir, index_name, fetch_from_s3):
    """Load an index->item mapping from *index_path*.

    Shared implementation for the token and condition indices (the two
    original functions were byte-for-byte duplicates apart from paths/names).

    Args:
        index_path: local path of the index file.
        s3_remote_dir: remote directory in the S3 models bucket.
        index_name: human-readable name used in error messages.
        fetch_from_s3: when True, resolve (download) the file from S3 first;
            otherwise the file must already exist locally.

    Raises:
        Exception: if the index file cannot be found in the chosen location.
    """
    if fetch_from_s3:
        resolver = S3FileResolver(index_path, S3_MODELS_BUCKET_NAME, s3_remote_dir)
        if not resolver.resolve():
            raise Exception('Can\'t get %s because file does not exist at S3' % index_name)
    else:
        if not os.path.exists(index_path):
            raise Exception('Can\'t get %s because file does not exist. '
                            'Run tools/download_model.py first to get all required files or construct it by yourself.'
                            % index_name)
    return load_index_to_item(index_path)


def _get_index_to_token(fetch_from_s3):
    """Return the index -> token mapping for the base corpus."""
    return _load_index(
        get_index_to_token_path(BASE_CORPUS_NAME), S3_TOKENS_IDX_REMOTE_DIR, 'index_to_token', fetch_from_s3)


def _get_index_to_condition(fetch_from_s3):
    """Return the index -> condition mapping for the base corpus."""
    return _load_index(
        get_index_to_condition_path(BASE_CORPUS_NAME), S3_CONDITIONS_IDX_REMOTE_DIR, 'index_to_condition',
        fetch_from_s3)
def get_trained_model(reverse=False, fetch_from_s3=True):
    """Build the trained dialog NN model.

    Args:
        reverse: build the reverse model when True.
        fetch_from_s3: resolve model files from the S3 models bucket when
            True; otherwise rely on local files only.

    Raises:
        Exception: if the trained model files are not available.
    """
    resolver_factory = None
    if fetch_from_s3:
        resolver_factory = S3FileResolver.init_resolver(
            bucket_name=S3_MODELS_BUCKET_NAME, remote_dir=S3_NN_MODEL_REMOTE_DIR)

    index_to_token = _get_index_to_token(fetch_from_s3)
    index_to_condition = _get_index_to_condition(fetch_from_s3)
    nn_model, model_exists = get_nn_model(
        index_to_token,
        index_to_condition,
        resolver_factory=resolver_factory,
        is_reverse_model=reverse)

    if not model_exists:
        raise Exception('Can\'t get the model. '
                        'Run tools/download_model.py first to get all required files or train it by yourself.')
    return nn_model
| [
"os.path.exists",
"cakechat.utils.s3.S3FileResolver",
"cakechat.utils.text_processing.get_index_to_condition_path",
"cakechat.utils.s3.S3FileResolver.init_resolver",
"cakechat.utils.text_processing.get_index_to_token_path",
"cakechat.utils.text_processing.load_index_to_item"
] | [((447, 488), 'cakechat.utils.text_processing.get_index_to_token_path', 'get_index_to_token_path', (['BASE_CORPUS_NAME'], {}), '(BASE_CORPUS_NAME)\n', (470, 488), False, 'from cakechat.utils.text_processing import get_index_to_token_path, load_index_to_item, get_index_to_condition_path\n'), ((1043, 1082), 'cakechat.utils.text_processing.load_index_to_item', 'load_index_to_item', (['index_to_token_path'], {}), '(index_to_token_path)\n', (1061, 1082), False, 'from cakechat.utils.text_processing import get_index_to_token_path, load_index_to_item, get_index_to_condition_path\n'), ((1159, 1204), 'cakechat.utils.text_processing.get_index_to_condition_path', 'get_index_to_condition_path', (['BASE_CORPUS_NAME'], {}), '(BASE_CORPUS_NAME)\n', (1186, 1204), False, 'from cakechat.utils.text_processing import get_index_to_token_path, load_index_to_item, get_index_to_condition_path\n'), ((1848, 1891), 'cakechat.utils.text_processing.load_index_to_item', 'load_index_to_item', (['index_to_condition_path'], {}), '(index_to_condition_path)\n', (1866, 1891), False, 'from cakechat.utils.text_processing import get_index_to_token_path, load_index_to_item, get_index_to_condition_path\n'), ((541, 629), 'cakechat.utils.s3.S3FileResolver', 'S3FileResolver', (['index_to_token_path', 'S3_MODELS_BUCKET_NAME', 'S3_TOKENS_IDX_REMOTE_DIR'], {}), '(index_to_token_path, S3_MODELS_BUCKET_NAME,\n S3_TOKENS_IDX_REMOTE_DIR)\n', (555, 629), False, 'from cakechat.utils.s3 import S3FileResolver\n'), ((1265, 1361), 'cakechat.utils.s3.S3FileResolver', 'S3FileResolver', (['index_to_condition_path', 'S3_MODELS_BUCKET_NAME', 'S3_CONDITIONS_IDX_REMOTE_DIR'], {}), '(index_to_condition_path, S3_MODELS_BUCKET_NAME,\n S3_CONDITIONS_IDX_REMOTE_DIR)\n', (1279, 1361), False, 'from cakechat.utils.s3 import S3FileResolver\n'), ((2001, 2104), 'cakechat.utils.s3.S3FileResolver.init_resolver', 'S3FileResolver.init_resolver', ([], {'bucket_name': 'S3_MODELS_BUCKET_NAME', 'remote_dir': 'S3_NN_MODEL_REMOTE_DIR'}), 
'(bucket_name=S3_MODELS_BUCKET_NAME, remote_dir=\n S3_NN_MODEL_REMOTE_DIR)\n', (2029, 2104), False, 'from cakechat.utils.s3 import S3FileResolver\n'), ((788, 823), 'os.path.exists', 'os.path.exists', (['index_to_token_path'], {}), '(index_to_token_path)\n', (802, 823), False, 'import os\n'), ((1585, 1624), 'os.path.exists', 'os.path.exists', (['index_to_condition_path'], {}), '(index_to_condition_path)\n', (1599, 1624), False, 'import os\n')] |
import os
import sys
import glob
import time
import copy
import random
import numpy as np
import utils
import logging
import argparse
import tensorflow as tf
import tensorflow.keras as keras
from model import NASNetworkCIFAR
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# os.environ['CUDA_VISIBLE_DEVICES'] = '1'
# Basic model parameters.
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, default='train',
choices=['train', 'test'])
parser.add_argument('--dataset', type=str, default='cifar10', choices=['cifar10, cifar100'])
parser.add_argument('--model_dir', type=str, default='models')
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--eval_batch_size', type=int, default=32)
parser.add_argument('--epochs', type=int, default=600)
parser.add_argument('--cells', type=int, default=6)
parser.add_argument('--nodes', type=int, default=5)
parser.add_argument('--channels', type=int, default=36)
parser.add_argument('--cutout_size', type=int, default=8)
parser.add_argument('--grad_bound', type=float, default=10.0)
parser.add_argument('--initial_lr', type=float, default=0.025)
parser.add_argument('--keep_prob', type=float, default=0.6)
parser.add_argument('--drop_path_keep_prob', type=float, default=0.8)
parser.add_argument('--l2_reg', type=float, default=3e-4)
parser.add_argument('--arch', type=str, default=None)
parser.add_argument('--use_aux_head', action='store_true', default=False)
parser.add_argument('--seed', type=int, default=9)
parser.add_argument('--train_from_scratch', type=bool, default=False)
args = parser.parse_args()
utils.create_exp_dir(args.model_dir)
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format, datefmt='%m/%d %I:%M:%S %p')
def train(train_ds, model, optimizer, global_step, criterion, classes=10):
    """Run one training epoch over *train_ds*.

    Args:
        train_ds: dataset yielding (images, labels) batches.
        model: network returning (logits, aux_logits); aux_logits may be None.
        optimizer: optimizer applied to the model's trainable variables.
        global_step: tf.Variable step counter, incremented once per batch.
        criterion: loss callable taking (one_hot_labels, logits).
        classes: number of classes for the one-hot encoding.

    Returns:
        Tuple (top1_avg, loss_avg, global_step) accumulated over the epoch.
    """
    objs = utils.AvgMeter()
    top1 = utils.AvgMeter()
    top5 = utils.AvgMeter()
    for step, (images, labels) in enumerate(train_ds):
        global_step.assign_add(1)
        with tf.GradientTape() as tape:
            logits, aux_logits = model(images, global_step, training=True)
            loss = criterion(tf.one_hot(tf.squeeze(labels), depth=classes), logits)
            if aux_logits is not None:
                # Auxiliary head contributes a down-weighted loss term.
                aux_loss = criterion(tf.one_hot(tf.squeeze(labels), depth=classes), aux_logits)
                loss += 0.4 * aux_loss
            # L2 regularization over all trainable variables.
            reg_loss = args.l2_reg * tf.sqrt(
                tf.reduce_sum([tf.reduce_sum(tf.square(x)) for x in model.trainable_variables]))
            loss += reg_loss
        gradients = tape.gradient(loss, model.trainable_variables)
        if args.grad_bound != 0.0:
            # BUG FIX: clip with the configured --grad_bound instead of the
            # hard-coded 15 the original used despite checking the flag.
            gradients, _ = tf.clip_by_global_norm(gradients, args.grad_bound)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        ################################################################################################################
        acc1, acc5 = utils.accuracy(tf.nn.softmax(logits, axis=-1), tf.squeeze(labels), topk=(1, 5))
        batch_size = images.shape[0]
        objs.update(loss.numpy(), batch_size)
        top1.update(acc1, batch_size)
        top5.update(acc5, batch_size)
        if (step + 1) % 100 == 0:
            print('train step {} loss {} top1 {} top5 {}'.format(step + 1, objs.avg, top1.avg, top5.avg))
            logging.info('train step %03d loss %e top1 %f top5 %f', step + 1, objs.avg, top1.avg, top5.avg)
    return top1.avg, objs.avg, global_step
def valid(valid_ds, model, criterion, classes=10):
    """Evaluate *model* over *valid_ds* without gradient updates.

    Returns:
        Tuple (top1_avg, loss_avg) accumulated over the whole dataset.
    """
    loss_meter = utils.AvgMeter()
    top1_meter = utils.AvgMeter()
    top5_meter = utils.AvgMeter()
    for step, (images, labels) in enumerate(valid_ds):
        logits, _ = model(images, training=False)
        targets = tf.one_hot(tf.squeeze(labels), depth=classes)
        batch_loss = criterion(targets, logits)
        acc1, acc5 = utils.accuracy(tf.nn.softmax(logits, axis=-1), tf.squeeze(labels), topk=(1, 5))
        n = images.shape[0]
        loss_meter.update(batch_loss.numpy(), n)
        top1_meter.update(acc1, n)
        top5_meter.update(acc5, n)
        if (step + 1) % 100 == 0:
            print('valid step {} loss {} top1 {} top5 {}'.format(step + 1, loss_meter.avg, top1_meter.avg, top5_meter.avg))
            logging.info('valid step %03d %e %f %f', step + 1, loss_meter.avg, top1_meter.avg, top5_meter.avg)
    return top1_meter.avg, loss_meter.avg
def train_cifar10():
    """Train NASNetworkCIFAR on CIFAR-10 end to end.

    Builds the model and optimizer, restores the latest checkpoint when one
    exists, trains for ``args.epochs`` epochs (checkpointing each epoch), and
    finally plots the accuracy/loss curves.
    """
    logging.info("Args = %s", args)
    np.random.seed(args.seed)
    tf.random.set_seed(args.seed)
    # Checkpointed training state: step/epoch counters and best test accuracy.
    global_step = tf.Variable(initial_value=0, trainable=False, dtype=tf.int32)
    epoch = tf.Variable(initial_value=0, trainable=False, dtype=tf.int32)
    best_acc_top1 = tf.Variable(initial_value=0.0, trainable=False, dtype=tf.float32)
    ################################################ model setup #######################################################
    train_ds, test_ds = utils.load_cifar10(args.batch_size, args.cutout_size)
    total_steps = int(np.ceil(50000 / args.batch_size)) * args.epochs
    model = NASNetworkCIFAR(classes=10,
                            reduce_distance=args.cells,
                            num_nodes=args.nodes,
                            channels=args.channels,
                            keep_prob=args.keep_prob,
                            drop_path_keep_prob=args.drop_path_keep_prob,
                            use_aux_head=args.use_aux_head,
                            steps=total_steps,
                            arch=args.arch)
    # One dummy forward pass so the model builds its variables before summary().
    temp_ = tf.random.uniform((64, 32, 32, 3), minval=0, maxval=1, dtype=tf.float32)
    temp_ = model(temp_, step=1, training=True)
    model.summary()
    model_size = utils.count_parameters_in_MB(model)
    print("param size = {} MB".format(model_size))
    logging.info("param size = %fMB", model_size)
    criterion = keras.losses.CategoricalCrossentropy(from_logits=True)
    # Cosine-annealed learning rate over the full training run.
    learning_rate = keras.experimental.CosineDecay(initial_learning_rate=args.initial_lr,
                                                   decay_steps=total_steps, alpha=0.0001)
    optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)
    ########################################## restore checkpoint ######################################################
    if args.train_from_scratch:
        utils.clean_dir(args.model_dir)
    checkpoint_path = os.path.join(args.model_dir, 'checkpoints')
    ckpt = tf.train.Checkpoint(model=model,
                               optimizer=optimizer,
                               global_step=global_step,
                               epoch=epoch,
                               best_acc_top1=best_acc_top1)
    ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=3)
    # If a checkpoint exists, restore the latest checkpoint.
    if ckpt_manager.latest_checkpoint:
        ckpt.restore(ckpt_manager.latest_checkpoint)
        print('Latest checkpoint restored!!')
    ############################################# training process #####################################################
    acc_train_result = []
    loss_train_result = []
    acc_test_result = []
    loss_test_result = []
    while epoch.numpy() < args.epochs:
        print('epoch {} lr {}'.format(epoch.numpy(), optimizer._decayed_lr(tf.float32)))
        train_acc, train_loss, step = train(train_ds, model, optimizer, global_step, criterion, classes=10)
        test_acc, test_loss = valid(test_ds, model, criterion, classes=10)
        acc_train_result.append(train_acc)
        loss_train_result.append(train_loss)
        acc_test_result.append(test_acc)
        loss_test_result.append(test_loss)
        logging.info('epoch %d lr %e', epoch.numpy(), optimizer._decayed_lr(tf.float32))
        logging.info(acc_train_result)
        logging.info(loss_train_result)
        logging.info(acc_test_result)
        logging.info(loss_test_result)
        is_best = False
        if test_acc > best_acc_top1:
            # BUG FIX: assign into the checkpointed Variable instead of
            # rebinding the Python name to a float, which silently detached
            # best_acc_top1 from the tf.train.Checkpoint state.
            best_acc_top1.assign(test_acc)
            is_best = True
        epoch.assign_add(1)
        if (epoch.numpy() + 1) % 1 == 0:
            ckpt_save_path = ckpt_manager.save()
            print('Saving checkpoint for epoch {} at {}'.format(epoch.numpy() + 1, ckpt_save_path))
        if is_best:
            # Placeholder: exporting the best model is not implemented yet.
            pass
    utils.plot_single_list(acc_train_result, x_label='epochs', y_label='acc', file_name='acc_train')
    utils.plot_single_list(loss_train_result, x_label='epochs', y_label='loss', file_name='loss_train')
    utils.plot_single_list(acc_test_result, x_label='epochs', y_label='acc', file_name='acc_test')
    utils.plot_single_list(loss_test_result, x_label='epochs', y_label='loss', file_name='loss_test')
if __name__ == '__main__':
    # `time` is already imported at module level; the redundant in-block
    # re-import was removed. Times the whole training run.
    start_time = time.time()
    train_cifar10()
    print("--- %s seconds ---" % (time.time() - start_time))
| [
"tensorflow.train.Checkpoint",
"model.NASNetworkCIFAR",
"tensorflow.GradientTape",
"tensorflow.nn.softmax",
"tensorflow.keras.losses.CategoricalCrossentropy",
"utils.AvgMeter",
"utils.count_parameters_in_MB",
"logging.info",
"tensorflow.clip_by_global_norm",
"argparse.ArgumentParser",
"tensorflo... | [((346, 371), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (369, 371), False, 'import argparse\n'), ((1616, 1652), 'utils.create_exp_dir', 'utils.create_exp_dir', (['args.model_dir'], {}), '(args.model_dir)\n', (1636, 1652), False, 'import utils\n'), ((1692, 1803), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO', 'format': 'log_format', 'datefmt': '"""%m/%d %I:%M:%S %p"""'}), "(stream=sys.stdout, level=logging.INFO, format=\n log_format, datefmt='%m/%d %I:%M:%S %p')\n", (1711, 1803), False, 'import logging\n'), ((1886, 1902), 'utils.AvgMeter', 'utils.AvgMeter', ([], {}), '()\n', (1900, 1902), False, 'import utils\n'), ((1914, 1930), 'utils.AvgMeter', 'utils.AvgMeter', ([], {}), '()\n', (1928, 1930), False, 'import utils\n'), ((1942, 1958), 'utils.AvgMeter', 'utils.AvgMeter', ([], {}), '()\n', (1956, 1958), False, 'import utils\n'), ((3574, 3590), 'utils.AvgMeter', 'utils.AvgMeter', ([], {}), '()\n', (3588, 3590), False, 'import utils\n'), ((3602, 3618), 'utils.AvgMeter', 'utils.AvgMeter', ([], {}), '()\n', (3616, 3618), False, 'import utils\n'), ((3630, 3646), 'utils.AvgMeter', 'utils.AvgMeter', ([], {}), '()\n', (3644, 3646), False, 'import utils\n'), ((4384, 4415), 'logging.info', 'logging.info', (['"""Args = %s"""', 'args'], {}), "('Args = %s', args)\n", (4396, 4415), False, 'import logging\n'), ((4420, 4445), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (4434, 4445), True, 'import numpy as np\n'), ((4450, 4479), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['args.seed'], {}), '(args.seed)\n', (4468, 4479), True, 'import tensorflow as tf\n'), ((4499, 4560), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': '(0)', 'trainable': '(False)', 'dtype': 'tf.int32'}), '(initial_value=0, trainable=False, dtype=tf.int32)\n', (4510, 4560), True, 'import tensorflow as tf\n'), ((4573, 4634), 'tensorflow.Variable', 'tf.Variable', 
([], {'initial_value': '(0)', 'trainable': '(False)', 'dtype': 'tf.int32'}), '(initial_value=0, trainable=False, dtype=tf.int32)\n', (4584, 4634), True, 'import tensorflow as tf\n'), ((4655, 4720), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': '(0.0)', 'trainable': '(False)', 'dtype': 'tf.float32'}), '(initial_value=0.0, trainable=False, dtype=tf.float32)\n', (4666, 4720), True, 'import tensorflow as tf\n'), ((4867, 4920), 'utils.load_cifar10', 'utils.load_cifar10', (['args.batch_size', 'args.cutout_size'], {}), '(args.batch_size, args.cutout_size)\n', (4885, 4920), False, 'import utils\n'), ((5004, 5258), 'model.NASNetworkCIFAR', 'NASNetworkCIFAR', ([], {'classes': '(10)', 'reduce_distance': 'args.cells', 'num_nodes': 'args.nodes', 'channels': 'args.channels', 'keep_prob': 'args.keep_prob', 'drop_path_keep_prob': 'args.drop_path_keep_prob', 'use_aux_head': 'args.use_aux_head', 'steps': 'total_steps', 'arch': 'args.arch'}), '(classes=10, reduce_distance=args.cells, num_nodes=args.\n nodes, channels=args.channels, keep_prob=args.keep_prob,\n drop_path_keep_prob=args.drop_path_keep_prob, use_aux_head=args.\n use_aux_head, steps=total_steps, arch=args.arch)\n', (5019, 5258), False, 'from model import NASNetworkCIFAR\n'), ((5482, 5554), 'tensorflow.random.uniform', 'tf.random.uniform', (['(64, 32, 32, 3)'], {'minval': '(0)', 'maxval': '(1)', 'dtype': 'tf.float32'}), '((64, 32, 32, 3), minval=0, maxval=1, dtype=tf.float32)\n', (5499, 5554), True, 'import tensorflow as tf\n'), ((5637, 5672), 'utils.count_parameters_in_MB', 'utils.count_parameters_in_MB', (['model'], {}), '(model)\n', (5665, 5672), False, 'import utils\n'), ((5728, 5773), 'logging.info', 'logging.info', (['"""param size = %fMB"""', 'model_size'], {}), "('param size = %fMB', model_size)\n", (5740, 5773), False, 'import logging\n'), ((5791, 5845), 'tensorflow.keras.losses.CategoricalCrossentropy', 'keras.losses.CategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', 
(5827, 5845), True, 'import tensorflow.keras as keras\n'), ((5866, 5978), 'tensorflow.keras.experimental.CosineDecay', 'keras.experimental.CosineDecay', ([], {'initial_learning_rate': 'args.initial_lr', 'decay_steps': 'total_steps', 'alpha': '(0.0001)'}), '(initial_learning_rate=args.initial_lr,\n decay_steps=total_steps, alpha=0.0001)\n', (5896, 5978), True, 'import tensorflow.keras as keras\n'), ((6235, 6287), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (6258, 6287), True, 'import tensorflow as tf\n'), ((6506, 6549), 'os.path.join', 'os.path.join', (['args.model_dir', '"""checkpoints"""'], {}), "(args.model_dir, 'checkpoints')\n", (6518, 6549), False, 'import os\n'), ((6561, 6686), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'model': 'model', 'optimizer': 'optimizer', 'global_step': 'global_step', 'epoch': 'epoch', 'best_acc_top1': 'best_acc_top1'}), '(model=model, optimizer=optimizer, global_step=\n global_step, epoch=epoch, best_acc_top1=best_acc_top1)\n', (6580, 6686), True, 'import tensorflow as tf\n'), ((6825, 6889), 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (['ckpt', 'checkpoint_path'], {'max_to_keep': '(3)'}), '(ckpt, checkpoint_path, max_to_keep=3)\n', (6851, 6889), True, 'import tensorflow as tf\n'), ((8433, 8533), 'utils.plot_single_list', 'utils.plot_single_list', (['acc_train_result'], {'x_label': '"""epochs"""', 'y_label': '"""acc"""', 'file_name': '"""acc_train"""'}), "(acc_train_result, x_label='epochs', y_label='acc',\n file_name='acc_train')\n", (8455, 8533), False, 'import utils\n'), ((8534, 8637), 'utils.plot_single_list', 'utils.plot_single_list', (['loss_train_result'], {'x_label': '"""epochs"""', 'y_label': '"""loss"""', 'file_name': '"""loss_train"""'}), "(loss_train_result, x_label='epochs', y_label='loss',\n file_name='loss_train')\n", (8556, 8637), False, 'import utils\n'), ((8638, 8736), 
'utils.plot_single_list', 'utils.plot_single_list', (['acc_test_result'], {'x_label': '"""epochs"""', 'y_label': '"""acc"""', 'file_name': '"""acc_test"""'}), "(acc_test_result, x_label='epochs', y_label='acc',\n file_name='acc_test')\n", (8660, 8736), False, 'import utils\n'), ((8737, 8838), 'utils.plot_single_list', 'utils.plot_single_list', (['loss_test_result'], {'x_label': '"""epochs"""', 'y_label': '"""loss"""', 'file_name': '"""loss_test"""'}), "(loss_test_result, x_label='epochs', y_label='loss',\n file_name='loss_test')\n", (8759, 8838), False, 'import utils\n'), ((8896, 8907), 'time.time', 'time.time', ([], {}), '()\n', (8905, 8907), False, 'import time\n'), ((6451, 6482), 'utils.clean_dir', 'utils.clean_dir', (['args.model_dir'], {}), '(args.model_dir)\n', (6466, 6482), False, 'import utils\n'), ((7899, 7929), 'logging.info', 'logging.info', (['acc_train_result'], {}), '(acc_train_result)\n', (7911, 7929), False, 'import logging\n'), ((7938, 7969), 'logging.info', 'logging.info', (['loss_train_result'], {}), '(loss_train_result)\n', (7950, 7969), False, 'import logging\n'), ((7978, 8007), 'logging.info', 'logging.info', (['acc_test_result'], {}), '(acc_test_result)\n', (7990, 8007), False, 'import logging\n'), ((8016, 8046), 'logging.info', 'logging.info', (['loss_test_result'], {}), '(loss_test_result)\n', (8028, 8046), False, 'import logging\n'), ((2061, 2078), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (2076, 2078), True, 'import tensorflow as tf\n'), ((2721, 2758), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['gradients', '(15)'], {}), '(gradients, 15)\n', (2743, 2758), True, 'import tensorflow as tf\n'), ((2994, 3024), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (3007, 3024), True, 'import tensorflow as tf\n'), ((3026, 3044), 'tensorflow.squeeze', 'tf.squeeze', (['labels'], {}), '(labels)\n', (3036, 3044), True, 'import tensorflow as tf\n'), ((3370, 3469), 
'logging.info', 'logging.info', (['"""train step %03d loss %e top1 %f top5 %f"""', '(step + 1)', 'objs.avg', 'top1.avg', 'top5.avg'], {}), "('train step %03d loss %e top1 %f top5 %f', step + 1, objs.avg,\n top1.avg, top5.avg)\n", (3382, 3469), False, 'import logging\n'), ((3868, 3898), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (3881, 3898), True, 'import tensorflow as tf\n'), ((3900, 3918), 'tensorflow.squeeze', 'tf.squeeze', (['labels'], {}), '(labels)\n', (3910, 3918), True, 'import tensorflow as tf\n'), ((4244, 4329), 'logging.info', 'logging.info', (['"""valid step %03d %e %f %f"""', '(step + 1)', 'objs.avg', 'top1.avg', 'top5.avg'], {}), "('valid step %03d %e %f %f', step + 1, objs.avg, top1.avg, top5.avg\n )\n", (4256, 4329), False, 'import logging\n'), ((4943, 4975), 'numpy.ceil', 'np.ceil', (['(50000 / args.batch_size)'], {}), '(50000 / args.batch_size)\n', (4950, 4975), True, 'import numpy as np\n'), ((3787, 3805), 'tensorflow.squeeze', 'tf.squeeze', (['labels'], {}), '(labels)\n', (3797, 3805), True, 'import tensorflow as tf\n'), ((8962, 8973), 'time.time', 'time.time', ([], {}), '()\n', (8971, 8973), False, 'import time\n'), ((2202, 2220), 'tensorflow.squeeze', 'tf.squeeze', (['labels'], {}), '(labels)\n', (2212, 2220), True, 'import tensorflow as tf\n'), ((2333, 2351), 'tensorflow.squeeze', 'tf.squeeze', (['labels'], {}), '(labels)\n', (2343, 2351), True, 'import tensorflow as tf\n'), ((2511, 2523), 'tensorflow.square', 'tf.square', (['x'], {}), '(x)\n', (2520, 2523), True, 'import tensorflow as tf\n')] |
from ethronsoft.gcspypi.package.package_manager import PackageManager
from ethronsoft.gcspypi.utilities.console import Console
from ethronsoft.gcspypi.parsers.commons import init_repository
def handle_(config, data):
    """List repository contents for the ``list`` CLI sub-command.

    :param config: dict-like CLI configuration; reads "verbose", "repository"
        and "python3".
    :param data: parsed arguments; "package" is the (possibly empty) package
        name to filter by.
    """
    with Console(verbose=config.get("verbose", False), exit_on_error=True) as c:
        repo = init_repository(c, config["repository"])
        # installer is not needed for a read-only listing
        pkg_mgr = PackageManager(repo, console=c, installer=None, is_python3 = config.get("python3", False))
        for path in sorted(pkg_mgr.list_items(data["package"], from_cache=True)):
            # print only the object name, stripping the repository path prefix
            c.output(path.split("/")[-1])
class ListParser(object):
def __init__(self, subparsers):
self.name = "list"
list_parser = subparsers.add_parser(self.name,
description="""Displays all versions of a certain package
or all content of the repository if package name is omitted""")
list_parser.add_argument("package", nargs="?", default="", help="Package Name")
def handle(self, config, data):
handle_(config, data) | [
"ethronsoft.gcspypi.parsers.commons.init_repository"
] | [((314, 354), 'ethronsoft.gcspypi.parsers.commons.init_repository', 'init_repository', (['c', "config['repository']"], {}), "(c, config['repository'])\n", (329, 354), False, 'from ethronsoft.gcspypi.parsers.commons import init_repository\n')] |
from elasticsearch_dsl import *
import os
from glob import glob
import json
import re
from . import to_zh_cn
class Poet(Document):
    """Elasticsearch document mapping for a poem (index ``poetry_shi``)."""
    # dynasty/author are stored with the default analyzer
    dynasty = Text()
    author = Text()
    # jieba analyzers provide Chinese word segmentation for full-text search
    title = Text(analyzer='jieba_index', search_analyzer='jieba_search')
    paragraphs = Text(analyzer='jieba_index', search_analyzer='jieba_search')
    class Index:
        # index name and shard/replica layout used when Poet.init() is called
        name = 'poetry_shi'
        settings = {
            "number_of_shards": 3,
            "number_of_replicas": 1
        }
class Author(Document):
    """Elasticsearch document mapping for a poet's biography (index ``author``)."""
    name = Text()
    # jieba analyzers provide Chinese word segmentation for full-text search
    desc = Text(analyzer='jieba_index', search_analyzer='jieba_search')
    class Index:
        # index name and shard/replica layout used when Author.init() is called
        name = 'author'
        settings = {
            "number_of_shards": 3,
            "number_of_replicas": 1
        }
def _index_files(files, patt, doc_cls):
    """Load each JSON file, tag every item with its dynasty, and save it as *doc_cls*.

    The dynasty is the middle component of the file name
    (e.g. ``poet.tang.json`` -> ``tang``), extracted via *patt*.
    The file handle is closed before indexing starts.
    """
    for file in files:
        with open(file, 'r') as f:
            data = json.load(f)
        dynasty = patt.findall(os.path.basename(file))[0]
        for item in data:
            item["dynasty"] = dynasty
            # to_zh_cn presumably converts traditional to simplified
            # characters -- confirm against its definition
            doc_cls(**to_zh_cn(item)).save()


def do_es_import():
    """
    Import poem and author data from the ``json/`` directory next to this
    module into Elasticsearch.
    """
    Poet.init()
    Author.init()
    # matches "<prefix>.<dynasty>." at the start of a file name
    patt = re.compile(r'^[a-zA-Z]+\.([a-zA-Z]+)\.')
    cur_dir = os.path.dirname(os.path.abspath(__file__))
    _index_files(glob("{}/json/poet.*.json".format(cur_dir)), patt, Poet)
    _index_files(glob("{}/json/authors.*.json".format(cur_dir)), patt, Author)
| [
"os.path.abspath",
"json.load",
"os.path.basename",
"re.compile"
] | [((849, 890), 're.compile', 're.compile', (['"""^[a-zA-Z]+\\\\.([a-zA-Z]+)\\\\."""'], {}), "('^[a-zA-Z]+\\\\.([a-zA-Z]+)\\\\.')\n", (859, 890), False, 'import re\n'), ((921, 946), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (936, 946), False, 'import os\n'), ((1092, 1104), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1101, 1104), False, 'import json\n'), ((1458, 1470), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1467, 1470), False, 'import json\n'), ((1140, 1162), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (1156, 1162), False, 'import os\n'), ((1506, 1528), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (1522, 1528), False, 'import os\n')] |
from PIL import ImageGrab, Image
import cv2 as cv
import numpy as np
import match.template_matching as tm
import match.bilging as b
from mss import mss
def grab_screen():
    """Capture the full screen and return it as a BGR image (numpy array)."""
    img_src = ImageGrab.grab()
    # PIL delivers RGB; OpenCV expects BGR channel order
    return cv.cvtColor(np.array(img_src.convert('RGB')), cv.COLOR_RGB2BGR)
class ScreenGrabber(object):
    """Frame source that captures the live screen via PIL.ImageGrab."""
    def grab(self):
        return grab_screen()
class ScreenshotGrabber(object):
    """Frame source that replays a static screenshot file (offline testing)."""
    def grab(self):
        # NOTE(review): path is relative to the current working directory;
        # cv.imread returns None if the file is missing
        return cv.imread('screenshot.png')
class FastScreenGrabber(object):
    """Frame source using mss, which captures faster than PIL.ImageGrab."""
    def grab(self):
        with mss() as sct:
            # monitors[1] is the first physical display
            # (monitors[0] is the union of all displays)
            monitor = sct.monitors[1]
            sct_img = sct.grab(monitor)
            # Convert to PIL/Pillow Image, then to a BGR numpy array for OpenCV
            return cv.cvtColor(np.array(Image.frombytes('RGB', sct_img.size, sct_img.bgra, 'raw', 'BGRX').convert('RGB')), cv.COLOR_RGB2BGR)
# Template tuples: (label, image path, apply circular mask, match threshold).
# Board pieces (A-G) match tightly (0.9); creatures (X, Y, Z) get a looser 0.5.
paths = [("A", './images/whiteblue_square.png', True, 0.9),
         ("B", './images/greenblue_diamond.png', True, 0.9),
         ("C", './images/lightblue_circle.png', True, 0.9),
         ("D", './images/lightyellow_circle.png', True, 0.9),
         ("E", './images/darkblue_square.png', True, 0.9),
         ("F", './images/lightblue_square.png', True, 0.9),
         ("G", './images/lightblue_diamond.png', True, 0.9),
         ("X", './images/puffer.png', False, 0.5),
         ("Y", './images/crab.png', False, 0.5),
         ("Z", './images/jellyfish.png', False, 0.5)]
# Build 45x45 template-matching patterns for each tile type
patterns = [tm.build_pattern(p, n, shape=(45, 45), circle_mask=c, threshold=t)
            for n, p, c, t in paths]
# Track the bilging board using a static screenshot as the frame source
b.track_board_state(ScreenshotGrabber(), patterns)
| [
"mss.mss",
"PIL.ImageGrab.grab",
"match.template_matching.build_pattern",
"PIL.Image.frombytes",
"cv2.imread"
] | [((187, 203), 'PIL.ImageGrab.grab', 'ImageGrab.grab', ([], {}), '()\n', (201, 203), False, 'from PIL import ImageGrab, Image\n'), ((1392, 1458), 'match.template_matching.build_pattern', 'tm.build_pattern', (['p', 'n'], {'shape': '(45, 45)', 'circle_mask': 'c', 'threshold': 't'}), '(p, n, shape=(45, 45), circle_mask=c, threshold=t)\n', (1408, 1458), True, 'import match.template_matching as tm\n'), ((429, 456), 'cv2.imread', 'cv.imread', (['"""screenshot.png"""'], {}), "('screenshot.png')\n", (438, 456), True, 'import cv2 as cv\n'), ((525, 530), 'mss.mss', 'mss', ([], {}), '()\n', (528, 530), False, 'from mss import mss\n'), ((699, 764), 'PIL.Image.frombytes', 'Image.frombytes', (['"""RGB"""', 'sct_img.size', 'sct_img.bgra', '"""raw"""', '"""BGRX"""'], {}), "('RGB', sct_img.size, sct_img.bgra, 'raw', 'BGRX')\n", (714, 764), False, 'from PIL import ImageGrab, Image\n')] |
"""
---
title: Deep Convolutional Generative Adversarial Networks (DCGAN)
summary: A simple PyTorch implementation/tutorial of Deep Convolutional Generative Adversarial Networks (DCGAN).
---
# Deep Convolutional Generative Adversarial Networks (DCGAN)
This is a [PyTorch](https://pytorch.org) implementation of paper
[Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks](https://papers.labml.ai/paper/1511.06434).
This implementation is based on the [PyTorch DCGAN Tutorial](https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html).
"""
import torch.nn as nn
from labml import experiment
from labml.configs import calculate
from labml_helpers.module import Module
from labml_nn.gan.original.experiment import Configs
class Generator(Module):
    """
    ### Convolutional Generator Network

    This is similar to the de-convolutional network used for CelebA faces,
    but modified for MNIST images. Maps a 100-dim latent vector to a
    $28 \times 28$ single-channel image in $[-1, 1]$ (tanh output).

    <img src="https://pytorch.org/tutorials/_images/dcgan_generator.png" style="max-width:90%" />
    """
    def __init__(self):
        super().__init__()
        # The input is $1 \times 1$ with 100 channels
        self.layers = nn.Sequential(
            # This gives $3 \times 3$ output
            nn.ConvTranspose2d(100, 1024, 3, 1, 0, bias=False),
            nn.BatchNorm2d(1024),
            nn.ReLU(True),
            # This gives $7 \times 7$
            nn.ConvTranspose2d(1024, 512, 3, 2, 0, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU(True),
            # This gives $14 \times 14$
            nn.ConvTranspose2d(512, 256, 4, 2, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(True),
            # This gives $28 \times 28$
            nn.ConvTranspose2d(256, 1, 4, 2, 1, bias=False),
            nn.Tanh()
        )
        # DCGAN initialisation: N(0, 0.02) convs, N(1, 0.02) batch-norms
        self.apply(_weights_init)
    def forward(self, x):
        # Change from shape `[batch_size, 100]` to `[batch_size, 100, 1, 1]`
        x = x.unsqueeze(-1).unsqueeze(-1)
        x = self.layers(x)
        # Returns shape `[batch_size, 1, 28, 28]`
        return x
class Discriminator(Module):
    """
    ### Convolutional Discriminator Network

    Mirrors the generator: strided convolutions shrink a $28 \times 28$
    single-channel image down to a single real/fake logit per sample.
    """
    def __init__(self):
        super().__init__()
        # The input is $28 \times 28$ with one channel
        self.layers = nn.Sequential(
            # This gives $14 \times 14$
            nn.Conv2d(1, 256, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # This gives $7 \times 7$
            nn.Conv2d(256, 512, 4, 2, 1, bias=False),
            nn.BatchNorm2d(512),
            nn.LeakyReLU(0.2, inplace=True),
            # This gives $3 \times 3$
            nn.Conv2d(512, 1024, 3, 2, 0, bias=False),
            nn.BatchNorm2d(1024),
            nn.LeakyReLU(0.2, inplace=True),
            # This gives $1 \times 1$
            nn.Conv2d(1024, 1, 3, 1, 0, bias=False),
        )
        # DCGAN initialisation: N(0, 0.02) convs, N(1, 0.02) batch-norms
        self.apply(_weights_init)
    def forward(self, x):
        x = self.layers(x)
        # Flatten to `[batch_size, 1]` logits
        return x.view(x.shape[0], -1)
def _weights_init(m):
    """DCGAN weight initialisation, applied recursively via ``Module.apply``.

    Conv(Transpose) layers get N(0, 0.02) weights; BatchNorm layers get
    N(1, 0.02) weights and zero bias. Any other module is left untouched.
    """
    layer_name = type(m).__name__
    if 'Conv' in layer_name:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm' in layer_name:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
# We import the [simple gan experiment](../original/experiment.html) and change the
# generator and discriminator networks.
# These register the CNN variants under the 'cnn' option key so that
# experiment.configs can select them by name.
calculate(Configs.generator, 'cnn', lambda c: Generator().to(c.device))
calculate(Configs.discriminator, 'cnn', lambda c: Discriminator().to(c.device))
def main():
    """Create and run the MNIST DCGAN experiment with CNN generator/discriminator."""
    conf = Configs()
    experiment.create(name='mnist_dcgan')
    # select the CNN models registered via `calculate` and soften real/fake labels
    experiment.configs(conf,
                       {'discriminator': 'cnn',
                        'generator': 'cnn',
                        'label_smoothing': 0.01})
    with experiment.start():
        conf.run()
if __name__ == '__main__':
main()
| [
"torch.nn.ConvTranspose2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"labml.experiment.start",
"torch.nn.Tanh",
"torch.nn.LeakyReLU",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"labml_nn.gan.original.experiment.Configs",
"labml.experiment.configs",
"labml.experiment.create",
"torch.nn.init.... | [((3588, 3597), 'labml_nn.gan.original.experiment.Configs', 'Configs', ([], {}), '()\n', (3595, 3597), False, 'from labml_nn.gan.original.experiment import Configs\n'), ((3602, 3639), 'labml.experiment.create', 'experiment.create', ([], {'name': '"""mnist_dcgan"""'}), "(name='mnist_dcgan')\n", (3619, 3639), False, 'from labml import experiment\n'), ((3644, 3743), 'labml.experiment.configs', 'experiment.configs', (['conf', "{'discriminator': 'cnn', 'generator': 'cnn', 'label_smoothing': 0.01}"], {}), "(conf, {'discriminator': 'cnn', 'generator': 'cnn',\n 'label_smoothing': 0.01})\n", (3662, 3743), False, 'from labml import experiment\n'), ((3108, 3149), 'torch.nn.init.normal_', 'nn.init.normal_', (['m.weight.data', '(0.0)', '(0.02)'], {}), '(m.weight.data, 0.0, 0.02)\n', (3123, 3149), True, 'import torch.nn as nn\n'), ((3820, 3838), 'labml.experiment.start', 'experiment.start', ([], {}), '()\n', (3836, 3838), False, 'from labml import experiment\n'), ((1265, 1315), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(100)', '(1024)', '(3)', '(1)', '(0)'], {'bias': '(False)'}), '(100, 1024, 3, 1, 0, bias=False)\n', (1283, 1315), True, 'import torch.nn as nn\n'), ((1329, 1349), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(1024)'], {}), '(1024)\n', (1343, 1349), True, 'import torch.nn as nn\n'), ((1363, 1376), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1370, 1376), True, 'import torch.nn as nn\n'), ((1428, 1478), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(1024)', '(512)', '(3)', '(2)', '(0)'], {'bias': '(False)'}), '(1024, 512, 3, 2, 0, bias=False)\n', (1446, 1478), True, 'import torch.nn as nn\n'), ((1492, 1511), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(512)'], {}), '(512)\n', (1506, 1511), True, 'import torch.nn as nn\n'), ((1525, 1538), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1532, 1538), True, 'import torch.nn as nn\n'), ((1592, 1641), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', 
(['(512)', '(256)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(512, 256, 4, 2, 1, bias=False)\n', (1610, 1641), True, 'import torch.nn as nn\n'), ((1655, 1674), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (1669, 1674), True, 'import torch.nn as nn\n'), ((1688, 1701), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1695, 1701), True, 'import torch.nn as nn\n'), ((1755, 1802), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(256)', '(1)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(256, 1, 4, 2, 1, bias=False)\n', (1773, 1802), True, 'import torch.nn as nn\n'), ((1816, 1825), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (1823, 1825), True, 'import torch.nn as nn\n'), ((2348, 2386), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(256)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(1, 256, 4, 2, 1, bias=False)\n', (2357, 2386), True, 'import torch.nn as nn\n'), ((2400, 2431), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (2412, 2431), True, 'import torch.nn as nn\n'), ((2483, 2523), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(512)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(256, 512, 4, 2, 1, bias=False)\n', (2492, 2523), True, 'import torch.nn as nn\n'), ((2537, 2556), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(512)'], {}), '(512)\n', (2551, 2556), True, 'import torch.nn as nn\n'), ((2570, 2601), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (2582, 2601), True, 'import torch.nn as nn\n'), ((2653, 2694), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(1024)', '(3)', '(2)', '(0)'], {'bias': '(False)'}), '(512, 1024, 3, 2, 0, bias=False)\n', (2662, 2694), True, 'import torch.nn as nn\n'), ((2708, 2728), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(1024)'], {}), '(1024)\n', (2722, 2728), True, 'import torch.nn as nn\n'), ((2742, 2773), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, 
inplace=True)\n', (2754, 2773), True, 'import torch.nn as nn\n'), ((2825, 2864), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1024)', '(1)', '(3)', '(1)', '(0)'], {'bias': '(False)'}), '(1024, 1, 3, 1, 0, bias=False)\n', (2834, 2864), True, 'import torch.nn as nn\n'), ((3202, 3243), 'torch.nn.init.normal_', 'nn.init.normal_', (['m.weight.data', '(1.0)', '(0.02)'], {}), '(m.weight.data, 1.0, 0.02)\n', (3217, 3243), True, 'import torch.nn as nn\n'), ((3252, 3285), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias.data', '(0)'], {}), '(m.bias.data, 0)\n', (3269, 3285), True, 'import torch.nn as nn\n')] |
import os
import sys
import pandas as pd
import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt
def read_udp(file_path):
    """Parse a UDP ping log and compute packet loss and round-trip times.

    The file is CSV with one leading line (skipped) and rows of
    ``mode,seq,stamp`` where mode is 'send' or 'rec', seq is the packet
    sequence number and stamp is a timestamp (presumably in seconds,
    judging by the x1000 conversion -- confirm against the log producer).

    :param file_path: path to the log file.
    :return: ``(loss_prob, rtt_array)`` -- the fraction of sent packets that
        never got a reply, and the round-trip times in milliseconds for
        those that did.
    """
    # Fixes: the original wrapped everything in a pointless `with open(...)`
    # (read_csv reopens the path itself), and used `error_bad_lines`, which
    # was removed in pandas 2.0; `on_bad_lines` is its replacement.
    data = pd.read_csv(file_path, sep=",", engine='python', on_bad_lines='skip', skiprows=1)
    data.columns = ['mode', 'seq', 'stamp']
    data_dict = {'send': {}, 'rec': {}}
    for _, row in data.iterrows():
        data_dict[row['mode']][row['seq']] = row['stamp']
    loss = 0
    rtt_array = []
    for seq in data_dict['send']:
        try:
            # report RTT in milliseconds
            rtt_array.append((data_dict['rec'][seq] - data_dict['send'][seq]) * 1000)
        except KeyError:
            # sent but never received back
            loss += 1
    # last sequence number == total packets sent
    total_send = data['seq'].iloc[-1]
    return loss / total_send, rtt_array
DATADIR = "../data"
cluster_name = ["bbcluster", "erdc"]
# One boxplot panel per cluster
figure, axes = plt.subplots(1, 2)
figure.suptitle("UDP")
# NOTE(review): dict.fromkeys shares ONE dict instance across all keys, so
# PING['bbcluster'] and PING['erdc'] would alias each other. Only the
# commented-out code below writes to PING, but this is a trap if revived.
PING = dict.fromkeys(cluster_name, {})
for i in range(2):
    cluster = cluster_name[i]
    cluster_dir = os.path.join(DATADIR, cluster)
    axes[i].set_title(cluster)
    data = []
    labels = []
    for root, dirs, files in os.walk(cluster_dir, topdown=False):
        for file_name in files:
            # file names look like "<mode>_<dst>.<ext>"; keep only UDP logs
            mode, stack = file_name.split("_")
            if mode == "udp":
                dst = stack.split(".")[0]
                file_path = os.path.join(cluster_dir, file_name)
                loss_prb, rtt_array = read_udp(file_path)
                # print(rtt_array)
                length = len(rtt_array)
                # rtt_outliar_removal = np.sort(rtt_array)[0: round(length*0.999)]
                # rtt_mean = np.mean(rtt_outliar_removal)
                # rtt_std = np.sqrt(np.var(rtt_outliar_removal))
                # rtt_conf = st.norm.interval(0.95, loc=rtt_mean, scale=rtt_std)
                # PING[cluster][dst] = (rtt_mean, rtt_conf)
                data.append(rtt_array)
                labels.append(dst)
    axes[i].boxplot(data, labels=labels, showfliers=True)
plt.savefig("udp-latency-nofilter.png")
| [
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"os.path.join",
"matplotlib.pyplot.subplots",
"os.walk"
] | [((914, 932), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (926, 932), True, 'import matplotlib.pyplot as plt\n'), ((2051, 2090), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""udp-latency-nofilter.png"""'], {}), "('udp-latency-nofilter.png')\n", (2062, 2090), True, 'import matplotlib.pyplot as plt\n'), ((1063, 1093), 'os.path.join', 'os.path.join', (['DATADIR', 'cluster'], {}), '(DATADIR, cluster)\n', (1075, 1093), False, 'import os\n'), ((1184, 1219), 'os.walk', 'os.walk', (['cluster_dir'], {'topdown': '(False)'}), '(cluster_dir, topdown=False)\n', (1191, 1219), False, 'import os\n'), ((236, 323), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {'sep': '""","""', 'engine': '"""python"""', 'error_bad_lines': '(False)', 'skiprows': '(1)'}), "(file_path, sep=',', engine='python', error_bad_lines=False,\n skiprows=1)\n", (247, 323), True, 'import pandas as pd\n'), ((1401, 1437), 'os.path.join', 'os.path.join', (['cluster_dir', 'file_name'], {}), '(cluster_dir, file_name)\n', (1413, 1437), False, 'import os\n')] |
import re
from collections.abc import MutableMapping
from typing import Dict, List
import markovify
import nltk
class RangeDict(MutableMapping):
    """A mapping built from ``range`` keys, expanded to plain integer keys.

    ``RangeDict({range(0, 3): 'a'})`` behaves like ``{0: 'a', 1: 'a', 2: 'a'}``;
    where ranges overlap, later entries win. After construction it is an
    ordinary mutable mapping with O(1) integer-key lookups.
    """

    def __init__(self, iterable: Dict):
        if not isinstance(iterable, dict):
            raise TypeError("You must pass a dictionary to RangeDict")
        self.store = {}
        for span, value in iterable.items():
            if not isinstance(span, range):
                raise TypeError("Your dictionary keys must be ranges")
            # expand the range so lookups become plain dict hits
            self.store.update((num, value) for num in span)

    def __getitem__(self, key):
        return self.store[key]

    def __setitem__(self, key, value):
        self.store[key] = value

    def __delitem__(self, key):
        del self.store[key]

    def __iter__(self):
        return iter(self.store)

    def __len__(self):
        return len(self.store)
class POSifiedText(markovify.Text):
    """
    A markovify.Text model that obeys sentence structure better than the naive model.

    Uses NLTK's part-of-speech tagger (nltk.pos_tag), which is VERY slow but seems to do a better
    job of parsing my text corpora than spaCy, which would be faster.
    """

    def word_split(self, sentence: str) -> List:
        # Split on markovify's default word pattern, then fuse each word with
        # its POS tag ("word::TAG") so the Markov states are tag-aware.
        words = re.split(self.word_split_pattern, sentence)
        words = ["::".join(tag) for tag in nltk.pos_tag(words)]
        return words

    def word_join(self, words: List) -> str:
        # Strip the "::TAG" suffix added by word_split before re-joining.
        sentence = " ".join(word.split("::")[0] for word in words)
        return sentence
| [
"re.split",
"nltk.pos_tag"
] | [((1311, 1354), 're.split', 're.split', (['self.word_split_pattern', 'sentence'], {}), '(self.word_split_pattern, sentence)\n', (1319, 1354), False, 'import re\n'), ((1398, 1417), 'nltk.pos_tag', 'nltk.pos_tag', (['words'], {}), '(words)\n', (1410, 1417), False, 'import nltk\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File : live_visualisation.py
# Author : <NAME> <<EMAIL>>
# Date : 10.04.2020
# Last Modified By: <NAME> <<EMAIL>>
from djitellopy.realtime_plot.RealtimePlotter import *
import redis
import numpy as np
import traceback
import matplotlib
# define data to get from db
# sensorMeshList = ['baro', 'h', 'tof', 'runtime']
# row = len(sensorMeshList)

# samples fetched from redis per sensor, and the GUI refresh period (s)
data_len = 300
plot_update_interval = 0.005

datasource = redis.StrictRedis(host='localhost', port=6379, db=0)

plt.figure()

# --- barometer subplot (top) ---
baro_axes = plt.subplot(3, 1, 1)
plt.title('tello_edu sensors')
baro_data_list = ['baro', 'runtime']
baro_ylim = [-47, -57]
baro_option = DataplotOption.TIMESTAMP_CUSTOM
baro_dataplot = DataPlot(2, data_len, option=baro_option)
baro_plot = RealtimePlotter(baro_dataplot)
baro_plot.config_plots(baro_axes, y_labels=baro_data_list, ylim=baro_ylim)
baro_plot.axes.set_xlabel('time in ms')
baro_plot.axes.set_ylabel('barometer in cmHg')

# --- time-of-flight subplot (middle) ---
tof_axes = plt.subplot(3, 1, 2)
tof_data_list = ['tof', 'runtime']
tof_ylim = [-10, 500]
tof_option = DataplotOption.TIMESTAMP_CUSTOM
tof_dataplot = DataPlot(2, data_len, option=tof_option)
tof_plot = RealtimePlotter(tof_dataplot)
tof_plot.config_plots(tof_axes, y_labels=tof_data_list, ylim=tof_ylim)
tof_plot.axes.set_xlabel('time in ms')
tof_plot.axes.set_ylabel('vertical distance in cm')

# --- height subplot (bottom) ---
h_axes = plt.subplot(3, 1, 3)
h_ylim = [-50, 300]
h_data_list = ['h', 'runtime']
h_option = DataplotOption.TIMESTAMP_CUSTOM
h_dataplot = DataPlot(2, data_len, option=h_option)
h_plot = RealtimePlotter(h_dataplot)
h_plot.config_plots(h_axes, y_labels=h_data_list, ylim=h_ylim)
h_plot.axes.set_xlabel('time in ms')
# BUG FIX: was `tof_plot.axes.set_ylabel('height in cm')`, which clobbered
# the tof subplot's label and left the height subplot unlabeled.
h_plot.axes.set_ylabel('height in cm')
def _refresh(plot, sensor_list):
    """Pull the latest samples for each sensor from redis and redraw *plot*.

    The last entry of *sensor_list* must be the timestamp key ('runtime');
    it becomes the x axis, all preceding entries become y series.
    """
    plot.dataplot.clear_data_regs()
    new_data = []
    for sensor in sensor_list:
        samples = datasource.lrange(sensor, 0, data_len)
        # reverse, bc first element is the newest (not the oldest like deque)
        samples.reverse()
        new_data.append(samples)
    try:
        # BUG FIX: `np.float` was removed in numpy 1.24; the resulting
        # AttributeError was silently printed every iteration and nothing
        # was ever plotted. Use the builtin `float` instead.
        y = np.array(new_data[:-1], dtype=float)
        x = np.array(new_data[-1], dtype=np.int64)
        plot.dataplot.append(y=y, x=x, single=False)
        plot.plot_data()
    except Exception as e:
        # best-effort: keep the GUI loop alive on malformed/missing data
        print(e)


if __name__ == "__main__":
    while True:
        # get new data from database and plot each subplot in turn
        _refresh(baro_plot, baro_data_list)
        _refresh(tof_plot, tof_data_list)
        _refresh(h_plot, h_data_list)
        plt.pause(plot_update_interval)
    # NOTE(review): unreachable -- the loop above never terminates
    input("Exit(press any key)?")
| [
"numpy.array",
"redis.StrictRedis"
] | [((484, 536), 'redis.StrictRedis', 'redis.StrictRedis', ([], {'host': '"""localhost"""', 'port': '(6379)', 'db': '(0)'}), "(host='localhost', port=6379, db=0)\n", (501, 536), False, 'import redis\n'), ((2214, 2253), 'numpy.array', 'np.array', (['new_data[:-1]'], {'dtype': 'np.float'}), '(new_data[:-1], dtype=np.float)\n', (2222, 2253), True, 'import numpy as np\n'), ((2275, 2313), 'numpy.array', 'np.array', (['new_data[-1]'], {'dtype': 'np.int64'}), '(new_data[-1], dtype=np.int64)\n', (2283, 2313), True, 'import numpy as np\n'), ((2873, 2912), 'numpy.array', 'np.array', (['new_data[:-1]'], {'dtype': 'np.float'}), '(new_data[:-1], dtype=np.float)\n', (2881, 2912), True, 'import numpy as np\n'), ((2933, 2971), 'numpy.array', 'np.array', (['new_data[-1]'], {'dtype': 'np.int64'}), '(new_data[-1], dtype=np.int64)\n', (2941, 2971), True, 'import numpy as np\n'), ((3524, 3563), 'numpy.array', 'np.array', (['new_data[:-1]'], {'dtype': 'np.float'}), '(new_data[:-1], dtype=np.float)\n', (3532, 3563), True, 'import numpy as np\n'), ((3582, 3620), 'numpy.array', 'np.array', (['new_data[-1]'], {'dtype': 'np.int64'}), '(new_data[-1], dtype=np.int64)\n', (3590, 3620), True, 'import numpy as np\n')] |
import unittest
from conans.errors import ConanException
from conans.model.username import Username
class UsernameTest(unittest.TestCase):
    """Validation rules for Conan usernames."""

    # BUG FIX: the method was named `username_test`, which does not match
    # unittest's default `test*` discovery prefix, so it never ran.
    def test_username(self):
        """Usernames allow letters, digits, hyphens and underscores, must not
        start with a digit or underscore, and are 1-30 characters long."""
        Username("userwith-hypens")
        self.assertRaises(ConanException, Username, "")
        self.assertRaises(ConanException, Username, "A"*31)
        Username("A"*30)
        self.assertRaises(ConanException, Username, "1A")
        self.assertRaises(ConanException, Username, "_A")
        Username("A1")
        Username("a_")
        self.assertRaises(ConanException, Username, "$$")

    # Backward-compatible alias for the old (undiscovered) method name.
    username_test = test_username
"conans.model.username.Username"
] | [((180, 207), 'conans.model.username.Username', 'Username', (['"""userwith-hypens"""'], {}), "('userwith-hypens')\n", (188, 207), False, 'from conans.model.username import Username\n'), ((332, 350), 'conans.model.username.Username', 'Username', (["('A' * 30)"], {}), "('A' * 30)\n", (340, 350), False, 'from conans.model.username import Username\n'), ((474, 488), 'conans.model.username.Username', 'Username', (['"""A1"""'], {}), "('A1')\n", (482, 488), False, 'from conans.model.username import Username\n'), ((497, 511), 'conans.model.username.Username', 'Username', (['"""a_"""'], {}), "('a_')\n", (505, 511), False, 'from conans.model.username import Username\n')] |
import torch
import hcat.lib.functional
from hcat.lib.functional import IntensityCellReject
from hcat.backends.backend import Backend
from hcat.models.r_unet import embed_model as RUnet
from hcat.train.transforms import median_filter, erosion
import hcat.lib.utils
from hcat.lib.utils import graceful_exit
import os.path
import wget
from typing import Dict, Optional
class SpatialEmbedding(Backend):
    def __init__(self,
                 sigma: Optional[torch.Tensor] = None,
                 device: Optional[str] = 'cuda',
                 model_loc: Optional[str] = None,
                 postprocessing: Optional[bool] = True,
                 scale: Optional[int] = 25,
                 figure: Optional[str] = None,
                 archetecture: Optional[RUnet] = RUnet):
        """
        Initialize Spatial embedding Algorithm.

        :param sigma: torch.Tensor[sigma_x, sigma_y, sigma_z] values for gaussian probability estimation.
            Defaults to torch.tensor([0.02, 0.02, 0.02]) when None.
        :param device: String value for torch device by which to run segmentation backbone on.
        :param model_loc: Path to trained model files.
        :param postprocessing: Disable segmentation postprocessing, namely
        :param scale: scale factor based on max diameter of object
        :param figure: filename and path of diagnostic figure which may be rendered
        :param archetecture: model class used to instantiate the segmentation backbone
        """
        super(SpatialEmbedding, self).__init__()

        # FIX: the original default was `sigma=torch.tensor([0.02, 0.02, 0.02])`,
        # a mutable default argument evaluated once at class-definition time and
        # shared by every instance. Construct it per-instance instead.
        if sigma is None:
            sigma = torch.tensor([0.02, 0.02, 0.02])

        self.url = 'https://github.com/buswinka/hcat/blob/master/modelfiles/spatial_embedding.trch?raw=true'
        # self.url = None
        self.scale = torch.tensor(scale)
        self.device = device
        self.sigma = sigma.to(device)  # .to() returns a new tensor; the caller's tensor is untouched
        self.postprocessing = postprocessing
        self.figure = figure

        # Prefer the bundled remote model file; fall back to a local path.
        if self.url:
            self.model = self._model_loader_url(self.url, archetecture, device)
        else:
            self.model = self._model_loader_path(model_loc, archetecture, device)

        # TorchScript-compiled, gradient-free functional stages of the pipeline.
        self.vector_to_embedding = torch.jit.script(
            hcat.lib.functional.VectorToEmbedding(scale=self.scale).requires_grad_(False).eval())

        self.embedding_to_probability = torch.jit.script(
            hcat.lib.functional.EmbeddingToProbability(scale=self.scale).requires_grad_(False).eval())

        self.estimate_centroids = hcat.lib.functional.EstimateCentroids(scale=self.scale).requires_grad_(False)

        self.filter = median_filter(kernel_targets=3, rate=1, device=device)
        self.binary_erosion = erosion(device=device)

        self.intensity_rejection = IntensityCellReject()

        self.nms = hcat.lib.functional.nms().requires_grad_(False)

        # Diagnostic buffers populated on each forward() call.
        self.centroids = None
        self.vec = None
        self.embed = None
        self.prob = None

    @graceful_exit('\x1b[1;31;40m' + 'ERROR: Spatial Embedding Failed. Aborting...' + '\x1b[0m')
    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """
        Inputs an image and outputs a probability mask of everything seen in the image.

        .. note::
           Call the module as a function to execute this method (similar to torch.nn.module).

        .. warning:
           Will not raise an error upon failure, instead returns None and prints to standard out

        Example:

        >>> from hcat.backends.spatial_embedding import SpatialEmbedding
        >>> import torch
        >>> backend = SpatialEmbedding()
        >>> image = torch.load('path/to/my/image.trch')
        >>> assert image.ndim == 5 # Shape should be [B, C, X, Y, Z]
        >>> masks = backend(image)

        :param image: [B, C=4, X, Y, Z] input image
        :return: [B, 1, X, Y, Z] output segmentation mask where each pixel value is a cell id (0 is background)
        """
        assert image.ndim == 5
        assert image.shape[1] == 1
        assert image.min() >= -1
        assert image.max() <= 1

        # image = self.filter(image.to(self.device))
        image = image.to(self.device)

        b, c, x, y, z = image.shape

        # Early-out for images rejected by the quality gate.
        if self.image_reject and self._is_image_bad(image):
            return torch.zeros((b, 0, x, y, z), device=self.device)

        # Evaluate Neural Network Model
        out: torch.Tensor = self.model(image)

        # Assign Outputs: last channel is probability, first three are vectors.
        probability_map = out[:, [-1], ...]
        out = out[:, 0:3:1, ...]

        self.prob = probability_map.cpu()
        self.vec = out.cpu()

        out: torch.Tensor = self.vector_to_embedding(out)
        self.embed = out.cpu()

        centroids: Dict[str, torch.Tensor] = self.estimate_centroids(out, probability_map)
        self.centroids = centroids

        out: torch.Tensor = self.embedding_to_probability(out, centroids, self.sigma)

        # Reject cell masks that overlap or meet min Myo7a criteria
        if self.postprocessing:
            out: torch.Tensor = self.intensity_rejection(out, image)

        # print(centroids.shape, out.shape)
        if out.numel() == 0:
            return torch.zeros((b, 0, x, y, z), device=self.device)

        # Non-maximum suppression across candidate masks.
        ind = self.nms(out, 0.5)
        out = out[:, ind, ...]

        # Take probabilities and generate masks!
        probability_map = probability_map.lt(0.8).squeeze(1)
        for i in range(out.shape[1]):
            out[:, i, ...][probability_map] = 0

        self.zero_grad()
        return out

    def load(self, model_loc: str) -> None:
        """
        Initializes model weights from a url or filepath.

        Example:

        >>> from hcat.backends.spatial_embedding import SpatialEmbedding
        >>> backend = SpatialEmbedding()
        >>>
        >>> url = 'https://www.model_location.com/model.trch'
        >>> backend.load(url) # Works with url
        >>>
        >>> model_path = 'path/to/my/model.trch'
        >>> backend.load(model_path) # Also works with path

        :param model_loc: url or filepath
        :return: None
        """
        if self._is_url(model_loc):
            return self._model_loader_url(model_loc, RUnet(in_channels=1).requires_grad_(False), self.device)
        else:
            return self._model_loader_path(model_loc, RUnet(in_channels=1).requires_grad_(False), self.device)
| [
"hcat.lib.functional.IntensityCellReject",
"hcat.lib.utils.graceful_exit",
"hcat.models.r_unet.embed_model",
"hcat.train.transforms.erosion",
"torch.tensor",
"hcat.train.transforms.median_filter",
"torch.zeros"
] | [((2732, 2827), 'hcat.lib.utils.graceful_exit', 'graceful_exit', (["('\\x1b[1;31;40m' + 'ERROR: Spatial Embedding Failed. Aborting...' + '\\x1b[0m')"], {}), "('\\x1b[1;31;40m' +\n 'ERROR: Spatial Embedding Failed. Aborting...' + '\\x1b[0m')\n", (2745, 2827), False, 'from hcat.lib.utils import graceful_exit\n'), ((476, 508), 'torch.tensor', 'torch.tensor', (['[0.02, 0.02, 0.02]'], {}), '([0.02, 0.02, 0.02])\n', (488, 508), False, 'import torch\n'), ((1575, 1594), 'torch.tensor', 'torch.tensor', (['scale'], {}), '(scale)\n', (1587, 1594), False, 'import torch\n'), ((2385, 2439), 'hcat.train.transforms.median_filter', 'median_filter', ([], {'kernel_targets': '(3)', 'rate': '(1)', 'device': 'device'}), '(kernel_targets=3, rate=1, device=device)\n', (2398, 2439), False, 'from hcat.train.transforms import median_filter, erosion\n'), ((2470, 2492), 'hcat.train.transforms.erosion', 'erosion', ([], {'device': 'device'}), '(device=device)\n', (2477, 2492), False, 'from hcat.train.transforms import median_filter, erosion\n'), ((2529, 2550), 'hcat.lib.functional.IntensityCellReject', 'IntensityCellReject', ([], {}), '()\n', (2548, 2550), False, 'from hcat.lib.functional import IntensityCellReject\n'), ((4049, 4097), 'torch.zeros', 'torch.zeros', (['(b, 0, x, y, z)'], {'device': 'self.device'}), '((b, 0, x, y, z), device=self.device)\n', (4060, 4097), False, 'import torch\n'), ((4936, 4984), 'torch.zeros', 'torch.zeros', (['(b, 0, x, y, z)'], {'device': 'self.device'}), '((b, 0, x, y, z), device=self.device)\n', (4947, 4984), False, 'import torch\n'), ((5950, 5970), 'hcat.models.r_unet.embed_model', 'RUnet', ([], {'in_channels': '(1)'}), '(in_channels=1)\n', (5955, 5970), True, 'from hcat.models.r_unet import embed_model as RUnet\n'), ((6075, 6095), 'hcat.models.r_unet.embed_model', 'RUnet', ([], {'in_channels': '(1)'}), '(in_channels=1)\n', (6080, 6095), True, 'from hcat.models.r_unet import embed_model as RUnet\n')] |
from mortar_rdb import register_session, get_session
from mortar_rdb.interfaces import ISession
from testfixtures.components import TestComponents
from sqlalchemy.exc import OperationalError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm.session import Session
from sqlalchemy.schema import Column
from sqlalchemy.types import Integer, String
from threading import Thread
from testfixtures import (
ShouldRaise,compare,generator,Comparison as C, LogCapture
)
from unittest import TestCase
from zope.component import getSiteManager
from zope.component.interfaces import ComponentLookupError
import transaction
class TestUtility(TestCase):
    """Exercise mortar_rdb's register_session()/get_session() utilities.

    Each test runs against a fresh zope component registry (TestComponents)
    and a throwaway single-table declarative model.
    """
    def setUp(self):
        # Install an isolated component registry and build a tiny model class.
        self.components = TestComponents()
        self.Base = declarative_base()
        class Model(self.Base):
            __tablename__ = 'model'
            id = Column('id', Integer, primary_key=True)
            name = Column('name', String(50))
        self.Model = Model
    def tearDown(self):
        # Restore the original component registry so tests stay isolated.
        self.components.uninstall()
    def test_how_to_create(self):
        """Document the manual create-tables workflow for a non-transactional session."""
        register_session('sqlite://', transactional=False)
        # at this stage we have no tables
        session = get_session()
        session.add(self.Model(name='foo'))
        # so we get an error
        with ShouldRaise(OperationalError):
            session.commit()
        # ...which we then need to abort:
        session.rollback()
        # if we know we have no tables, we can do:
        self.Base.metadata.create_all(session.bind)
        # now we can commit:
        session.add(self.Model(name='foo'))
        session.commit()
        # ...and get stuff back:
        self.assertEqual(1,session.query(self.Model).count())
    def test_get_in_view(self):
        """Named and unnamed registrations yield distinct, working sessions."""
        register_session('sqlite://')
        register_session('sqlite://','foo')
        # create the tables
        session1 = get_session()
        session2 = get_session('foo')
        with transaction.manager:
            self.Base.metadata.create_all(session1.bind)
            self.Base.metadata.create_all(session2.bind)
        # this is what you'd do in views:
        session = get_session()
        session.add(self.Model(id=1,name='foo'))
        model1 = session.query(self.Model).one()
        self.assertEqual(model1.id,1)
        self.assertEqual(model1.name,'foo')
        # or with a name...
        session = get_session('foo')
        session.add(self.Model(id=1,name='foo'))
        model2 = session.query(self.Model).one()
        self.assertEqual(model2.id,1)
        self.assertEqual(model2.name,'foo')
        # paranoia
        self.assertFalse(model1 is model2)
    def test_register(self):
        """register_session() registers a ScopedSession utility under the empty name."""
        register_session('sqlite://')
        # create the tables
        session = get_session()
        self.Base.metadata.create_all(session.bind)
        # check registrations
        compare(generator(
            C('zope.component.registry.UtilityRegistration',
              component=C('sqlalchemy.orm.scoping.ScopedSession'),
              factory=None,
              info=u'',
              name=u'',
              provided=ISession,
              registry=self.components.registry
              )),self.components.registry.registeredUtilities())
        # this is what get_session goes:
        session = getSiteManager().getUtility(ISession)
        session.add(self.Model(id=1,name='foo'))
        model = session.query(self.Model).one()
        self.assertEqual(model.id,1)
        self.assertEqual(model.name,'foo')
    def test_register_with_name(self):
        """A named registration is only retrievable under that name."""
        register_session('sqlite://','foo')
        # check registrations
        compare(generator(
            C('zope.component.registry.UtilityRegistration',
              component=C('sqlalchemy.orm.scoping.ScopedSession'),
              factory=None,
              info=u'',
              name=u'foo',
              provided=ISession,
              registry=self.components.registry
              )),self.components.registry.registeredUtilities())
        registry = getSiteManager()
        # check we don't register with no name:
        with ShouldRaise(ComponentLookupError(ISession, u'')):
            registry.getUtility(ISession)
        # check we do with the right name
        self.assertTrue(isinstance(
            registry.getUtility(ISession,'foo')(),
            Session
            ))
    def test_transaction(self):
        """Work done inside transaction.manager is committed on exit."""
        register_session('sqlite://')
        # functional
        with transaction.manager:
            session = get_session()
            self.Base.metadata.create_all(session.bind)
            session.add(self.Model(id=1,name='foo'))
        compare(session.query(self.Model).count(), expected=1)
    def test_transaction_no_session_usage(self):
        """Raw session.execute() inserts also take part in the managed transaction."""
        register_session('sqlite://')
        # functional
        with transaction.manager:
            session = get_session()
            self.Base.metadata.create_all(session.bind)
            session.execute(
                self.Model.__table__.insert().values(name='test')
            )
        compare(session.query(self.Model).count(), expected=1)
    def test_no_transaction(self):
        """With transactional=False the session is committed manually."""
        register_session('sqlite://',transactional=False)
        # functional
        session = get_session()
        self.Base.metadata.create_all(session.bind)
        session.add(self.Model(id=1,name='foo'))
        session.commit()
        compare(session.query(self.Model).count(), expected=1)
    def test_different_sessions_per_thread(self):
        """Scoped sessions are thread-local: each thread sees its own session."""
        register_session('sqlite://')
        class TestThread(Thread):
            def run(self):
                self.resulting_session = get_session()
        t1 = TestThread()
        t1.start()
        t2 = TestThread()
        t2.start()
        t1.join()
        t2.join()
        self.assertNotEqual(
            id(t1.resulting_session),
            id(t2.resulting_session),
            )
    def test_different_sessions_when_async(self):
        """scoped=False yields a fresh session object on every get_session() call."""
        register_session('sqlite://',
                         scoped=False, transactional=False)
        s1 = get_session()
        s2 = get_session()
        self.assertNotEqual(id(s1),id(s2))
    def test_logging_functional(self):
        """Registration emits a single INFO log line naming the engine url."""
        with LogCapture() as l:
            register_session('sqlite://')
        l.check((
            'mortar_rdb',
            'INFO',
            "Registering session for sqlite:// with name ''"
            ))
| [
"testfixtures.ShouldRaise",
"sqlalchemy.types.String",
"testfixtures.components.TestComponents",
"mortar_rdb.get_session",
"testfixtures.Comparison",
"zope.component.interfaces.ComponentLookupError",
"mortar_rdb.register_session",
"sqlalchemy.ext.declarative.declarative_base",
"testfixtures.LogCaptu... | [((725, 741), 'testfixtures.components.TestComponents', 'TestComponents', ([], {}), '()\n', (739, 741), False, 'from testfixtures.components import TestComponents\n'), ((762, 780), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (778, 780), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((1083, 1133), 'mortar_rdb.register_session', 'register_session', (['"""sqlite://"""'], {'transactional': '(False)'}), "('sqlite://', transactional=False)\n", (1099, 1133), False, 'from mortar_rdb import register_session, get_session\n'), ((1194, 1207), 'mortar_rdb.get_session', 'get_session', ([], {}), '()\n', (1205, 1207), False, 'from mortar_rdb import register_session, get_session\n'), ((1768, 1797), 'mortar_rdb.register_session', 'register_session', (['"""sqlite://"""'], {}), "('sqlite://')\n", (1784, 1797), False, 'from mortar_rdb import register_session, get_session\n'), ((1806, 1842), 'mortar_rdb.register_session', 'register_session', (['"""sqlite://"""', '"""foo"""'], {}), "('sqlite://', 'foo')\n", (1822, 1842), False, 'from mortar_rdb import register_session, get_session\n'), ((1890, 1903), 'mortar_rdb.get_session', 'get_session', ([], {}), '()\n', (1901, 1903), False, 'from mortar_rdb import register_session, get_session\n'), ((1923, 1941), 'mortar_rdb.get_session', 'get_session', (['"""foo"""'], {}), "('foo')\n", (1934, 1941), False, 'from mortar_rdb import register_session, get_session\n'), ((2159, 2172), 'mortar_rdb.get_session', 'get_session', ([], {}), '()\n', (2170, 2172), False, 'from mortar_rdb import register_session, get_session\n'), ((2400, 2418), 'mortar_rdb.get_session', 'get_session', (['"""foo"""'], {}), "('foo')\n", (2411, 2418), False, 'from mortar_rdb import register_session, get_session\n'), ((2708, 2737), 'mortar_rdb.register_session', 'register_session', (['"""sqlite://"""'], {}), "('sqlite://')\n", (2724, 2737), False, 'from mortar_rdb import register_session, 
get_session\n'), ((2785, 2798), 'mortar_rdb.get_session', 'get_session', ([], {}), '()\n', (2796, 2798), False, 'from mortar_rdb import register_session, get_session\n'), ((3639, 3675), 'mortar_rdb.register_session', 'register_session', (['"""sqlite://"""', '"""foo"""'], {}), "('sqlite://', 'foo')\n", (3655, 3675), False, 'from mortar_rdb import register_session, get_session\n'), ((4146, 4162), 'zope.component.getSiteManager', 'getSiteManager', ([], {}), '()\n', (4160, 4162), False, 'from zope.component import getSiteManager\n'), ((4544, 4573), 'mortar_rdb.register_session', 'register_session', (['"""sqlite://"""'], {}), "('sqlite://')\n", (4560, 4573), False, 'from mortar_rdb import register_session, get_session\n'), ((4921, 4950), 'mortar_rdb.register_session', 'register_session', (['"""sqlite://"""'], {}), "('sqlite://')\n", (4937, 4950), False, 'from mortar_rdb import register_session, get_session\n'), ((5332, 5382), 'mortar_rdb.register_session', 'register_session', (['"""sqlite://"""'], {'transactional': '(False)'}), "('sqlite://', transactional=False)\n", (5348, 5382), False, 'from mortar_rdb import register_session, get_session\n'), ((5430, 5443), 'mortar_rdb.get_session', 'get_session', ([], {}), '()\n', (5441, 5443), False, 'from mortar_rdb import register_session, get_session\n'), ((5706, 5735), 'mortar_rdb.register_session', 'register_session', (['"""sqlite://"""'], {}), "('sqlite://')\n", (5722, 5735), False, 'from mortar_rdb import register_session, get_session\n'), ((6168, 6232), 'mortar_rdb.register_session', 'register_session', (['"""sqlite://"""'], {'scoped': '(False)', 'transactional': '(False)'}), "('sqlite://', scoped=False, transactional=False)\n", (6184, 6232), False, 'from mortar_rdb import register_session, get_session\n'), ((6271, 6284), 'mortar_rdb.get_session', 'get_session', ([], {}), '()\n', (6282, 6284), False, 'from mortar_rdb import register_session, get_session\n'), ((6298, 6311), 'mortar_rdb.get_session', 'get_session', ([], {}), 
'()\n', (6309, 6311), False, 'from mortar_rdb import register_session, get_session\n'), ((866, 905), 'sqlalchemy.schema.Column', 'Column', (['"""id"""', 'Integer'], {'primary_key': '(True)'}), "('id', Integer, primary_key=True)\n", (872, 905), False, 'from sqlalchemy.schema import Column\n'), ((1294, 1323), 'testfixtures.ShouldRaise', 'ShouldRaise', (['OperationalError'], {}), '(OperationalError)\n', (1305, 1323), False, 'from testfixtures import ShouldRaise, compare, generator, Comparison as C, LogCapture\n'), ((4660, 4673), 'mortar_rdb.get_session', 'get_session', ([], {}), '()\n', (4671, 4673), False, 'from mortar_rdb import register_session, get_session\n'), ((5029, 5042), 'mortar_rdb.get_session', 'get_session', ([], {}), '()\n', (5040, 5042), False, 'from mortar_rdb import register_session, get_session\n'), ((6426, 6438), 'testfixtures.LogCapture', 'LogCapture', ([], {}), '()\n', (6436, 6438), False, 'from testfixtures import ShouldRaise, compare, generator, Comparison as C, LogCapture\n'), ((6457, 6486), 'mortar_rdb.register_session', 'register_session', (['"""sqlite://"""'], {}), "('sqlite://')\n", (6473, 6486), False, 'from mortar_rdb import register_session, get_session\n'), ((940, 950), 'sqlalchemy.types.String', 'String', (['(50)'], {}), '(50)\n', (946, 950), False, 'from sqlalchemy.types import Integer, String\n'), ((3367, 3383), 'zope.component.getSiteManager', 'getSiteManager', ([], {}), '()\n', (3381, 3383), False, 'from zope.component import getSiteManager\n'), ((4245, 4280), 'zope.component.interfaces.ComponentLookupError', 'ComponentLookupError', (['ISession', 'u""""""'], {}), "(ISession, u'')\n", (4265, 4280), False, 'from zope.component.interfaces import ComponentLookupError\n'), ((5839, 5852), 'mortar_rdb.get_session', 'get_session', ([], {}), '()\n', (5850, 5852), False, 'from mortar_rdb import register_session, get_session\n'), ((3010, 3051), 'testfixtures.Comparison', 'C', (['"""sqlalchemy.orm.scoping.ScopedSession"""'], {}), 
"('sqlalchemy.orm.scoping.ScopedSession')\n", (3011, 3051), True, 'from testfixtures import ShouldRaise, compare, generator, Comparison as C, LogCapture\n'), ((3826, 3867), 'testfixtures.Comparison', 'C', (['"""sqlalchemy.orm.scoping.ScopedSession"""'], {}), "('sqlalchemy.orm.scoping.ScopedSession')\n", (3827, 3867), True, 'from testfixtures import ShouldRaise, compare, generator, Comparison as C, LogCapture\n')] |
"""GageRnR.
The input data should be structured
in a 3d array n[i,j,k] where
i = operator, j = part, k = measurement
Stored to file this data would look:
m1 m2 m3
3.29; 3.41; 3.64 # p1 | o1
2.44; 2.32; 2.42 # p2
3.08; 3.25; 3.07 # p1 | o2
2.53; 1.78; 2.32 # p2
3.04; 2.89; 2.85 # p1 | o3
1.62; 1.87; 2.04 # p2
More info: https://github.com/owodunni/GageRnR
Usage:
GageRnR -f FILE -s STRUCTURE [-a <AXES>] [-d <DELIMITER>] [-o <FOLDER>] [-g <PARTS>]
GageRnR -h | --help
GageRnR -v | --version
Examples:
GageRnR -f data.csv -s5,7,11 -o report
GageRnR -f data/data_mXop.csv -s 3,5,11 -o outDir
GageRnR -f data/data_opXm.csv -s 5,7,11 -a 2,1,0 -o outDir
GageRnR -f data/data_demoGRnR.csv -s 3,10,3 -a 0,2,1 -g 40,42,30,43,29,45,27.5,42,26,35 -o outDir
Options:
-f --file=FILE Load input data.
-s --structure=STRUCTURE Data structure.
Order should be operators, parts, measurements.
-a --axes=<AXES> Order of data axes [default: 0,1,2].
-d --delimiter=<DELIMITER> Order of data axes [default: ;].
-o --output=<FOLDER> Report output directory
-g --groundTruth=<PARTS> Ground Truth data for parts
-h --help Show this screen.
-v --version Show version.
"""
from docopt import docopt
import os.path
import GageRnR
from .reportGenerator import ReportGenerator
def toInt(values):
    """Parse a comma-separated string into a list of ints."""
    return list(map(int, values.split(',')))
def toFloat(values):
    """Parse a comma-separated string into a list of floats."""
    return list(map(float, values.split(',')))
def positiveIntegers(values, minValue):
    """Return True if every value in *values* is >= minValue.

    Vacuously True for an empty iterable, matching the original loop.
    Replaces a manual early-return loop with the idiomatic all().
    """
    return all(value >= minValue for value in values)
def checkIntegerList(name, values, minValue=0):
    """Validate that *values* holds exactly three entries, each >= minValue.

    Raises AttributeError (with the same arguments as before) when the
    length or the minimum-value constraint is violated; returns None
    otherwise. The positivity check is inlined here rather than delegated
    to a helper.
    """
    if len(values) != 3:
        raise AttributeError(name, " can only have three values.")
    for value in values:
        if value < minValue:
            raise AttributeError(name, " can only be positive integers.")
class Application():
    def __init__(self, argv=None):
        """Parse the docopt command line into attributes.

        ``gt`` and ``outputFolder`` are only set when the corresponding
        option was supplied; later code probes them with hasattr().
        """
        arguments = docopt(__doc__, argv, version=GageRnR.__version__)
        self.file = str(arguments["--file"])
        self.structure = toInt(arguments["--structure"])
        self.axes = toInt(arguments["--axes"])
        self.delimiter = str(arguments["--delimiter"])
        groundTruth = arguments["--groundTruth"]
        if groundTruth is not None:
            self.gt = toFloat(groundTruth)
        output = arguments["--output"]
        if output is not None:
            self.outputFolder = output

    def check(self):
        """Validate that the input file exists and the structure/axes are sane."""
        if not os.path.isfile(self.file):
            raise FileNotFoundError(self.file)
        checkIntegerList("Structure", self.structure, 1)
        checkIntegerList("Axes", self.axes)

    @staticmethod
    def _addSection(report, analysis):
        """Append one analysis' title, description and summary table to the report."""
        report.addTitle(analysis.title)
        report.addDoc(analysis)
        report.addTable(analysis.summary(tableFormat="html"))

    def run(self):
        """Load the data, run all analyses, and optionally write an HTML report."""
        data = GageRnR.DataLoader().load(
            file=self.file,
            structure=self.structure,
            axes=self.axes,
            delimiter=self.delimiter)

        gage = GageRnR.GageRnR(data)
        gage.calculate()

        stats = GageRnR.Statistics(data)
        stats.calculate()

        normality = GageRnR.Normality(data)
        normality.calculate()

        linearity = None
        if hasattr(self, 'gt'):
            linearity = GageRnR.Linearity(data=data, partGt=self.gt)
            linearity.calculate()

        if not hasattr(self, 'outputFolder'):
            return

        rg = ReportGenerator(self.outputFolder)

        self._addSection(rg, gage)
        self._addSection(rg, stats)
        rg.addPlot(stats.createPartsBoxPlot(), 'Parts Box Plot')
        rg.addPlot(stats.createOperatorsBoxPlot(), 'Operators Box Plot')
        self._addSection(rg, normality)

        if linearity is not None:
            self._addSection(rg, linearity)
            rg.addPlot(linearity.createLinearityPlot(), 'Residual Linearity Plot')

        rg.generateReport()
        print("Report written to: " + self.outputFolder)
| [
"GageRnR.Statistics",
"GageRnR.GageRnR",
"GageRnR.DataLoader",
"GageRnR.Linearity",
"GageRnR.Normality",
"docopt.docopt"
] | [((1960, 2010), 'docopt.docopt', 'docopt', (['__doc__', 'argv'], {'version': 'GageRnR.__version__'}), '(__doc__, argv, version=GageRnR.__version__)\n', (1966, 2010), False, 'from docopt import docopt\n'), ((2677, 2697), 'GageRnR.DataLoader', 'GageRnR.DataLoader', ([], {}), '()\n', (2695, 2697), False, 'import GageRnR\n'), ((2871, 2892), 'GageRnR.GageRnR', 'GageRnR.GageRnR', (['data'], {}), '(data)\n', (2886, 2892), False, 'import GageRnR\n'), ((2928, 2952), 'GageRnR.Statistics', 'GageRnR.Statistics', (['data'], {}), '(data)\n', (2946, 2952), False, 'import GageRnR\n'), ((2988, 3011), 'GageRnR.Normality', 'GageRnR.Normality', (['data'], {}), '(data)\n', (3005, 3011), False, 'import GageRnR\n'), ((3085, 3129), 'GageRnR.Linearity', 'GageRnR.Linearity', ([], {'data': 'data', 'partGt': 'self.gt'}), '(data=data, partGt=self.gt)\n', (3102, 3129), False, 'import GageRnR\n')] |
import pytest
from wikidict.render import parse_word
from wikidict.utils import process_templates
@pytest.mark.parametrize(
"word, pronunciations, gender, etymology, definitions",
[
("ababalhar", [], "", ["De baba."], ["<i>(popular)</i> babar; conspurcar"]),
(
"alguém",
["aw.ˈgẽj"],
"",
["Do latim <i>alĭquem</i> <sup>(la)</sup>."],
["pessoa não identificada"],
),
(
"algo",
[],
"",
[],
["um pouco, de certo modo", "objeto (não-identificado) de que se fala"],
),
(
"baiano",
[],
"",
["Derivado de Bahia, mais o sufixo ano, com perda do H."],
[
"do Estado da Bahia, Brasil",
"natural ou habitante do Estado da Bahia, Brasil",
"<i>(São Paulo, Brasil; popular; pejorativo)</i> pessoa que se veste de maneira incomum ou brega; fora da moda", # noqa
],
),
(
"cabrum",
[],
"mf",
['Do latim <i>caprunu</i> <sup>(la)</sup> "cabra".'],
[
"<i>(Pecuária)</i> de cabras:",
"<i>(Regionalismo, Brasil)</i> marido de mulher adúltera",
"indica estrondo",
],
),
(
"COPOM",
[],
"m",
[],
[
"<b>C</b>entro de <b>O</b>perações da <b>Po</b>lícia <b>M</b>ilitar",
"<i>(Brasil)</i> <b>Co</b>mitê de <b>Po</b>lítica <b>M</b>onetária",
],
),
(
"dezassete",
[],
"",
["Contração do latim vulgar <i>decem</i> + <i>ac</i> + <i>septem</i>."],
[
"o número dezassete (17, XVII)",
"nota correspondente a dezassete valores",
"pessoa ou coisa que apresenta o número dezassete numa ordenação",
"vide dezessete",
],
),
(
"etc",
[],
"",
[],
[
'abreviação do latim <i>et cetera</i>, que significa "e outros", "e os restantes" e "e outras coisas mais"', # noqa
],
),
(
"-ista",
[],
"",
[
"Do grego antigo <i>-ιστεσ</i> (<i>-istes</i>) através do latim <i>-ista</i> através do francês antigo <i>-iste</i>." # noqa
],
[
"que segue um princípio",
"que é estudioso ou profissional de um assunto",
"que usa algo",
"que tem uma visão preconceituosa",
],
),
(
"neo-",
[],
"",
["Do grego antigo <i>νέος</i>."],
[
"exprime a ideia de <i>novo</i>",
"<b>Nota:</b> Liga-se por hífen ao morfema seguinte quando este começa por <b>vogal</b>, <b>h</b>, <b>r</b> ou <b>s</b>.", # noqa
],
),
("para", [], "", [], ["exprime fim, destino, lugar, tempo, direção etc"]),
(
"paulista",
[],
"",
[],
[
"diz-se de pessoa de origem do Estado de São Paulo, Brasil",
"diz-se de artigo ou objeto do Estado de São Paulo",
"pessoa de origem do Estado de São Paulo, Brasil",
"artigo ou objeto do Estado de São Paulo",
],
),
("tenui-", [], "", [], ["variante ortográfica de <b>tenu-</b>"]),
(
"to",
[],
"",
[],
[
'<i>(antigo)</i> contração do pronome pessoal "te" com o pronome pessoal ou demonstrativo "o"',
"<i>(coloquial e Brasil)</i> forma aferética (muito comum na linguagem falada) de estou",
],
),
(
"ũa",
[],
"",
[
"Do Latim <i>una-</i>: <i>una-</i> deu <b>ũa</b> por queda do <b>n</b> com a nasalação do <b>ũ</b>."
],
["ortografia antiga de uma"],
),
("UTC", [], "", [], ["<i>(estrangeirismo)</i> ver TUC"]),
],
)
def test_parse_word(word, pronunciations, gender, etymology, definitions, page):
    """Test the sections finder and definitions getter."""
    # NOTE(review): `page` appears to be a pytest fixture returning the raw
    # wikicode for *word* in the given locale — confirm against conftest.
    code = page(word, "pt")
    details = parse_word(word, code, "pt", force=True)
    # Compare every extracted facet against the parametrized expectations.
    assert pronunciations == details.pronunciations
    assert gender == details.gender
    assert etymology == details.etymology
    assert definitions == details.definitions
@pytest.mark.parametrize(
"wikicode, expected",
[
("{{AFI|/k/|pt}}", "/k/"),
("{{barra de cor|yellow|#FFFF00}}", "[RGB #FFFF00]"),
("{{escopo2|Informática}}", "<i>(Informática)</i>"),
("{{escopo2|Brasil|governo}}", "<i>(Brasil)</i>"),
("{{escopoCat|Árvore|pt}}", "<i>(Botânica)</i>"),
("{{escopoCat|Náutica|pt}}", "<i>(Náutica)</i>"),
("{{escopoCatLang|Alimentação|pt}}", "<i>(Culinária)</i>"),
("{{escopoCatLang|Verbo auxiliar|pt}}", "<i>(Verbo auxiliar)</i>"),
("{{escopoUso|Portugal|pt}}", "<i>(Portugal)</i>"),
("{{escopoUso|Coloquialismo|pt}}", "<i>(coloquialismo)</i>"),
("{{fem|heliostático}}", "feminino de <b>heliostático</b>"),
("{{fl|la|occŭlo}}", "occŭlo"),
("{{l|pt|usar|usar}}", "usar"),
("{{l.o.|jurídico|jurídica}}", "jurídica"),
("{{l.s.|uso}}", "uso"),
("{{link preto|ciconiforme}}", "ciconiforme"),
("{{ll|publicar}}", "publicar"),
("{{m|ar|شيشة|tr=šīša}}", "<i>masculino</i>"),
("{{mq|palavra}}", "o mesmo que <b>palavra</b>"),
("{{mq|word|en}}", "o mesmo que <i>word</i>"),
("{{PE|cu}}", "cu <sup>(português de Portugal)</sup>"),
("{{r|la|basium|basĭum}}", "basĭum"),
("{{r.l|la|utor|ūtor}}", "ūtor"),
("{{varort|tenu-|pt}}", "variante ortográfica de <b>tenu-</b>"),
],
)
def test_process_templates(wikicode, expected):
    """Test templates handling."""
    # "foo" is a dummy word argument; only the wikicode expansion is under test.
    assert process_templates("foo", wikicode, "pt") == expected
| [
"pytest.mark.parametrize",
"wikidict.render.parse_word",
"wikidict.utils.process_templates"
] | [((102, 2951), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""word, pronunciations, gender, etymology, definitions"""', '[(\'ababalhar\', [], \'\', [\'De baba.\'], [\'<i>(popular)</i> babar; conspurcar\']\n ), (\'alguém\', [\'aw.ˈgẽj\'], \'\', [\n \'Do latim <i>alĭquem</i> <sup>(la)</sup>.\'], [\'pessoa não identificada\'\n ]), (\'algo\', [], \'\', [], [\'um pouco, de certo modo\',\n \'objeto (não-identificado) de que se fala\']), (\'baiano\', [], \'\', [\n \'Derivado de Bahia, mais o sufixo ano, com perda do H.\'], [\n \'do Estado da Bahia, Brasil\',\n \'natural ou habitante do Estado da Bahia, Brasil\',\n \'<i>(São Paulo, Brasil; popular; pejorativo)</i> pessoa que se veste de maneira incomum ou brega; fora da moda\'\n ]), (\'cabrum\', [], \'mf\', [\n \'Do latim <i>caprunu</i> <sup>(la)</sup> "cabra".\'], [\n \'<i>(Pecuária)</i> de cabras:\',\n \'<i>(Regionalismo, Brasil)</i> marido de mulher adúltera\',\n \'indica estrondo\']), (\'COPOM\', [], \'m\', [], [\n \'<b>C</b>entro de <b>O</b>perações da <b>Po</b>lícia <b>M</b>ilitar\',\n \'<i>(Brasil)</i> <b>Co</b>mitê de <b>Po</b>lítica <b>M</b>onetária\']),\n (\'dezassete\', [], \'\', [\n \'Contração do latim vulgar <i>decem</i> + <i>ac</i> + <i>septem</i>.\'],\n [\'o número dezassete (17, XVII)\',\n \'nota correspondente a dezassete valores\',\n \'pessoa ou coisa que apresenta o número dezassete numa ordenação\',\n \'vide dezessete\']), (\'etc\', [], \'\', [], [\n \'abreviação do latim <i>et cetera</i>, que significa "e outros", "e os restantes" e "e outras coisas mais"\'\n ]), (\'-ista\', [], \'\', [\n \'Do grego antigo <i>-ιστεσ</i> (<i>-istes</i>) através do latim <i>-ista</i> através do francês antigo <i>-iste</i>.\'\n ], [\'que segue um princípio\',\n \'que é estudioso ou profissional de um assunto\', \'que usa algo\',\n \'que tem uma visão preconceituosa\']), (\'neo-\', [], \'\', [\n \'Do grego antigo <i>νέος</i>.\'], [\'exprime a ideia de <i>novo</i>\',\n \'<b>Nota:</b> Liga-se por hífen ao 
morfema seguinte quando este começa por <b>vogal</b>, <b>h</b>, <b>r</b> ou <b>s</b>.\'\n ]), (\'para\', [], \'\', [], [\n \'exprime fim, destino, lugar, tempo, direção etc\']), (\'paulista\', [],\n \'\', [], [\'diz-se de pessoa de origem do Estado de São Paulo, Brasil\',\n \'diz-se de artigo ou objeto do Estado de São Paulo\',\n \'pessoa de origem do Estado de São Paulo, Brasil\',\n \'artigo ou objeto do Estado de São Paulo\']), (\'tenui-\', [], \'\', [], [\n \'variante ortográfica de <b>tenu-</b>\']), (\'to\', [], \'\', [], [\n \'<i>(antigo)</i> contração do pronome pessoal "te" com o pronome pessoal ou demonstrativo "o"\'\n ,\n \'<i>(coloquial e Brasil)</i> forma aferética (muito comum na linguagem falada) de estou\'\n ]), (\'ũa\', [], \'\', [\n \'Do Latim <i>una-</i>: <i>una-</i> deu <b>ũa</b> por queda do <b>n</b> com a nasalação do <b>ũ</b>.\'\n ], [\'ortografia antiga de uma\']), (\'UTC\', [], \'\', [], [\n \'<i>(estrangeirismo)</i> ver TUC\'])]'], {}), '(\'word, pronunciations, gender, etymology, definitions\',\n [(\'ababalhar\', [], \'\', [\'De baba.\'], [\n \'<i>(popular)</i> babar; conspurcar\']), (\'alguém\', [\'aw.ˈgẽj\'], \'\', [\n \'Do latim <i>alĭquem</i> <sup>(la)</sup>.\'], [\'pessoa não identificada\'\n ]), (\'algo\', [], \'\', [], [\'um pouco, de certo modo\',\n \'objeto (não-identificado) de que se fala\']), (\'baiano\', [], \'\', [\n \'Derivado de Bahia, mais o sufixo ano, com perda do H.\'], [\n \'do Estado da Bahia, Brasil\',\n \'natural ou habitante do Estado da Bahia, Brasil\',\n \'<i>(São Paulo, Brasil; popular; pejorativo)</i> pessoa que se veste de maneira incomum ou brega; fora da moda\'\n ]), (\'cabrum\', [], \'mf\', [\n \'Do latim <i>caprunu</i> <sup>(la)</sup> "cabra".\'], [\n \'<i>(Pecuária)</i> de cabras:\',\n \'<i>(Regionalismo, Brasil)</i> marido de mulher adúltera\',\n \'indica estrondo\']), (\'COPOM\', [], \'m\', [], [\n \'<b>C</b>entro de <b>O</b>perações da <b>Po</b>lícia <b>M</b>ilitar\',\n \'<i>(Brasil)</i> <b>Co</b>mitê 
de <b>Po</b>lítica <b>M</b>onetária\']),\n (\'dezassete\', [], \'\', [\n \'Contração do latim vulgar <i>decem</i> + <i>ac</i> + <i>septem</i>.\'],\n [\'o número dezassete (17, XVII)\',\n \'nota correspondente a dezassete valores\',\n \'pessoa ou coisa que apresenta o número dezassete numa ordenação\',\n \'vide dezessete\']), (\'etc\', [], \'\', [], [\n \'abreviação do latim <i>et cetera</i>, que significa "e outros", "e os restantes" e "e outras coisas mais"\'\n ]), (\'-ista\', [], \'\', [\n \'Do grego antigo <i>-ιστεσ</i> (<i>-istes</i>) através do latim <i>-ista</i> através do francês antigo <i>-iste</i>.\'\n ], [\'que segue um princípio\',\n \'que é estudioso ou profissional de um assunto\', \'que usa algo\',\n \'que tem uma visão preconceituosa\']), (\'neo-\', [], \'\', [\n \'Do grego antigo <i>νέος</i>.\'], [\'exprime a ideia de <i>novo</i>\',\n \'<b>Nota:</b> Liga-se por hífen ao morfema seguinte quando este começa por <b>vogal</b>, <b>h</b>, <b>r</b> ou <b>s</b>.\'\n ]), (\'para\', [], \'\', [], [\n \'exprime fim, destino, lugar, tempo, direção etc\']), (\'paulista\', [],\n \'\', [], [\'diz-se de pessoa de origem do Estado de São Paulo, Brasil\',\n \'diz-se de artigo ou objeto do Estado de São Paulo\',\n \'pessoa de origem do Estado de São Paulo, Brasil\',\n \'artigo ou objeto do Estado de São Paulo\']), (\'tenui-\', [], \'\', [], [\n \'variante ortográfica de <b>tenu-</b>\']), (\'to\', [], \'\', [], [\n \'<i>(antigo)</i> contração do pronome pessoal "te" com o pronome pessoal ou demonstrativo "o"\'\n ,\n \'<i>(coloquial e Brasil)</i> forma aferética (muito comum na linguagem falada) de estou\'\n ]), (\'ũa\', [], \'\', [\n \'Do Latim <i>una-</i>: <i>una-</i> deu <b>ũa</b> por queda do <b>n</b> com a nasalação do <b>ũ</b>.\'\n ], [\'ortografia antiga de uma\']), (\'UTC\', [], \'\', [], [\n \'<i>(estrangeirismo)</i> ver TUC\'])])\n', (125, 2951), False, 'import pytest\n'), ((4779, 6054), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""wikicode, 
expected"""', "[('{{AFI|/k/|pt}}', '/k/'), ('{{barra de cor|yellow|#FFFF00}}',\n '[RGB #FFFF00]'), ('{{escopo2|Informática}}', '<i>(Informática)</i>'),\n ('{{escopo2|Brasil|governo}}', '<i>(Brasil)</i>'), (\n '{{escopoCat|Árvore|pt}}', '<i>(Botânica)</i>'), (\n '{{escopoCat|Náutica|pt}}', '<i>(Náutica)</i>'), (\n '{{escopoCatLang|Alimentação|pt}}', '<i>(Culinária)</i>'), (\n '{{escopoCatLang|Verbo auxiliar|pt}}', '<i>(Verbo auxiliar)</i>'), (\n '{{escopoUso|Portugal|pt}}', '<i>(Portugal)</i>'), (\n '{{escopoUso|Coloquialismo|pt}}', '<i>(coloquialismo)</i>'), (\n '{{fem|heliostático}}', 'feminino de <b>heliostático</b>'), (\n '{{fl|la|occŭlo}}', 'occŭlo'), ('{{l|pt|usar|usar}}', 'usar'), (\n '{{l.o.|jurídico|jurídica}}', 'jurídica'), ('{{l.s.|uso}}', 'uso'), (\n '{{link preto|ciconiforme}}', 'ciconiforme'), ('{{ll|publicar}}',\n 'publicar'), ('{{m|ar|شيشة|tr=šīša}}', '<i>masculino</i>'), (\n '{{mq|palavra}}', 'o mesmo que <b>palavra</b>'), ('{{mq|word|en}}',\n 'o mesmo que <i>word</i>'), ('{{PE|cu}}',\n 'cu <sup>(português de Portugal)</sup>'), ('{{r|la|basium|basĭum}}',\n 'basĭum'), ('{{r.l|la|utor|ūtor}}', 'ūtor'), ('{{varort|tenu-|pt}}',\n 'variante ortográfica de <b>tenu-</b>')]"], {}), "('wikicode, expected', [('{{AFI|/k/|pt}}', '/k/'), (\n '{{barra de cor|yellow|#FFFF00}}', '[RGB #FFFF00]'), (\n '{{escopo2|Informática}}', '<i>(Informática)</i>'), (\n '{{escopo2|Brasil|governo}}', '<i>(Brasil)</i>'), (\n '{{escopoCat|Árvore|pt}}', '<i>(Botânica)</i>'), (\n '{{escopoCat|Náutica|pt}}', '<i>(Náutica)</i>'), (\n '{{escopoCatLang|Alimentação|pt}}', '<i>(Culinária)</i>'), (\n '{{escopoCatLang|Verbo auxiliar|pt}}', '<i>(Verbo auxiliar)</i>'), (\n '{{escopoUso|Portugal|pt}}', '<i>(Portugal)</i>'), (\n '{{escopoUso|Coloquialismo|pt}}', '<i>(coloquialismo)</i>'), (\n '{{fem|heliostático}}', 'feminino de <b>heliostático</b>'), (\n '{{fl|la|occŭlo}}', 'occŭlo'), ('{{l|pt|usar|usar}}', 'usar'), (\n '{{l.o.|jurídico|jurídica}}', 'jurídica'), ('{{l.s.|uso}}', 'uso'), (\n 
'{{link preto|ciconiforme}}', 'ciconiforme'), ('{{ll|publicar}}',\n 'publicar'), ('{{m|ar|شيشة|tr=šīša}}', '<i>masculino</i>'), (\n '{{mq|palavra}}', 'o mesmo que <b>palavra</b>'), ('{{mq|word|en}}',\n 'o mesmo que <i>word</i>'), ('{{PE|cu}}',\n 'cu <sup>(português de Portugal)</sup>'), ('{{r|la|basium|basĭum}}',\n 'basĭum'), ('{{r.l|la|utor|ūtor}}', 'ūtor'), ('{{varort|tenu-|pt}}',\n 'variante ortográfica de <b>tenu-</b>')])\n", (4802, 6054), False, 'import pytest\n'), ((4559, 4599), 'wikidict.render.parse_word', 'parse_word', (['word', 'code', '"""pt"""'], {'force': '(True)'}), "(word, code, 'pt', force=True)\n", (4569, 4599), False, 'from wikidict.render import parse_word\n'), ((6269, 6309), 'wikidict.utils.process_templates', 'process_templates', (['"""foo"""', 'wikicode', '"""pt"""'], {}), "('foo', wikicode, 'pt')\n", (6286, 6309), False, 'from wikidict.utils import process_templates\n')] |
"""
These tests require an AWS account to be set up, but don't require any manual
intervention beyond some initial setup. Also, these tests create instances (which cost
money!). Either `meadowrun-manage install` needs to be set up, or `meadowrun-manage
clean` needs to be run periodically
"""
import asyncio
import datetime
import io
import pprint
import threading
import uuid
import boto3
import fabric
import pytest
import meadowrun.aws_integration.management_lambdas.adjust_ec2_instances as adjust_ec2_instances # noqa: E501
from basics import BasicsSuite, HostProvider, ErrorsSuite, MapSuite
from instance_registrar_suite import (
InstanceRegistrarProvider,
InstanceRegistrarSuite,
TERMINATE_INSTANCES_IF_IDLE_FOR_TEST,
)
from meadowrun.aws_integration.aws_core import _get_default_region_name
from meadowrun.aws_integration.ec2_instance_allocation import EC2InstanceRegistrar
from meadowrun.aws_integration.ec2_pricing import _get_ec2_instance_types
from meadowrun.aws_integration.ec2_ssh_keys import ensure_meadowrun_key_pair
from meadowrun.aws_integration.grid_tasks_sqs import (
_add_tasks,
_complete_task,
_create_queues_for_job,
_get_task,
get_results,
worker_loop,
)
from meadowrun.instance_allocation import InstanceRegistrar
from meadowrun.instance_selection import choose_instance_types_for_job, Resources
from meadowrun.meadowrun_pb2 import ProcessState
from meadowrun.run_job import AllocCloudInstance
from meadowrun.run_job_core import Host, JobCompletion, CloudProviderType
# TODO don't always run tests in us-east-2
# AWS region used by every test in this module (instances, queues, pricing).
REGION = "us-east-2"
class AwsHostProvider(HostProvider):
    """Supplies EC2-backed hosts for the shared test suites."""

    def get_host(self) -> Host:
        """Allocate a small cloud instance (1 CPU, 2 GB memory, 80 GB disk) on EC2."""
        return AllocCloudInstance(1, 2, 80, "EC2", REGION)

    def get_test_repo_url(self) -> str:
        """Return the URL of the repository used by tests that need remote code."""
        return "https://github.com/meadowdata/test_repo"

    async def get_log_file_text(self, job_completion: JobCompletion) -> str:
        """Fetch the remote job's log file over SSH and return its contents."""
        connection = fabric.Connection(
            job_completion.public_address,
            user="ubuntu",
            connect_kwargs={"pkey": ensure_meadowrun_key_pair(REGION)},
        )
        with connection as conn, io.BytesIO() as buffer:
            # download the log into an in-memory buffer rather than a temp file
            conn.get(job_completion.log_file_name, buffer)
            return buffer.getvalue().decode("utf-8")
class TestBasicsAws(AwsHostProvider, BasicsSuite):
    """Run the shared basics test suite against EC2-backed hosts; all tests are inherited."""
    pass
class TestErrorsAws(AwsHostProvider, ErrorsSuite):
    """Run the shared error-handling test suite against EC2-backed hosts; all tests are inherited."""
    pass
class TestMapAws(MapSuite):
    """Run the shared map (grid job) test suite on EC2."""

    def cloud_provider(self) -> CloudProviderType:
        # Tells MapSuite which cloud backend to exercise.
        return "EC2"
class EC2InstanceRegistrarProvider(InstanceRegistrarProvider[InstanceRegistrar]):
    """InstanceRegistrarProvider implementation backed by EC2."""

    async def get_instance_registrar(self) -> InstanceRegistrar:
        """Create (if necessary) and return the EC2 instance registrar."""
        region_name = await _get_default_region_name()
        return EC2InstanceRegistrar(region_name, "create")

    async def deregister_instance(
        self,
        instance_registrar: InstanceRegistrar,
        public_address: str,
        require_no_running_jobs: bool,
    ) -> bool:
        """Deregister the instance at public_address via the management lambda code."""
        region_name = instance_registrar.get_region_name()
        return adjust_ec2_instances._deregister_ec2_instance(
            public_address, require_no_running_jobs, region_name
        )

    async def num_currently_running_instances(
        self, instance_registrar: InstanceRegistrar
    ) -> int:
        """Count the EC2 instances currently in the running state."""
        ec2 = boto3.resource(
            "ec2", region_name=instance_registrar.get_region_name()
        )
        running = adjust_ec2_instances._get_running_instances(ec2)
        return len(list(running))

    async def run_adjust(self, instance_registrar: InstanceRegistrar) -> None:
        """Run the idle-instance cleanup using the test's aggressive idle timeout."""
        adjust_ec2_instances._deregister_and_terminate_instances(
            instance_registrar.get_region_name(),
            TERMINATE_INSTANCES_IF_IDLE_FOR_TEST,
            datetime.timedelta.min,
        )

    async def terminate_all_instances(
        self, instance_registrar: InstanceRegistrar
    ) -> None:
        """Terminate every meadowrun-managed instance in the registrar's region."""
        region_name = instance_registrar.get_region_name()
        adjust_ec2_instances.terminate_all_instances(region_name)

    def cloud_provider(self) -> CloudProviderType:
        """Identify EC2 as the cloud backend under test."""
        return "EC2"
class TestEC2InstanceRegistrar(EC2InstanceRegistrarProvider, InstanceRegistrarSuite):
    """Run the shared instance registrar test suite against the EC2 registrar; all tests are inherited."""
    pass
@pytest.mark.asyncio
async def test_get_ec2_instance_types():
    """Sanity-check EC2 instance-type retrieval and instance selection.

    This function makes a lot of assumptions about the format of the data we
    get from various AWS endpoints, so it's good to check that everything
    works. Look for unexpected warnings!
    """
    instance_types = await _get_ec2_instance_types(REGION)
    # the actual number of instance types will fluctuate based on AWS' whims.
    assert len(instance_types) > 600
    chosen_instance_types = choose_instance_types_for_job(
        Resources(5, 3, {}), 52, 10, instance_types
    )
    # accumulate totals across the chosen types in a single pass
    total_cpu = 0
    total_memory_gb = 0
    for chosen in chosen_instance_types:
        total_cpu += chosen.instance_type.logical_cpu * chosen.num_instances
        total_memory_gb += chosen.instance_type.memory_gb * chosen.num_instances
    # enough capacity for 52 workers needing 3 CPUs / 5 GB each
    assert total_cpu >= 3 * 52
    assert total_memory_gb >= 5 * 52
    # every chosen type must respect the interruption probability cap of 10
    assert all(
        chosen.instance_type.interruption_probability <= 10
        for chosen in chosen_instance_types
    )
    pprint.pprint(chosen_instance_types)
    # an impossible request (24000 GB memory / 1000 CPUs per worker) selects nothing
    chosen_instance_types = choose_instance_types_for_job(
        Resources(24000, 1000, {}), 1, 10, instance_types
    )
    assert len(chosen_instance_types) == 0
class TestGridTaskQueue:
    """Tests for the SQS-based grid task queue helpers.

    These tests create real SQS queues (a request queue and a result queue per
    dummy job) but never launch EC2 instances; tasks are completed in-process
    or on a local worker thread.
    """
    def test_grid_task_queue(self) -> None:
        """
        Tests the grid_task_queue functions without actually running any tasks. Uses SQS
        resources.
        """
        region_name = asyncio.run(_get_default_region_name())
        task_arguments = ["hello", ("hey", "there"), {"a": 1}]
        # dummy variables
        job_id = str(uuid.uuid4())
        public_address = "foo"
        worker_id = 1
        request_queue_url, result_queue_url = asyncio.run(
            _create_queues_for_job(job_id, region_name)
        )
        # get results in a different thread as we're adding/completing tasks
        results = None
        def get_results_thread():
            nonlocal results
            results = asyncio.run(
                get_results(result_queue_url, region_name, len(task_arguments), 1)
            )
        results_thread = threading.Thread(target=get_results_thread)
        results_thread.start()
        # add some tasks
        asyncio.run(_add_tasks(request_queue_url, region_name, task_arguments))
        # get some tasks and complete them
        task1 = _get_task(
            request_queue_url,
            result_queue_url,
            region_name,
            0,
            public_address,
            worker_id,
        )
        assert task1 is not None
        task2 = _get_task(
            request_queue_url,
            result_queue_url,
            region_name,
            0,
            public_address,
            worker_id,
        )
        assert task2 is not None
        # complete task1 by echoing its pickled arguments back as a successful result
        _complete_task(
            result_queue_url,
            region_name,
            task1,
            ProcessState(
                state=ProcessState.ProcessStateEnum.SUCCEEDED,
                pickled_result=task1.pickled_function_arguments,
            ),
            public_address,
            worker_id,
        )
        task3 = _get_task(
            request_queue_url,
            result_queue_url,
            region_name,
            0,
            public_address,
            worker_id,
        )
        assert task3 is not None
        # there should be no more tasks to get
        assert (
            _get_task(
                request_queue_url,
                result_queue_url,
                region_name,
                0,
                public_address,
                worker_id,
            )
            is None
        )
        _complete_task(
            result_queue_url,
            region_name,
            task2,
            ProcessState(
                state=ProcessState.ProcessStateEnum.SUCCEEDED,
                pickled_result=task2.pickled_function_arguments,
            ),
            public_address,
            worker_id,
        )
        _complete_task(
            result_queue_url,
            region_name,
            task3,
            ProcessState(
                state=ProcessState.ProcessStateEnum.SUCCEEDED,
                pickled_result=task3.pickled_function_arguments,
            ),
            public_address,
            worker_id,
        )
        results_thread.join()
        # each result echoed its own arguments, so we get the inputs back in order
        assert results == task_arguments
    def test_worker_loop(self) -> None:
        """Run a real worker_loop on a local thread and check it computes x**x per task."""
        region_name = asyncio.run(_get_default_region_name())
        task_arguments = [1, 2, 3, 4]
        # dummy variables
        job_id = str(uuid.uuid4())
        public_address = "foo"
        worker_id = 1
        request_queue_url, result_queue_url = asyncio.run(
            _create_queues_for_job(job_id, region_name)
        )
        # get results on another thread
        results = None
        def get_results_thread():
            nonlocal results
            results = asyncio.run(
                get_results(result_queue_url, region_name, len(task_arguments), 1)
            )
        results_thread = threading.Thread(target=get_results_thread)
        results_thread.start()
        # add tasks
        asyncio.run(_add_tasks(request_queue_url, region_name, task_arguments))
        # start a worker_loop which will get tasks and complete them
        worker_thread = threading.Thread(
            target=lambda: worker_loop(
                lambda x: x**x,
                request_queue_url,
                result_queue_url,
                region_name,
                public_address,
                worker_id,
            )
        )
        worker_thread.start()
        results_thread.join()
        worker_thread.join()
        # 1**1, 2**2, 3**3, 4**4
        assert results == [1, 4, 27, 256]
| [
"meadowrun.aws_integration.ec2_pricing._get_ec2_instance_types",
"meadowrun.meadowrun_pb2.ProcessState",
"meadowrun.aws_integration.grid_tasks_sqs.worker_loop",
"meadowrun.aws_integration.ec2_ssh_keys.ensure_meadowrun_key_pair",
"meadowrun.aws_integration.management_lambdas.adjust_ec2_instances._get_running... | [((5127, 5163), 'pprint.pprint', 'pprint.pprint', (['chosen_instance_types'], {}), '(chosen_instance_types)\n', (5140, 5163), False, 'import pprint\n'), ((1683, 1726), 'meadowrun.run_job.AllocCloudInstance', 'AllocCloudInstance', (['(1)', '(2)', '(80)', '"""EC2"""', 'REGION'], {}), "(1, 2, 80, 'EC2', REGION)\n", (1701, 1726), False, 'from meadowrun.run_job import AllocCloudInstance\n'), ((4334, 4365), 'meadowrun.aws_integration.ec2_pricing._get_ec2_instance_types', '_get_ec2_instance_types', (['REGION'], {}), '(REGION)\n', (4357, 4365), False, 'from meadowrun.aws_integration.ec2_pricing import _get_ec2_instance_types\n'), ((4549, 4568), 'meadowrun.instance_selection.Resources', 'Resources', (['(5)', '(3)', '{}'], {}), '(5, 3, {})\n', (4558, 4568), False, 'from meadowrun.instance_selection import choose_instance_types_for_job, Resources\n'), ((5232, 5258), 'meadowrun.instance_selection.Resources', 'Resources', (['(24000)', '(1000)', '{}'], {}), '(24000, 1000, {})\n', (5241, 5258), False, 'from meadowrun.instance_selection import choose_instance_types_for_job, Resources\n'), ((6215, 6258), 'threading.Thread', 'threading.Thread', ([], {'target': 'get_results_thread'}), '(target=get_results_thread)\n', (6231, 6258), False, 'import threading\n'), ((6456, 6549), 'meadowrun.aws_integration.grid_tasks_sqs._get_task', '_get_task', (['request_queue_url', 'result_queue_url', 'region_name', '(0)', 'public_address', 'worker_id'], {}), '(request_queue_url, result_queue_url, region_name, 0,\n public_address, worker_id)\n', (6465, 6549), False, 'from meadowrun.aws_integration.grid_tasks_sqs import _add_tasks, _complete_task, _create_queues_for_job, _get_task, get_results, worker_loop\n'), ((6678, 6771), 'meadowrun.aws_integration.grid_tasks_sqs._get_task', '_get_task', (['request_queue_url', 'result_queue_url', 'region_name', '(0)', 'public_address', 'worker_id'], {}), '(request_queue_url, 
result_queue_url, region_name, 0,\n public_address, worker_id)\n', (6687, 6771), False, 'from meadowrun.aws_integration.grid_tasks_sqs import _add_tasks, _complete_task, _create_queues_for_job, _get_task, get_results, worker_loop\n'), ((7228, 7321), 'meadowrun.aws_integration.grid_tasks_sqs._get_task', '_get_task', (['request_queue_url', 'result_queue_url', 'region_name', '(0)', 'public_address', 'worker_id'], {}), '(request_queue_url, result_queue_url, region_name, 0,\n public_address, worker_id)\n', (7237, 7321), False, 'from meadowrun.aws_integration.grid_tasks_sqs import _add_tasks, _complete_task, _create_queues_for_job, _get_task, get_results, worker_loop\n'), ((9129, 9172), 'threading.Thread', 'threading.Thread', ([], {'target': 'get_results_thread'}), '(target=get_results_thread)\n', (9145, 9172), False, 'import threading\n'), ((5560, 5586), 'meadowrun.aws_integration.aws_core._get_default_region_name', '_get_default_region_name', ([], {}), '()\n', (5584, 5586), False, 'from meadowrun.aws_integration.aws_core import _get_default_region_name\n'), ((5699, 5711), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5709, 5711), False, 'import uuid\n'), ((5838, 5881), 'meadowrun.aws_integration.grid_tasks_sqs._create_queues_for_job', '_create_queues_for_job', (['job_id', 'region_name'], {}), '(job_id, region_name)\n', (5860, 5881), False, 'from meadowrun.aws_integration.grid_tasks_sqs import _add_tasks, _complete_task, _create_queues_for_job, _get_task, get_results, worker_loop\n'), ((6336, 6394), 'meadowrun.aws_integration.grid_tasks_sqs._add_tasks', '_add_tasks', (['request_queue_url', 'region_name', 'task_arguments'], {}), '(request_queue_url, region_name, task_arguments)\n', (6346, 6394), False, 'from meadowrun.aws_integration.grid_tasks_sqs import _add_tasks, _complete_task, _create_queues_for_job, _get_task, get_results, worker_loop\n'), ((6994, 7107), 'meadowrun.meadowrun_pb2.ProcessState', 'ProcessState', ([], {'state': 
'ProcessState.ProcessStateEnum.SUCCEEDED', 'pickled_result': 'task1.pickled_function_arguments'}), '(state=ProcessState.ProcessStateEnum.SUCCEEDED, pickled_result=\n task1.pickled_function_arguments)\n', (7006, 7107), False, 'from meadowrun.meadowrun_pb2 import ProcessState\n'), ((7510, 7603), 'meadowrun.aws_integration.grid_tasks_sqs._get_task', '_get_task', (['request_queue_url', 'result_queue_url', 'region_name', '(0)', 'public_address', 'worker_id'], {}), '(request_queue_url, result_queue_url, region_name, 0,\n public_address, worker_id)\n', (7519, 7603), False, 'from meadowrun.aws_integration.grid_tasks_sqs import _add_tasks, _complete_task, _create_queues_for_job, _get_task, get_results, worker_loop\n'), ((7851, 7964), 'meadowrun.meadowrun_pb2.ProcessState', 'ProcessState', ([], {'state': 'ProcessState.ProcessStateEnum.SUCCEEDED', 'pickled_result': 'task2.pickled_function_arguments'}), '(state=ProcessState.ProcessStateEnum.SUCCEEDED, pickled_result=\n task2.pickled_function_arguments)\n', (7863, 7964), False, 'from meadowrun.meadowrun_pb2 import ProcessState\n'), ((8179, 8292), 'meadowrun.meadowrun_pb2.ProcessState', 'ProcessState', ([], {'state': 'ProcessState.ProcessStateEnum.SUCCEEDED', 'pickled_result': 'task3.pickled_function_arguments'}), '(state=ProcessState.ProcessStateEnum.SUCCEEDED, pickled_result=\n task3.pickled_function_arguments)\n', (8191, 8292), False, 'from meadowrun.meadowrun_pb2 import ProcessState\n'), ((8536, 8562), 'meadowrun.aws_integration.aws_core._get_default_region_name', '_get_default_region_name', ([], {}), '()\n', (8560, 8562), False, 'from meadowrun.aws_integration.aws_core import _get_default_region_name\n'), ((8650, 8662), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (8660, 8662), False, 'import uuid\n'), ((8789, 8832), 'meadowrun.aws_integration.grid_tasks_sqs._create_queues_for_job', '_create_queues_for_job', (['job_id', 'region_name'], {}), '(job_id, region_name)\n', (8811, 8832), False, 'from 
meadowrun.aws_integration.grid_tasks_sqs import _add_tasks, _complete_task, _create_queues_for_job, _get_task, get_results, worker_loop\n'), ((9245, 9303), 'meadowrun.aws_integration.grid_tasks_sqs._add_tasks', '_add_tasks', (['request_queue_url', 'region_name', 'task_arguments'], {}), '(request_queue_url, region_name, task_arguments)\n', (9255, 9303), False, 'from meadowrun.aws_integration.grid_tasks_sqs import _add_tasks, _complete_task, _create_queues_for_job, _get_task, get_results, worker_loop\n'), ((2113, 2125), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (2123, 2125), False, 'import io\n'), ((2686, 2712), 'meadowrun.aws_integration.aws_core._get_default_region_name', '_get_default_region_name', ([], {}), '()\n', (2710, 2712), False, 'from meadowrun.aws_integration.aws_core import _get_default_region_name\n'), ((3321, 3369), 'meadowrun.aws_integration.management_lambdas.adjust_ec2_instances._get_running_instances', 'adjust_ec2_instances._get_running_instances', (['ec2'], {}), '(ec2)\n', (3364, 3369), True, 'import meadowrun.aws_integration.management_lambdas.adjust_ec2_instances as adjust_ec2_instances\n'), ((9444, 9554), 'meadowrun.aws_integration.grid_tasks_sqs.worker_loop', 'worker_loop', (['(lambda x: x ** x)', 'request_queue_url', 'result_queue_url', 'region_name', 'public_address', 'worker_id'], {}), '(lambda x: x ** x, request_queue_url, result_queue_url,\n region_name, public_address, worker_id)\n', (9455, 9554), False, 'from meadowrun.aws_integration.grid_tasks_sqs import _add_tasks, _complete_task, _create_queues_for_job, _get_task, get_results, worker_loop\n'), ((2041, 2074), 'meadowrun.aws_integration.ec2_ssh_keys.ensure_meadowrun_key_pair', 'ensure_meadowrun_key_pair', (['REGION'], {}), '(REGION)\n', (2066, 2074), False, 'from meadowrun.aws_integration.ec2_ssh_keys import ensure_meadowrun_key_pair\n')] |
# SPDX-FileCopyrightText: 2021 <NAME> <<EMAIL>>
#
# SPDX-License-Identifier: MIT
import os
import shutil
import subprocess
from termcolor import cprint
from trace_for_guess.skip import skip
def rescale_file(in_file, out_file, template_file, alg):
    """Regrid a NetCDF file using NCO (i.e. the ncremap command).

    Args:
        in_file: Path of input file.
        out_file: Output file path. It will not be overwritten.
        template_file: Path to a NetCDF file that has the desired grid
            resolution.
        alg: ESMF regrid algorithm. See here:
            http://www.earthsystemmodeling.org/esmf_releases/public/ESMF_6_3_0rp1/ESMF_refdoc/node3.html#SECTION03020000000000000000

    Returns:
        The output file (`out_file`).

    Raises:
        FileNotFoundError: If `in_file` or `template_file` doesn’t exist.
        RuntimeError: The `ncremap` command is not in the PATH.
        RuntimeError: The `ncremap` command failed or produced no output
            file.
    """
    if not os.path.isfile(in_file):
        raise FileNotFoundError("Input file doesn’t exist: '%s'" % in_file)
    if not os.path.isfile(template_file):
        raise FileNotFoundError("Template file doesn’t exist: '%s'" %
                                template_file)
    # Skip the (expensive) regridding if the output is already up to date.
    if skip([in_file, template_file], out_file):
        return out_file
    if shutil.which("ncremap") is None:
        raise RuntimeError("Executable `ncremap` not found.")
    cprint("Regridding '%s'..." % in_file, 'yellow')
    try:
        subprocess.run(["ncremap",
                        "--algorithm=%s" % alg,
                        "--template_file=%s" % template_file,
                        "--input_file=%s" % in_file,
                        "--output_file=%s" % out_file], check=True)
    except Exception:
        # Don't leave a partially-written output file behind on failure.
        if os.path.isfile(out_file):
            cprint(f"Removing file '{out_file}'.", 'red')
            os.remove(out_file)
        raise
    # ncremap can exit successfully without creating output; treat that as failure.
    if not os.path.isfile(out_file):
        raise RuntimeError("Regridding with `ncremap` failed: No output file "
                           "created.")
    cprint(f"Successfully created '{out_file}'.", 'green')
    return out_file
| [
"trace_for_guess.skip.skip",
"subprocess.run",
"shutil.which",
"os.path.isfile",
"termcolor.cprint",
"os.remove"
] | [((1280, 1320), 'trace_for_guess.skip.skip', 'skip', (['[in_file, template_file]', 'out_file'], {}), '([in_file, template_file], out_file)\n', (1284, 1320), False, 'from trace_for_guess.skip import skip\n'), ((1452, 1500), 'termcolor.cprint', 'cprint', (['("Regridding \'%s\'..." % in_file)', '"""yellow"""'], {}), '("Regridding \'%s\'..." % in_file, \'yellow\')\n', (1458, 1500), False, 'from termcolor import cprint\n'), ((2098, 2152), 'termcolor.cprint', 'cprint', (['f"""Successfully created \'{out_file}\'."""', '"""green"""'], {}), '(f"Successfully created \'{out_file}\'.", \'green\')\n', (2104, 2152), False, 'from termcolor import cprint\n'), ((1013, 1036), 'os.path.isfile', 'os.path.isfile', (['in_file'], {}), '(in_file)\n', (1027, 1036), False, 'import os\n'), ((1125, 1154), 'os.path.isfile', 'os.path.isfile', (['template_file'], {}), '(template_file)\n', (1139, 1154), False, 'import os\n'), ((1353, 1376), 'shutil.which', 'shutil.which', (['"""ncremap"""'], {}), "('ncremap')\n", (1365, 1376), False, 'import shutil\n'), ((1518, 1687), 'subprocess.run', 'subprocess.run', (["['ncremap', '--algorithm=%s' % alg, '--template_file=%s' % template_file, \n '--input_file=%s' % in_file, '--output_file=%s' % out_file]"], {'check': '(True)'}), "(['ncremap', '--algorithm=%s' % alg, '--template_file=%s' %\n template_file, '--input_file=%s' % in_file, '--output_file=%s' %\n out_file], check=True)\n", (1532, 1687), False, 'import subprocess\n'), ((1950, 1974), 'os.path.isfile', 'os.path.isfile', (['out_file'], {}), '(out_file)\n', (1964, 1974), False, 'import os\n'), ((1809, 1833), 'os.path.isfile', 'os.path.isfile', (['out_file'], {}), '(out_file)\n', (1823, 1833), False, 'import os\n'), ((1847, 1892), 'termcolor.cprint', 'cprint', (['f"""Removing file \'{out_file}\'."""', '"""red"""'], {}), '(f"Removing file \'{out_file}\'.", \'red\')\n', (1853, 1892), False, 'from termcolor import cprint\n'), ((1905, 1924), 'os.remove', 'os.remove', (['out_file'], {}), '(out_file)\n', 
(1914, 1924), False, 'import os\n')] |
from typing import Union
import yaml
class ConfigReader:
    """Reads ``config.yml`` once and exposes nested keys as attributes.

    Attribute access uses ``_`` as a path separator: ``reader.db_host``
    looks up ``data["db"]["host"]``. Note this means keys that themselves
    contain underscores cannot be reached via attribute access.
    """

    def __init__(self):
        # Load the whole YAML config a single time at construction.
        with open("config.yml", "r") as f:
            self.data = yaml.safe_load(f)

    def __getattr__(self, __name: str):
        """Resolve ``a_b_c`` to ``self.data["a"]["b"]["c"]``.

        Raises:
            AttributeError: if any path component is missing. Raising
                AttributeError (instead of a bare Exception) keeps
                ``hasattr``, ``getattr`` with a default, copying and
                pickling working correctly.
        """
        node = self.data
        try:
            for part in __name.split("_"):
                node = node[part]
        except KeyError:
            raise AttributeError("Can't find object") from None
        return node
class TextReader:
    """Reads ``text.yml`` once and exposes nested keys via attributes or ``find``.

    Both access styles use ``_`` as a path separator: ``reader.menu_title``
    and ``reader.find("menu_title")`` look up ``data["menu"]["title"]``.
    """

    def __init__(self):
        # Load the whole YAML text file a single time at construction.
        with open("text.yml", "r") as f:
            self.data = yaml.safe_load(f)

    def _lookup(self, path: str) -> Union[str, list]:
        # Walk the nested mapping following "_"-separated path components;
        # raises KeyError on the first missing component.
        node = self.data
        for part in path.split("_"):
            node = node[part]
        return node

    def __getattr__(self, __name: str):
        """Resolve ``a_b`` to ``self.data["a"]["b"]``.

        Raises:
            AttributeError: if the path can't be resolved. Raising
                AttributeError (instead of a bare Exception) keeps
                ``hasattr``, ``getattr`` with a default, copying and
                pickling working correctly.
        """
        try:
            return self._lookup(__name)
        except KeyError:
            raise AttributeError("Can't find object") from None

    def find(self, string: str) -> Union[str, list]:
        """Look up a ``_``-separated key path in the loaded data.

        Raises:
            Exception: if the path can't be resolved (kept generic for
                backward compatibility with existing callers).
        """
        try:
            return self._lookup(string)
        except KeyError:
            raise Exception("Can't find object") from None
| [
"yaml.safe_load"
] | [((146, 163), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (160, 163), False, 'import yaml\n'), ((556, 573), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (570, 573), False, 'import yaml\n')] |