| id (string, 3–8 chars) | content (string, 100–981k chars) |
|---|---|
11562591
|
import unittest
from pyvalidator.is_ethereum_address import is_ethereum_address
from . import print_test_ok
class TestIsEthereumAddress(unittest.TestCase):
def test_valid_ethereum_address(self):
for i in [
'0x0000000000000000000000000000000000000001',
'0x683E07492fBDfDA84457C16546ac3f433BFaa128',
'0x88dA6B6a8D3590e88E0FcadD5CEC56A7C9478319',
'0x8a718a84ee7B1621E63E680371e0C03C417cCaF6',
'<KEY>',
]:
self.assertTrue(is_ethereum_address(i))
print_test_ok()
def test_invalid_ethereum_address(self):
for i in [
'0xGHIJK05pwm37asdf5555QWERZCXV2345AoEuIdHt',
'<KEY>',
'<KEY>',
'0b0110100001100101011011000110110001101111',
'<KEY>',
'1C6o5CDkLxjsVpnLSuqRs1UBFozXLEwYvU',
]:
self.assertFalse(is_ethereum_address(i))
print_test_ok()
|
11562635
|
import io
import numpy as np
from PIL import Image
from ppadb.client import Client
from src.data.constants import SCREENSHOT_WIDTH, SCREENSHOT_HEIGHT
class Screen:
def __init__(self):
self.client = Client(host='127.0.0.1', port=5037)
self.device = self.client.device('emulator-5554')
def take_screenshot(self):
"""
Take a screenshot of the emulator
"""
screenshot = self.device.screencap()
screenshot = io.BytesIO(screenshot)
screenshot = Image.open(screenshot).convert('RGB')
screenshot = screenshot.resize((SCREENSHOT_WIDTH, SCREENSHOT_HEIGHT), Image.BILINEAR)
return screenshot
def click(self, x, y):
"""
Click at the given (x, y) coordinate
"""
self.device.input_tap(x, y)
def main():
cls = Screen()
screenshot = cls.take_screenshot()
print(np.array(screenshot).shape)
screenshot.save('screen.jpg')
if __name__ == '__main__':
main()
|
11562651
|
from datetime import date, datetime
from typing import Union
def convert_to_datetime(d: Union[datetime, date]) -> datetime:
if isinstance(d, datetime):
return d
return datetime.combine(d, datetime.min.time())
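# A minimal usage sketch: a plain date is promoted to midnight, while a
# datetime passes through unchanged.
if __name__ == '__main__':
    print(convert_to_datetime(date(2021, 1, 1)))              # 2021-01-01 00:00:00
    print(convert_to_datetime(datetime(2021, 1, 1, 12, 30)))  # 2021-01-01 12:30:00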
|
11562663
|
import glob
import importlib
import os
import Utils
__all__ = ['cases']
cases = {}
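# Auto-discovery: import every non-underscore module in this package and
# register each Utils.TestRun subclass in `cases` as name -> (class, docstring).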
for __file in glob.glob(os.path.join(os.path.dirname(__file__), '*.py')):
__name = os.path.basename(__file)
if not __name.startswith('_'):
__module = importlib.import_module(__name__ + '.' + __name[:-3])
__module_desc = __module.__doc__.strip() if __module.__doc__ else ''
for name in dir(__module):
obj = getattr(__module, name)
if isinstance(obj, type) and issubclass(obj, Utils.TestRun) and obj != Utils.TestRun:
cases[name] = (obj, obj.__doc__.strip() if obj.__doc__ else '')
|
11562720
|
import numpy as np
import sophus as sp
import matplotlib.pyplot as plt
DEFAULT_AXIS_LENGTH = 0.1
class SceneViz:
def __init__(self):
self.fig = plt.figure()
self.ax = plt.axes(projection="3d")
self.max = np.zeros(3)
self.min = np.zeros(3)
def _update_limits(self, x):
self.max = np.max([self.max, x], axis=0)
self.min = np.min([self.min, x], axis=0)
def _draw_lines(self, starts, ends, color):
for s, e in zip(starts, ends):
self._update_limits(s)
self._update_limits(e)
self.ax.plot([s[0], e[0]], [s[1], e[1]], [s[2], e[2]], color=color)
def draw_axes(self, pose, length=DEFAULT_AXIS_LENGTH):
o_0 = length * np.array([0, 0, 0])
x_0 = length * np.array([1, 0, 0])
y_0 = length * np.array([0, 1, 0])
z_0 = length * np.array([0, 0, 1])
o = pose * o_0
x = pose * x_0
y = pose * y_0
z = pose * z_0
self._draw_lines([o], [x], color="r")
self._draw_lines([o], [y], color="g")
self._draw_lines([o], [z], color="b")
def draw_camera(self, pose, size, color="grey", axes=True):
# Draw a pyramid representing a camera
b0_0 = size * np.array([0.5, 0.5, 0])
b1_0 = size * np.array([0.5, -0.5, 0])
b2_0 = size * np.array([-0.5, -0.5, 0])
b3_0 = size * np.array([-0.5, 0.5, 0])
t_0 = size * np.array([0, 0, -1])
b0 = pose * b0_0
b1 = pose * b1_0
b2 = pose * b2_0
b3 = pose * b3_0
t = pose * t_0
starts = [b0, b1, b2, b3, b0, b1, b2, b3]
ends = [b1, b2, b3, b0, t, t, t, t]
self._draw_lines(starts, ends, color)
# Draw camera axes
if axes:
self.draw_axes(pose, length=size / 2.0)
def draw_marker(self, pose, id, length, color="k", show_id=False):
# Draw marker outline
c0_0 = 0.5 * length * np.array([1, 1, 0])
c1_0 = 0.5 * length * np.array([-1, 1, 0])
c2_0 = 0.5 * length * np.array([-1, -1, 0])
c3_0 = 0.5 * length * np.array([1, -1, 0])
c0 = pose * c0_0
c1 = pose * c1_0
c2 = pose * c2_0
c3 = pose * c3_0
starts = [c0, c1, c2, c3]
ends = [c1, c2, c3, c0]
self._draw_lines(starts, ends, color)
# Draw marker ID
if show_id:
pos = pose.translation()
self.ax.text(pos[0], pos[1], pos[2], id, color="b")
def show(self):
# Set limits
mid = (self.max + self.min) / 2.0
r = max(np.max(self.max - mid), np.max(mid - self.min))
self.ax.set_xlim(mid[0] - r, mid[0] + r)
self.ax.set_ylim(mid[1] - r, mid[1] + r)
self.ax.set_zlim(mid[2] - r, mid[2] + r)
# Show
plt.show()
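# A hedged usage sketch, assuming `sophus.SE3()` constructs an identity pose
# that supports `pose * point` as the drawing methods above expect:
if __name__ == "__main__":
    viz = SceneViz()
    pose = sp.SE3()  # identity pose (assumed sophus API)
    viz.draw_camera(pose, size=0.2)
    viz.draw_marker(pose, id=0, length=0.1, show_id=True)
    viz.show()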
|
11562723
|
import json
from django.conf import settings
from django.contrib.auth.models import Permission
from django.core.exceptions import PermissionDenied
from django.http import JsonResponse
from django.shortcuts import redirect
from django.urls import path, include, reverse
from django.utils.html import format_html, escapejs
from django.utils.translation import gettext as _, gettext_lazy as __
from django.views.i18n import JavaScriptCatalog
from wagtail.admin.action_menu import ActionMenuItem
from wagtail.admin.menu import MenuItem
from wagtail.admin.staticfiles import versioned_static
from wagtail.core import hooks
from . import views
from .compat import DATE_FORMAT
from .models import AbTest
from .utils import request_is_trackable
@hooks.register("register_admin_urls")
def register_admin_urls():
urls = [
path('jsi18n/', JavaScriptCatalog.as_view(packages=['wagtail_ab_testing']), name='javascript_catalog'),
path('add/<int:page_id>/compare/', views.add_compare, name='add_ab_test_compare'),
path('<int:page_id>/compare-draft/', views.compare_draft, name='compare_draft'),
path('add/<int:page_id>/', views.add_form, name='add_ab_test_form'),
path('report/', views.AbTestingReportView.as_view(), name='report'),
path('results/<int:page_id>/<int:ab_test_id>/', views.results, name='results'),
]
return [
path(
"abtests/",
include(
(urls, "wagtail_ab_testing_admin"),
namespace="wagtail_ab_testing_admin",
),
)
]
class CreateAbTestActionMenuItem(ActionMenuItem):
name = 'create-ab-test'
label = __("Save and create A/B Test")
icon_name = 'people-arrows'
def is_shown(self, request, context):
if context['view'] != 'edit':
return False
# User must have permission to add A/B tests
if not request.user.has_perm('wagtail_ab_testing.add_abtest'):
return False
return True
@hooks.register('register_page_action_menu_item')
def register_create_abtest_action_menu_item():
return CreateAbTestActionMenuItem(order=100)
# This is the only way to inject custom JS into the editor with knowledge of the page being edited
class AbTestingTabActionMenuItem(ActionMenuItem):
def render_html(self, request, context):
if 'page' in context:
return format_html(
'<script src="{}"></script><script src="{}"></script><script>window.abTestingTabProps = JSON.parse("{}");</script>',
reverse('wagtail_ab_testing_admin:javascript_catalog'),
versioned_static('wagtail_ab_testing/js/wagtail-ab-testing.js'),
escapejs(json.dumps({
'tests': [
{
'id': ab_test.id,
'name': ab_test.name,
'started_at': ab_test.first_started_at.strftime(DATE_FORMAT) if ab_test.first_started_at else _("Not started"),
'status': ab_test.get_status_description(),
'results_url': reverse('wagtail_ab_testing_admin:results', args=[ab_test.page_id, ab_test.id]),
}
for ab_test in AbTest.objects.filter(page=context['page']).order_by('-id')
],
'can_create_abtest': request.user.has_perm('wagtail_ab_testing.add_abtest'),
}))
)
return ''
@hooks.register('register_page_action_menu_item')
def register_ab_testing_tab_action_menu_item():
return AbTestingTabActionMenuItem()
@hooks.register('after_edit_page')
def redirect_to_create_ab_test(request, page):
if 'create-ab-test' in request.POST:
return redirect('wagtail_ab_testing_admin:add_ab_test_compare', page.id)
@hooks.register('before_edit_page')
def check_for_running_ab_test(request, page):
running_experiment = AbTest.objects.get_current_for_page(page=page)
if running_experiment:
return views.progress(request, page, running_experiment)
@hooks.register('before_serve_page')
def before_serve_page(page, request, serve_args, serve_kwargs):
# Check if the user is trackable
if not request_is_trackable(request):
return
# Check for a running A/B test on the requested page
try:
test = AbTest.objects.get(page=page, status=AbTest.STATUS_RUNNING)
except AbTest.DoesNotExist:
return
# Save reference to test on request object so it can be found by the {% wagtail_ab_testing_script %} template tag
request.wagtail_ab_testing_test = test
# If this request is coming from a frontend worker, return both the control and variant versions
# The worker will decide which version to serve to the user
if request.META.get('HTTP_X_REQUESTED_WITH') == 'WagtailAbTestingWorker':
if request.META.get('HTTP_AUTHORIZATION', '') != 'Token ' + settings.WAGTAIL_AB_TESTING_WORKER_TOKEN:
raise PermissionDenied
control_response = page.serve(request, *serve_args, **serve_kwargs)
# Note: we must render the control response before setting `wagtail_ab_testing_serving_variant`
if hasattr(control_response, "render"):
control_response.render()
request.wagtail_ab_testing_serving_variant = True
variant_response = test.variant_revision.as_page_object().serve(request, *serve_args, **serve_kwargs)
if hasattr(variant_response, "render"):
variant_response.render()
response = JsonResponse({
'control': control_response.content.decode('utf-8'),
'variant': variant_response.content.decode('utf-8'),
})
response['X-WagtailAbTesting-Test'] = str(test.id)
return response
# If the user visiting is a participant, show them the same version they saw before
if f'wagtail-ab-testing_{test.id}_version' in request.COOKIES:
version = request.COOKIES[f'wagtail-ab-testing_{test.id}_version']
else:
# Otherwise, show them the version of the page that the next participant should see.
# Note: In order to exclude bots, the browser must call a JavaScript API to sign up as a participant
# Once they've signed up, they'll get a cookie which keeps them on the same version
version = test.get_new_participant_version()
# If the user should be shown the variant, serve that from the revision. Otherwise return to keep the control
if version == AbTest.VERSION_VARIANT:
request.wagtail_ab_testing_serving_variant = True
return test.variant_revision.as_page_object().serve(request, *serve_args, **serve_kwargs)
class AbTestingReportMenuItem(MenuItem):
def is_shown(self, request):
return True
@hooks.register('register_reports_menu_item')
def register_ab_testing_report_menu_item():
return AbTestingReportMenuItem(_('A/B testing'), reverse('wagtail_ab_testing_admin:report'), icon_name='people-arrows', order=1000)
@hooks.register('register_icons')
def register_icons(icons):
icons.append('wagtail_ab_testing/icons/people-arrows.svg')
icons.append('wagtail_ab_testing/icons/crown.svg')
return icons
@hooks.register('register_permissions')
def register_add_abtest_permission():
return Permission.objects.filter(content_type__app_label='wagtail_ab_testing', codename='add_abtest')
|
11562757
|
import sys
sys.path.append('../')
from ImageProcessing.hog import face_center as hog_face_center
def face_center(filename, model):
return hog_face_center(filename, model)
|
11562782
|
from toee import *
import char_class_utils
###################################################
def GetConditionName():
return "Shadowdancer"
def GetCategory():
return "Core 3.5 Ed Prestige Classes"
def GetClassDefinitionFlags():
return CDF_CoreClass
def GetClassHelpTopic():
return "TAG_SHADOWDANCERS"
classEnum = stat_level_shadowdancer
###################################################
class_feats = {
1: (feat_armor_proficiency_light, feat_martial_weapon_proficiency_shortbow, feat_martial_weapon_proficiency_composite_shortbow, feat_martial_weapon_proficiency_rapier, feat_martial_weapon_proficiency_short_sword)
}
class_skills = (skill_alchemy, skill_balance, skill_bluff, skill_decipher_script, skill_diplomacy, skill_disguise, skill_escape_artist, skill_hide, skill_jump, skill_listen, skill_move_silently, skill_perform, skill_profession, skill_search, skill_pick_pocket, skill_spot, skill_tumble, skill_use_rope)
def IsEnabled():
return 0
def GetHitDieType():
return 8
def GetSkillPtsPerLevel():
return 6
def GetBabProgression():
return base_attack_bonus_semi_martial
def IsFortSaveFavored():
return 0
def IsRefSaveFavored():
return 1
def IsWillSaveFavored():
return 0
def GetSpellListType():
return spell_list_type_none
def IsClassSkill(skillEnum):
return char_class_utils.IsClassSkill(class_skills, skillEnum)
def IsClassFeat(featEnum):
return char_class_utils.IsClassFeat(class_feats, featEnum)
def GetClassFeats():
return class_feats
def IsAlignmentCompatible(alignment):
return 1
def ObjMeetsPrereqs(obj):
return 0 # WIP: the prerequisite checks below are disabled until this class is finished
if (obj.skill_ranks_get(skill_move_silently) < 8):
return 0
if (obj.skill_ranks_get(skill_hide) < 10):
return 0
if (obj.skill_ranks_get(skill_perform) < 5):
return 0
if (not obj.has_feat(feat_combat_reflexes)):
return 0
if (not obj.has_feat(feat_dodge)):
return 0
if (not obj.has_feat(feat_mobility)):
return 0
return 1
|
11562789
|
import tensorflow as tf
import numpy as np
import random
from tensorflow.contrib import rnn
from tensorflow.python.ops import rnn_cell_impl
from ops.ops import *
def highway(input_, dim, num_layer=2):
size = dim
for i in range(num_layer):
with tf.variable_scope("highway-%d" % i):
W_p = tf.get_variable("W_p", [size, size])
b_p = tf.get_variable("B_p", [1, size], initializer=tf.constant_initializer(0.0))
proj = tf.nn.relu(tf.matmul(input_, W_p) + b_p, "relu-proj")
W_t = tf.get_variable("W_t", [size, size])
b_t = tf.get_variable("B_t", [1, size], initializer=tf.constant_initializer(-2.0))
transform = tf.nn.sigmoid(tf.matmul(input_, W_t) + b_t, "sigmoid-transform")
input_ = tf.multiply(transform, proj) + tf.multiply(input_, 1 - transform)
return input_, size
def mlp(input_, dim):
n_hidden1 = int(dim*0.8)
n_hidden2 = int(n_hidden1*0.8)
n_out = int(n_hidden2*0.8)
with tf.variable_scope("mlp"):
h1 = tf.Variable(tf.random_normal([dim,n_hidden1]))
h2 = tf.Variable(tf.random_normal([n_hidden1, n_hidden2]))
hout = tf.Variable(tf.random_normal([n_hidden2, n_out]))
b1 = tf.Variable(tf.random_normal([n_hidden1]))
b2 = tf.Variable(tf.random_normal([n_hidden2]))
bout = tf.Variable(tf.random_normal([n_out]))
layer1 = tf.add(tf.matmul(input_, h1), b1)
layer1 = tf.nn.relu(layer1)
layer2 = tf.add(tf.matmul(layer1, h2), b2)
layer2 = tf.nn.relu(layer2)
out_layer = tf.matmul(layer2, hout) + bout
return out_layer, n_out
def tfFC(input_, dim):
n_hidden1 = int(dim*0.8)
n_hidden2 = int(n_hidden1*0.8)
#default active_fn = relu
h1 = tf.contrib.layers.fully_connected(
inputs=input_, num_outputs=n_hidden1)
h2 = tf.contrib.layers.fully_connected(
inputs=h1, num_outputs=n_hidden2)
return h2, n_hidden2
def normal_concat(input_, dim):
return input_, dim
def concat_fc(input_, dim, method, layers=2):
if method == 'highway':
return highway(input_, dim, layers)
elif method == 'mlp':
return mlp(input_, dim)
elif method == 'tfFC':
return tfFC(input_, dim)
elif method == 'normal':
return normal_concat(input_, dim)
else:
raise ValueError("concat_fc: unknown method '{}'".format(method))
|
11562841
|
from .bases import BaseJsonPage, BaseJsonBlock
from .mixins import AIslandGetThreadId
import re
__all__ = ['AdnmbBlock', 'AdnmbPage']
_request_info = {
'cdn_host': 'http://h-adnmb-com.n1.yun.tf:8999/Public/Upload',
'headers': {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'Dnt': '1',
'Host': 'h-adnmb-com.n1.yun.tf:8999',
'Pragma': 'no-cache',
'Referer': 'http://h.adnmb.com/',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'
}
}
class AdnmbBlock(BaseJsonBlock):
request_info = _request_info
def _deal_with_reply(self, content):
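# Wrap quoted-reply references (the board renders them as <font color=...>
# tags ending in a post number) in a span so they can be styled.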
return re.sub(r'(<font color.*?>.*?\d+</font>)', r'<span class="reply-color">\1</span>', content)
class AdnmbPage(AIslandGetThreadId, BaseJsonPage):
block_model = AdnmbBlock
|
11562848
|
class RebarBendData(object,IDisposable):
"""
The values in this class provide a summary of information taken from the RebarBarType,RebarHookType,and RebarStyle.
RebarBendData(barType: RebarBarType,hookType0: RebarHookType,hookType1: RebarHookType,style: RebarStyle,hookOrient0: RebarHookOrientation,hookOrient1: RebarHookOrientation)
RebarBendData()
"""
def Dispose(self):
""" Dispose(self: RebarBendData) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: RebarBendData,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,barType=None,hookType0=None,hookType1=None,style=None,hookOrient0=None,hookOrient1=None):
"""
__new__(cls: type,barType: RebarBarType,hookType0: RebarHookType,hookType1: RebarHookType,style: RebarStyle,hookOrient0: RebarHookOrientation,hookOrient1: RebarHookOrientation)
__new__(cls: type)
"""
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
BarDiameter=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The diameter of the bar.
Get: BarDiameter(self: RebarBendData) -> float
Set: BarDiameter(self: RebarBendData)=value
"""
BendRadius=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The radius of all fillets,except hook fillets,in the Rebar shape.
Get: BendRadius(self: RebarBendData) -> float
Set: BendRadius(self: RebarBendData)=value
"""
HookAngle0=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The angle of the hook at the start.
Get: HookAngle0(self: RebarBendData) -> int
Set: HookAngle0(self: RebarBendData)=value
"""
HookAngle1=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The angle of the hook at the end.
Get: HookAngle1(self: RebarBendData) -> int
Set: HookAngle1(self: RebarBendData)=value
"""
HookBendRadius=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The radius of the hook fillets in the Rebar shape.
Get: HookBendRadius(self: RebarBendData) -> float
Set: HookBendRadius(self: RebarBendData)=value
"""
HookLength0=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The extension length of the hook at the start.
Get: HookLength0(self: RebarBendData) -> float
Set: HookLength0(self: RebarBendData)=value
"""
HookLength1=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The extension length of the hook at the end.
Get: HookLength1(self: RebarBendData) -> float
Set: HookLength1(self: RebarBendData)=value
"""
HookOrient0=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The orientation of the hook at the start.
Get: HookOrient0(self: RebarBendData) -> RebarHookOrientation
Set: HookOrient0(self: RebarBendData)=value
"""
HookOrient1=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The orientation of the hook at the end.
Get: HookOrient1(self: RebarBendData) -> RebarHookOrientation
Set: HookOrient1(self: RebarBendData)=value
"""
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: RebarBendData) -> bool
"""
|
11562856
|
import renderdoc as rd
import rdtest
class D3D12_Empty_Capture(rdtest.TestCase):
demos_test_name = 'D3D12_Empty_Capture'
demos_frame_cap = 100
def check_capture(self):
draws = self.controller.GetDrawcalls()
self.check(len(draws) == 1)
self.check('End' in draws[0].name)
self.check(draws[0].eventId == 1)
|
11562874
|
from summary.models import Summary
def build_summary_data(summary, paper_id, previous_summary_id):
return {
'summary': summary,
'summary_plain_text': summary,
'paper': paper_id,
'previousSummaryId': previous_summary_id,
}
def create_summary(summary, proposed_by, paper_id):
"""Returns a Summary instance.
Args:
summary (str)
proposed_by (obj) - user
paper_id (int)
"""
return Summary.objects.create(
summary=summary,
summary_plain_text=summary,
proposed_by=proposed_by,
paper_id=paper_id
)
|
11562885
|
def onCook(dat):
mod.opDefinition.buildTypeTable(
dat,
supportedTypes=dat.inputs[0],
inputDefs=dat.inputs[1])
|
11562894
|
from django.http import JsonResponse
import json
from movies.utils import get_token_data
from ..models import Rating, Movie
def rate(request):
# if POST, save or update rating
if request.method == 'POST':
body = json.loads(request.body)
movie_id = body['id']
rating = int(body['rating'])
try:
username = body['username']
except KeyError:
token = get_token_data(request)
username = token['username']
# get the movie object with id movie_id, or create it
m, created = Movie.objects.get_or_create(source_id=movie_id, defaults={'title': ''})
# save or update rating
try:
r, created = Rating.objects.update_or_create(username=username, movie=m, defaults={'rating': rating})
except Exception as e:
print(e)
return JsonResponse({
'status': 'fail',
'data': {
'message': 'Error while saving rating'
}
}, status=500)
return JsonResponse({
'status': 'success',
'data': {
'title': m.title,
'rating': r.rating,
'is_new': created
}
})
elif request.method == 'DELETE':
username = request.GET.get('u', '')
movie_id = request.GET.get('m_id', '')
# find movie object
m = Movie.objects.filter(source_id=movie_id).first()
r = Rating.objects.filter(movie=m, username=username)
# delete rating
try:
r.delete()
except Exception:
return JsonResponse({
'status': 'fail',
'data': {
'message': 'Error while deleting rating'
}
}, status=500)
return JsonResponse({
'status': 'success'
})
def getRating(request, movie_id):
if request.method != 'POST':
return JsonResponse({'result': 'fail', 'data': {'message': 'POST required'}}, status=405)
body = json.loads(request.body)
username = body['username']
# get rating
r = Rating.objects.filter(movie_id = movie_id, username = username).first()
return JsonResponse({
'result': 'success',
'data': {
'rating': r.rating if r else None
}
})
|
11562902
|
from fabric.api import run
from fabric.api import task
from fabric.contrib import files
from fabric.operations import get, put
@task
def backup():
get(remote_path="/etc/dnsmasq.conf", local_path="backup/dns/dnsmasq.conf")
get(remote_path="/etc/hosts", local_path="backup/dns/hosts")
|
11562913
|
from core.base_model import AcousticModel
from keras.layers import Dense,Activation,Dropout,Input,Add
from core.ctc_function import CTC_Batch_Cost
from keras import Model
import os
from util.mapmap import PinyinMapper
from util.reader import VoiceDatasetList,VoiceLoader
from feature.mel_feature import MelFeature5
class MCONM(AcousticModel):
'''An attempt at chaining every layer's convolutions together; a transfer of the Somiao input-method model to the acoustic model.
2019-07-14 14:36: on the thchs30 dataset, epoch=55 with loss=59 and the loss essentially stopped decreasing; abandoned.
'''
def compile(self,feature_shape = (1024,200),label_max_string_length = 32,ms_output_size = 1423):
audio_ipt = Input(name="audio_input", shape=feature_shape)
parent_out = self.parent(audio_ipt,128)
layer_h1 = self.conv1d_layers(audio_ipt,64,8)
layer_h2 = self.cnn1d_cell(64, layer_h1, pool=False)
layer_h3 = Add()([parent_out,layer_h2])
layer_h6 = Dropout(0.2)(layer_h3) # KL, double Dense
layer_h7 = Dense(256, activation="relu", kernel_initializer="he_normal")(layer_h6) # TODO: consider adding Attention here
layer_h7 = Dropout(0.2)(layer_h7)
layer_h8 = Dense(ms_output_size)(layer_h7)
y_pred = Activation(activation="softmax")(layer_h8)
y_true = Input(name='label_inputs', shape=[label_max_string_length], dtype='float32')
audio_length = Input(name='audio_length', shape=[1], dtype='int64')
label_length = Input(name='label_length', shape=[1], dtype='int64')
loss_out = CTC_Batch_Cost()([y_true, y_pred, audio_length, label_length])
train_model = Model([audio_ipt, y_true, audio_length, label_length], [loss_out])
train_model.compile(optimizer="adam", loss={"ctc": lambda y_true, y_pred: y_pred})
base_model = Model(audio_ipt, y_pred)
self.built(train_model,base_model)
@staticmethod
def train(datagenes:list, load_model = None):
w, h = 800, 200
max_label_len = 64
dataset = VoiceDatasetList()
x_set, y_set = dataset.merge_load(datagenes)
pymap = PinyinMapper(sil_mode=-1)
vloader = VoiceLoader(x_set, y_set,
batch_size= 16,
feature_pad_len = w,
n_mels=h,
max_label_len=max_label_len,
pymap=pymap,
melf=MelFeature5(),
all_train=False
)
model_helper = MCONM(pymap)
model_helper.compile(feature_shape=(w, h), label_max_string_length=max_label_len, ms_output_size=pymap.max_index+1)
if load_model is not None:
load_model = os.path.abspath(load_model)
model_helper.load(load_model)
model_helper.fit(vloader,epoch=-1,save_step=1000,use_ctc=True)
class MPCONM(AcousticModel):
'''An attempt based on MCONM that replaces the parent structure with three conv + maxpool layers; all other settings unchanged.
2019-07-15 00:30: on thchs30, epoch=82 with loss=14; the loss now decreases only slowly. Keep training; abandon if it has not converged by epoch > 150.
'''
def compile(self,feature_shape = (1024,200),label_max_string_length = 32,ms_output_size = 1423):
audio_ipt = Input(name="audio_input", shape=feature_shape)
parent_out = self.cnn1d_cell(32,audio_ipt,pool=True)
parent_out = self.cnn1d_cell(64,parent_out,pool=True)
parent_out = self.cnn1d_cell(64,parent_out,pool=True)
layer_h1 = self.conv1d_layers(parent_out,64,8)
layer_h2 = self.cnn1d_cell(64, layer_h1, pool=False)
layer_h3 = Add()([parent_out,layer_h2])
layer_h6 = Dropout(0.2)(layer_h3) # KL, double Dense
layer_h7 = Dense(256, activation="relu", kernel_initializer="he_normal")(layer_h6) # TODO: consider adding Attention here
layer_h7 = Dropout(0.2)(layer_h7)
layer_h8 = Dense(ms_output_size)(layer_h7)
y_pred = Activation(activation="softmax")(layer_h8)
y_true = Input(name='label_inputs', shape=[label_max_string_length], dtype='float32')
audio_length = Input(name='audio_length', shape=[1], dtype='int64')
label_length = Input(name='label_length', shape=[1], dtype='int64')
loss_out = CTC_Batch_Cost()([y_true, y_pred, audio_length, label_length])
train_model = Model([audio_ipt, y_true, audio_length, label_length], [loss_out])
train_model.compile(optimizer="adam", loss={"ctc": lambda y_true, y_pred: y_pred})
base_model = Model(audio_ipt, y_pred)
self.built(train_model,base_model)
@staticmethod
def train(datagenes: list, load_model=None):
w, h = 1600, 200
max_label_len = 64
dataset = VoiceDatasetList()
x_set, y_set = dataset.merge_load(datagenes)
pymap = PinyinMapper(sil_mode=-1)
vloader = VoiceLoader(x_set, y_set,
batch_size=16,
feature_pad_len=w,
n_mels=h,
max_label_len=max_label_len,
pymap=pymap,
divide_feature_len=8,
melf=MelFeature5(),
all_train=False
)
model_helper = MPCONM(pymap)
model_helper.compile(feature_shape=(w, h), label_max_string_length=max_label_len,
ms_output_size=pymap.max_index + 1)
if load_model is not None:
load_model = os.path.abspath(load_model)
model_helper.load(load_model)
model_helper.fit(vloader, epoch=-1, save_step=100, use_ctc=True)
class MPBCONM(AcousticModel):
'''MPCONM with BatchNorm added.'''
def compile(self,feature_shape = (1024,200),label_max_string_length = 32,ms_output_size = 1423):
audio_ipt = Input(name="audio_input", shape=feature_shape)
parent_out = self.cnn1d_cell(32,audio_ipt,pool=True)
parent_out = self.cnn1d_cell(64,parent_out,pool=True)
parent_out = self.cnn1d_cell(64,parent_out,pool=True)
layer_h1 = self.conv1d_layers(parent_out,64,8,batch_norm=True)
layer_h2 = self.cnn1d_cell(64, layer_h1, pool=False)
layer_h3 = Add()([parent_out,layer_h2])
layer_h6 = Dropout(0.2)(layer_h3) # KL, double Dense
layer_h7 = Dense(256, activation="relu", kernel_initializer="he_normal")(layer_h6) # TODO: consider adding Attention here
layer_h7 = Dropout(0.2)(layer_h7)
layer_h8 = Dense(ms_output_size)(layer_h7)
y_pred = Activation(activation="softmax")(layer_h8)
y_true = Input(name='label_inputs', shape=[label_max_string_length], dtype='float32')
audio_length = Input(name='audio_length', shape=[1], dtype='int64')
label_length = Input(name='label_length', shape=[1], dtype='int64')
loss_out = CTC_Batch_Cost()([y_true, y_pred, audio_length, label_length])
train_model = Model([audio_ipt, y_true, audio_length, label_length], [loss_out])
train_model.compile(optimizer="adam", loss={"ctc": lambda y_true, y_pred: y_pred})
base_model = Model(audio_ipt, y_pred)
self.built(train_model,base_model)
@staticmethod
def train(datagenes: list, load_model=None):
w, h = 1600, 200
max_label_len = 64
dataset = VoiceDatasetList()
x_set, y_set = dataset.merge_load(datagenes)
pymap = PinyinMapper(sil_mode=-1)
vloader = VoiceLoader(x_set, y_set,
batch_size=16,
feature_pad_len=w,
n_mels=h,
max_label_len=max_label_len,
pymap=pymap,
divide_feature_len=8,
melf=MelFeature5(),
all_train=False
)
model_helper = MPBCONM(pymap)
model_helper.compile(feature_shape=(w, h), label_max_string_length=max_label_len,
ms_output_size=pymap.max_index + 1)
if load_model is not None:
load_model = os.path.abspath(load_model)
model_helper.load(load_model)
model_helper.fit(vloader, epoch=-1, save_step=1000, use_ctc=True)
|
11562978
|
import argparse
import logging
import os
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s')
def run(cmd: str):
ret = os.system(cmd)
if ret != 0:
raise RuntimeError("running '{}' returned non-zero status: {}".format(cmd, ret))
def clean(host: str, directory: str):
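# killall may exit non-zero when no server process is running, so its status
# is deliberately ignored here (plain os.system instead of run()).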
os.system("ssh {} 'killall graph_data_server'".format(host))
run("ssh {} 'rm -rf {}/*'".format(host, directory))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--dir', type=str, help='directory to clean up on each remote machine')
parser.add_argument('--hosts', nargs='*', type=str, help='list of hosts to deploy to')
args = parser.parse_args()
assert len(args.hosts) > 0, "need to clean at least one host"
for host in args.hosts:
logging.info("cleaning {} on {}".format(args.dir, host))
clean(host, args.dir)
if __name__ == "__main__":
main()
|
11563012
|
import os.path as osp
from glob import glob
import torch
from torch_geometric.data import InMemoryDataset, extract_zip
from torch_geometric.read import read_ply
class CoMA(InMemoryDataset):
url = 'https://coma.is.tue.mpg.de/'
categories = [
'bareteeth',
'cheeks_in',
'eyebrow',
'high_smile',
'lips_back',
'lips_up',
'mouth_down',
'mouth_extreme',
'mouth_middle',
'mouth_open',
'mouth_side',
'mouth_up',
]
def __init__(self,
root,
train=True,
transform=None,
pre_transform=None,
pre_filter=None):
super(CoMA, self).__init__(root, transform, pre_transform, pre_filter)
path = self.processed_paths[0] if train else self.processed_paths[1]
self.data, self.slices = torch.load(path)
@property
def raw_file_names(self):
return 'COMA_data.zip'
@property
def processed_file_names(self):
return ['training.pt', 'test.pt']
def download(self):
raise RuntimeError(
'Dataset not found. Please download COMA_data.zip from {} and '
'move it to {}'.format(self.url, self.raw_dir))
def process(self):
folders = sorted(glob(osp.join(self.raw_dir, 'FaceTalk_*')))
if len(folders) == 0:
extract_zip(self.raw_paths[0], self.raw_dir, log=False)
folders = sorted(glob(osp.join(self.raw_dir, 'FaceTalk_*')))
train_data_list, test_data_list = [], []
for folder in folders:
for i, category in enumerate(self.categories):
files = sorted(glob(osp.join(folder, category, '*.ply')))
for j, f in enumerate(files):
data = read_ply(f)
data.y = torch.tensor([i], dtype=torch.long)
if self.pre_filter is not None and\
not self.pre_filter(data):
continue
if self.pre_transform is not None:
data = self.pre_transform(data)
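# Deterministic ~90/10 train/test split by frame index within each sequence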
if (j % 100) < 90:
train_data_list.append(data)
else:
test_data_list.append(data)
torch.save(self.collate(train_data_list), self.processed_paths[0])
torch.save(self.collate(test_data_list), self.processed_paths[1])
|
11563027
|
from htmlgen.attribute import html_attribute
from htmlgen.element import Element
class Link(Element):
"""An HTML inline link (<a>) element.
>>> link = Link("http://www.example.com/", "caption")
>>> link.append(", more")
>>> link.url
'http://www.example.com/'
>>> str(link)
'<a href="http://www.example.com/">caption, more</a>'
By default links open in the same window. This can be influenced using
the target property:
>>> link = Link("/foo/bar")
>>> link.target
'_self'
>>> link.set_blank_target()
>>> link.target
'_blank'
>>> link.target = "my-window"
>>> str(link)
'<a href="/foo/bar" target="my-window"></a>'
Please refer to the HeadLink class for <link> elements.
"""
def __init__(self, url, *content):
super().__init__("a")
self.url = url
self.extend(content)
url = html_attribute("href")
target = html_attribute("target", "_self")
title = html_attribute("title")
def set_blank_target(self):
self.target = "_blank"
|
11563052
|
import logging
from io import BytesIO
import numpy as np
from PIL import Image, ImageCms
from PIL.ImageCms import (
applyTransform,
getProfileDescription,
getProfileName,
ImageCmsProfile,
ImageCmsTransform,
isIntentSupported,
)
logger = logging.getLogger(__name__)
class ColorManager(object):
"""Class for color management using ICC profiles."""
def __init__(self, icc_profile: bytes):
"""
Parameters
----------
icc_profile: bytes
ICC profile
Raises
------
ValueError
When ICC Profile cannot be read.
"""
try:
self._icc_transform = self._build_icc_transform(icc_profile)
except OSError:
raise ValueError('Could not read ICC Profile.')
def transform_frame(self, array: np.ndarray) -> np.ndarray:
"""Transforms a frame by applying the ICC profile.
Parameters
----------
array: numpy.ndarray
Pixel data of a color image frame in form of an array with
dimensions (Rows x Columns x SamplesPerPixel)
Returns
-------
numpy.ndarray
Color corrected pixel data of a image frame in form of an array
with dimensions (Rows x Columns x SamplesPerPixel)
Raises
------
ValueError
When `array` does not have 3 dimensions and thus does not represent
a color image frame.
"""
if array.ndim != 3:
raise ValueError(
'Array has incorrect dimensions for a color image frame.'
)
image = Image.fromarray(array)
applyTransform(image, self._icc_transform, inPlace=True)
return np.asarray(image)
@staticmethod
def _build_icc_transform(icc_profile: bytes) -> ImageCmsTransform:
"""Builds an ICC Transformation object.
Parameters
----------
icc_profile: bytes
ICC Profile
Returns
-------
PIL.ImageCms.ImageCmsTransform
ICC Transformation object
"""
profile: ImageCmsProfile
try:
profile = ImageCmsProfile(BytesIO(icc_profile))
except OSError:
raise ValueError('Cannot read ICC Profile in image metadata.')
name = getProfileName(profile).strip()
description = getProfileDescription(profile).strip()
logger.debug(f'found ICC Profile "{name}": "{description}"')
logger.debug('build ICC Transform')
intent = ImageCms.INTENT_RELATIVE_COLORIMETRIC
if not isIntentSupported(
profile,
intent=intent,
direction=ImageCms.DIRECTION_INPUT
):
raise ValueError(
'ICC Profile does not support desired '
'color transformation intent.'
)
return ImageCms.buildTransform(
inputProfile=profile,
outputProfile=ImageCms.createProfile('sRGB'),
inMode='RGB', # according to PS3.3 C.11.15.1.1
outMode='RGB'
)
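# Hedged usage sketch (`icc_bytes` is a placeholder for profile bytes read
# from image metadata):
# manager = ColorManager(icc_bytes)
# corrected = manager.transform_frame(frame)  # frame: (Rows, Columns, 3)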
|
11563069
|
import argparse
import cv2
import numpy as np
import requests
from v2_plugin.runner.utils import type_serializer
def http_request_test(port: int):
image: np.ndarray = cv2.imread('../../tests/test1.jpg')
data = {'raw_input': image.tolist(), 'dtype': type_serializer(image.dtype)}
response = requests.post(f'http://localhost:{port}/predict', json=data)
print(response.text)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--port', type=int, default=8001, help='Port for HTTP service.')
args = parser.parse_args()
http_request_test(args.port)
|
11563070
|
import numpy as np
import unittest
from spyne import Tensor, Constant
from spyne.operations import (TensorAddition, TensorSubtraction, TensorMultiply, TensorElemMultiply, TensorReLU,
TensorTanh, TensorSigmoid, TensorSoftmax, TensorSum, TensorSquared, TensorNegLog,
TensorDuplicateRows)
from spyne.operations.base import DualTensorOperation, UniTensorOperation
class TestDualTensorBase(unittest.TestCase):
operation = DualTensorOperation
def setUp(self):
self.A = Tensor(np.random.random((2, 2, 3)))
self.B = Tensor(np.random.random((2, 2, 3)))
self.C = self.operation(self.A, self.B)
def test_constant_tensor_interchange(self):
const_A = Constant(self.A.value)
const_B = Constant(self.B.value)
self.assertTrue(np.array_equal(self.C.value, self.operation(const_A, self.B).value))
self.assertTrue(np.array_equal(self.C.value, self.operation(self.A, const_B).value))
def test_attrs(self):
self.assertEqual(self.C.a, self.A)
self.assertEqual(self.C.b, self.B)
self.assertTrue(np.array_equal(self.C._a, self.A.value))
self.assertTrue(np.array_equal(self.C._b, self.B.value))
self.assertTrue(type(self.C.node_uid) == str)
self.assertTrue(len(self.C.node_uid) > 1)
class TestUniTensorBase(unittest.TestCase):
operation = UniTensorOperation
def setUp(self):
self.A = Tensor(np.random.random((3, 5, 3)))
self.B = self.operation(self.A)
def test_constant_tensor_interchange(self):
const_A = Constant(self.A.value)
self.assertTrue(np.array_equal(self.B.value, self.operation(const_A).value))
def test_attrs(self):
self.assertEqual(self.B.a, self.A)
self.assertTrue(np.array_equal(self.B._a, self.A.value))
self.assertTrue(type(self.B.node_uid) == str)
self.assertTrue(len(self.B.node_uid) > 1)
class TensorAdditionTest(TestDualTensorBase):
operation = TensorAddition
def test_execution(self):
result = np.add(self.A.value, self.B.value)
self.assertTrue(np.array_equal(self.C.value, result))
self.assertEqual(self.C.shape, self.A.shape, self.B.shape)
def test_vjps(self):
g_1 = np.array([1, 2])
g_2 = np.array([[[4, 5, 22, 33], [1, 2, 3, 4]], [[1, 2, 3, 4], [5, 6, 7, 8]]])
a_vjp, b_vjp = self.C.vector_jacobian_product()
self.assertTrue(np.array_equal(a_vjp(g_1), g_1))
self.assertTrue(np.array_equal(a_vjp(g_2), g_2))
self.assertTrue(np.array_equal(b_vjp(g_1), g_1))
self.assertTrue(np.array_equal(b_vjp(g_2), g_2))
def test_attrs(self):
self.assertEqual(self.C.shape, (2, 2, 3))
def test_methods(self):
self.assertTrue(self.C.is_tensor(self.A))
self.assertFalse(self.C.is_tensor(np.array(1)))
self.assertFalse(self.C.is_operation(self.A))
self.assertTrue(self.C.is_operation(self.C))
class TensorSubtractionTest(TestDualTensorBase):
operation = TensorSubtraction
def test_execution(self):
result = np.subtract(self.A.value, self.B.value)
self.assertTrue(np.array_equal(self.C.value, result))
self.assertEqual(self.C.shape, self.A.shape, self.B.shape)
def test_vjps(self):
g_1 = np.random.random((2,))
g_2 = np.random.random((3, 2, 4))
a_vjp, b_vjp = self.C.vector_jacobian_product()
self.assertTrue(np.array_equal(a_vjp(g_1), -1 * g_1))
self.assertTrue(np.array_equal(a_vjp(g_2), -1 * g_2))
self.assertTrue(np.array_equal(b_vjp(g_1), -1 * g_1))
self.assertTrue(np.array_equal(b_vjp(g_2), -1 * g_2))
def test_attrs(self):
self.assertEqual(self.C.shape, (2, 2, 3))
def test_methods(self):
self.assertTrue(self.C.is_tensor(self.A))
self.assertFalse(self.C.is_tensor(np.array(1)))
self.assertFalse(self.C.is_operation(self.A))
self.assertTrue(self.C.is_operation(self.C))
class TensorMultiplyTest(TestDualTensorBase):
operation = TensorMultiply
def setUp(self):
self.A = Tensor(
np.random.random((2, 2, 3))
)
self.B = Tensor(
np.random.random((2, 3, 2))
)
self.C = self.operation(self.A, self.B)
def test_execution(self):
self.assertTrue(np.array_equal(self.C.value, np.dot(self.A.value, self.B.value)))
def test_vjps(self):
# test case where a is of dim greater than 1 and b is of dim 1
a = Tensor(np.random.random((3, 5, 4)))
b = Tensor(np.random.random((4,)))
c = self.operation(a, b)
g = np.random.random(c.shape)
contract_num = max(0, len(b.shape) - (len(a.shape) != 0))
a_vjp, b_vjp = c.vector_jacobian_product()
a_ndim = len(a.shape)
b_ndim = len(b.shape)
self.assertTrue(np.array_equal(a_vjp(g), np.tensordot(g, b.value, contract_num)))
res = np.asarray(np.tensordot(
g, a.value, [range(-a_ndim - b_ndim + 2, -b_ndim + 1), range(a_ndim - 1)]))
self.assertTrue(np.array_equal(b_vjp(g), res))
# test case where a is of dim 1 and b is of dim greater than 1
a = Tensor(np.random.random((4,)))
b = Tensor(np.random.random((3, 4, 5)))
c = self.operation(a, b)
g = np.random.random(c.shape)
contract_num = max(0, len(a.shape) - (len(b.shape) != 0))
a_vjp, b_vjp = c.vector_jacobian_product()
a_ndim = len(a.shape)
b_ndim = len(b.shape)
self.assertTrue(np.array_equal(a_vjp(g), np.tensordot(g, np.swapaxes(b.value, -1, -2), b_ndim - 1)))
self.assertTrue(np.array_equal(b_vjp(g),
np.asarray(np.swapaxes(np.tensordot(g, a.value, contract_num), -1, -2))))
class TestTensorElemMultiply(TestDualTensorBase):
operation = TensorElemMultiply
def test_execution(self):
self.assertTrue(np.array_equal(self.C.value, np.multiply(self.A.value, self.B.value)))
self.assertEqual(self.C.shape, self.A.shape, self.B.shape)
def test_vjps(self):
a_vjp, b_vjp = self.C.vector_jacobian_product()
g = np.random.random((2, 2, 3))
self.assertTrue(np.array_equal(a_vjp(g), np.multiply(g, self.B.value)))
self.assertTrue(np.array_equal(b_vjp(g), np.multiply(g, self.A.value)))
class TestTensorRelu(TestUniTensorBase):
operation = TensorReLU
def test_execution(self):
self.assertTrue(np.array_equal(self.B.value, self.A.value * (self.A.value > 0)))
self.assertEqual(self.B.shape, self.A.shape)
def test_vjps(self):
a_vjp = self.B.vector_jacobian_product()
g = np.random.random(self.B.shape)
self.assertTrue(np.array_equal(a_vjp(g), g * np.where(self.A.value > 0, 1, 0)))
class TestTensorTanh(TestUniTensorBase):
operation = TensorTanh
def test_execution(self):
self.assertTrue(np.array_equal(self.B.value, np.tanh(self.A.value)))
self.assertEqual(self.B.shape, self.A.shape)
def test_vjps(self):
a_vjp = self.B.vector_jacobian_product()
g = np.random.random(self.B.shape)
self.assertTrue(np.array_equal(a_vjp(g), g * 1 / np.square(np.cosh(self.A.value))))
class TestTensorSigmoid(TestUniTensorBase):
operation = TensorSigmoid
def test_execution(self):
e = np.exp(self.A.value)
self.assertTrue(np.array_equal(self.B.value, e / (e + 1)))
self.assertEqual(self.B.shape, self.A.shape)
def test_vjps(self):
a_vjp = self.B.vector_jacobian_product()
g = np.random.random(self.B.shape)
e = np.exp(-1 * self.A.value)
self.assertTrue(np.array_equal(a_vjp(g), np.multiply(g, e / (1 + e)**2)))
class TestTensorSoftmax(TestUniTensorBase):
operation = TensorSoftmax
def test_execution(self):
exp_sum = np.exp(self.A.value).sum()
self.assertTrue(np.array_equal(self.B.value, np.exp(self.A.value) / exp_sum))
self.assertEqual(self.B.shape, self.A.shape)
def test_vjps(self):
a_vjp = self.B.vector_jacobian_product()
g = np.random.random(self.B.shape)
exp_sum = np.exp(self.A.value).sum()
num = np.multiply(np.exp(self.A.value), exp_sum) - np.exp(2 * self.A.value)
self.assertTrue(np.array_equal(a_vjp(g), np.multiply(g, num / np.square(exp_sum))))
class TestTensorSum(TestUniTensorBase):
operation = TensorSum
def test_execution(self):
self.assertTrue(np.array_equal(self.B.value, self.A.value.sum()))
self.assertEqual(self.B.shape, ())
def test_vjps(self):
a_vjp = self.B.vector_jacobian_product()
g = np.random.random((1,))
self.assertTrue(np.array_equal(a_vjp(g), g * np.ones(self.A.shape)))
class TestTensorSquared(TestUniTensorBase):
operation = TensorSquared
def test_execution(self):
self.assertTrue(np.array_equal(self.B.value, np.square(self.A.value)))
self.assertEqual(self.B.shape, self.A.shape)
def test_vjps(self):
a_vjp = self.B.vector_jacobian_product()
g = np.random.random(self.A.shape)
self.assertTrue(np.array_equal(a_vjp(g), g * 2 * self.A.value))
class TestTensorNegLog(TestUniTensorBase):
operation = TensorNegLog
def test_execution(self):
self.assertTrue(np.array_equal(self.B.value, -1 * np.log(self.A.value)))
self.assertEqual(self.B.shape, self.A.shape)
def test_vjps(self):
a_vjp = self.B.vector_jacobian_product()
g = np.random.random(self.A.shape)
self.assertTrue(np.array_equal(a_vjp(g), np.divide(-g, self.A.value)))
class TestTensorDuplicateRows(unittest.TestCase):
operation = TensorDuplicateRows
def setUp(self):
self.A = Tensor(np.random.random((7,)))
self.n_rows = 5
self.B = self.operation(self.A, self.n_rows)
def test_execution(self):
self.assertTrue(np.array_equal(self.B.value, np.ones((self.n_rows,) + self.A.shape) * self.A.value))
self.assertEqual(self.B.shape, (self.n_rows,) + self.A.shape)
def test_vjps(self):
a_vjp = self.B.vector_jacobian_product()
g = np.random.random(self.A.shape)
self.assertTrue(np.array_equal(a_vjp(g), np.sum(g, axis=0)))
|
11563110
|
from nevada.Common.Connector import *
from typing import List
import jsonpickle
import json
class CreateAdExtensionObject:
def __init__(self, pcChannelId, mobileChannelId, ownerId, type, userLock, schedule=None):
self.pcChannelId = pcChannelId
self.mobileChannelId = mobileChannelId
self.ownerId = ownerId
self.schedule = schedule
self.type = type
self.userLock = userLock
class UpdateAdExtensionObject:
def __init__(self, nccAdExtensionId, schedule=None, userLock = None):
self.nccAdExtensionId = nccAdExtensionId
self.schedule = schedule
self.userLock = userLock
class AdExtensionObject:
def __init__(self, json_def):
if type(json_def) is str:
json_def = json.loads(json_def)
s = json_def
self.customerId = None if 'customerId' not in s else s['customerId']
self.delFlag = None if 'delFlag' not in s else s['delFlag']
self.editTm = None if 'editTm' not in s else s['editTm']
self.inspectStatus = None if 'inspectStatus' not in s else s['inspectStatus']
self.mobileChannelId = None if 'mobileChannelId' not in s else s['mobileChannelId']
self.nccAdExtensionId = None if 'nccAdExtensionId' not in s else s['nccAdExtensionId']
self.ownerId = None if 'ownerId' not in s else s['ownerId']
self.pcChannelId = None if 'pcChannelId' not in s else s['pcChannelId']
self.regTm = None if 'regTm' not in s else s['regTm']
self.status = None if 'status' not in s else s['status']
self.statusReason = None if 'statusReason' not in s else s['statusReason']
self.type = None if 'type' not in s else s['type']
self.userLock = None if 'userLock' not in s else s['userLock']
class AdExtension: # ad extension
def __init__(self, base_url: str, api_key: str, secret_key: str, customer_id: int):
self.conn = Connector(base_url, api_key, secret_key, customer_id)
AdExtensionObjectList = List[AdExtensionObject]
IdList = List[str]
ChangeFieldsList = List[str]
def list_by_owner_id(self, ownerId: str) -> AdExtensionObjectList:
result = self.conn.get('/ncc/ad-extensions', {'ownerId': ownerId})
return result
def list_by_ids(self, ids: IdList) -> AdExtensionObjectList:
ids = ",".join(ids)
ids = {'ids': ids}
result = self.conn.get('/ncc/ad-extensions', ids)
return result
def get(self, adExtensionId: str) -> AdExtensionObject:
result = self.conn.get('/ncc/ad-extensions/' + adExtensionId)
return result
def create(self, CreateAdExtensionObject: CreateAdExtensionObject) -> AdExtensionObject:
data = jsonpickle.encode(CreateAdExtensionObject, unpicklable=False)
data = json.loads(data)
data = CommonFunctions.dropna(data)
data_str = json.dumps(data)
result = self.conn.post('/ncc/ad-extensions', data_str)
return result
def update(self, adExtensionId: str, fields: ChangeFieldsList,
UpdateAdExtensionObject: UpdateAdExtensionObject) -> AdExtensionObject:
data = jsonpickle.encode(UpdateAdExtensionObject, unpicklable=False)
data = json.loads(data)
data = CommonFunctions.dropna(data)
data_str = json.dumps(data)
change_fields_list = ",".join(fields)
query = {'fields': change_fields_list}
result = self.conn.put('/ncc/ad-extensions/' + adExtensionId, data_str, query)
return result
def delete(self, adExtensionId: str):
self.conn.delete('/ncc/ad-extensions/' + adExtensionId)
return True
|
11563117
|
from web3 import Web3
sample_eth_address = 562046206989085878832492993516240920558397288279
def str_eth(numeric_eth_address):
return Web3.toChecksumAddress(hex(int(numeric_eth_address)))
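if __name__ == '__main__':
    # Round-trip demo: numeric form -> EIP-55 checksummed hex string.
    print(str_eth(sample_eth_address))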
|
11563141
|
import os
import numpy as np
import pandas as pd
import hydra
from src.classifier.data_processing.annotate.metrics import (
fleiss_kappa,
get_all_pairwise_kappas,
)
def create_combined_df(data_dir):
data_dir = hydra.utils.to_absolute_path(data_dir)
annotations = pd.DataFrame()
annotator_names = []
for i, annotation in enumerate(os.listdir(data_dir)):
annotator = annotation.split("_")[-1].split(".")[0]
annotator_names += [annotator]
data = pd.read_csv(os.path.join(data_dir, annotation), index_col=0)
if "Unsicher" in data.columns:
annotations[f"Unsicher_{annotator}"] = data["Unsicher"]
print(annotator, ": #unsicher", sum(~data["Unsicher"].isna()))
# print(f"{annotator} not sure about {data['Unsicher']} sentences.")
annotations[annotator] = data["Label"].fillna(98)
annotations.loc[
~annotations[f"Unsicher_{annotator}"].isna(), annotator
] = 98
annotations[annotator] = annotations[annotator].astype("int32")
if i == 0:
annotations["Text"] = data["Text"]
annotations["Gender"] = data["Gender"]
return annotations, annotator_names
def clean_uncertain_labels(remove_uncertain, annotations, annotator_names):
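# Drop sentences flagged as uncertain (label code 98): with
# remove_uncertain == "all", a single uncertain vote removes the sentence;
# otherwise at least two uncertain votes are required.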
if remove_uncertain == "all":
min_uncertain = 1
else:
min_uncertain = 2
rm_cases = annotations.loc[
np.sum(annotations[annotator_names] == 98, axis=1) >= min_uncertain,
annotator_names,
].index
annotations_cleaned = annotations.drop(
annotations.loc[rm_cases, annotator_names].index
)
annotations_cleaned = annotations_cleaned.replace(98, np.nan)
print(f"Dropping {len(rm_cases)} cases.")
return annotations_cleaned
def label_with_aggregate_annotation(
annotation,
label_col,
annotations,
annotator_names,
force_majority=False,
):
if annotation == "majority" or force_majority:
return_df = _get_majority_label(
annotations,
annotator_names,
label_col,
for_stratification_only=force_majority,
)
else:
not_all_equal_idcs = []
for i, row in annotations[annotator_names].iterrows():
e = _all_equal(row)
if e is False:
not_all_equal_idcs += [i]
all_equal_indcs = list(
set(annotations.index.values.tolist()) - set(not_all_equal_idcs)
)
return_df = _get_majority_label(
annotations.loc[all_equal_indcs, :],
annotator_names,
label_col,
for_stratification_only=force_majority,
)
print(
f"Removed {len(not_all_equal_idcs)} with varying votes. {len(all_equal_indcs)} unanimously labeled sentences remain."
)
# Check inter rater reliability
fleiss_kappa(return_df, annotator_names)
get_all_pairwise_kappas(return_df, annotator_names)
return return_df
def _all_equal(iterator):
iterator = iter(iterator)
try:
first = next(iterator)
except StopIteration:
return True
return all(first == x for x in iterator)
def _get_majority_label(
annotations,
annotator_names,
label_col,
for_stratification_only,
):
annotations[label_col] = annotations[annotator_names].mode(axis="columns")[0]
if for_stratification_only and (annotations[label_col] == 98).any():
from random import choice
options = annotations.loc[annotations[label_col] != 98, label_col].tolist()
annotations.loc[annotations[label_col] == 98, label_col] = choice(options) # replace 'uncertain' (98) with a random known label
return annotations
|
11563143
|
import sys
import torch
from torch import cuda
from transformers import *
from torch import nn
from torch.autograd import Variable
from util.holder import *
from util.util import *
class BertEncoder(torch.nn.Module):
def __init__(self, opt, shared):
super(BertEncoder, self).__init__()
self.opt = opt
self.shared = shared
self.zero = Variable(torch.zeros(1), requires_grad=False)
self.zero = to_device(self.zero, self.opt.gpuid)
print('loading transformer...')
self.bert = AutoModel.from_pretrained(self.opt.bert_type)
for n in self.bert.children():
for p in n.parameters():
p.skip_init = True
def forward(self, tok_idx):
tok_idx = to_device(tok_idx, self.opt.gpuid)
last, pooled = self.bert(tok_idx, return_dict=False)
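# The multiply-by-zero below keeps `pooled` attached to the graph without
# changing `last` numerically (likely so all BERT outputs stay on the
# autograd path).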
last = last + pooled.unsqueeze(1) * self.zero
# move to the original device
last = to_device(last, self.opt.gpuid)
self.shared.bert_enc = last
return last
def begin_pass(self):
pass
def end_pass(self):
pass
|
11563160
|
import pytest
from vectorhub.encoders.text.tfhub import USE2Vec, USEMulti2Vec, USELite2Vec
from ....test_utils import assert_encoder_works
def test_use_encode():
"""
Testing for USE encode
"""
encoder = USE2Vec()
assert_encoder_works(encoder, vector_length=512, data_type='text')
def test_use_multi_encode():
"""
Testing for USE multi encode
"""
encoder = USEMulti2Vec()
assert_encoder_works(encoder, vector_length=512, data_type='text')
@pytest.mark.skip("Skip pytest due to tensorflow compatibility.")
def test_use_lite_works():
"""
Testing for USE lite encode
"""
encoder = USELite2Vec()
assert_encoder_works(encoder, vector_length=512, data_type='text')
|
11563161
|
import pytest
import numpy as np
import tensorflow as tf
import jax
import torch
from tensornetwork.backends import backend_factory
#pylint: disable=line-too-long
from tensornetwork.matrixproductstates.mpo import (FiniteMPO,
BaseMPO,
InfiniteMPO,
FiniteFreeFermion2D)
from tensornetwork.matrixproductstates.finite_mps import FiniteMPS
from tensornetwork.matrixproductstates.dmrg import FiniteDMRG
@pytest.fixture(
name="backend_dtype_values",
params=[('numpy', np.float64), ('numpy', np.complex128),
('tensorflow', tf.float64), ('tensorflow', tf.complex128),
('pytorch', torch.float64), ('jax', np.float64),
('jax', np.complex128)])
def backend_dtype(request):
return request.param
def test_base_mpo_init(backend_dtype_values):
backend = backend_factory.get_backend(backend_dtype_values[0])
dtype = backend_dtype_values[1]
tensors = [
backend.randn((1, 5, 2, 2), dtype=dtype),
backend.randn((5, 5, 2, 2), dtype=dtype),
backend.randn((5, 1, 2, 2), dtype=dtype)
]
mpo = BaseMPO(tensors=tensors, backend=backend, name='test')
assert mpo.backend is backend
assert mpo.dtype == dtype
np.testing.assert_allclose(mpo.bond_dimensions, [1, 5, 5, 1])
def test_base_mpo_raises():
backend = backend_factory.get_backend('numpy')
tensors = [
backend.randn((1, 5, 2, 2), dtype=np.float64),
backend.randn((5, 5, 2, 2), dtype=np.float64),
backend.randn((5, 1, 2, 2), dtype=np.float32)
]
with pytest.raises(TypeError):
BaseMPO(tensors=tensors, backend=backend)
mpo = BaseMPO(tensors=[], backend=backend)
mpo.tensors = tensors
with pytest.raises(TypeError):
mpo.dtype
def test_finite_mpo_raises(backend):
tensors = [np.random.rand(2, 5, 2, 2), np.random.rand(5, 1, 2, 2)]
with pytest.raises(ValueError):
FiniteMPO(tensors=tensors, backend=backend)
tensors = [np.random.rand(1, 5, 2, 2), np.random.rand(5, 2, 2, 2)]
with pytest.raises(ValueError):
FiniteMPO(tensors=tensors, backend=backend)
def test_infinite_mpo_raises(backend):
tensors = [np.random.rand(2, 5, 2, 2), np.random.rand(5, 3, 2, 2)]
with pytest.raises(ValueError):
InfiniteMPO(tensors=tensors, backend=backend)
def test_infinite_mpo_roll(backend):
tensors = [np.random.rand(5, 5, 2, 2), np.random.rand(5, 5, 2, 2)]
mpo = InfiniteMPO(tensors=tensors, backend=backend)
mpo.roll(1)
np.testing.assert_allclose(mpo.tensors[0], tensors[1])
np.testing.assert_allclose(mpo.tensors[1], tensors[0])
mpo.roll(1)
np.testing.assert_allclose(mpo.tensors[0], tensors[0])
np.testing.assert_allclose(mpo.tensors[1], tensors[1])
def test_len(backend):
tensors = [
np.random.rand(1, 5, 2, 2),
np.random.rand(5, 5, 2, 2),
np.random.rand(5, 1, 2, 2)
]
mpo = BaseMPO(tensors=tensors, backend=backend)
assert len(mpo) == 3
@pytest.mark.parametrize("N1, N2, D", [(2, 2, 4), (2, 4, 16), (4, 4, 128)])
def test_finiteFreeFermions2d(N1, N2, D):
def adjacency(N1, N2):
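# Build the neighbor map of an N1 x N2 grid: each site records its
# right neighbor and its previous-row neighbor, so every undirected
# edge appears exactly once.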
neighbors = {}
mat = np.arange(N1 * N2).reshape(N1, N2)
for n in range(N1 * N2):
x, y = np.divmod(n, N2)
if n not in neighbors:
neighbors[n] = []
if y < N2 - 1:
neighbors[n].append(mat[x, y + 1])
if x > 0:
neighbors[n].append(mat[x - 1, y])
return neighbors
adj = adjacency(N1, N2)
tij = np.zeros((N1 * N2, N1 * N2))
t = -1
v = -1
for n, d in adj.items():
for ind in d:
tij[n, ind] += t
tij[ind, n] += t
tij += np.diag(np.ones(N1 * N2) * v)
eta, _ = np.linalg.eigh(tij)
expected = min(np.cumsum(eta))
t1 = t
t2 = t
dtype = np.float64
mpo = FiniteFreeFermion2D(t1, t2, v, N1, N2, dtype)
mps = FiniteMPS.random([2] * N1 * N2, [D] * (N1 * N2 - 1), dtype=np.float64)
dmrg = FiniteDMRG(mps, mpo)
actual = dmrg.run_one_site()
np.testing.assert_allclose(actual, expected)
|
11563185
|
from typing import List, Tuple, Dict
from promise import Promise
from promise.dataloader import DataLoader
from gtmcore.environment.utils import get_package_manager
from gtmcore.environment.packagemanager import PackageMetadata
from gtmcore.labbook import LabBook
from gtmcore.logging import LMLogger
logger = LMLogger.get_logger()
class PackageDataloader(DataLoader):
"""Dataloader to manage running package latest version and metadata queries
The key for this dataloader is manager&package
"""
def __init__(self, keys: List[str], labbook: LabBook, username: str) -> None:
DataLoader.__init__(self)
self.keys = keys
self.results: Dict[str, PackageMetadata] = dict()
self.labbook = labbook
self.username = username
def populate_results(self) -> None:
# Repack key data
packages: Dict[str, List[Tuple[str, str, str]]] = {'conda2': list(),
'conda3': list(),
'pip': list(),
'apt': list()}
for key in self.keys:
manager, package = key.split('&')
packages[manager].append((manager, package, key))
for manager in packages.keys():
package_names = [x[1] for x in packages[manager]]
if package_names:
mgr = get_package_manager(manager)
try:
metadata = mgr.get_packages_metadata(package_names, labbook=self.labbook, username=self.username)
# save
                    for pkg_input, pkg_metadata in zip(packages[manager], metadata):
                        self.results[pkg_input[2]] = pkg_metadata
except ValueError as err:
logger.warning(f"An error occurred while looking up {manager} latest versions and metadata: {err}")
def get_data(self, key: str) -> PackageMetadata:
if not self.results:
self.populate_results()
return self.results[key]
def batch_load_fn(self, keys: List[str]):
"""Method to load labbook objects based on a list of unique keys
Args:
keys(list(str)): Unique key to identify the labbook
Returns:
"""
# Resolve objects
return Promise.resolve([self.get_data(key) for key in keys])
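# Usage sketch (hypothetical labbook and username; keys follow the
# "manager&package" format noted in the class docstring):
#   loader = PackageDataloader(['pip&numpy', 'pip&requests'], labbook, 'alice')
#   loader.load('pip&numpy').then(lambda metadata: print(metadata))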
|
11563198
|
class Solution:
def minTransfers(self, transactions: List[List[int]]) -> int:
def remove_one_zero_clique(non_zero):
n = len(non_zero)
q = collections.deque()
# q store ([index set], sum of set)
q.append(([0], non_zero[0]))
min_zero_set = None
while q:
cur_set, cur_sum = q.popleft()
if cur_sum == 0:
min_zero_set = cur_set
break
for j in range(cur_set[-1] + 1, n):
q.append((cur_set + [j], cur_sum + non_zero[j]))
min_zero_set = set(min_zero_set)
return [non_zero[i] for i in range(n) if i not in min_zero_set]
bal = collections.defaultdict(int)
for t in transactions:
bal[t[0]] -= t[2]
bal[t[1]] += t[2]
non_zero = [bal[k] for k in bal if bal[k] != 0]
bal_cnt = len(non_zero)
while len(non_zero) > 0:
non_zero = remove_one_zero_clique(non_zero)
bal_cnt -= 1
return bal_cnt
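# Worked example (hypothetical transactions): [[0, 1, 10], [2, 0, 5]] gives
# balances {0: -5, 1: 10, 2: -5}. The BFS finds the smallest zero-sum subset
# of non-zero balances ({-5, 10, -5}), and settling a subset of size k takes
# k - 1 transfers, so the answer is 3 - 1 = 2.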
|
11563313
|
import json
import logging
import pycurl
import re
import requests
import socket
import sys
import unittest
from StringIO import StringIO
class EtcdError(Exception):
    """Generic etcd error."""
    pass
class Etcd(object):
def __init__(self, logger, server=None):
if server:
self.server = server
else:
self.server = socket.gethostname()
self.url = 'http://%(hostname)s:4001/v2/keys' % {
'hostname': self.server}
self.logger = logger
def set_key(self, key, value):
url = '%(base)s/%(key)s' % {
'base': self.url,
'key': key
}
data = 'value=%s' % value
self.logger.debug("Saving data: %(data)s to %(url)s" %{
'data': data,
'url': url
})
storage = StringIO()
curl = pycurl.Curl()
curl.setopt(curl.URL, url)
curl.setopt(curl.POSTFIELDS, data)
curl.setopt(pycurl.FOLLOWLOCATION, 1)
curl.setopt(pycurl.MAXREDIRS, 5)
curl.setopt(curl.WRITEFUNCTION, storage.write)
curl.setopt(pycurl.CUSTOMREQUEST, "PUT")
curl.perform()
response = curl.getinfo(pycurl.HTTP_CODE)
curl.close()
if response == requests.codes.ok:
return True
elif response == requests.codes.created:
return True
else:
self.logger.error("ETCD returned %(status)s %(text)s" % {
'status': response,
'text': storage.getvalue()})
return None
def get_key(self, key):
url = '%(base)s/%(key)s' % {
'base': self.url,
'key': key
}
self.logger.debug('Getting url: ' + url)
response = requests.get(url)
self.logger.debug('Response: ' + response.text)
res = json.loads(response.text)
        if isinstance(res, list):
            raise ValueError('Key "%s" is a directory, expecting leaf (use '
                             'list_directory() to get directory listing).' % key)
#Check to see if Etcd returned an error
if 'errorCode' in res:
raise EtcdError(res['errorCode'], res['message'])
try:
return str(res['node']['value'])
except KeyError:
#Fallback on v1 functionality
return str(res['value'])
def delete_key(self, key):
url = '%(base)s/%(key)s' % {
'base': self.url,
'key': key
}
response = requests.delete(url)
if response.status_code == requests.codes.ok:
return response.text
else:
response.raise_for_status()
return None
def list_directory(self, path):
url = '%(base)s/%(path)s' % {
'base': self.url,
'path': path
}
response = requests.get(url)
if response.status_code == requests.codes.ok:
directory_list = []
json_txt = json.loads(response.text)
try:
for entry in json_txt['node']['nodes']:
directory_list.append(str(entry['key']))
return directory_list
except KeyError:
self.logger.error("Key ['node']['nodes'] not found in %(data)s" %{
'data': json_txt
})
else:
response.raise_for_status()
return None
def get_machines(self):
url = '%(base)s/_etcd/machines' % {
'base': self.url}
res = json.loads(requests.get(url).text)
#Check to see if Etcd returned an error
if 'errorCode' in res:
raise EtcdError(res['errorCode'], res['message'])
server_list = []
for entry in res:
server_list.append(str(entry['value']))
return server_list
class TestEtcd(unittest.TestCase):
def setUp(self):
logger = logging.getLogger()
stream = logging.StreamHandler(sys.stdout)
stream.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
stream.setFormatter(formatter)
logger.addHandler(stream)
self.etcd = Etcd(logger)
def test_a_setkey(self):
ret = self.etcd.set_key('message', 'Hello World')
self.assertTrue(ret)
def test_b_getkey(self):
self.etcd.set_key('message', 'Hello World')
text = self.etcd.get_key('message')
self.assertEqual(text, 'Hello World')
def test_c_deletekey(self):
#Set the key first before deleting it
self.etcd.set_key('message', 'Hello World')
text = self.etcd.delete_key('message')
regex = re.compile(r'{"action":"delete","node":{"key":"/message",'
'"modifiedIndex":\d+,"createdIndex":\d+},"prevNode":{"key":"/message"'
',"value":"Hello World","modifiedIndex":\d+,"createdIndex":\d+}}')
self.assertRegexpMatches(text, regex)
def test_d_directorylist(self):
#List a directory in Etcd
dir_list = self.etcd.list_directory('formations/cholcomb')
self.assertIsInstance(dir_list, list)
|
11563326
|
import unicodecsv
import matplotlib.pyplot as plt
import numpy
from collections import defaultdict
from scipy.stats import chisquare, ttest_ind
def n_utterances_counts(f_name, eou='__eou__'):
n_utterances = []
reader = unicodecsv.reader(open(f_name))
next(reader) # skip header
for line in reader:
n_utterances.append(line[0].count(eou))
return n_utterances
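# Example (assumed row format): a first-column dialogue cell such as
# "hi __eou__ hello __eou__" counts as 2 utterances.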
def train_stats(f_name, eou='__eou__', eot='__eot__'):
pos_utterances = []
pos_turns = []
pos_words = []
neg_utterances = []
neg_turns = []
neg_words = []
reader = unicodecsv.reader(open(f_name))
next(reader) # skip header
for line in reader:
if int(float(line[2])) == 1:
pos_utterances.append(line[0].count(eou))
pos_turns.append(line[0].count(eot))
pos_words.append(len(line[0].split()))
elif int(float(line[2])) == 0:
neg_utterances.append(line[0].count(eou))
neg_turns.append(line[0].count(eot))
neg_words.append(len(line[0].split()))
else:
            print(line[2])
return pos_utterances, pos_turns, pos_words, neg_utterances, neg_turns, neg_words
def normalize(data):
total = float(sum(data))
return data/total
def distribution(data, max_utt):
counts = defaultdict(int)
for d in data:
counts[d] += 1
total = float(len(data))
distr = numpy.zeros(max_utt)
for key, val in counts.iteritems():
distr[key] = val
return distr, normalize(distr)
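# Worked example: distribution([1, 1, 2], max_utt=4) returns the raw counts
# array([0., 2., 1., 0.]) and the normalised array([0., 2/3, 1/3, 0.]).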
def plot_histogram(data, title, x_label, y_label, **kwargs):
n, bins, patches = plt.hist(data, 500, facecolor='green', alpha=0.75, **kwargs)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(title)
plt.grid(True)
plt.show()
if __name__ == "__main__":
# load lists of number of utterances
train_n_uterrances = n_utterances_counts("/home/petrbel/ubuntu-ranking-dataset-creator/src/train.csv")
test_n_uterrances = n_utterances_counts("/home/petrbel/ubuntu-ranking-dataset-creator/src/test.csv")
valid_n_uterrances = n_utterances_counts("/home/petrbel/ubuntu-ranking-dataset-creator/src/valid.csv")
max_utt = max(max(train_n_uterrances), max(test_n_uterrances), max(valid_n_uterrances)) + 1
# train distribution
train_counts, train_distr = distribution(train_n_uterrances, max_utt=max_utt)
# test
expected_test_counts = train_distr * len(test_n_uterrances)
real_test_counts, test_distr = distribution(test_n_uterrances, max_utt=max_utt)
_, pvalue = chisquare(real_test_counts+1, expected_test_counts+1)
print("TestDataset: ChiSq pvalue={}".format(pvalue))
# valid
expected_valid_counts = train_distr * len(valid_n_uterrances)
real_valid_counts, valid_distr = distribution(valid_n_uterrances, max_utt=max_utt)
_, pvalue = chisquare(real_valid_counts+1, expected_valid_counts+1)
print("ValidDataset: ChiSq pvalue={}".format(pvalue))
# histograms
plot_histogram(train_n_uterrances, "Train Utterances", "Number of utterances", "Count")
plot_histogram(test_n_uterrances, "Test Utterances", "Number of utterances", "Count")
plot_histogram(valid_n_uterrances, "Valid Utterances", "Number of utterances", "Count")
# train stats
print("Train Min: {}".format(min(train_n_uterrances)))
print("Train Max: {}".format(max(train_n_uterrances)))
print("Train Mean: {}".format(numpy.mean(train_n_uterrances)))
print("Train Std: {}".format(numpy.std(train_n_uterrances)))
# test stats
print("Test Min: {}".format(min(test_n_uterrances)))
print("Test Max: {}".format(max(test_n_uterrances)))
print("Test Mean: {}".format(numpy.mean(test_n_uterrances)))
print("Test Std: {}".format(numpy.std(test_n_uterrances)))
# valid stats
print("Valid Min: {}".format(min(valid_n_uterrances)))
print("Valid Max: {}".format(max(valid_n_uterrances)))
print("Valid Mean: {}".format(numpy.mean(valid_n_uterrances)))
print("Valid Std: {}".format(numpy.std(valid_n_uterrances)))
# ttest of means
    _, pvalue = ttest_ind(train_n_uterrances, test_n_uterrances, equal_var=False)
    print("ttest: train-test, pvalue={}".format(pvalue))
    _, pvalue = ttest_ind(train_n_uterrances, valid_n_uterrances, equal_var=False)
    print("ttest: train-valid, pvalue={}".format(pvalue))
pos_utterances, pos_turns, pos_words, neg_utterances, neg_turns, neg_words = train_stats("/home/petrbel/ubuntu-ranking-dataset-creator/src/train.csv")
|
11563347
|
import pytest
from rest_framework import status
from usaspending_api.search.tests.data.utilities import setup_elasticsearch_test
url = "/api/v2/agency/{toptier_code}/sub_agency/count/{filter}"
@pytest.mark.django_db
def test_all_categories(client, monkeypatch, sub_agency_data_1, elasticsearch_transaction_index):
setup_elasticsearch_test(monkeypatch, elasticsearch_transaction_index)
resp = client.get(url.format(toptier_code="001", filter="?fiscal_year=2021"))
expected_results = {
"toptier_code": "001",
"fiscal_year": 2021,
"sub_agency_count": 1,
"office_count": 2,
"messages": [],
}
assert resp.status_code == status.HTTP_200_OK
assert resp.json() == expected_results
@pytest.mark.django_db
def test_alternate_year(client, monkeypatch, sub_agency_data_1, elasticsearch_transaction_index):
setup_elasticsearch_test(monkeypatch, elasticsearch_transaction_index)
resp = client.get(url.format(toptier_code="001", filter="?fiscal_year=2020"))
expected_results = {
"toptier_code": "001",
"fiscal_year": 2020,
"sub_agency_count": 1,
"office_count": 1,
"messages": [],
}
assert resp.status_code == status.HTTP_200_OK
assert resp.json() == expected_results
@pytest.mark.django_db
def test_alternate_agency(client, monkeypatch, sub_agency_data_1, elasticsearch_transaction_index):
setup_elasticsearch_test(monkeypatch, elasticsearch_transaction_index)
resp = client.get(url.format(toptier_code="002", filter="?fiscal_year=2021"))
expected_results = {
"toptier_code": "002",
"fiscal_year": 2021,
"sub_agency_count": 1,
"office_count": 1,
"messages": [],
}
assert resp.status_code == status.HTTP_200_OK
assert resp.json() == expected_results
@pytest.mark.django_db
def test_award_types(client, monkeypatch, sub_agency_data_1, elasticsearch_transaction_index):
setup_elasticsearch_test(monkeypatch, elasticsearch_transaction_index)
resp = client.get(url.format(toptier_code="001", filter="?fiscal_year=2021&award_type_codes=[A]"))
assert resp.status_code == status.HTTP_200_OK
expected_results = {
"toptier_code": "001",
"fiscal_year": 2021,
"sub_agency_count": 1,
"office_count": 1,
"messages": [],
}
assert resp.status_code == status.HTTP_200_OK
assert resp.json() == expected_results
@pytest.mark.django_db
def test_agency_types(client, monkeypatch, sub_agency_data_1, elasticsearch_transaction_index):
setup_elasticsearch_test(monkeypatch, elasticsearch_transaction_index)
resp = client.get(url.format(toptier_code="001", filter="?fiscal_year=2021&agency_type=funding"))
assert resp.status_code == status.HTTP_200_OK
expected_results = {
"toptier_code": "001",
"fiscal_year": 2021,
"sub_agency_count": 1,
"office_count": 1,
"messages": [],
}
assert resp.status_code == status.HTTP_200_OK
assert resp.json() == expected_results
@pytest.mark.django_db
def test_invalid_agency(client, monkeypatch, sub_agency_data_1, elasticsearch_account_index):
resp = client.get(url.format(toptier_code="XXX", filter="?fiscal_year=2021"))
assert resp.status_code == status.HTTP_404_NOT_FOUND
resp = client.get(url.format(toptier_code="999", filter="?fiscal_year=2021"))
assert resp.status_code == status.HTTP_404_NOT_FOUND
|
11563355
|
from builtins import property as _property, tuple as _tuple
from operator import itemgetter as _itemgetter
from collections import OrderedDict
class TIPO(tuple):
'TIPO(VALOR,)'
__slots__ = ()
_fields = ('VALOR',)
def __new__(_cls, VALOR,):
'Create new instance of TIPO(VALOR,)'
return _tuple.__new__(_cls, (VALOR,))
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new TIPO object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != 1:
            raise TypeError('Expected 1 argument, got %d' % len(result))
return result
def _replace(_self, **kwds):
'Return a new TIPO object replacing specified fields with new values'
result = _self._make(map(kwds.pop, ('VALOR',), _self))
if kwds:
raise ValueError('Got unexpected field names: %r' % list(kwds))
return result
def __repr__(self):
'Return a nicely formatted representation string'
return self.__class__.__name__ + '(VALOR=%r)' % self
def _asdict(self):
'Return a new OrderedDict which maps field names to their values.'
return OrderedDict(zip(self._fields, self))
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return tuple(self)
VALOR = _property(_itemgetter(0), doc='Alias for field number 0')
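# Usage sketch (assumption: this generated class mirrors
# collections.namedtuple semantics for the single 'VALOR' field):
#   t = TIPO._make([42])    # TIPO(VALOR=42)
#   t.VALOR                 # 42
#   t._replace(VALOR=7)     # TIPO(VALOR=7)
#   t._asdict()             # OrderedDict([('VALOR', 42)])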
|
11563367
|
import json
import os
import shutil
from multiprocessing import Pool
import pandas as pd
import covid_data_api
import covid_data_bed
import covid_data_briefing
import covid_data_dash
import covid_data_situation
import covid_data_testing
import covid_data_tweets
import covid_data_vac
from utils_pandas import add_data
from utils_pandas import export
from utils_pandas import import_csv
from utils_scraping import CHECK_NEWER
from utils_scraping import logger
from utils_scraping import USE_CACHE_DATA
from utils_scraping import web_files
from utils_thai import DISTRICT_RANGE
from utils_thai import get_fuzzy_provinces
from utils_thai import join_provinces
from utils_thai import today
def prov_to_districts(dfprov):
# Reduce down to health areas
dfprov_grouped = dfprov.groupby(["Date", "Health District Number"]).sum(min_count=1).reset_index()
dfprov_grouped = dfprov_grouped.pivot(index="Date", columns=['Health District Number'])
dfprov_grouped = dfprov_grouped.rename(columns=dict((i, f"Area {i}") for i in DISTRICT_RANGE))
    # Summing across all provinces can cause problems; data might be missing.
# by_type = dfprov_grouped.groupby(level=0, axis=1).sum(min_count=1)
# Collapse columns to "Cases Proactive Area 13" etc
dfprov_grouped.columns = dfprov_grouped.columns.map(' '.join).str.strip()
by_area = dfprov_grouped # .combine_first(by_type)
# Ensure we have all areas
for i in DISTRICT_RANGE:
col = f"Cases Walkin Area {i}"
if col not in by_area:
by_area[col] = by_area.get(col, pd.Series(index=by_area.index, name=col))
col = f"Cases Proactive Area {i}"
if col not in by_area:
by_area[col] = by_area.get(col, pd.Series(index=by_area.index, name=col))
return by_area
################################
# Misc
################################
def get_ifr():
# replace with https://stat.bora.dopa.go.th/new_stat/webPage/statByAgeMonth.php
url = "http://statbbi.nso.go.th/staticreport/Page/sector/EN/report/sector_01_11101_EN_.xlsx"
file, _, _ = next(web_files(url, dir="inputs/json", check=False))
pop = pd.read_excel(file, header=3, index_col=1)
def year_cols(start, end):
return [f"{i} year" for i in range(start, end)]
pop['At 0'] = pop[year_cols(1, 10) + ["under 1"]].sum(axis=1)
pop["At 10"] = pop[year_cols(10, 25)].sum(axis=1)
pop["At 25"] = pop[year_cols(25, 46) + ["47 year"] + year_cols(47, 54)].sum(axis=1)
pop["At 55"] = pop[year_cols(55, 65)].sum(axis=1)
pop["At 65"] = pop[year_cols(65, 73) + ["74 year", "74 year"]].sum(axis=1)
pop["At 75"] = pop[year_cols(75, 85)].sum(axis=1)
pop["At 85"] = pop[year_cols(85, 101) + ["101 and over"]].sum(axis=1)
# from http://epimonitor.net/Covid-IFR-Analysis.htm. Not sure why pd.read_html doesn't work in this case.
ifr = pd.DataFrame([[.002, .002, .01, .04, 1.4, 4.6, 15]],
columns=["At 0", "At 10", "At 25",
"At 55", "At 65", "At 75", "At 85"],
).transpose().rename(columns={0: "risk"})
pop = pop[ifr.index]
pop = pop.reset_index().dropna().set_index("Province").transpose()
unpop = pop.reset_index().melt(
id_vars=['index'],
var_name='Province',
value_name='Population'
).rename(columns=dict(index="Age"))
total_pop = unpop.groupby("Province").sum().rename(
columns=dict(Population="total_pop"))
unpop = unpop.join(total_pop, on="Province").join(ifr["risk"], on="Age")
unpop['ifr'] = unpop['Population'] / unpop['total_pop'] * unpop['risk']
provifr = unpop.groupby("Province").sum()
provifr = provifr.drop([p for p in provifr.index if "Region" in p] + ['Whole Kingdom'])
# now normalise the province names
provifr = join_provinces(provifr, "Province")
return provifr
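# The provincial IFR computed above is the population-weighted average of the
# per-age risks: ifr(province) = sum_age (pop_age / total_pop) * risk_age.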
def get_hospital_resources():
logger.info("========ArcGIS==========")
# PUI + confirmed, recovered etc stats
fields = [
'OBJECTID', 'ID', 'agency_code', 'label', 'agency_status', 'status',
'address', 'province', 'amphoe', 'tambol', 'latitude', 'longitude',
'level_performance', 'ministryname', 'depart', 'ShareRoom_Total',
'ShareRoom_Available', 'ShareRoom_Used', 'Private_AIIR_Total',
'Private_AIIR_Available', 'Private_AIIR_Used',
'Private_Modified_AIIR_Total', 'Private_Modified_AIIR_Available',
'Private_Modified_AIIR_Used', 'Private_Isolation_room_Total',
'Private_Isolation_room_Availabl', 'Private_Isolation_room_Used',
'Private_Cohort_ward_Total', 'Private_Cohort_ward_Available',
'Private_Cohort_ward_Used', 'Private_High_Flow_Total',
'Private_High_Flow_Available', 'Private_High_Flow_Used',
'Private_OR_negative_pressure_To', 'Private_OR_negative_pressure_Av',
'Private_OR_negative_pressure_Us', 'Private_ICU_Total',
'Private_ICU_Available', 'Private_ICU_Used',
'Private_ARI_clinic_Total', 'Private_ARI_clinic_Available',
'Private_ARI_clinic_Used', 'Volume_control_Total',
'Volume_control_Available', 'Volume_control_Used',
'Pressure_control_Total', 'Pressure_control_Available',
'Pressure_control_Used', 'Volumecontrol_Child_Total',
'Volumecontrol_Child_Available', 'Volumecontrol_Child_Used',
'Ambulance_Total', 'Ambulance_Availble', 'Ambulance_Used',
'Pills_Favipiravir_Total', 'Pills_Favipiravir_Available',
'Pills_Favipiravir_Used', 'Pills_Oseltamivir_Total',
'Pills_Oseltamivir_Available', 'Pills_Oseltamivir_Used',
'Pills_ChloroquinePhosphate_Tota', 'Pills_ChloroquinePhosphate_Avai',
'Pills_ChloroquinePhosphate_Used', 'Pills_LopinavirRitonavir_Total',
'Pills_LopinavirRitonavir_Availa', 'Pills_LopinavirRitonavir_Used',
'Pills_Darunavir_Total', 'Pills_Darunavir_Available',
'Pills_Darunavir_Used', 'Lab_PCRTest_Total', 'Lab_PCRTest_Available',
'Lab_PCRTest_Used', 'Lab_RapidTest_Total', 'Lab_RapidTest_Available',
'Lab_RapidTest_Used', 'Face_shield_Total', 'Face_shield_Available',
'Face_shield_Used', 'Cover_all_Total', 'Cover_all_Available',
'Cover_all_Used', 'ถุงมือไนไตรล์ชนิดใช้', 'ถุงมือไนไตรล์ชนิดใช้_1',
'ถุงมือไนไตรล์ชนิดใช้_2', 'ถุงมือไนไตรล์ชนิดใช้_3',
'ถุงมือไนไตรล์ชนิดใช้_4', 'ถุงมือไนไตรล์ชนิดใช้_5',
'ถุงมือยางชนิดใช้แล้ว', 'ถุงมือยางชนิดใช้แล้ว_1',
'ถุงมือยางชนิดใช้แล้ว_2', 'ถุงสวมขา_Leg_cover_Total',
'ถุงสวมขา_Leg_cover_Available', 'ถุงสวมขา_Leg_cover_Used',
'พลาสติกหุ้มคอ_HOOD_Total', 'พลาสติกหุ้มคอ_HOOD_Available',
'พลาสติกหุ้มคอ_HOOD_Used', 'พลาสติกหุ้มรองเท้า_Total',
'พลาสติกหุ้มรองเท้า_Availab', 'พลาสติกหุ้มรองเท้า_Used',
'แว่นครอบตาแบบใส_Goggles_Total', 'แว่นครอบตาแบบใส_Goggles_Availab',
'แว่นครอบตาแบบใส_Goggles_Used', 'เสื้อกาวน์ชนิดกันน้ำ_T',
'เสื้อกาวน์ชนิดกันน้ำ_A', 'เสื้อกาวน์ชนิดกันน้ำ_U',
'หมวกคลุมผมชนิดใช้แล้', 'หมวกคลุมผมชนิดใช้แล้_1',
'หมวกคลุมผมชนิดใช้แล้_2', 'เอี๊ยมพลาสติกใส_Apron_Total',
'เอี๊ยมพลาสติกใส_Apron_Available', 'เอี๊ยมพลาสติกใส_Apron_Used',
'UTM_Total', 'UTM_Available', 'UTM_Used', 'VTM_Total', 'VTM_Available',
'VTM_Used', 'Throat_Swab_Total', 'Throat_Swab_Available',
'Throat_Swab_Used', 'NS_Swab_Total', 'NS_Swab_Available',
'NS_Swab_Used', 'Surgicalmask_Total', 'Surgicalmask_Available',
'Surgicalmask_Used', 'N95_Total', 'N95_Available', 'N95_Used',
'Dr_ChestMedicine_Total', 'Dr_ChestMedicine_Available',
'Dr_ChestMedicine_Used', 'Dr_ID_Medicine_Total',
'Dr_ID_Medicine_Availble', 'Dr_ID_Medicine_Used', 'Dr_Medical_Total',
'Dr_Medical_Available', 'Dr_Medical_Used', 'Nurse_ICN_Total',
'Nurse_ICN_Available', 'Nurse_ICN_Used', 'Nurse_RN_Total',
'Nurse_RN_Available', 'Nurse_RN_Used', 'Pharmacist_Total',
'Pharmacist_Available', 'Pharmacist_Used', 'MedTechnologist_Total',
'MedTechnologist_Available', 'MedTechnologist_Used', 'Screen_POE',
'Screen_Walk_in', 'PUI', 'Confirm_mild', 'Confirm_moderate',
'Confirm_severe', 'Confirm_Recovered', 'Confirm_Death', 'GlobalID',
'region_health', 'CoverAll_capacity', 'ICU_Covid_capacity',
'N95_capacity', 'AIIR_room_capacity', 'CoverAll_status',
'Asymptomatic', 'ICUforCovidTotal', 'ICUforCovidAvailable',
'ICUforCovidUsed'
]
# pui = "https://services8.arcgis.com/241MQ9HtPclWYOzM/arcgis/rest/services/Corona_Date/FeatureServer/0/query?f=json&where=1%3D1&returnGeometry=false&spatialRel=esriSpatialRelIntersects&outFields=*&orderByFields=Date%20asc&resultOffset=0&resultRecordCount=32000&resultType=standard&cacheHint=true" # noqa: E501
# icu = "https://services8.arcgis.com/241MQ9HtPclWYOzM/arcgis/rest/services/Hospital_Data_Dashboard/FeatureServer/0/query?f=json&where=1%3D1&returnGeometry=false&spatialRel=esriSpatialRelIntersects&outFields=*&outStatistics=%5B%7B%22statisticType%22%3A%22sum%22%2C%22onStatisticField%22%3A%22Private_ICU_Total%22%2C%22outStatisticFieldName%22%3A%22value%22%7D%5D&resultType=standard&cacheHint=true" # noqa: E501
rows = []
for page in range(0, 2000, 1000):
every_district = f"https://services8.arcgis.com/241MQ9HtPclWYOzM/arcgis/rest/services/Hospital_Data_Dashboard/FeatureServer/0/query?f=json&where=1%3D1&returnGeometry=false&spatialRel=esriSpatialRelIntersects&outFields=*&resultOffset={page}&resultRecordCount=1000&cacheHint=true" # noqa: E501
file, content, _ = next(web_files(every_district, dir="inputs/json", check=True))
jcontent = json.loads(content)
rows.extend([x['attributes'] for x in jcontent['features']])
data = pd.DataFrame(rows).groupby("province").sum()
data['Date'] = today().date()
data['Date'] = pd.to_datetime(data['Date'])
data = data.reset_index().set_index(["Date", "province"])
old = import_csv("hospital_resources")
if old is not None:
old = old.set_index(["Date", "province"])
# TODO: seems to be dropping old data. Need to test
data = add_data(old, data)
export(data, "hospital_resources", csv_only=True)
return data
# TODO: Additional data sources
# - new moph apis
# - https://covid19.ddc.moph.go.th/
# - medical supplies (tableau)
# - https://public.tableau.com/app/profile/karon5500/viz/moph_covid_v3/Story1
# - is it accurate?
# - no timeseries
# - vaccine imports (unofficial) (getting out of date?)
# - https://docs.google.com/spreadsheets/u/1/d/1BaCh5Tbm1EXwh4SeRM9dv-yemK2J5RpO-dz28UVtX3s/htmlview?fbclid=IwAR36L3itMKFv6fq7q-7_CF4WpxtI-QGQAcJ1f62BLen6N6IHc1iq-u-wWNI/export?gid=0&format=csv # noqa
# - vaccine dashboard (power BI)
# - https://dashboard-vaccine.moph.go.th/dashboard.html
# - groups, ages, manuf per prov. ages per group all thailand
# - no timeseries
# - Vaccine total numbers in at risk groups
# - https://hdcservice.moph.go.th/hdc/main/index.php
# - vaccine slides
# - has complications list but in graphic
# - briefings
# - clusters per day
# - nationality of deaths
# - time to death?
# - deaths at home
# - test reports
# - top labs over time
# Public transport usage to determine mobility?
# - https://datagov.mot.go.th/dataset/covid-19/resource/71a552d0-0fea-4e05-b78c-42d58aa88db6
# - doesn't have pre 2020 dailies though
# health district 8 data - https://r8way.moph.go.th/r8way/covid-19
def scrape_and_combine():
os.makedirs("api", exist_ok=True)
quick = USE_CACHE_DATA and os.path.exists(os.path.join('api', 'combined.csv'))
MAX_DAYS = int(os.environ.get("MAX_DAYS", 1 if USE_CACHE_DATA else 0))
logger.info('\n\nUSE_CACHE_DATA = {}\nCHECK_NEWER = {}\nMAX_DAYS = {}\n\n', quick, CHECK_NEWER, MAX_DAYS)
# TODO: replace with cli --data=situation,briefings --start=2021-06-01 --end=2021-07-01
# "--data=" to plot only
if USE_CACHE_DATA and MAX_DAYS == 0:
old = import_csv("combined")
old = old.set_index("Date")
return old
with Pool(1 if MAX_DAYS > 0 else None) as pool:
dash_daily = pool.apply_async(covid_data_dash.dash_daily)
# These 3 are slowest so should go first
dash_by_province = pool.apply_async(covid_data_dash.dash_by_province)
# This doesn't add any more info since severe cases was a mistake
# dash_trends_prov = pool.apply_async(covid_data_dash.dash_trends_prov)
vac_slides = pool.apply_async(covid_data_vac.vac_slides)
vac_reports_and_prov = pool.apply_async(covid_data_vac.vaccination_reports)
beds = pool.apply_async(covid_data_bed.get_df)
# TODO: split vac slides as that's the slowest
briefings_prov__cases_briefings = pool.apply_async(covid_data_briefing.get_cases_by_prov_briefings)
dash_ages = pool.apply_async(covid_data_dash.dash_ages)
# today_situation = pool.apply_async(covid_data_situation.get_situation_today)
th_situation = pool.apply_async(covid_data_situation.get_thai_situation)
en_situation = pool.apply_async(covid_data_situation.get_en_situation)
cases_demo__risks_prov = pool.apply_async(covid_data_api.get_cases_by_demographics_api)
tweets_prov__twcases = pool.apply_async(covid_data_tweets.get_cases_by_prov_tweets)
timelineapi = pool.apply_async(covid_data_api.get_cases)
tests = pool.apply_async(covid_data_testing.get_tests_by_day)
tests_reports = pool.apply_async(covid_data_testing.get_test_reports)
xcess_deaths = pool.apply_async(covid_data_api.excess_deaths)
case_api_by_area = pool.apply_async(covid_data_api.get_cases_by_area_api) # can be very wrong for the last days
ihme_dataset = pool.apply_async(covid_data_api.ihme_dataset)
# Now block getting until we get each of the data
# today_situation = today_situation.get()
th_situation = th_situation.get()
en_situation = en_situation.get()
dash_daily = dash_daily.get()
dash_ages = dash_ages.get()
dash_by_province = dash_by_province.get()
# dash_trends_prov = dash_trends_prov.get()
vac_reports, vac_reports_prov = vac_reports_and_prov.get()
vac_slides = vac_slides.get()
ihme_dataset = ihme_dataset.get()
briefings_prov, cases_briefings = briefings_prov__cases_briefings.get()
cases_demo, risks_prov = cases_demo__risks_prov.get()
tweets_prov, twcases = tweets_prov__twcases.get()
timelineapi = timelineapi.get()
tests = tests.get()
tests_reports = tests_reports.get()
xcess_deaths.get()
case_api_by_area = case_api_by_area.get() # can be very wrong for the last days
beds = beds.get()
# Combine dashboard data
# dash_by_province = dash_trends_prov.combine_first(dash_by_province)
export(dash_by_province, "moph_dashboard_prov", csv_only=True, dir="inputs/json")
# "json" for caching, api so it's downloadable
shutil.copy(os.path.join("inputs", "json", "moph_dashboard_prov.csv"), "api")
shutil.copy(os.path.join("inputs", "json", "moph_dashboard.csv"), "api")
shutil.copy(os.path.join("inputs", "json", "moph_dashboard_ages.csv"), "api")
shutil.copy(os.path.join("inputs", "json", "moph_bed.csv"), "api")
# Export briefings
briefings = import_csv("cases_briefings", ["Date"], not USE_CACHE_DATA)
briefings = briefings.combine_first(cases_briefings).combine_first(twcases)
export(briefings, "cases_briefings")
# Export per province
dfprov = import_csv("cases_by_province", ["Date", "Province"], not USE_CACHE_DATA)
dfprov = dfprov.combine_first(
briefings_prov).combine_first(
dash_by_province).combine_first(
tweets_prov).combine_first(
risks_prov) # TODO: check they agree
dfprov = join_provinces(dfprov, on="Province")
if "Hospitalized Severe" in dfprov.columns:
# Made a mistake. This is really Cases Proactive
dfprov["Cases Proactve"] = dfprov["Hospitalized Severe"]
dfprov = dfprov.drop(columns=["Hospitalized Severe"])
export(dfprov, "cases_by_province")
# Export per district (except tests which are dodgy?)
by_area = prov_to_districts(dfprov[[c for c in dfprov.columns if "Tests" not in c]])
cases_by_area = import_csv("cases_by_area", ["Date"], not USE_CACHE_DATA)
cases_by_area = cases_by_area.combine_first(by_area).combine_first(case_api_by_area)
export(cases_by_area, "cases_by_area")
# Export IHME dataset
export(ihme_dataset, "ihme")
# Export situation
situation = covid_data_situation.export_situation(th_situation, en_situation)
vac = covid_data_vac.export_vaccinations(vac_reports, vac_reports_prov, vac_slides)
logger.info("========Combine all data sources==========")
df = pd.DataFrame(columns=["Date"]).set_index("Date")
for f in [tests_reports, tests, timelineapi, cases_briefings, twcases, cases_demo, cases_by_area, situation, vac, dash_ages, dash_daily]:
df = df.combine_first(f)
logger.info(df)
if quick:
old = import_csv("combined", index=["Date"])
df = df.combine_first(old)
return df
else:
export(df, "combined", csv_only=True)
export(get_fuzzy_provinces(), "fuzzy_provinces", csv_only=True)
return df
if __name__ == "__main__":
# does exports
scrape_and_combine()
|
11563373
|
from django.conf.urls import url
from . import index, yb
urlpatterns = [
url(r'^$', index.index),
url(r'^login$', yb.login),
url(r'^is_login$', yb.is_login),
url(r'^rush_yb$', yb.rush_yb),
url(r'^captcha$', yb.captcha),
url(r'^wangxin_jingyan$', yb.wangxin_jingyan),
]
|
11563382
|
routes = [
('Binance Futures', 'SKL-USDT', '5m', 'Ott2butKAMA', 'WL1T,O'),
('Binance Futures', 'RUNE-USDT', '5m', 'Ott2butKAMA', 'WL1T,O'),
('Binance Futures', 'AVAX-USDT', '5m', 'Ott2butKAMA', 'WL1T,O'),
('Binance Futures', 'ZEN-USDT', '5m', 'Ott2butKAMA', 'WL1T,O'), # cd
('Binance Futures', 'ONE-USDT', '5m', 'Ott2butKAMA', 'WL1T,O'), # Candle data
('Binance Futures', 'ETH-USDT', '5m', 'Ott2butKAMA', 'WL1T,O'),
('Binance Futures', 'ONT-USDT', '5m', 'Ott2butKAMA', 'WL1T,O'), # cd
('Binance Futures', 'STORJ-USDT', '5m', 'Ott2butKAMA', 'WL1T,O'), # cd
('Binance Futures', 'AKRO-USDT', '5m', 'Ott2butKAMA', 'WL1T,O'), # cd
#
('Binance Futures', 'BAT-USDT', '5m', 'Ott2butKAMA', 'WL1T,O'),
('Binance Futures', 'ZRX-USDT', '5m', 'Ott2butKAMA', 'WL1T,O'), # cd
('Binance Futures', 'SC-USDT', '5m', 'Ott2butKAMA', 'WL1T,O'), # cd
('Binance Futures', 'WAVES-USDT', '5m', 'Ott2butKAMA', 'WL1T,O'),
('Binance Futures', 'NEO-USDT', '5m', 'Ott2butKAMA', 'WL1T,O'),
('Binance Futures', 'BNB-USDT', '5m', 'Ott2butKAMA', 'WL1T,O'),
('Binance Futures', 'XTZ-USDT', '5m', 'Ott2butKAMA', 'WL1T,O'),
# ('Binance Futures', 'XLM-USDT', '5m', 'Ott2butKAMA', 'WL1T,O'),
# ('Binance Futures', 'XEM-USDT', '5m', 'Ott2butKAMA', 'WL1T,O'),
# ('Binance Futures', '1INCH-USDT', '5m', 'Ott2butKAMA', 'WL1T,O'),
# ('Binance Futures', 'MANA-USDT', '5m', 'Ott2butKAMA', 'WL1T,O'),
# ('Binance Futures', 'ANKR-USDT', '5m', 'Ott2butKAMA', 'WL1T,O'),
# ('Binance Futures', 'FIL-USDT', '5m', 'Ott2butKAMA', 'WL1T,O'),
# ('Binance Futures', 'RLC-USDT', '5m', 'Ott2butKAMA', 'WL1T,O'),
# ('Binance Futures', 'ENJ-USDT', '5m', 'Ott2butKAMA', 'WL1T,O'),
# ('Binance Futures', 'IOTA-USDT', '5m', 'Ott2butKAMA', 'WL1T,O'),
# ('Binance Futures', 'ALGO-USDT', '5m', 'Ott2butKAMA', 'WL1T,O'),
# ('Binance Futures', 'STMX-USDT', '5m', 'Ott2butKAMA', 'WL1T,O'),
# ('Binance Futures', 'BCH-USDT', '5m', 'Ott2butKAMA', 'WL1T,O'),
# ('Binance Futures', 'VET-USDT', '5m', 'Ott2butKAMA', 'WL1T,O'),
]
extra_candles = []
|
11563411
|
from setuptools import setup, find_packages
with open('README.rst') as f:
readme = f.read()
setup(
name='arlpy',
version='1.7.0',
description='ARL Python Tools',
long_description=readme,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/org-arl/arlpy',
license='BSD (3-clause)',
keywords='underwater acoustics signal processing communication',
packages=find_packages(exclude=('tests', 'docs')),
install_requires=[
'numpy>=1.18.1',
'scipy>=1.4.1',
'utm>=0.5.0',
'pandas>=1.0.1',
'bokeh>=1.4.0'
]
)
|
11563442
|
import ftplib
import gzip
import os.path
import requests
import responses
import shutil
import unittest
import urllib3
from contextlib import closing
from urllib.request import urlopen
from pkt_kg.utils import *
class TestDataUtilsDownloading(unittest.TestCase):
"""Class to test the downloading methods from the data utility script."""
def setUp(self):
# create temporary directory to store data for testing
current_directory = os.path.dirname(__file__)
dir_loc = os.path.join(current_directory, 'data/temp')
self.dir_loc = os.path.abspath(dir_loc)
os.mkdir(self.dir_loc)
# create fake zipped data
empty_zip_data = b'1F 8B 08 00 00 00 00 00 00 0B'
with open(self.dir_loc + '/variant_summary.txt.gz', 'wb') as zp:
zp.write(empty_zip_data)
content = b'Lots of content here'
with gzip.open(self.dir_loc + '/variant_summary.txt.gz', 'wb') as f:
f.write(content)
# create some fake ftp data
with open(self.dir_loc + '/hgnc_complete_set.txt', 'w') as file:
file.write('None')
# set some urls
self.url = 'https://proconsortium.org/download/current/promapping.txt'
self.ftp_url = 'http://ftp.ebi.ac.uk/pub/databases/genenames/hgnc/tsv/hgnc_complete_set.txt'
self.gzipped_ftp_url = 'ftp://ftp.ncbi.nlm.nih.gov/pub/clinvar/tab_delimited/variant_summary.txt.gz'
self.zipped_url = 'https://reactome.org/download/current/ReactomePathways.gmt.zip'
self.gzipped_url = 'https://www.disgenet.org/static/disgenet_ap1/files/downloads/disease_mappings.tsv.gz'
# set write location
self.write_location = self.dir_loc + '/'
return None
@responses.activate
def test_url_download_200(self):
"""Tests url_download method when returning a 200 status."""
# filename
filename = self.url.split('/')[-1]
# fake file connection
responses.add(
responses.GET,
self.url,
body='test',
status=200,
content_type='text/plain',
headers={'Content-Length': '1200'}
)
# test mocked download
r = requests.get(self.url, allow_redirects=True)
self.assertTrue(r.ok)
# test writing data
downloaded_data = open(self.write_location + '{filename}'.format(filename=filename), 'wb')
downloaded_data.write(r.content)
downloaded_data.close()
self.assertTrue(os.path.exists(self.write_location + filename))
return None
@responses.activate
def test_url_download_404(self):
"""Tests url_download method when not returning a 200 status."""
# fake file connection
responses.add(
responses.GET,
self.url,
body='test',
status=400,
content_type='text/plain',
headers={'Content-Length': '1200'}
)
# test mocked download
r = requests.get(self.url, allow_redirects=True)
self.assertFalse(r.ok)
return None
@responses.activate
def test_ftp_url_download(self):
"""Tests ftp_url_download method."""
# filename
filename = self.ftp_url.split('/')[-1]
# fake file connection
responses.add(
responses.GET,
self.ftp_url,
body='test',
status=200,
content_type='text/plain',
headers={'Content-Length': '1200'}
)
# test mocked download
with closing(urlopen(self.ftp_url)) as r:
with open(self.write_location + '{filename}'.format(filename=filename), 'wb') as f:
shutil.copyfileobj(r, f)
r.close()
# test mocked download
self.assertTrue(os.path.exists(self.write_location + filename))
return None
@responses.activate
def test_gzipped_ftp_url_download(self):
"""Tests gzipped_ftp_url_download method."""
# get ftp server info
file = self.gzipped_ftp_url.replace('ftp://', '').split('/')[-1]
write_loc = self.write_location + '{filename}'.format(filename=file)
        # read in gzipped file, decompress, and write to directory
with gzip.open(write_loc, 'rb') as fid_in:
with open(write_loc.replace('.gz', ''), 'wb') as f:
f.write(fid_in.read())
fid_in.close()
# change filename and remove gzipped and original files
os.remove(write_loc)
# test mocked download
self.assertFalse(os.path.exists(write_loc))
self.assertTrue(os.path.exists(write_loc[:-3]))
return None
@responses.activate
def test_zipped_url_download_200(self):
"""Tests zipped_url_download method when returning a 200 status."""
# filename
filename = self.zipped_url.split('/')[-1]
# fake file connection
responses.add(
responses.GET,
self.zipped_url,
body='test',
status=200,
content_type='text/plain',
headers={'Content-Length': '1200'}
)
# test mocked download
r = requests.get(self.zipped_url, allow_redirects=True)
self.assertTrue(r.ok)
# test writing data
downloaded_data = open(self.write_location + '{filename}'.format(filename=filename[:-4]), 'wb')
downloaded_data.write(r.content)
downloaded_data.close()
self.assertFalse(os.path.exists(self.write_location + filename))
self.assertTrue(os.path.exists(self.write_location + filename[:-4]))
return None
@responses.activate
def test_zipped_url_download_400(self):
"""Tests zipped_url_download method when not returning a 200 status."""
# fake file connection
responses.add(
responses.GET,
self.zipped_url,
body='test',
status=400,
content_type='text/plain',
headers={'Content-Length': '1200'}
)
# test mocked download
r = requests.get(self.zipped_url, allow_redirects=True)
self.assertFalse(r.ok)
return None
@responses.activate
def test_gzipped_url_download_200(self):
"""Tests gzipped_url_download method when returning a 200 status."""
# filename
filename = self.gzipped_url.split('/')[-1]
# fake file connection
responses.add(
responses.GET,
self.gzipped_url,
body=gzip.compress(b'test data'),
status=200,
content_type='gzip',
headers={'Content-Length': '1200'}
)
# test mocked download
r = requests.get(self.gzipped_url, allow_redirects=True)
self.assertTrue(r.ok)
# test writing data
with open(self.write_location + '{filename}'.format(filename=filename[:-3]), 'wb') as outfile:
outfile.write(gzip.decompress(r.content))
outfile.close()
self.assertFalse(os.path.exists(self.write_location + filename))
self.assertTrue(os.path.exists(self.write_location + filename[:-3]))
return None
@responses.activate
def test_gzipped_url_download_400(self):
"""Tests gzipped_url_download method when not returning 200 status."""
# fake file connection
responses.add(
responses.GET,
self.gzipped_url,
body=gzip.compress(b'test data'),
status=400,
content_type='gzip',
headers={'Content-Length': '1200'}
)
# test mocked download
r = requests.get(self.gzipped_url, allow_redirects=True)
self.assertFalse(r.ok)
return None
def test_data_downloader(self):
"""Tests data_downloader method."""
# url data
data_downloader(self.url, self.write_location)
self.assertTrue(os.path.exists(self.write_location + self.url.split('/')[-1]))
# ftp url data
data_downloader(self.ftp_url, self.write_location)
self.assertTrue(os.path.exists(self.write_location + self.ftp_url.split('/')[-1]))
# # gzipped ftp url data
# file = self.gzipped_ftp_url.replace('ftp://', '').split('/')[-1]
# write_loc = self.write_location + '{filename}'.format(filename=file)
# data_downloader(self.gzipped_ftp_url, self.write_location)
# self.assertTrue(os.path.exists(os.path.exists(write_loc[:-3])))
#
# # zipped data
# data_downloader(self.zipped_url, self.write_location)
# self.assertTrue(os.path.exists(self.write_location + self.zipped_url.split('/')[-1][:-4]))
#
# # gzipped data
# data_downloader(self.gzipped_url, self.write_location)
# self.assertTrue(os.path.exists(self.write_location + self.gzipped_url.split('/')[-1][:-3]))
return None
def tearDown(self):
# remove temp directory
shutil.rmtree(self.dir_loc)
return None
|
11563470
|
from sqlalchemy import Column, Integer, String, Boolean
from Agent import Base, Session
class APIVersion(Base):
"""description of class"""
__tablename__ = 'vw_apiversions'
    Id = Column(Integer, primary_key=True)
LunaAPIId = Column(Integer)
VersionName = Column(String)
AMLWorkspaceId = Column(Integer)
AzureDatabricksWorkspaceId = Column(Integer)
AzureSynapseWorkspaceId = Column(Integer)
GitRepoId = Column(Integer)
ModelDisplayName = Column(String)
ModelName = Column(String)
ModelVersion = Column(Integer)
EndpointName = Column(String)
EndpointVersion = Column(String)
IsManualInputEndpoint = Column(Boolean)
EndpointUrl = Column(String)
EndpointSwaggerUrl = Column(String)
EndpointAuthType = Column(String)
EndpointAuthKey = Column(String)
EndpointAuthAddTo = Column(String)
EndpointAuthSecretName = Column(String)
EndpointAuthTenantId = Column(String)
EndpointAuthClientId = Column(String)
GitVersion = Column(String)
LinkedServiceType = Column(String)
RunConfigFile = Column(String)
IsUseDefaultRunConfig = Column(Boolean)
IsRunProjectOnManagedCompute = Column(Boolean)
LinkedServiceComputeTarget = Column(String)
AdvancedSettings = Column(String)
CreatedTime = Column(String)
LastUpdatedTime = Column(String)
ApplicationName = Column(String)
APIName = Column(String)
APIType = Column(String)
@staticmethod
def Get(applicationName, apiName, versionName):
session = Session()
version = session.query(APIVersion).filter_by(ApplicationName = applicationName, APIName = apiName, VersionName = versionName).first()
session.close()
return version
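# Usage sketch (hypothetical application/API/version names; assumes the
# vw_apiversions view holds a matching row and Session is configured):
#   version = APIVersion.Get('myApp', 'myApi', 'v1.0')
#   if version:
#       print(version.EndpointUrl, version.EndpointAuthType)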
|
11563491
|
from UQpy.Surrogates.PCE.ErrorEstimation import ErrorEstimation
from UQpy.Surrogates.PCE.MomentEstimation import MomentEstimation
from UQpy.Surrogates.PCE.PCE import PCE
from UQpy.Surrogates.PCE.Polynomials import Polynomials
from UQpy.Surrogates.PCE.PolyChaosLasso import PolyChaosLasso
from UQpy.Surrogates.PCE.PolyChaosLstsq import PolyChaosLstsq
from UQpy.Surrogates.PCE.PolyChaosRidge import PolyChaosRidge
from UQpy.Surrogates.PCE.polynomials import *
|
11563497
|
from openprocurement.tender.core.procedure.utils import is_item_owner
from openprocurement.api.utils import raise_operation_error
from openprocurement.api.validation import OPERATIONS
def validate_download_bid_document(request, **_):
if request.params.get("download"):
document = request.validated["document"]
if (
document.get("confidentiality", "") == "buyerOnly"
and request.authenticated_role not in ("aboveThresholdReviewers", "sas")
and not is_item_owner(request, request.validated["bid"])
and not is_item_owner(request, request.validated["tender"])
):
raise_operation_error(request, "Document download forbidden.")
def validate_bid_document_in_tender_status(request, **_):
"""
active.tendering - tendering docs
active.awarded - qualification docs that should be posted into award (another temp solution)
"""
status = request.validated["tender"]["status"]
if status not in (
"active.tendering",
"active.qualification", # multi-lot procedure may be in this status despite of the active award
"active.awarded",
):
operation = OPERATIONS.get(request.method)
raise_operation_error(
request,
"Can't {} document in current ({}) tender status".format(operation, status)
)
def validate_bid_document_operation_in_award_status(request, **_):
if request.validated["tender"]["status"] in ("active.qualification", "active.awarded") and not any(
award["status"] == "active" and award["bid_id"] == request.validated["bid"]["id"]
for award in request.validated["tender"].get("awards", "")
):
raise_operation_error(
request,
"Can't {} document because award of bid is not active".format(
OPERATIONS.get(request.method)
),
)
def validate_update_bid_document_confidentiality(request, **_):
tender_status = request.validated["tender"]["status"]
if tender_status != "active.tendering" and "confidentiality" in request.validated.get("data", {}):
document = request.validated["document"]
if document.get("confidentiality", "public") != request.validated["data"]["confidentiality"]:
raise_operation_error(
request,
"Can't update document confidentiality in current ({}) tender status".format(tender_status),
)
# award document
def validate_accepted_complaints(request, **kwargs):
award_lot = request.validated["award"].get("lotID")
if any(
any(c.get("status") == "accepted" for c in i.get("complaints", ""))
for i in request.validated["tender"].get("awards", "")
if i.get("lotID") == award_lot
):
raise_operation_error(
request,
f"Can't {OPERATIONS.get(request.method)} document with accepted complaint",
)
|
11563501
|
import re
from ztag.annotation import Annotation
from ztag.annotation import OperatingSystem
from ztag import protocols
import ztag.test
class FtpZxfs(Annotation):
protocol = protocols.FTP
subprotocol = protocols.FTP.BANNER
port = None
    version_re = re.compile(r"^220-ZXFS Ftp Server v(\d+\.\d+)", re.IGNORECASE)
    revision_re = re.compile(r"\((build \d+)\)", re.IGNORECASE)
tests = {
"FtpZxfs_1": {
"local_metadata": {
"product": "ZXFS",
"version": "1.0"
}
},
"FtpZxfs_2": {
"local_metadata": {
"product": "ZXFS",
"version": "1.0",
"revision": "build 1027"
}
},
}
def process(self, obj, meta):
banner = obj["banner"]
if banner.startswith("220-ZXFS Ftp Server v"):
meta.local_metadata.product = "ZXFS"
version = self.version_re.search(banner).group(1)
meta.local_metadata.version = version
rev = self.revision_re.search(banner)
if rev:
meta.local_metadata.revision = rev.group(1)
return meta
""" Tests
"220-ZXFS Ftp Server v1.0, Service ready for new user.\r\n 1 user online, your ip is 192.168.3.11:37454\r\n220 this server supports SIZE, resume broken downloads\r\n"
"220-ZXFS Ftp Server v1.0, Service ready for new user.\r\n 1 user online, your ip is 192.168.3.11:51903\r\n220 this server supports SIZE, resume broken downloads\r\n"
"220-ZXFS Ftp Server v1.0, Service ready for new user.\r\n 1 user online, your ip is 192.168.3.11:31863\r\n220 this server supports SIZE, resume broken downloads\r\n"
"220-ZXFS Ftp Server v1.0, Service ready for new user.\r\n 1 user online, your ip is 192.168.3.11:41810\r\n220 this server supports SIZE, resume broken downloads\r\n"
"220-ZXFS Ftp Server v1.0(build 1027), Service ready for new user.\r\n 1 user online, your ip is 192.168.3.11:32338\r\n220 this server supports SIZE, resume broken downloads\r\n"
"""
|
11563506
|
import pytest
from aiohttp_admin2.mappers import Mapper
from aiohttp_admin2.mappers import fields
class StringMapper(Mapper):
field = fields.StringField()
class LongStringFieldMapper(Mapper):
field = fields.LongStringField()
@pytest.mark.parametrize('mapper_cls', [StringMapper, LongStringFieldMapper])
def test_correct_str_type(mapper_cls):
"""
    Check that values of various types are successfully converted to str.
"""
mapper = mapper_cls({"field": 1})
mapper.is_valid()
assert mapper.data["field"] == '1'
mapper = mapper_cls({"field": False})
mapper.is_valid()
assert mapper.data["field"] == "False"
mapper = mapper_cls({"field": -3})
mapper.is_valid()
assert mapper.data["field"] == "-3"
mapper = mapper_cls({"field": 0.0})
mapper.is_valid()
assert mapper.data["field"] == "0.0"
mapper = mapper_cls({"field": "string"})
mapper.is_valid()
assert mapper.data["field"] == "string"
mapper = mapper_cls({"field": ""})
mapper.is_valid()
assert mapper.data["field"] == ""
mapper = mapper_cls({"field": [1, 2]})
mapper.is_valid()
assert mapper.data["field"] == "[1, 2]"
|
11563511
|
import os
import sys
sys.path.insert(0, os.path.abspath("../src/djask"))
project = "Djask"
copyright = "2021, <NAME>"
author = "<NAME>"
release = "0.5.0"
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"sphinx_tabs.tabs",
"pallets_sphinx_themes",
]
templates_path = ["_templates"]
exclude_patterns = []
autodoc_typehints = "description"
intersphinx_mapping = {
"flask": ("https://flask.palletsprojects.com/", None),
}
html_theme = "flask"
html_static_path = ["_static"]
html_favicon = "_static/djask.ico"
|
11563546
|
from sympy.abc import s
from sympy.physics.control.lti import TransferFunction
from sympy.physics.control.control_plots import step_response_plot
tf1 = TransferFunction(8*s**2 + 18*s + 32, s**3 + 6*s**2 + 14*s + 24, s)
step_response_plot(tf1) # doctest: +SKIP
|
11563551
|
from _pagetests import _PageTestsKeywords
from _draganddrop import _DragAndDropKeywords
from _actionchains import _ActionChainsKeywords
__all__ = [
"_PageTestsKeywords",
"_DragAndDropKeywords",
"_ActionChainsKeywords"
]
|
11563575
|
from __future__ import annotations
import json
import re
from subprocess import DEVNULL, PIPE
import sys
import tempfile
from dataclasses import dataclass
from datetime import date
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
from anitracker import logger, ffprobe_cmd, ffmpeg_cmd
from anitracker.media.media import BaseAnime, BaseCollection
from anitracker.utilities import UserStatus
from anitracker.utilities import subprocess
if TYPE_CHECKING:
from anitracker.sync import AniList
__all__ = (
"Anime",
"AnimeCollection",
"AnimeFile",
"SubtitleTrack",
)
def ffprobe_data(file: str) -> Dict:
args = [ffprobe_cmd, "-show_format", "-show_streams", "-of", "json", file]
logger.info(f"Running ffprobe command {args}")
# I hate windows
stdin = None
shell = False
if sys.platform.startswith("win32"):
stdin = DEVNULL
shell = True
out, _ = subprocess.run(
args,
stdout=PIPE,
stdin=stdin,
shell=shell,
)
if out:
return json.loads(out)
else:
return {}
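# e.g. ffprobe_data('episode.mkv').get('streams', []) yields one dict per
# stream, each carrying keys such as 'codec_type' (used by load_subtitles
# below to pick out subtitle tracks).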
@dataclass
class NyaaResult:
title: str
link: str
magnet: str
size: str
upload_date: str
seeders: int
leechers: int
downloads: int
def __repr__(self) -> str:
        return (
            f"<NyaaResult "
            f"title={self.title} "
            f"size={self.size} "
            f"upload_date={self.upload_date} "
            f"seeders={self.seeders} "
            f"leechers={self.leechers} "
            f"downloads={self.downloads}>"
        )
__str__ = __repr__
@classmethod
def from_data(cls, result) -> NyaaResult:
children = result.findAll("td")
link = children[2].find("a", href=re.compile(r"^\/download\/.*$")).get("href")
magnet = children[2].find("a", href=re.compile(r"^magnet:.*$")).get("href")
return cls(
children[1].find("a", href=re.compile(r"^/view/\d+$")).text,
link,
magnet,
children[3].text,
str(date.fromtimestamp(int(children[4]["data-timestamp"]))),
int(children[5].text),
int(children[6].text),
int(children[7].text),
)
@dataclass
class Anime(BaseAnime):
def __repr__(self) -> str:
return f"<Anime id={self.id} title={self.english_title}>"
__str__ = __repr__
def __hash__(self) -> int:
return self.id
def __eq__(self, o: object) -> bool:
return isinstance(o, Anime) and self.id == o.id
def edit(self, sync: AniList, *, status: UserStatus):
sync.gql("update_entry", {"mediaId": self.id, "status": status.name})
@dataclass
class AnimeCollection(BaseCollection, Anime):
def __repr__(self) -> str:
return f"<AnimeCollection(id={self.id} user_status={self.user_status} title={self.english_title})>"
__str__ = __repr__
def edit(
self,
sync: AniList,
*,
status: Optional[UserStatus] = None,
score: Optional[float] = None,
progress: Optional[int] = None,
repeat: Optional[int] = None,
notes: Optional[str] = None,
started_at: Optional[date] = None,
completed_at: Optional[date] = None,
):
payload: Dict[str, Any] = {"id": self._list_id}
if status is not None:
payload["status"] = status.name
if score is not None:
payload["score"] = score
if progress is not None:
payload["progress"] = progress
if repeat is not None:
payload["repeat"] = repeat
if notes is not None:
payload["notes"] = notes
if completed_at is not None:
payload["completedAt"] = {
"year": completed_at.year,
"month": completed_at.month,
"day": completed_at.day,
}
if started_at is not None:
payload["startedAt"] = {
"year": started_at.year,
"month": started_at.month,
"day": started_at.day,
}
ret = sync.gql("update_entry", payload)
self.update_user_data(ret["data"]["SaveMediaListEntry"])
def update_user_data(self, data: Dict):
self.user_status = UserStatus[data["status"]]
self.score = data["score"]
self.notes = data["notes"]
self.progress = data["progress"]
self.repeat = data["repeat"]
self.updated_at = (
date.fromtimestamp(data["updatedAt"]) if data["updatedAt"] else None
)
user_start = (
date(**data["startedAt"])
if all(value for value in data["startedAt"].values())
else None
)
user_end = (
date(**data["completedAt"])
if all(value for value in data["completedAt"].values())
else None
)
self.user_start_date = user_start
self.user_end_date = user_end
def delete(self, sync: AniList):
sync.gql("delete_entry", {"id": self._list_id})
class AnimeFile:
title: str
season: int
episode_title: str
file: str
episode_number: int
subtitles: List[SubtitleTrack]
alternate_title: Optional[str]
_thumbnail: Optional[bytes]
def __repr__(self) -> str:
return f"<AnimeFile title={self.title} season={self.season} episode_number={self.episode_number}>"
__str__ = __repr__
@classmethod
def from_data(cls, data: Dict) -> Union[List[AnimeFile], AnimeFile]:
# TODO: Probably create my own parser, this fails on e.g.
# [Samir755] Violet Evergarden - 05- You Write Letters That Bring People Together.mkv
# (No space after the episode number)
episode = data["episode"]
def ret_file(_episode: str) -> AnimeFile:
inst = cls()
inst.title = data["anime_title"]
inst.season = int(data.get("season", 1))
inst.episode_title = data.get("episode_title", "Unknown")
inst.alternate_title = data.get("alternate_title")
inst.file = data["file_name"]
inst.episode_number = int(_episode)
inst.subtitles = []
inst._thumbnail = None
return inst
# We want to return a list of files, one for each episode
if isinstance(episode, list):
results = []
for ep in episode:
results.append(ret_file(ep))
return results
elif isinstance(episode, str):
return ret_file(episode)
else:
raise TypeError(
f"Could not parse data, expected list or string as episode, got {type(episode)}"
)
@property
def thumbnail(self):
if self._thumbnail is None:
with tempfile.NamedTemporaryFile(suffix=".jpg") as f:
cmd = [
ffmpeg_cmd,
"-ss",
"00:03:30.00",
"-i",
self.file,
"-vframes",
"1",
"-y",
f.name,
]
subprocess.run(cmd)
f.seek(0)
image = f.read()
self._thumbnail = image
return self._thumbnail
def load_subtitles(self, standalone_subs: Dict[Tuple[str, int], str]):
self.subtitles = []
sub_id = 1
for stream in ffprobe_data(self.file).get("streams", []):
if stream["codec_type"] == "subtitle":
# Insert the sub_id into the data
stream["tags"]["id"] = sub_id
self.subtitles.append(SubtitleTrack.from_data(stream))
sub_id += 1
# Now find all the matching standalone ones
for (title, episode_number), track in standalone_subs.items():
if title == self.title and episode_number == self.episode_number:
self.subtitles.append(SubtitleTrack.from_file(track))
class SubtitleTrack:
language: str
title: str
id: int
file: Optional[str]
def __repr__(self) -> str:
return f"<SubtitleTrack lang='{self.language}' title='{self.title}'>"
__str__ = __repr__
@classmethod
def from_data(cls, data: Dict):
inst = cls()
inst.language = data.get("tags", {}).get("language", "Unknown")
inst.title = data.get("tags", {}).get("title", "Unknown")
inst.id = data.get("tags", {}).get("id", 0)
if "file_name" in data:
inst.file = data["file_name"]
else:
inst.file = None
return inst
@classmethod
def from_file(cls, file: str):
data = ffprobe_data(file)["streams"][0]
data["file_name"] = file
return cls.from_data(data)
@property
def is_songs_signs(self) -> bool:
"""A convenience property that tries to guess if this subtitle track is
just for songs and signs, based on the name"""
return "songs" in self.title.lower() or "signs" in self.title.lower()
|
11563592
|
import requests
import re
import sys
import json
# Purpose: initial web setup + register the admin account + log in as admin and create the dxy user
#url="http://192.168.56.100"
url="http://anchor_cms"
requests.adapters.DEFAULT_RETRIES = 5
s = requests.Session()
s.keep_alive = False
def register_admin():
data0 = {
"language": "en_GB",
"timezone": "UTC"
}
r = s.post(url+'/install/index.php?route=/start', data0)
#host= 'http://anchor_cms_mysql'
#host=re.search('(.+?)/install/index.php?route=/database', r.url).group(1)
data1 = {
"driver": 'mysql',
"host": '192.168.56.100',
#"host": host,
"port": '3306',
"user" : 'username',
"pass": 'password',
"name": "anchor_cms",
"prefix": "anchor",
"collation": 'utf8mb4_unicode_ci'
}
r = s.post(url+'/install/index.php?route=/database', data1)
if r.status_code != 200:
print("[!] database set incorrect.")
exit()
print("[+] database set successful.")
data2 = {
"site_name": "My+First+Anchor+Blog",
"site_description": "It’s+not+just+any+blog.+It’s+an+Anchor+blog.",
"site_path": "/",
"theme": "default"
}
r = s.post(url+'/install/index.php?route=/metadata', data2)
if r.status_code != 200:
print("[!] metadata set incorrect.")
exit()
print("[+] metadata set successful.")
data3 = {
"username": "admin",
"email": "<EMAIL>",
"password": "<PASSWORD>"
}
r = s.post(url+'/install/index.php?route=/account', data3)
if r.status_code != 200:
print("[!] account_admin register incorrect.")
exit()
print("[+] account_admin register successful.")
def login_admin():
set_page = s.get(url+'/index.php/admin/login')
token = re.search('name="token" type="hidden" value=\"(.+?)\"', set_page.text).group(1)
data4 = {
"token": token,
"user": "admin",
"pass": "<PASSWORD>"
}
r = s.post(url+'/index.php/admin/login', data4)
if r.status_code != 200:
print("[!] account_admin loggin incorrect.")
exit()
print("[+] account_admin loggin successful.")
def register_victim():
set_page = s.get(url+'/admin/users/add')
token = re.search('name="token" type="hidden" value=\"(.+?)\"', set_page.text).group(1)
data5 = {
"token": token,
"real_name": 'dxy',
"bio": '',
"status": 'active',
"username": 'dxy',
"password": '<PASSWORD>',
"email": '<EMAIL>'
}
r = s.post(url+'/admin/users/add', data5)
if r.status_code != 200:
print("[!] account_dxy register incorrect.")
exit()
print("[+] account_dxy register successful.")
if __name__ == '__main__':
register_admin()
login_admin()
register_victim()
|
11563632
|
from ..moc import MOC, World2ScreenMPL
from astropy import units as u
from astropy.coordinates import SkyCoord
import numpy as np
def test_create_from_polygon():
lon = [83.71315909, 83.71378887, 83.71297292, 83.71233919] * u.deg
lat = [-5.44217436,-5.44298864, -5.44361751, -5.4428033] * u.deg
moc = MOC.from_polygon(lon=lon, lat=lat, max_depth=20)
truth_ipix_d = {'17': [89921647231],
'18': [359686588915,
359686588918,
359686588919,
359686588921,
359686588923,
359686589268,
359686589269,
359686589608,
359686589610,
359686589952],
'19': [1438746355657,
1438746355659,
1438746357060,
1438746357061,
1438746357063,
1438746358408,
1438746358410,
1438746359814],
'20': [5754985422603,
5754985422606,
5754985422607,
5754985422618,
5754985422619,
5754985422622,
5754985422623,
5754985422625,
5754985422627,
5754985422633,
5754985422635,
5754985422664,
5754985422665,
5754985422666,
5754985422667,
5754985422668,
5754985422669,
5754985422670,
5754985422671,
5754985422680,
5754985422681,
5754985422682,
5754985422683,
5754985422684,
5754985422685,
5754985422686,
5754985422687,
5754985422721,
5754985422724,
5754985422725,
5754985422726,
5754985422727,
5754985422732,
5754985422733,
5754985422734,
5754985422735,
5754985422756,
5754985422757,
5754985422758,
5754985422759,
5754985422765,
5754985422767,
5754985428229,
5754985428231,
5754985428237,
5754985428248,
5754985428249,
5754985428250,
5754985428251,
5754985428272,
5754985428273,
5754985428274,
5754985428275,
5754985428276,
5754985428277,
5754985428278,
5754985428279,
5754985428320,
5754985428321,
5754985428322,
5754985428323,
5754985428324,
5754985428325,
5754985428326,
5754985428327,
5754985428336,
5754985428337,
5754985428338,
5754985428339,
5754985428340,
5754985428341,
5754985428342,
5754985428343,
5754985433608,
5754985433609,
5754985433610,
5754985433611,
5754985433612,
5754985433613,
5754985433614,
5754985433615,
5754985433636,
5754985433637,
5754985433638,
5754985433639,
5754985433644,
5754985433645,
5754985433646,
5754985433647,
5754985433656,
5754985433658,
5754985433744,
5754985433746,
5754985433752,
5754985433754,
5754985433755,
5754985433776,
5754985433777,
5754985433778,
5754985433779,
5754985433784,
5754985433785,
5754985433786,
5754985433787,
5754985439248,
5754985439249,
5754985439250,
5754985439251,
5754985439252,
5754985439254,
5754985439260,
5754985439262,
5754985439264,
5754985439265,
5754985439266,
5754985439267,
5754985439268,
5754985439269,
5754985439270,
5754985439271,
5754985439280,
5754985439281,
5754985439282,
5754985439283,
5754985439284]}
moc_truth = MOC.from_json(truth_ipix_d)
assert(moc == moc_truth)
def test_polygon2_issue_44():
from astropy import units as u
from mocpy import MOC
import numpy as np
ra = [174.75937396073138, 185.24062603926856, 184.63292896369916, 175.3670710363009]
dec = [-49.16744206799886, -49.16744206799887, -42.32049830486584, -42.32049830486584]
moc = MOC.from_polygon(ra * u.deg, dec * u.deg)
assert not moc.empty()
# Test from https://github.com/cds-astro/mocpy/issues/50
def test_polygon_issue_50():
from mocpy import MOC
from astropy.coordinates import SkyCoord
from astropy import units as u
coords = SkyCoord([(353.8156714, -56.33202193), (6.1843286, -56.33202193), (5.27558041, -49.49378172), (354.72441959, -49.49378172)], unit=u.deg)
moc = MOC.from_polygon_skycoord(coords)
assert not moc.empty()
|
11563646
|
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
'/conjurinc/ansible-role-conjur/tests/inventory.tmp').get_hosts('testapp')
def test_hosts_file(host):
f = host.file('/etc/hosts')
assert f.exists
assert f.user == 'root'
assert f.group == 'root'
def test_retrieved_secret(host):
secrets_file = host.file('conjur_secrets.txt')
assert secrets_file.exists
result = host.check_output("cat conjur_secrets.txt", shell=True)
assert result == "ansible_master_secret_password"
|
11563699
|
import cv2 as cv

# Identify the camera (device 0)
capture = cv.VideoCapture(0)
# Loop to capture our images in sequence
while capture.isOpened():
    ok, frame = capture.read()                      # capture a frame
    if not ok or cv.waitKey(1) & 0xFF == ord('q'):  # stop condition
        break
    cv.imshow('frame', frame)                       # show the captured frame
capture.release()
cv.destroyAllWindows()
|
11563747
|
import argparse
import configparser
import os
from typing import Any, Dict, List, Optional
str_settings = (
"pub_key",
"secret",
"api_version",
"api_base",
"upload_base",
)
bool_settings = (
"verify_api_ssl",
"verify_upload_ssl",
)
client_setting_mapping = {
"pub_key": "public_key",
"secret": "secret_key",
}
def load_config_from_file( # noqa: C901
filename, conf: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
if not conf:
conf = {}
filename = os.path.expanduser(filename)
if not os.path.exists(filename):
return conf
config = configparser.RawConfigParser()
config.read(filename)
for name in str_settings:
try:
conf[name] = config.get("ucare", name)
except (configparser.NoOptionError, configparser.NoSectionError):
pass
for name in bool_settings:
try:
conf[name] = config.getboolean("ucare", name)
except (configparser.NoOptionError, configparser.NoSectionError):
pass
return conf
def load_config_from_args( # noqa: C901
arg_namespace, conf: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
if not conf:
conf = {}
for name in str_settings:
arg = getattr(arg_namespace, name, None)
if arg is not None:
conf[name] = arg
if arg_namespace and arg_namespace.no_check_upload_certificate:
conf["verify_upload_ssl"] = False
if arg_namespace and arg_namespace.no_check_api_certificate:
conf["verify_api_ssl"] = False
if getattr(arg_namespace, "cdnurl", False):
arg_namespace.store = True
return conf
def load_config( # noqa: C901
arg_namespace: Optional[argparse.Namespace] = None,
config_file_names: Optional[List[str]] = None,
) -> Dict[str, Any]:
conf: Dict[str, Any] = {}
if config_file_names:
for file_name in config_file_names:
conf = load_config_from_file(file_name, conf)
if arg_namespace:
conf = load_config_from_args(arg_namespace, conf)
client_conf = {}
for key, value in conf.items():
if key in client_setting_mapping:
key = client_setting_mapping[key]
client_conf[key] = value
return client_conf
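if __name__ == "__main__":
    # Illustrative precedence check (a sketch, not part of the library):
    # settings from config files are loaded first, then command-line
    # arguments override them, and `pub_key`/`secret` are renamed to the
    # client's `public_key`/`secret_key`. The file name below is hypothetical.
    ns = argparse.Namespace(
        pub_key="demopublickey", secret=None, api_version=None,
        api_base=None, upload_base=None,
        no_check_upload_certificate=False, no_check_api_certificate=False,
    )
    print(load_config(arg_namespace=ns, config_file_names=["~/.ucare.ini"]))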
|
11563761
|
import pandas as pd
import numpy as np
from pathlib import Path
from shutil import copyfile
from src.datasets.tools.transforms import GlobalShift
import code
class HarmonizationMapping:
def __init__(self, config):
scans_path = config['dataset']['scans_path']
target_scan_num = config['dataset']['target_scan']
harmonization_path = config['dataset']['harmonized_path']
self.harmonization_path = Path(harmonization_path)
self.harmonization_path.mkdir(exist_ok=True, parents=True)
# 1. collect all scans
scans = [str(f) for f in Path(scans_path).glob("*.npy")]
# 2. select target scan(s)
target_scan_path = Path(scans_path) / (target_scan_num+".npy")
# copy to the harmonized path.
# - one time this just didn't work. Deleting the copy and restarting the
# program seemed to work.
if not (self.harmonization_path / (target_scan_num+".npy")).exists():
if not config['dataset']['shift']:
copyfile(
str(target_scan_path),
str(self.harmonization_path / (target_scan_num+".npy")))
else:
# move this later?
target = np.load(str(target_scan_path))
G = GlobalShift(**config["dataset"])
target = G(target)
np.save(str(self.harmonization_path / (target_scan_num+".npy")), target)
if not config['dataset']['create_new']:
if (self.harmonization_path / "df.csv").exists():
self.df = pd.read_csv((self.harmonization_path / "df.csv"), index_col=0)
else:
exit(f"Couldn't find HM csv file at {self.harmonization_path / 'df.csv'}")
else:
if (self.harmonization_path / "df.csv").exists():
# store a backup just in case
copyfile(str(self.harmonization_path / "df.csv"),
str(self.harmonization_path / "df_old.csv")
)
# initialize the df
self.df = pd.DataFrame(
columns=["source_scan",
"harmonization_target",
"source_scan_path",
"harmonization_scan_path",
"processing_stage"])
self.df.source_scan_path = scans
self.df.harmonization_target = [None]*len(scans)
self.df.harmonization_scan_path = [None]*len(scans)
self.df.source_scan = [int(Path(f).stem) for f in scans]
self.df.processing_stage = [0]*len(scans)
# setup target scan
target_scan_num = int(target_scan_num)
self.df.loc[self.df.source_scan == target_scan_num, "harmonization_target"] = int(target_scan_num)
self.df.loc[self.df.source_scan == target_scan_num, "harmonization_scan_path"] = str(self.harmonization_path / (str(target_scan_num)+".npy"))
self.df.loc[self.df.source_scan == target_scan_num, "processing_stage"] = 2
# need processing stages for each source. Sources start at stage 0.
# Stage 0 means that the sources haven't been identified as having
# any overlap with a target scan. By extension, they don't have
# examples in the dataset, nor do they have the harmonized
# version. A source scan enters stage one after overlap in the
# scan has been detected and examples have been added to the
# dataset. After a model is trained with the new dataset, this
# source scan can then be harmonized with the target. The source
# scan enters stage 2 after it has been harmonized. This source
# scan can now be used as a target scan to search for overlap
        # regions with other source scans. After all sources have been
# checked for overlap, the stage 2 source scan can then be moved
# to stage 3 (done). Stage 3 scans do not have to be used again.
        # The harmonization process is finished when all scans are stage
# 2 or higher OR all scans are stage 3 or stage 0.
self.save()
def __getitem__(self, source_scan_num):
# return the entire row for a source scan num (float or int or str)
return self.df.loc[self.df.source_scan == int(source_scan_num)]
def __len__(self):
return len(self.df)
def save(self):
self.df.to_csv(self.harmonization_path / "df.csv")
def done(self):
        # there are two conditions for being done. If either is not satisfied,
# then the whole process is not finished. The first condition is that
# all sources must be harmonized (all scans are stage 2 and above). In
# the event that a scan does not contain enough overlap to reach stage
# 1, all stage 2 and above scans will be harmonized to stage 3 while
# searching for overlap, so there will be no stage 1 or stage 2 sources
# remaining.
# All scans are harmonized
cond1 = ((1 not in self.df.processing_stage.values) and
(0 not in self.df.processing_stage.values))
# All scans are harmonized except for stage 0 scans which don't have
# any reasonable overlap
cond2 = ((2 not in self.df.processing_stage.values) and
(1 not in self.df.processing_stage.values))
return cond1 or cond2
def add_target(self, source_scan_num, harmonization_target_num):
self.df.loc[self.df.source_scan == int(source_scan_num), "harmonization_target"] = harmonization_target_num
self.save()
def incr_stage(self, source_scan_num):
self.df.loc[self.df.source_scan == int(source_scan_num), "processing_stage"] += 1
self.save()
def get_stage(self, stage_num):
return self.df.loc[self.df.processing_stage == int(stage_num)].source_scan.values.tolist()
def add_harmonized_scan_path(self, source_scan_num):
self.df.loc[self.df.source_scan == int(source_scan_num), "harmonization_scan_path"] = str(self.harmonization_path / (str(source_scan_num)+".npy"))
self.save()
def print_mapping(self):
print("Final Mapping:")
for idx, row in self.df.iterrows():
print(f"{row.source_scan}: {row.harmonization_target}")
|
11563778
|
import logging
import math
import os
import pickle
import cv2
from kawalc1 import settings
import pathlib
import numpy as np
from mengenali.registration import unpickle_keypoints, filter_matches_with_amount, feature_similarity
def read_descriptors(reference_form_path):
with open(reference_form_path, "rb") as pickled:
return pickle.load(pickled)
def detect_party(image):
brisk = cv2.BRISK_create()
im_kp, im_descriptors = brisk.detectAndCompute(cv2.resize(image, None, fx=1.0, fy=1.0), None)
features_dir = pathlib.Path(settings.STATIC_DIR).joinpath('datasets/features')
listdir = os.listdir(features_dir)
most_similar = 0
most_similar_form = ""
for file in listdir:
key_point_file = pathlib.Path(features_dir).joinpath(file)
key_points = read_descriptors(key_point_file)
ref_kp, ref_descriptors = unpickle_keypoints(key_points['keypoints'])
bf = cv2.BFMatcher(cv2.NORM_L2)
raw_matches = bf.knnMatch(np.float32(im_descriptors), trainDescriptors=np.float32(ref_descriptors), k=2)
amount, matches = filter_matches_with_amount(im_kp, ref_kp, raw_matches)
if amount > 0:
mkp1, mkp2 = zip(*matches)
similarity = feature_similarity(mkp1, mkp2)
if similarity > most_similar:
most_similar = similarity
most_similar_form = file
logging.info("match: %s %s", most_similar, most_similar_form)
return most_similar_form.replace(".p", ""), most_similar
|
11563788
|
import numpy as np
import os
class WarmingUpCount(object):
def __init__(self, size):
self.DType = 'i8'
self.WUC = np.zeros(size, dtype=self.DType)
def decrease(self, indexes):
self.WUC[indexes] -= 1
self.WUC[self.WUC < 0] = 0
def extend(self, size, value):
start = self.WUC.shape[0]
end = size
self.WUC.resize(size, refcheck=False)
self.WUC[start:end] = value
def assign(self, index, value):
self.WUC[index] = value
def flush(self, indexes):
self.WUC = self.WUC.take(indexes, axis=0)
def create(self, size):
pass
def __len__(self):
return self.WUC.shape[0]
class PersistentWarmingUpCount(WarmingUpCount):
def __init__(self, path, size):
super().__init__(size)
self.Path = path
if os.path.exists(self.Path):
self.WUC = np.memmap(self.Path, dtype=self.DType, mode='readwrite')
else:
self.create(size)
def extend(self, size, value):
start = self.WUC.shape[0]
end = size
wuc = np.zeros(self.WUC.shape[0], dtype=self.DType)
wuc[:] = self.WUC[:]
wuc.resize(size, refcheck=False)
self.WUC = np.memmap(self.Path, dtype=self.DType, mode='w+', shape=wuc.shape)
self.WUC[:] = wuc[:]
self.WUC[start:end] = value
def flush(self, indexes):
super().flush(indexes)
self.create(self.WUC.shape[0])
def create(self, size):
wuc = np.memmap(self.Path, dtype=self.DType, mode='w+', shape=(size,))
wuc[:] = self.WUC[:]
self.WUC = wuc
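if __name__ == "__main__":
    # Illustrative usage of the in-memory variant: counts are clamped at
    # zero, extend() fills the new slots with `value`, and flush() keeps
    # only the given indexes.
    wuc = WarmingUpCount(4)
    wuc.assign(0, 2)
    wuc.decrease([0, 1])   # -> [1, 0, 0, 0]
    wuc.extend(6, 5)       # -> [1, 0, 0, 0, 5, 5]
    wuc.flush([0, 4])      # -> [1, 5]
    print(wuc.WUC, len(wuc))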
|
11563805
|
from vibora import Vibora, Response
from vibora.blueprints import Blueprint
from vibora.tests import TestSuite
class BlueprintsTestCase(TestSuite):
def setUp(self):
self.app = Vibora()
async def test_simple_sub_domain_expects_match(self):
b1 = Blueprint(hosts=['.*'])
@b1.route('/')
async def home():
return Response(b'123')
self.app.add_blueprint(b1)
async with self.app.test_client() as client:
response = await client.request('/')
self.assertEqual(response.content, b'123')
async def test_exact_match_sub_domain_expects_match(self):
b1 = Blueprint(hosts=['test.vibora.io'])
@b1.route('/')
async def home():
return Response(b'123')
self.app.add_blueprint(b1)
async with self.app.test_client() as client:
response = await client.request('/', headers={'Host': 'test.vibora.io'})
self.assertEqual(response.content, b'123')
async def test_different_sub_domain_expects_404(self):
b1 = Blueprint(hosts=['test.vibora.io'])
@b1.route('/')
async def home():
return Response(b'123')
self.app.add_blueprint(b1)
async with self.app.test_client() as client:
response = await client.request('/', headers={'Host': 'test2.vibora.io'})
self.assertEqual(response.status_code, 404)
async def test_sub_domain_working_with_non_hosts_based(self):
b1 = Blueprint(hosts=['test.vibora.io'])
b2 = Blueprint()
@b1.route('/')
async def home():
return Response(b'123')
@b2.route('/test')
async def home():
return Response(b'123')
self.app.add_blueprint(b1)
self.app.add_blueprint(b2)
async with self.app.test_client() as client:
response = await client.request('/', headers={'Host': 'test.vibora.io'})
self.assertEqual(response.status_code, 200)
response = await client.request('/', headers={'Host': 'test2.vibora.io'})
self.assertEqual(response.status_code, 404)
response = await client.request('/test', headers={'Host': 'anything.should.work'})
self.assertEqual(response.status_code, 200)
response = await client.request('/test2', headers={'Host': 'anything.should.404'})
self.assertEqual(response.status_code, 404)
|
11563826
|
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import numpy as np
from random import randint
import pytest
from mmgroup.mm import mm_sub_test_prep_xy
from mmgroup import mat24 as m24
from mmgroup.tests.spaces.sparse_mm_space import SparseMmV
from mmgroup.tests.spaces.sparse_mm_space import SparseMmVector
from mmgroup.tests.groups.mgroup_n import MGroupNWord
from mmgroup.mm_space import characteristics
PRIMES = characteristics()
def _as_suboctad(v1, o):
d = m24.octad_to_gcode(o)
c = m24.ploop_cap(v1, d)
return m24.cocode_to_suboctad(c, d)
class prep_xy:
group = MGroupNWord
space = SparseMmVector
def __init__(self, eps, e, f):
self.f = f & 0x1fff
self.e = e & 0x1fff
self.eps = eps = eps & 0xfff
self.odd = (eps & 0x800) >> 11
lin = np.zeros(6, dtype = np.uint32)
mm_sub_test_prep_xy(f, e, eps, 1, lin)
self.lin_i = lin[:3]
self.lin_d = lin[3:6]
self.sign_XYZ = np.zeros(2048, dtype = np.uint32)
mm_sub_test_prep_xy(f, e, eps, 2, self.sign_XYZ)
self.s_T = np.zeros(759, dtype = np.uint32)
mm_sub_test_prep_xy(f, e, eps, 3, self.s_T)
def inv_op_unit(self, tag, d, j):
if tag == 'X':
tag1 = 'X'
d1 = d ^ self.lin_d[0]
j1 = j
sign = (self.sign_XYZ[d] & 1)
sign ^= (self.lin_i[0] >> j) & 1
if self.odd:
cc = m24.vect_to_cocode(1 << j)
sign ^= m24.scalar_prod(d, cc)
elif tag in 'ZY':
s = self.odd ^ (tag == 'Y')
tag1 = 'ZY'[s]
s += 1
d1 = d ^ self.lin_d[s]
j1 = j
sign = (self.sign_XYZ[d] >> s) & 1
sign ^= (self.lin_i[s] >> j) & 1
elif tag == 'T':
tag1 = 'T'
d1 = d
te = self.s_T[d]
so_exp = _as_suboctad(self.f, d)
assert te & 0x3f == so_exp , (hex(te), hex(so_exp))
j1 = j ^ (te & 0x3f)
sign = m24.suboctad_scalar_prod(j, (te >> 8) & 0x3f)
sign ^= (te >> 14) & 1
sign ^= m24.suboctad_weight(j) & self.odd & 1
assert ((te >> 15) ^ self.odd) & 1 == 0
else:
raise ValueError("Illegal tag " + str(tag))
return sign & 1, tag1, d1, j1
def inv_op(self, v):
w = self.space(v.p)
for value, tag, d, j in v.as_tuples():
sign, tag, d, j = self.inv_op_unit(tag, d, j)
if sign & 1:
                value = -value % v.p
            w += value * self.space(v.p, tag, d, j)
return w
def check_v(self, v, verbose = 0):
grp = self.group
delta_atom = grp('d', self.eps)
x_atom = grp('x', self.e)**(-1)
y_atom = grp('y', self.f)**(-1)
w_ref = v * delta_atom * x_atom * y_atom
w = self.inv_op(v)
error = w != w_ref
if error or verbose:
eps, e, f = self.eps, self.e, self.f
print("vector", v)
print("operation", "d_%xh * x_%xh * y_%xh" % (eps, e, f))
print("obtained:", w)
if error:
print(v * delta_atom , v, delta_atom)
print("expected:", w_ref)
raise ValueError("x-y operation failed")
print("Error: x-y operation failed!!!")
p = PRIMES[0]
space = SparseMmVector
def as_vector(x):
if isinstance(x, str):
data = [(tag, 'r') for tag in x]
return space(p, data)
if isinstance(x, tuple):
return space(p, *x)
if isinstance(x, list):
return space(p, x)
raise TypeError("Bad type for vector of rep")
def prep_xy_testcases():
testcases = [
[ [("X", 3, 6)], 0, 0, 0x1171 ],
[ [("X", 3, 6)], 12, 0, 0 ],
[ [("X", 3, 6)], 12, 1111, 0 ],
[ [("X", 3, 6)], 12, 0, 1111],
[ [("Z", 0, 0)], 0, 0, 0],
[ [("Z", 0, 0)], 12, 0, 0],
[ [("Z", 0, 0)], 0, 34, 0],
[ [("Z", 0, 0)], 0x800, 0, 0],
[ [("Z", 0, 0)], 0x812, 0, 0],
[ [("Z", 0, 0)], 0x800, 34, 0],
[ [("Z", 0, 0)], 0x800, 0, 34],
]
for v, eps, e, f in testcases:
yield as_vector(v), prep_xy(eps, e, f)
v_tags = "TXZY"
for v in v_tags:
for i in range(1000):
v1 = as_vector(v)
eps = randint(0, 0xfff)
e = randint(0, 0x1fff)
f = randint(0, 0x1fff)
yield v1, prep_xy(eps, e, f)
@pytest.mark.mm
def test_prep_xy(verbose = 0):
print("Testing preparation of operation x-y...")
for v, op in prep_xy_testcases():
op.check_v(v, verbose = verbose)
if verbose: print("")
print("passed")
|
11563863
|
import numpy as np
from scipy.spatial.distance import pdist, squareform
from scipy.fft import fftn
def compute_diversity(pred, *args):
if pred.shape[0] == 1:
return 0.0
dist = pdist(pred.reshape(pred.shape[0], -1))
diversity = dist.mean().item()
return diversity
def compute_ade(pred, gt, *args):
diff = pred - gt
dist = np.linalg.norm(diff, ord=2, axis=2).mean(axis=1)
return dist.min()
def compute_fde(pred, gt, *args):
diff = pred - gt
dist = np.linalg.norm(diff, ord=2, axis=2)[:, -1]
return dist.min()
def compute_mmade(pred, gt, gt_multi):
gt_dist = []
for gt_multi_i in gt_multi:
dist = compute_ade(pred, gt_multi_i)
gt_dist.append(dist)
gt_dist = np.array(gt_dist).mean()
return gt_dist
def compute_mmfde(pred, gt, gt_multi):
gt_dist = []
for gt_multi_i in gt_multi:
dist = compute_fde(pred, gt_multi_i)
gt_dist.append(dist)
gt_dist = np.array(gt_dist).mean()
return gt_dist
def compute_bone_deform(gen, gt, gt_multi):
'''
gen, gt - [nsamp, time, dim]
'''
jts = gen.reshape([gen.shape[0], gen.shape[1],22,3]) #[gen, t, 22, 3]
l_LFA = np.linalg.norm(jts[:,:,18]-jts[:,:,20], axis=-1).std(axis=-1).mean()
l_LUA = np.linalg.norm(jts[:,:,18]-jts[:,:,16], axis=-1).std(axis=-1).mean()
l_RUA = np.linalg.norm(jts[:,:,19]-jts[:,:,17], axis=-1).std(axis=-1).mean()
l_RFA = np.linalg.norm(jts[:,:,19]-jts[:,:,21], axis=-1).std(axis=-1).mean()
l_LTH = np.linalg.norm(jts[:,:,1]-jts[:,:,4], axis=-1).std(axis=-1).mean()
l_LCA = np.linalg.norm(jts[:,:,7]-jts[:,:,4], axis=-1).std(axis=-1).mean()
l_RTH = np.linalg.norm(jts[:,:,2]-jts[:,:,5], axis=-1).std(axis=-1).mean()
l_RCA = np.linalg.norm(jts[:,:,5]-jts[:,:,8], axis=-1).std(axis=-1).mean()
deform = l_LFA+l_LUA+l_RUA+l_RFA+l_LTH+l_LCA+l_RTH+l_RCA
return deform
def compute_ps_entropy(gen, gt, gt_multi):
'''
gen, gt - [nsamp, time, dim]
'''
### ps entropy
ps_gen = np.abs(fftn(gen, axes=1))**2 + 1e-6
ps_gen = ps_gen / np.sum(ps_gen, axis=1, keepdims=True)
ps_entropy_gen = np.mean(-ps_gen*np.log(ps_gen),axis=-1)
ps_gt = np.abs(fftn(gt, axes=1))**2 + 1e-6
ps_gt = ps_gt / np.sum(ps_gt, axis=1, keepdims=True)
ps_entropy_gt = np.mean(-ps_gt*np.log(ps_gt), axis=-1)
return np.mean(ps_entropy_gen-ps_entropy_gt)
def get_multimodal_gt(all_data, t_his, thresh):
all_start_pose = all_data[:,t_his - 1,:]
pd = squareform(pdist(all_start_pose))
traj_gt_arr = []
for i in range(pd.shape[0]):
ind = np.nonzero(pd[i] < thresh)
traj_gt_arr.append(all_data[ind][:, t_his:, :])
return traj_gt_arr
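if __name__ == "__main__":
    # Illustrative smoke test with random data: 10 sampled futures of 60
    # frames with 66 dims (22 joints x 3 coords) against one ground truth.
    rng = np.random.default_rng(0)
    pred = rng.standard_normal((10, 60, 66))
    gt = rng.standard_normal((60, 66))
    print("diversity:", compute_diversity(pred))
    print("ADE:", compute_ade(pred, gt))
    print("FDE:", compute_fde(pred, gt))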
|
11563897
|
command = "/usr/bin/sudo /sbin/reboot now"
import subprocess
process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
output = process.communicate()[0]
print output
|
11563960
|
import os
import re
from collections import defaultdict
from io import StringIO
from unittest import mock
import dateutil.tz
import pytest
from lxml import etree
from nikola.nikola import Nikola, Post
from nikola.utils import LocaleBorg, TranslatableSetting
def test_feed_is_valid(rss_feed_content, rss_schema):
"""
A testcase to check if the generated feed is valid.
Validation can be tested with W3 FEED Validator that can be found
at http://feedvalidator.org
"""
document = etree.parse(StringIO(rss_feed_content))
assert rss_schema.validate(document)
@pytest.fixture
def rss_schema(rss_schema_filename):
with open(rss_schema_filename, "r") as rss_schema_file:
xmlschema_doc = etree.parse(rss_schema_file)
return etree.XMLSchema(xmlschema_doc)
@pytest.fixture
def rss_schema_filename(test_dir):
return os.path.join(test_dir, "data", "rss-2_0.xsd")
@pytest.mark.parametrize("element", ["guid", "link"])
def test_feed_items_have_valid_URLs(rss_feed_content, blog_url, element):
"""
The items in the feed need to have valid urls in link and guid.
As stated by W3 FEED Validator:
* "link must be a full and valid URL"
* "guid must be a full URL, unless isPermaLink attribute is false: /weblog/posts/the-minimal-server.html"
"""
# This validation regex is taken from django.core.validators
url_validation_regex = re.compile(
r"^(?:http|ftp)s?://" # http:// or https://
# domain...
r"(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|"
r"localhost|" # localhost...
# ...or ipv4
r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|"
# ...or ipv6
r"\[?[A-F0-9]*:[A-F0-9:]+\]?)" r"(?::\d+)?" r"(?:/?|[/?]\S+)$", # optional port
re.IGNORECASE,
)
def is_valid_URL(url):
return url_validation_regex.match(url) is not None
et = etree.parse(StringIO(rss_feed_content))
channel = et.find("channel")
item = channel.find("item")
element = item.find(element)
assert is_valid_URL(element.text)
assert blog_url in element.text
@pytest.fixture(autouse=True)
def localeborg(default_locale):
"""
LocaleBorg with default settings
"""
LocaleBorg.reset()
LocaleBorg.initialize({}, default_locale)
try:
yield
finally:
LocaleBorg.reset()
@pytest.fixture
def rss_feed_content(blog_url, config, default_locale):
default_post = {
"title": "post title",
"slug": "awesome_article",
"date": "2012-10-01 22:41",
"author": None,
"tags": "tags",
"link": "link",
"description": "description",
"enclosure": "http://www.example.org/foo.mp3",
"enclosure_length": "5",
}
meta_mock = mock.Mock(return_value=(defaultdict(str, default_post), None))
with mock.patch("nikola.post.get_meta", meta_mock):
with \
mock.patch(
"nikola.nikola.utils.os.path.isdir", mock.Mock(return_value=True)), \
mock.patch(
"nikola.nikola.Post.text", mock.Mock(return_value="some long text")
):
with mock.patch(
"nikola.post.os.path.isfile", mock.Mock(return_value=True)):
example_post = Post(
"source.file",
config,
"blog_folder",
True,
{"en": ""},
"post.tmpl",
FakeCompiler(),
)
filename = "testfeed.rss"
opener_mock = mock.mock_open()
with mock.patch("nikola.nikola.io.open", opener_mock, create=True):
Nikola().generic_rss_renderer(
default_locale,
"blog_title",
blog_url,
"blog_description",
[example_post, ],
filename,
True,
False,
)
opener_mock.assert_called_once_with(filename, "w+", encoding="utf-8")
# Python 3 / unicode strings workaround
# lxml will complain if the encoding is specified in the
# xml when running with unicode strings.
# We do not include this in our content.
file_content = [call[1][0] for call in opener_mock.mock_calls[2:-1]][0]
splitted_content = file_content.split("\n")
# encoding_declaration = splitted_content[0]
content_without_encoding_declaration = splitted_content[1:]
yield "\n".join(content_without_encoding_declaration)
@pytest.fixture
def config(blog_url, default_locale):
fake_conf = defaultdict(str)
fake_conf["TIMEZONE"] = "UTC"
fake_conf["__tzinfo__"] = dateutil.tz.tzutc()
fake_conf["DEFAULT_LANG"] = default_locale
fake_conf["TRANSLATIONS"] = {default_locale: ""}
fake_conf["BASE_URL"] = blog_url
fake_conf["BLOG_AUTHOR"] = TranslatableSetting(
"BLOG_AUTHOR", "<NAME>", [default_locale]
)
fake_conf["TRANSLATIONS_PATTERN"] = "{path}.{lang}.{ext}"
return fake_conf
@pytest.fixture
def blog_url():
return "http://some.blog"
class FakeCompiler:
demote_headers = False
compile = None
def extension(self):
return ".html"
def read_metadata(*args, **kwargs):
return {}
def register_extra_dependencies(self, post):
pass
|
11563968
|
import os, sys
inFilePath = sys.argv[1]
colIndex = int(sys.argv[2])
hasHeader = sys.argv[3] == "True"
inFile = open(inFilePath)
if hasHeader:
inFile.readline()
values = set()
for line in inFile:
lineItems = line.rstrip("\n").split("\t")
values.add(lineItems[colIndex])
print(len(values))
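# Example invocation (the script and file names are illustrative):
#   python count_distinct.py data.tsv 2 True
# prints the number of distinct values in column index 2 of data.tsv,
# skipping the header row.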
|
11563972
|
import re
from typing import Optional
from ..model.configuration import Configuration
class Validator:
def __init__(self, config: Configuration):
self.config = config
    def check(self, string) -> Optional[re.Match]:
        """Check whether the string matches the configured validation regex."""
        return re.match(self.config.validation_regex, string)
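if __name__ == "__main__":
    # Illustrative sketch: any object exposing `validation_regex` works as
    # a stand-in for the real Configuration model here; the regex below is
    # an arbitrary example.
    class _FakeConfig:
        validation_regex = r"^[a-z]+-\d+$"

    validator = Validator(_FakeConfig())
    print(bool(validator.check("issue-42")))   # True
    print(bool(validator.check("Issue 42")))   # False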
|
11563978
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from src.utils.config import cfg
__all__ = [
"ResNet", "ResNet18", "ResNet34", "ResNet50", "ResNet101", "ResNet152"
]
class ResNet():
def __init__(self, layers=50, scale=1.0):
self.layers = layers
self.scale = scale
def net(self,
input,
class_dim=1000,
end_points=None,
decode_points=None,
resize_points=None,
dilation_dict=None):
layers = self.layers
supported_layers = [18, 34, 50, 101, 152]
assert layers in supported_layers, \
"supported layers are {} but input layer is {}".format(supported_layers, layers)
decode_ends = dict()
def check_points(count, points):
if points is None:
return False
else:
if isinstance(points, list):
return (True if count in points else False)
else:
return (True if count == points else False)
def get_dilated_rate(dilation_dict, idx):
if dilation_dict is None or idx not in dilation_dict:
return 1
else:
return dilation_dict[idx]
if layers == 18:
depth = [2, 2, 2, 2]
elif layers == 34 or layers == 50:
depth = [3, 4, 6, 3]
elif layers == 101:
depth = [3, 4, 23, 3]
elif layers == 152:
depth = [3, 8, 36, 3]
num_filters = [64, 128, 256, 512]
# stage_1: 3 3x3_Conv
conv = self.conv_bn_layer(
input=input,
num_filters=int(64 * self.scale),
filter_size=3,
stride=2,
act='relu',
name="conv1_1")
conv = self.conv_bn_layer(
input=conv,
num_filters=int(64 * self.scale),
filter_size=3,
stride=1,
act='relu',
name="conv1_2")
conv = self.conv_bn_layer(
input=conv,
num_filters=int(128 * self.scale),
filter_size=3,
stride=1,
act='relu',
name="conv1_3")
conv = fluid.layers.pool2d(
input=conv,
pool_size=3,
pool_stride=2,
pool_padding=1,
pool_type='max')
layer_count = 1
if check_points(layer_count, decode_points):
decode_ends[layer_count] = conv
if check_points(layer_count, end_points):
return conv, decode_ends
if layers >= 50:
for block in range(len(depth)):
for i in range(depth[block]): #depth = [3, 4, 23, 3]
if layers in [101, 152] and block == 2:
if i == 0:
conv_name = "res" + str(block + 2) + "a"
else:
conv_name = "res" + str(block + 2) + "b" + str(i)
else:
conv_name = "res" + str(block + 2) + chr(97 + i)
dilation_rate = get_dilated_rate(dilation_dict, block)
# added by Rosun, employ multi-grid
if cfg.MODEL.BACKBONE_MULTI_GRID== True and block==3:
if i==0:
dilation_rate = dilation_rate*(i+1)
else:
dilation_rate = dilation_rate*(2*i) # 2, 4
print("employ multi-grid for resnet backbone network: dilation_rate={}\n".format(dilation_rate))
conv = self.bottleneck_block(
input=conv,
num_filters=int(num_filters[block] * self.scale),
stride=2
if i == 0 and block != 0 and dilation_rate == 1 else 1,
name=conv_name,
dilation=dilation_rate)
layer_count += 3
if check_points(layer_count, decode_points):
decode_ends[layer_count] = conv
if check_points(layer_count, end_points):
return conv, decode_ends
if check_points(layer_count, resize_points):
conv = self.interp(
conv,
np.ceil(
np.array(conv.shape[2:]).astype('int32') / 2))
pool = fluid.layers.pool2d(input=conv, pool_size=7, pool_type='avg', global_pooling=True)
stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
out = fluid.layers.fc(input=pool,
size=class_dim,
param_attr=fluid.param_attr.ParamAttr(initializer=fluid.initializer.Uniform(-stdv, stdv)))
else:
for block in range(len(depth)):
for i in range(depth[block]):
conv_name = "res" + str(block + 2) + chr(97 + i)
conv = self.basic_block(
input=conv,
num_filters=num_filters[block],
stride=2 if i == 0 and block != 0 else 1,
is_first=block == i == 0,
name=conv_name)
layer_count += 2
if check_points(layer_count, decode_points):
decode_ends[layer_count] = conv
if check_points(layer_count, end_points):
return conv, decode_ends
pool = fluid.layers.pool2d(
input=conv, pool_size=7, pool_type='avg', global_pooling=True)
stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
out = fluid.layers.fc(
input=pool,
size=class_dim,
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv)))
return out
def zero_padding(self, input, padding):
return fluid.layers.pad(
input, [0, 0, 0, 0, padding, padding, padding, padding])
def interp(self, input, out_shape):
out_shape = list(out_shape.astype("int32"))
return fluid.layers.resize_bilinear(input, out_shape=out_shape)
def conv_bn_layer(self,
input,
num_filters,
filter_size,
stride=1,
dilation=1,
groups=1,
act=None,
name=None):
bias_attr=False
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=(filter_size - 1) // 2 if dilation == 1 else 0,
dilation=dilation,
groups=groups,
act=None,
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=bias_attr,
name=name + '.conv2d.output.1')
if name == "conv1":
bn_name = "bn_" + name
else:
bn_name = "bn" + name[3:]
return fluid.layers.batch_norm(input=conv,
act=act,
name=bn_name + '.output.1',
param_attr=ParamAttr(name=bn_name + '_scale'),
bias_attr=ParamAttr(bn_name + '_offset'),
moving_mean_name=bn_name + '_mean',
moving_variance_name=bn_name + '_variance', )
def shortcut(self, input, ch_out, stride, is_first, name):
ch_in = input.shape[1]
if ch_in != ch_out or stride != 1 or is_first == True:
return self.conv_bn_layer(input, ch_out, 1, stride, name=name)
else:
return input
def bottleneck_block(self, input, num_filters, stride, name, dilation=1):
if self.layers == 101:
strides = [1, stride]
else:
strides = [stride, 1]
conv0 = self.conv_bn_layer(
input=input,
num_filters=num_filters,
filter_size=1,
dilation=1,
stride=strides[0],
act='relu',
name=name + "_branch2a")
if dilation > 1:
conv0 = self.zero_padding(conv0, dilation)
conv1 = self.conv_bn_layer(
input=conv0,
num_filters=num_filters,
filter_size=3,
dilation=dilation,
stride=strides[1],
act='relu',
name=name + "_branch2b")
conv2 = self.conv_bn_layer(
input=conv1,
num_filters=num_filters * 4,
dilation=1,
filter_size=1,
act=None,
name=name + "_branch2c")
short = self.shortcut(
input,
num_filters * 4,
stride,
is_first=False,
name=name + "_branch1")
return fluid.layers.elementwise_add(
x=short, y=conv2, act='relu', name=name + ".add.output.5")
def basic_block(self, input, num_filters, stride, is_first, name):
conv0 = self.conv_bn_layer(
input=input,
num_filters=num_filters,
filter_size=3,
act='relu',
stride=stride,
name=name + "_branch2a")
conv1 = self.conv_bn_layer(
input=conv0,
num_filters=num_filters,
filter_size=3,
act=None,
name=name + "_branch2b")
short = self.shortcut(
input, num_filters, stride, is_first, name=name + "_branch1")
return fluid.layers.elementwise_add(x=short, y=conv1, act='relu')
def ResNet18():
model = ResNet(layers=18)
return model
def ResNet34():
model = ResNet(layers=34)
return model
def ResNet50():
model = ResNet(layers=50)
return model
def ResNet101():
model = ResNet(layers=101)
return model
def ResNet152():
model = ResNet(layers=152)
return model
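if __name__ == "__main__":
    # Minimal usage sketch (assumes PaddlePaddle 1.x with the static fluid
    # graph API; the variable name and shape are illustrative). ResNet18 is
    # used because its basic-block path does not touch the cfg-dependent
    # multi-grid logic.
    image = fluid.layers.data(name='image', shape=[3, 224, 224], dtype='float32')
    model = ResNet18()
    logits = model.net(image, class_dim=1000)
    print(logits.shape)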
|
11563987
|
class line2word :
def __init__(self, f, gf, ggf, nl , ori):
self.f = f
self.gf = gf
self.ggf = ggf
self.nl = nl
self.ori = ori
self.used = False
numberstack = []
list2wordlist = []
def readbacklist (Nl_num):
global numberstack
global list2wordlist
numberstack = []
f = open("../test_output_our_ast_back_past/"+str(Nl_num)+".txt","r")
lines = f.readlines()
f.close()
list2wordlist = []
for line in lines:
st = str(line)[:-1].replace("_fu_nc_na_me","").split(" ")
if len(st)==1:
numberstack.append(st[0])
else:
word = line2word(st[0],st[1],st[2],st[3],st[4])
list2wordlist.append(word)
def line2word_var (line,father,list2word):
for l in list2word:
if l.nl == line:
if l.f == father and l.used == False:
l.used = True
return l.ori
for l in list2word:
if l.nl == line:
if l.used == False:
l.used = True
return l.ori
for l in list2word:
if l.nl == line:
l.used = False
for l in list2word:
if l.nl == line:
if l.f == father and l.used == False:
l.used = True
return l.ori
for l in list2word:
if l.nl == line:
if l.used == False:
l.used = True
return l.ori
return "-12345"
def getnumberstack (numberstack):
if len(numberstack) == 0 :
return "-12345"
now = numberstack.pop(0)
numberstack.append(now)
return now
for i in range(1,67):
#global numberstack
numberstack = []
#global list2wordlist
list2wordlist = []
readbacklist(i)
f = open("../../out/"+str(i)+".txt","r")
pro = str(f.readline())
f.close()
pro = pro.replace("BootyBayBodyguard","line:0")
pro = pro.replace("Booty_Bay_Bodyguard","line:0")
pro = pro.replace("2","line:2")
pro = pro.replace("3","line:3")
pro = pro.replace("4","line:4")
pro = pro.replace("ALL1","line:6")
pro = pro.replace("TOTEM1","line:7")
pro = pro.replace("COMMON1","line:8")
pro = pro.replace("9","line:9")
pro = pro.replace("1","line:1")
t = i
code = pro.split()
ans = " "
for i in range(len(code)):
if "line:" in code[i]:
st = line2word_var(code[i], code[i - 1], list2wordlist)
if (code[i-1] == "num" or code[i - 1] == "Num" or code[i-1] == "NUM" or st == "-12345"):
if code[i] == "line:9":
code[i] = getnumberstack(numberstack)
elif st != "-12345":
code[i] = st
else :
code[i] = st
ans += " " + code[i]
i = t
ans = ans.replace(" ","")
f = open(str(i) + ".txt", "w")
f.write(ans)
f.close()
|
11563989
|
from django.db import models as django_models
from .. import models
class BaseTestModel(django_models.Model):
"""
An abstract base test model class for test objects that
could be related to Images. Contains just one text field
"""
name = django_models.CharField(max_length=100)
def __str__(self):
"""
A str version of the object just returns
the value of its text field
"""
return self.name
class Meta:
abstract = True
class TestModel(models.ContentGalleryMixin, BaseTestModel):
"""
A main test model. Uses the ContentGalleryMixin without
    changes, so the model appears in the list of
available models in the Image admin.
"""
class AnotherTestModel(models.ContentGalleryMixin, BaseTestModel):
"""
Another test model. It also uses the ContentGalleryMixin, but
sets the 'gallery_visible' to False, so it's still possible
    to attach images to objects of this model, but the model
    does not appear in the available models list in the Image admin.
"""
gallery_visible = False
class WrongTestModel(BaseTestModel):
"""
    A test model that does not use the ContentGalleryMixin.
It could not be related to the Image objects.
"""
|
11563992
|
from pathlib import Path
from urllib.parse import urlparse
from tmclass_solutions.scraping import SimpleWebScraper
from tmclass_solutions.scraping import WikipediaArticle
EN_BASE_URL = "https://en.wikipedia.org/wiki/"
english_articles = [
"Agriculture", "Architecture", "Art", "Biology", "Business",
"Cinematography", "Culture", "Economy", "Literature", "Music",
"Politics", "Religion", "Sport", "Science", "Technology", "Trade"
]
# Most represented languages for those seed articles in text size
# (number of unicode symbols):
hostnames = [
"fr.wikipedia.org",
"en.wikipedia.org",
"ar.wikipedia.org",
"ru.wikipedia.org",
"uk.wikipedia.org",
"fa.wikipedia.org",
"ca.wikipedia.org",
"sr.wikipedia.org",
"es.wikipedia.org",
"zh.wikipedia.org",
"it.wikipedia.org",
"de.wikipedia.org",
"gl.wikipedia.org",
"pt.wikipedia.org",
"vi.wikipedia.org",
"ta.wikipedia.org",
"ja.wikipedia.org",
"bg.wikipedia.org",
"kn.wikipedia.org",
"azb.wikipedia.or",
"id.wikipedia.org",
"el.wikipedia.org",
"eo.wikipedia.org",
"hy.wikipedia.org",
"hi.wikipedia.org",
"sv.wikipedia.org",
"he.wikipedia.org",
"tr.wikipedia.org",
"th.wikipedia.org",
"bn.wikipedia.org",
]
output_folder = Path("/tmp/wikipedia_scraping")
output_folder.mkdir(exist_ok=True, parents=True)
scraper = SimpleWebScraper(output_folder)
whitelist = set(hostnames)
for article_name in english_articles:
article_url = EN_BASE_URL + article_name
folder = scraper.fetch_and_save(article_url)
print(f"Fetched {folder}")
article = WikipediaArticle((folder / "body").read_bytes())
language_links = article.get_language_links()
for language_link in language_links:
if urlparse(language_link).hostname not in whitelist:
continue
folder = scraper.fetch_and_save(language_link)
print(f"Fetched {folder}")
|
11563995
|
import hashlib
import sys
def convert_to_utf8(obj):
"""Encodes object into utf-8 bytes (or 'str' in Py2)"""
obj = str(obj)
if sys.version_info[0] == 3:
obj = bytes(obj, "utf-8")
else:
obj = u''.join(map(unichr, map(ord, obj))).encode("utf-8") # noqa: F821 in Py3 context
return obj
def _eval_config_dhash(obj):
""" Compute a data hash from the object. This is the hashing algorithm
used internally by MWDB to assign unique ids to configs
"""
if isinstance(obj, list):
# For lists: evaluate hash recursively for all elements and sort them lexicographically
return _eval_config_dhash(str(sorted([_eval_config_dhash(o) for o in obj])))
elif isinstance(obj, dict):
# For dicts: convert to key-ordered tuples with hashed value
return _eval_config_dhash(
[[o, _eval_config_dhash(obj[o])] for o in sorted(obj.keys())]
)
else:
# Other types: evaluate SHA256 after conversion to UTF-8
return hashlib.sha256(convert_to_utf8(obj)).hexdigest()
def config_dhash(obj):
"""
Compute a data hash from the object. This is the hashing algorithm
used internally by MWDB to assign unique ids to configs.
.. versionchanged:: 3.3.0
Added support for in-blob keys
:param obj: Dict with configuration
:type obj: dict
:return: SHA256 hex digest
"""
config = dict(obj)
for key, value in config.items():
if isinstance(value, dict) and list(value.keys()) == ["in-blob"]:
in_blob = value["in-blob"]
if isinstance(in_blob, dict):
config[key]["in-blob"] = hashlib.sha256(convert_to_utf8(in_blob["content"])).hexdigest()
return _eval_config_dhash(config)
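if __name__ == "__main__":
    # Worked example (values are illustrative): neither dict key order nor
    # list element order affects the hash, because dict keys are sorted and
    # per-element hashes are sorted lexicographically before the final SHA256.
    a = config_dhash({"family": "example", "urls": ["a", "b"]})
    b = config_dhash({"urls": ["b", "a"], "family": "example"})
    assert a == b
    print(a)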
|
11564004
|
from __future__ import print_function, division
from PyAstronomy import funcFit as fuf
import numpy as np
class AtanProfile(fuf.OneDFit):
"""
A profile based on the arc tangent function.
This class implements the following profile:
.. math::
        f(x) = \\frac{A}{2\\arctan(\\sigma)} \\times \\left(\\arctan\\left(\\frac{x-\\mu}{scale} + \\sigma\\right) +
               \\arctan\\left(-\\frac{x-\\mu}{scale} + \\sigma\\right)\\right) +
               lin \\times x + off
which can provide a relatively flat top and steep edges.
*Fit parameters*
- `A` - The amplitude. In this case, the height (not the area under)
the profile reached for :math:`x=0`. Note that for
:math:`\mu \\not = 0` the highest point may be elsewhere,
which is neglected here.
- `scale` - A scale parameter affecting the width of the profile. Note,
however, that also :math:`\sigma` affects the width.
- `mu` - The center of the profile.
- `off` - An offset
- `lin` - A gradient in the offset.
The width of the profile may be approximated by the inflection points, which
are given by
.. math::
        \\frac{\\partial^2 f(x)}{\\partial x^2} = 0 \\rightarrow
        x_{1,2} = \\mu \\pm \\frac{scale}{3}\\left(-3+3\\sigma^2+6\\sqrt{\\sigma^4+\\sigma^2+1}\\right)^{1/2}
"""
def __init__(self):
fuf.OneDFit.__init__(self, ["scale", "sig", "mu", "A", "off", "lin"])
def evaluate(self, x):
"""
Calculates and returns model according to the
current parameter values.
Parameters
----------
x : Array
The positions at which to evaluate the model.
"""
# Shift by mu
x = x - self["mu"]
# The heart of the profile
y = np.arctan(x/self["scale"] + self["sig"]) + np.arctan(-x/self["scale"] + self["sig"])
# Make the highest point (actually most extreme point)
# equal to A
y *= (self["A"] / (2.*np.arctan(self["sig"])))
# Add offset and gradient
y += self["off"]
y += self["lin"] * (x + self["mu"])
return y
def inflectionPoints(self):
"""
Calculate the inflection points.
The inflection points of the profile depend on
both :math:`\sigma` and :math:`\mu`.
Returns
-------
Inflection points : tuple
Locations of the inflection points. Smaller one first.
"""
d = abs(self["scale"])/3.0 * \
np.sqrt(-3. + 3.*self["sig"]**2 + 6.*np.sqrt(self["sig"]**4 + self["sig"]**2 + 1.0))
return self["mu"]-d, self["mu"]+d
class AtanProfileDamped(fuf.OneDFit):
"""
A profile based on the arc tangent function.
This class implements the following profile:
.. math::
        d(x) = f(x) \\times \\exp\\left(-H(|x-\\mu| - |ifp-\\mu|)\\,
               \\frac{(|x-\\mu| - |ifp-\\mu|)^2}{\\tau}\\right) +
               lin \\times x + off
    Here :math:`f(x)` is the arctan core of the profile described in
    :py:class:`AtanProfile` (without the offset and gradient terms),
H denotes the Heaviside function, and ifp is the location of the
inflection point. The parameter :math:`\\tau` can be used to provide
an additional drop at the edges of the profile.
*Fit parameters*
- `A` - The amplitude. In this case, the height (not the area under)
the profile reached for :math:`x=0`. Note that for
:math:`\mu \\not = 0` the highest point may be elsewhere,
which is neglected here.
- `scale` - A scale parameter affecting the width of the profile. Note,
however, that also :math:`\sigma` affects the width.
- `tau` - This parameter controls an additional drop at the edges
of the profile.
- `mu` - The center of the profile.
- `off` - An offset
- `lin` - A gradient in the offset.
The width of the profile may be approximated by the inflection points, which
are given by
.. math::
        \\frac{\\partial^2 f(x)}{\\partial x^2} = 0 \\rightarrow
        x_{1,2} = \\mu \\pm \\frac{scale}{3}\\left(-3+3\\sigma^2+6\\sqrt{\\sigma^4+\\sigma^2+1}\\right)^{1/2}
"""
def __init__(self):
fuf.OneDFit.__init__(self, ["scale", "sig", "mu", "A", "off", "lin", "tau"])
def evaluate(self, x):
"""
Calculates and returns model according to the
current parameter values.
Parameters
----------
x : Array
The positions at which to evaluate the model.
"""
# Shift by mu
x = x - self["mu"]
# The heart of the profile
y = np.arctan(x/self["scale"] + self["sig"]) + np.arctan(-x/self["scale"] + self["sig"])
# Make the highest point (actually most extreme point)
# equal to A
y *= (self["A"] / (2.*np.arctan(self["sig"])))
# Produce additional drop
difp = abs(self.inflectionPoints()[0] - self["mu"])
indi = np.where(np.abs(x) > difp)[0]
y[indi] *= np.exp(-np.abs(np.abs(x[indi])-difp)**2/self["tau"])
# Add offset and gradient
y += self["off"]
y += self["lin"] * (x + self["mu"])
return y
def inflectionPoints(self):
"""
Calculate the inflection points.
The inflection points of the profile depend on
both :math:`\sigma` and :math:`\mu`.
Returns
-------
Inflection points : tuple
Locations of the inflection points. Smaller one first.
"""
d = abs(self["scale"])/3.0 * \
np.sqrt(-3. + 3.*self["sig"]**2 + 6.*np.sqrt(self["sig"]**4 + self["sig"]**2 + 1.0))
return self["mu"]-d, self["mu"]+d
|
11564079
|
import argparse
import os
import shutil
import time
import math
import sys
sys.path.append('./')
sys.path.append('./src')
import copy
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.models as models
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import logging
from cf_checkpoint import CFCheckpoint
from cf_manager import CFManager, CFMode
from cf_iterator import CFIterator
from torch.multiprocessing import Pool, Process, set_start_method
try:
set_start_method('spawn')
except RuntimeError:
pass
try:
from nvidia.dali.plugin.pytorch import DALIClassificationIterator
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
except ImportError:
raise ImportError("Please install DALI from https://www.github.com/NVIDIA/DALI to run this example.")
import threading
try:
from apex.parallel import DistributedDataParallel as DDP
from apex.fp16_utils import *
from apex import amp, optimizers
from apex.multi_tensor_apply import multi_tensor_applier
from apex.parallel.LARC import LARC
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training using DALI')
parser.add_argument('--data', metavar='DIR', default="./", type=str,
help='path(s) to dataset (if one path is provided, it is assumed\n' +
'to have subdirectories named "train" and "val"; alternatively,\n' +
'train and val paths can be specified directly by providing both paths as arguments)')
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet18',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=3, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--nopin', action='store_false', help='Use this '
'argument to disable memory pinning')
#parser.add_argument('--resume', default='', type=str, metavar='PATH',
parser.add_argument('--resume', default=False, action='store_true',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--fp16', action='store_true',
help='Run model fp16 mode.')
parser.add_argument('--dali_cpu', action='store_true',
help='Runs CPU based version of DALI pipeline.')
parser.add_argument('--static-loss-scale', type=float, default=1,
help='Static loss scale, positive power of 2 values can improve fp16 convergence.')
parser.add_argument('--dynamic-loss-scale', action='store_true',
help='Use dynamic loss scaling. If supplied, this argument supersedes ' +
'--static-loss-scale.')
parser.add_argument('--prof', dest='prof', action='store_true',
help='Only run 10 iterations for profiling.')
parser.add_argument('-t', '--test', action='store_true',
help='Launch test mode with preset arguments')
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument("--steps_per_run", default=-1, type=int)
parser.add_argument("--classes", default=1000, type=int)
parser.add_argument("--cache_size", default=0, type=int)
parser.add_argument('--sync_bn', action='store_true',
help='enabling apex sync BN.')
parser.add_argument('--opt-level', type=str)
parser.add_argument('--keep-batchnorm-fp32', type=str, default=None)
parser.add_argument('--loss-scale', type=str, default=None)
parser.add_argument('--channels-last', type=bool, default=False)
parser.add_argument('--deterministic', action='store_true')
parser.add_argument('--noeval', action='store_true')
parser.add_argument('--amp',action='store_true',help='Run model AMP (automatic mixed precision) mode.')
parser.add_argument("--nnodes", default=1, type=int)
parser.add_argument("--node_rank", default=0, type=int)
parser.add_argument('--mint', action='store_true')
parser.add_argument('--dali', action='store_true')
parser.add_argument('--persist', action='store_true', default=False)
parser.add_argument('--dynamic', action='store_true', default=False)
parser.add_argument('--node_ip_list', action='append', type=str, help='Enter IP of other nodes in order')
parser.add_argument('--node_port_list', action='append', type=int, help='Enter start port of other nodes in order')
parser.add_argument('--iters', default=-1, type=int,metavar='N', help='Num iters (default: 50')
parser.add_argument('--chk-freq', default=0, type=int,metavar='N', help='checkpoint frequency')
parser.add_argument('--barrier', action='store_true', default=False)
parser.add_argument('--overwrite', action='store_true', default=False)
parser.add_argument('--synchronous', action='store_true', default=False)
parser.add_argument('--tic-tac', action='store_true', default=False)
parser.add_argument('--rename', action='store_true', default=False)
parser.add_argument('--tic-tac-len', default=2, type=int)
parser.add_argument('--chk-prefix', type=str, default="./")
parser.add_argument('--checkfreq', action='store_true', default=False)
parser.add_argument('--cf_iterator', action='store_true', default=False)
parser.add_argument('--chk_mode_baseline', action='store_true', default=False)
cudnn.benchmark = True
must_chk = False
compute_time_list = []
data_time_list = []
chk_time_list = []
class HybridTrainPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id, data_dir, crop, dali_cpu=False, resume_index=0, resume_epoch=0):
super(HybridTrainPipe, self).__init__(batch_size, num_threads, device_id, seed=12 + device_id)
shard = int(args.node_rank*args.world_size/args.nnodes + args.local_rank)
if args.mint:
self.input = ops.FileReader(file_root=data_dir, shard_id=shard, num_shards=args.world_size, shuffle_after_epoch=True, cache_size=args.cache_size)
else:
cf_det=True
if not resume_index and not resume_epoch and not args.cf_iterator:
cf_det=False
self.input = ops.FileReader(file_root=data_dir, shard_id=shard, num_shards=args.world_size, shuffle_after_epoch=True)
else:
self.input = ops.FileReader(file_root=data_dir, shard_id=shard, num_shards=args.world_size, shuffle_after_epoch=True, resume_index=resume_index, resume_epoch=resume_epoch, cf_det=cf_det)
print("CF deterministic shuffling is {}".format(cf_det))
        # let the user decide which pipeline variant works best for the ResNet version being run
dali_device = 'cpu' if dali_cpu else 'gpu'
#decoder_device = 'cpu'
decoder_device = 'cpu' if dali_cpu else 'mixed'
# This padding sets the size of the internal nvJPEG buffers to be able to handle all images from full-sized ImageNet
# without additional reallocations
device_memory_padding = 211025920 if decoder_device == 'mixed' else 0
host_memory_padding = 140544512 if decoder_device == 'mixed' else 0
self.decode = ops.ImageDecoderRandomCrop(device=decoder_device, output_type=types.RGB,
device_memory_padding=device_memory_padding,
host_memory_padding=host_memory_padding,
random_aspect_ratio=[0.8, 1.25],
random_area=[0.1, 1.0],
num_attempts=100)
self.res = ops.Resize(device=dali_device, resize_x=crop, resize_y=crop, interp_type=types.INTERP_TRIANGULAR)
self.cmnp = ops.CropMirrorNormalize(device="gpu",
output_dtype=types.FLOAT,
output_layout=types.NCHW,
crop=(crop, crop),
image_type=types.RGB,
mean=[0.485 * 255,0.456 * 255,0.406 * 255],
std=[0.229 * 255,0.224 * 255,0.225 * 255])
self.coin = ops.CoinFlip(probability=0.5)
print('DALI "{0}" variant'.format(dali_device))
def define_graph(self):
rng = self.coin()
self.jpegs, self.labels = self.input(name="Reader")
images = self.decode(self.jpegs)
images = self.res(images)
output = self.cmnp(images.gpu(), mirror=rng)
return [output, self.labels]
class HybridValPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id, data_dir, crop, size):
super(HybridValPipe, self).__init__(batch_size, num_threads, device_id, seed=12 + device_id)
shard = int(args.node_rank*args.world_size/args.nnodes + args.local_rank)
self.input = ops.FileReader(file_root=data_dir, shard_id=shard, num_shards=args.world_size, random_shuffle=False)
self.decode = ops.ImageDecoder(device="cpu", output_type=types.RGB)
self.res = ops.Resize(device="cpu", resize_shorter=size, interp_type=types.INTERP_TRIANGULAR)
self.cmnp = ops.CropMirrorNormalize(device="gpu",
output_dtype=types.FLOAT,
output_layout=types.NCHW,
crop=(crop, crop),
image_type=types.RGB,
mean=[0.485 * 255,0.456 * 255,0.406 * 255],
std=[0.229 * 255,0.224 * 255,0.225 * 255])
def define_graph(self):
self.jpegs, self.labels = self.input(name="Reader")
images = self.decode(self.jpegs)
images = self.res(images)
output = self.cmnp(images.gpu())
return [output, self.labels]
best_prec1 = 0
args = parser.parse_args()
# test mode, use default args for sanity test
if args.test:
args.fp16 = False
args.epochs = 1
args.start_epoch = 0
args.arch = 'resnet50'
args.batch_size = 64
args.data = []
args.prof = True
args.data.append('/data/imagenet/train-jpeg/')
args.data.append('/data/imagenet/val-jpeg/')
if args.deterministic:
cudnn.benchmark = False
cudnn.deterministic = True
torch.manual_seed(args.local_rank)
torch.set_printoptions(precision=10)
if not len(args.data):
raise Exception("error: too few arguments")
if args.amp:
args.opt_level='O1'
if args.amp:
print("Using mixed precision : {}".format(args.amp))
print("opt_level = {}".format(args.opt_level))
print("keep_batchnorm_fp32 = {}".format(args.keep_batchnorm_fp32), type(args.keep_batchnorm_fp32))
print("loss_scale = {}".format(args.loss_scale), type(args.loss_scale))
if args.dali:
print("Using DALI")
else:
print("Using native dataloader")
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
# item() is a recent addition, so this helps with backward compatibility.
def to_python_float(t):
if hasattr(t, 'item'):
return t.item()
else:
return t[0]
def main():
logging.basicConfig(format='%(module)s - %(funcName)s - %(levelname)s - %(message)s', level=logging.INFO)
start_full = time.time()
global best_prec1, args
time_stat = []
chk_stat = []
start = time.time()
args.gpu = 0
args.world_size = 1
torch.cuda.set_device(args.gpu)
if args.distributed:
args.gpu = args.local_rank % torch.cuda.device_count()
torch.cuda.set_device(args.gpu)
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
args.world_size = torch.distributed.get_world_size()
args.total_batch_size = args.world_size * args.batch_size
if args.fp16:
assert torch.backends.cudnn.enabled, "fp16 mode requires cudnn backend to be enabled."
if args.amp and args.fp16:
print("Please use only one of the --fp16/--amp flags")
exit(1)
if args.static_loss_scale != 1.0:
if not args.fp16:
print("Warning: if --fp16 is not used, static_loss_scale will be ignored.")
    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.arch))
        if args.arch == "inception_v3":
            model = models.__dict__[args.arch](num_classes=args.classes, aux_logits=False)
        else:
            model = models.__dict__[args.arch](num_classes=args.classes)
    # convert BatchNorm layers only after the model exists (doing this before
    # model creation would raise a NameError when --sync_bn is set)
    if args.sync_bn:
        import apex
        print("using apex synced BN")
        model = apex.parallel.convert_syncbn_model(model)
    model = model.cuda()
if args.fp16:
model = network_to_half(model)
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
if args.fp16:
optimizer = FP16_Optimizer(optimizer,
static_loss_scale=args.static_loss_scale,
dynamic_loss_scale=args.dynamic_loss_scale)
# Initialize Amp. Amp accepts either values or strings for the optional override arguments,
# for convenient interoperation with argparse
if args.amp:
model, optimizer = amp.initialize(model, optimizer,
opt_level=args.opt_level,
keep_batchnorm_fp32=args.keep_batchnorm_fp32,
loss_scale=args.loss_scale,
min_loss_scale=1.0
)
# For distributed training, wrap the model with apex.parallel.DistributedDataParallel.
# This must be done AFTER the call to amp.initialize. If model = DDP(model) is called
# before model, ... = amp.initialize(model, ...), the call to amp.initialize may alter
# the types of model's parameters in a way that disrupts or destroys DDP's allreduce hooks.
if args.distributed:
        # shared param / delayed all-reduce turns off bucketing in DDP; for low-latency runs this can improve perf
        # older versions of apex use `shared_param`; newer ones use `delay_allreduce`
model = DDP(model, delay_allreduce=True)
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
args.lr = args.lr*float(args.batch_size*args.world_size)/256.
if args.chk_mode_baseline:
args.chk_mode = CFMode.MANUAL
else:
args.chk_mode = CFMode.AUTO
#if args.local_rank == 0:
chk = CFCheckpoint(model=model, optimizer=optimizer)
cf_manager = CFManager(args.chk_prefix, chk, mode=args.chk_mode)
#else:
# cf_manager = None
# optionally resume from a checkpoint
args.start_index = 0
args.steps_so_far = 0
extra_state=None
if args.resume:
extra_state = cf_manager.restore(gpu=args.gpu)
if extra_state is not None:
args.start_epoch = extra_state['epoch']
args.start_index = extra_state['start_index']
args.steps_so_far = extra_state['steps_so_far']
print("Populated: epoch :{}, start_idx:{}, steps_so_far:{}".format(args.start_epoch,args.start_index,args.steps_so_far))
#if os.path.isfile(args.resume):
# print("=> loading checkpoint '{}'".format(args.resume))
# checkpoint = torch.load(args.resume, map_location=lambda storage, loc: storage.cuda(args.gpu))
# args.start_epoch = checkpoint['epoch']
# args.start_index = checkpoint['iter']*args.batch_size
# args.steps_so_far = checkpoint['steps_so_far']
# args.shuffle_seed = checkpoint['dl_shuffle_seed']
# best_prec1 = checkpoint['best_prec1']
# model.load_state_dict(checkpoint['state_dict'])
# optimizer.load_state_dict(checkpoint['optimizer'])
# print("=> loaded checkpoint '{}' (epoch {})"
# .format(args.resume, checkpoint['epoch']))
#else:
# print("=> no checkpoint found at '{}'".format(args.resume))
# Data loading code
    # args.data may hold a single dataset root or explicit train/val directories
    if len(args.data) == 1:
        traindir = os.path.join(args.data[0], 'train')
        valdir = os.path.join(args.data[0], 'val')
    else:
        traindir = args.data[0]
        valdir = args.data[1]
train_pipe = None
if args.dali:
if(args.arch == "inception_v3"):
crop_size = 299
val_size = 320 # I chose this value arbitrarily, we can adjust.
else:
crop_size = 224
val_size = 256
if not args.cf_iterator:
args.start_index = 0
pipe = HybridTrainPipe(batch_size=args.batch_size, num_threads=args.workers, device_id=args.local_rank, data_dir=traindir, crop=crop_size, dali_cpu=args.dali_cpu)
else:
pipe = HybridTrainPipe(batch_size=args.batch_size, num_threads=args.workers, device_id=args.local_rank, data_dir=traindir, crop=crop_size, dali_cpu=args.dali_cpu, resume_index=args.start_index, resume_epoch=args.start_epoch)
pipe.build()
train_pipe = pipe
resume_size = int(pipe.epoch_size("Reader") / args.world_size) - args.start_index
train_loader = DALIClassificationIterator(pipe, size=int(pipe.epoch_size("Reader") / args.world_size), fill_last_batch=False, resume_size=resume_size)
if args.cf_iterator:
train_loader = CFIterator(train_loader, worker_id=args.local_rank, bs=args.batch_size, steps_this_epoch=int(args.start_index/args.batch_size), epoch=args.start_epoch, dali=args.dali, cf_manager=cf_manager, chk_freq=args.chk_freq, arch=args.arch, steps_to_run=args.steps_per_run, persist=args.persist, dynamic=args.dynamic)
if args.resume:
train_loader.load_state_dict(extra_state)
if not args.noeval:
pipe_val = HybridValPipe(batch_size=args.batch_size, num_threads=args.workers, device_id=args.local_rank, data_dir=valdir, crop=crop_size, size=val_size)
pipe_val.build()
val_loader = DALIClassificationIterator(pipe_val, size=int(pipe_val.epoch_size("Reader") / args.world_size))
else:
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=args.nopin, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=args.nopin)
if args.evaluate and not args.noeval:
validate(val_loader, model, criterion)
return
total_time = AverageMeter()
dur_setup = time.time() - start
time_stat.append(dur_setup)
print("Batch size for GPU {} is {}, workers={}".format(args.gpu, args.batch_size, args.workers))
fname = 'time-split' + str(args.local_rank) + '.csv'
df = open(fname, 'w+')
    if args.rename:
        df.write("epoch,iter,dtime,mtime,ftime,ctime,ttime,chktime,renametime,tottime\n")
    else:
        df.write("epoch,iter,dtime,mtime,ftime,ctime,ttime,chktime,tottime\n")
for epoch in range(args.start_epoch, args.epochs):
if args.local_rank == 0 and epoch == 0:
os.system("swapoff -a")
os.system("free -g")
# log timing
start_ep = time.time()
df.write("\n")
# train for one epoch
avg_train_time = train(train_loader, model, criterion, optimizer, epoch, df, cf_manager)
total_time.update(avg_train_time)
if args.prof:
break
# evaluate on validation set
if args.noeval:
[prec1, prec5] = [0,0]
else:
[prec1, prec5] = validate(val_loader, model, criterion)
filename = 'acc-progress-' + str(args.gpu) + '.csv'
with open(filename, 'a+') as fw:
fw.write("{},{},{},{}\n".format(epoch, time.time() -start_ep, prec1, prec5))
chk_st = time.time()
# remember best prec@1 and save checkpoint
if args.local_rank == 0:
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
'''
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer': optimizer.state_dict(),
}, is_best)
'''
if epoch == args.epochs - 1:
print('##Top-1 {0}\n'
'##Top-5 {1}\n'
'##Perf {2}'.format(prec1, prec5, args.total_batch_size / total_time.avg))
dur_chk = time.time() - chk_st
if args.cf_iterator and train_loader.exit:
break
if args.dali:
# reset DALI iterators
train_loader.reset()
if not args.noeval:
val_loader.reset()
dur_ep = time.time() - start_ep
print("EPOCH DURATION = {}".format(dur_ep))
time_stat.append(dur_ep)
chk_stat.append(dur_chk)
if args.local_rank == 0:
for i in time_stat:
print("Time_stat : {}".format(i))
for i in range(0, len(data_time_list)):
print("Data time : {}\t Compute time : {}\t Chk time : {}".format(data_time_list[i], compute_time_list[i],chk_time_list[i]))
dur_full = time.time() - start_full
if args.local_rank == 0:
print("Total time for all epochs = {}".format(dur_full))
if cf_manager.chk_process is not None:
cf_manager.chk_process.join()
if args.dali:
del pipe
if not args.noeval:
del pipe_val
def train(train_loader, model, criterion, optimizer, epoch, df, cf_manager):
batch_time = AverageMeter()
total_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
global must_chk
# switch to train mode
model.train()
end = time.time()
dataset_time = compute_time = checkpoint_time = rename_time = 0
chk_per_epoch = 0
for i, data in enumerate(train_loader):
rename_time = 0
if args.dali:
images = data[0]["data"]
target = data[0]["label"].squeeze().cuda().long()
train_loader_len = int(math.ceil(train_loader._size / args.batch_size))
input_var = Variable(images)
target_var = Variable(target)
else:
images, target = data
target = target.squeeze().cuda().long()
input_var = Variable(images).cuda(args.gpu, non_blocking=True)
target_var = Variable(target).cuda(args.gpu, non_blocking=True)
train_loader_len = int(len(train_loader))
adjust_learning_rate(optimizer, epoch, i, train_loader_len)
if args.prof:
if i > 10:
break
# measure data loading time
dtime = time.time() - end
start_copy = time.time()
mtime = time.time() - start_copy
data_time.update(time.time() - end)
dataset_time += (time.time() - end)
compute_start = time.time()
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
if args.distributed:
reduced_loss = reduce_tensor(loss.data)
prec1 = reduce_tensor(prec1)
prec5 = reduce_tensor(prec5)
else:
reduced_loss = loss.data
losses.update(to_python_float(reduced_loss), images.size(0))
top1.update(to_python_float(prec1), images.size(0))
top5.update(to_python_float(prec5), images.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
ftime = time.time() - compute_start
if args.fp16:
optimizer.backward(loss)
elif args.amp:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
#if args.cf_iterator:
#torch.cuda.synchronize()
if args.local_rank == 0:
cf_manager.weight_update()
else:
optimizer.step()
torch.cuda.synchronize()
compute_time += (time.time() - compute_start)
ctime = time.time() - compute_start
proc = []
ttime = time.time() - end
ch_st = time.time()
chktime = time.time() - ch_st
checkpoint_time += chktime
#print("After CF chk : mem before={}MB, after={}MB".format(mem_before/1024/1024, mem_after/1024/1024))
if args.barrier:
dist.barrier()
tottime = time.time() - end
total_time.update(time.time() - end)
        # write every column declared in the CSV header, not just the total time
        if args.rename:
            df.write("{},{},{},{},{},{},{},{},{},{}\n".format(epoch, i, dtime, mtime, ftime, ctime, ttime, chktime, rename_time, tottime))
        else:
            df.write("{},{},{},{},{},{},{},{},{}\n".format(epoch, i, dtime, mtime, ftime, ctime, ttime, chktime, tottime))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if args.local_rank == 0 and i % args.print_freq == 0 and i > 1:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Speed {3:.3f} ({4:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, train_loader_len,
args.total_batch_size / batch_time.val,
args.total_batch_size / batch_time.avg,
batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
if args.iters > 0 and args.iters == i:
must_chk = False
#if args.local_rank == 0:
# for p in proc:
# p.join()
break
data_time_list.append(dataset_time)
compute_time_list.append(compute_time)
chk_time_list.append(checkpoint_time)
return batch_time.avg
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, data in enumerate(val_loader):
if args.dali:
images = data[0]["data"]
target = data[0]["label"].squeeze().cuda().long()
val_loader_len = int(val_loader._size / args.batch_size)
target = target.cuda(non_blocking=True)
input_var = Variable(images)
target_var = Variable(target)
else:
images, target = data
target = target.squeeze().cuda().long()
val_loader_len = int(len(val_loader))
input_var = Variable(images).cuda(args.gpu, non_blocking=True)
target_var = Variable(target).cuda(args.gpu, non_blocking=True)
# compute output
with torch.no_grad():
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
if args.distributed:
reduced_loss = reduce_tensor(loss.data)
prec1 = reduce_tensor(prec1)
prec5 = reduce_tensor(prec5)
else:
reduced_loss = loss.data
losses.update(to_python_float(reduced_loss), images.size(0))
top1.update(to_python_float(prec1), images.size(0))
top5.update(to_python_float(prec5), images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if args.local_rank == 0 and i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Speed {2:.3f} ({3:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, val_loader_len,
args.total_batch_size / batch_time.val,
args.total_batch_size / batch_time.avg,
batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return [top1.avg, top5.avg]
def save_one_checkpoint(state):
filename = 'checkpoint.pth.tar.bgk.one'
s = time.time()
torch.save(state, filename)
print("In bgk saved in {}s".format(time.time()-s))
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
def bgk_save_checkpoint(model, optimizer):
global must_chk
i = 0
while must_chk and i < 10:
state = {
'epoch': 1,
'iter': 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': 0,
'optimizer': optimizer.state_dict(),
}
i += 1
filename = 'checkpoint.pth.tar.bgk'
s = time.time()
clone_state = copy.deepcopy(state)
for k, v in clone_state['state_dict'].items():
clone_state['state_dict'][k] = v.cpu()
dur = time.time() - s
torch.save(clone_state, filename)
print("In bgk saved {}, clone={}s, write={}s".format(i, dur, time.time()-s-dur))
s = time.time()
torch.save(state, filename)
print("In bgk saved {}, save={}s".format(i, time.time()-s))
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, step, len_epoch):
"""LR schedule that should yield 76% converged accuracy with batch size 256"""
factor = epoch // 30
if epoch >= 80:
factor = factor + 1
lr = args.lr * (0.1 ** factor)
"""Warmup"""
if epoch < 5:
lr = lr * float(1 + step + epoch * len_epoch) / (5. * len_epoch)
if(args.local_rank == 0 and step % args.print_freq == 0 and step > 1):
print("Epoch = {}, step = {}, lr = {}".format(epoch, step, lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
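# Worked example of the schedule above (a sketch; assumes args.lr has been
# scaled to 0.4 for a global batch of 1024 and warmup is already past):
# factor = epoch // 30, with one extra decay after epoch 80, so
#   epoch 10 -> lr = 0.4 * 0.1**0 = 0.4
#   epoch 35 -> lr = 0.4 * 0.1**1 = 0.04
#   epoch 85 -> lr = 0.4 * 0.1**(2 + 1) = 0.0004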
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
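# Worked example (sketch): with batch_size = 4 and topk = (1, 5), if 3 of the
# 4 targets appear at rank 1 and all 4 appear within the top-5 predictions,
# accuracy() returns [tensor([75.]), tensor([100.])].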
def reduce_tensor(tensor):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.reduce_op.SUM)
rt /= args.world_size
return rt
if __name__ == '__main__':
main()
|
11564088
|
from functools import wraps
from ..mapping import EpistasisMap
from numpy import random
class DistributionException(Exception):
""""""
class SimulatedEpistasisMap(EpistasisMap):
"""Just like an epistasis map, but with extra methods
for setting epistatic coefficients
"""
def __init__(self, gpm, df=None, sites=None, values=None, stdeviations=None):
super().__init__(df=df, sites=sites, values=values, stdeviations=stdeviations)
self._gpm = gpm
@property
def avail_distributions(self):
return random.__all__
def set_order_from_distribution(self, orders, dist="normal", **kwargs):
"""Sets epistatic coefficients to values drawn from a statistical distribution.
        Distributions are found in NumPy's `random` module. Kwargs are passed
        directly to these methods.
"""
# Get distribution
try:
method = getattr(random, dist)
except AttributeError:
            raise DistributionException("Distribution not found. Check the `avail_distributions` "
                                        "attribute for available distributions.")
idx = self.data.orders.isin(orders)
self.data.loc[idx, "values"] = method(
size=sum(idx),
**kwargs
)
self._gpm.build()
@wraps(EpistasisMap.set_values)
def set_values(self, values, filter=None):
super().set_values(values, filter=filter)
self._gpm.build()
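# Usage sketch (hedged): `gpm`, `sites` and `values` below are placeholders;
# `gpm` is assumed to be a genotype-phenotype map object exposing the
# `build()` method required by the methods above.
#
#   emap = SimulatedEpistasisMap(gpm, sites=sites, values=values)
#   emap.set_order_from_distribution([1, 2], dist="normal", loc=0.0, scale=0.5)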
|
11564092
|
from __future__ import print_function
from six.moves import cPickle as pickle
import numpy as np
import os
import platform
import random
# Read a pickled file, handling Python 2/3 differences
def load_pickle(f):
    version = platform.python_version_tuple()  # get the Python version tuple
    if version[0] == "2":
        return pickle.load(f)  # deserialize into Python objects
    elif version[0] == "3":
        return pickle.load(f, encoding="latin1")
    raise ValueError("invalid python version: {}".format(version))
def load_CIFAR_batch(filename):
""" load single batch of cifar """
with open(filename, "rb") as f:
        datadict = load_pickle(f)  # dict
        X = datadict["data"]  # X: ndarray of raw pixel values
        Y = datadict["labels"]  # Y: list of class labels
        # reshape the flat rows into (10000, 3, 32, 32) in NCHW order
        # (the transpose below is an identity permutation, kept for clarity);
        # astype copies the data while converting the dtype to float
        X = X.reshape(10000, 3, 32, 32).transpose(0, 1, 2, 3).astype("float")
Y = np.array(Y)
return X, Y
def load_CIFAR100_batch(filename, number):
""" load single batch of cifar """
with open(filename, "rb") as f:
        datadict = load_pickle(f)  # dict
        X = datadict["data"]  # X: ndarray of raw pixel values
        Y = datadict["fine_labels"]  # Y: list of fine-grained class labels
        # reshape the flat rows into (number, 3, 32, 32) in NCHW order,
        # copying and converting the dtype to float
        X = X.reshape(number, 3, 32, 32).transpose(0, 1, 2, 3).astype("float")
Y = np.array(Y)
return X, Y
def load_CIFAR10(ROOT):
""" load all of cifar """
xs = [] # list
ys = []
    # training batches 1-5
    for b in range(1, 6):
        f = os.path.join(ROOT, "data_batch_%d" % (b,))
        X, Y = load_CIFAR_batch(f)
        xs.append(X)  # append each batch's data
        ys.append(Y)
    Xtr = np.concatenate(xs)  # merge the list of ndarrays into a single ndarray
    Ytr = np.concatenate(ys)
    del X, Y
    # test set
Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, "test_batch"))
return Xtr, Ytr, Xte, Yte
def load_CIFAR100(ROOT, typeName="train", numberdata=5000):
""" load all of cifar """
f = os.path.join(ROOT, typeName)
Xtr, Ytr = load_CIFAR100_batch(f, number=numberdata)
return Xtr, Ytr
def save_numpy(
    X,  # full data array
    Y,  # full label array
    path,  # target directory for the saved .npy files
    number=10000,  # number of samples to keep (<= total available)
    shuff="random_equally",  # how to draw a class-balanced random subset
    datasetType="cifar10",  # dataset name
    IsTargeted=False,  # if True, generate random target labels that differ from the ground truth (for targeted attacks)
):
class_num = 10
if datasetType == "cifar100":
class_num = 100
ys = []
X_shuffe = np.zeros((number, 3, 32, 32), dtype=np.double)
Y_shuffe = np.zeros((number, 1), dtype=np.uint8)
class_number_list = [0 for i in range(class_num)]
# print(class_number_list)
    # pick an equal number from each class; e.g. for cifar10 with number=1000, keep 1000/10 = 100 per class
label_one_choice_number = int(number / class_num)
index = 0
if shuff == "random_equally":
# print(Y.shape[0])
for i in range(Y.shape[0]):
for j in range(class_num):
if class_number_list[j] < label_one_choice_number:
if Y[i] == j:
class_number_list[j] += 1
X_shuffe[index] = X[i]
Y_shuffe[index] = Y[i]
index += 1
else:
        # otherwise simply keep the first `number` samples
for i in range(number):
X_shuffe[index] = X[i]
Y_shuffe[index] = Y[i]
index += 1
# print(class_number_list,Y_shuffe)
key = np.unique(Y_shuffe)
result = {}
for k in key:
mask = Y_shuffe == k
y_new = Y_shuffe[mask]
v = y_new.size
result[k] = v
print("check every type is include and in average", result)
if not IsTargeted:
for i in range(Y_shuffe.shape[0]):
y = np.zeros((1, class_num), dtype=np.uint8)
y[0][Y_shuffe[i]] = 1
ys.append(y[0])
# print(y[0])
np.save(
path + "{}_{}_origin_labels.npy".format(datasetType, number), np.array(ys)
)
print(
"save the npy file in path :",
path + "{}_{}_origin_labels.npy".format(datasetType, number),
)
np.save(
path + "{}_{}_origin_inputs.npy".format(datasetType, number),
np.array(X_shuffe / 255),
)
print(
"save the npy file in path :",
path + "{}_{}_origin_inputs.npy".format(datasetType, number),
)
else:
# print("A")
for i in range(Y_shuffe.shape[0]):
y = np.zeros((1, class_num), dtype=np.uint8)
list_target = [c for c in range(class_num)]
del list_target[int(Y_shuffe[i][0])]
target_index = random.randint(0, class_num - 2)
print("A", list_target, Y_shuffe[i], list_target[target_index])
y[0][list_target[target_index]] = 1
ys.append(y[0])
# print(y[0])
np.save(
path + "{}_{}_target_labels.npy".format(datasetType, number), np.array(ys)
)
print(
"save the npy file in path :",
path + "{}_{}_target_labels.npy".format(datasetType, number),
)
    print(
        "selection mode:",
        shuff,
        "\nIsTargeted:",
        IsTargeted,
        "\nnumber of classes:",
        class_num,
        "\ntotal samples: {}, per-class count: {}".format(
            number, label_one_choice_number
        ),
    )
def load_npy(path_inputs, path_labels):
origin_nature_samples = np.load(path_inputs)
origin_labels_samples = np.load(path_labels)
return origin_nature_samples, origin_labels_samples
##### returns the CIFAR-10 train data (Xtr, Ytr) and test data (Xte, Yte)
Xtr, Ytr, Xte, Yte = load_CIFAR10("../../cifar-10-python/cifar-10-batches-py")
###### save 1500 CIFAR-10 test samples, drawn evenly at random across the 10 classes (150 each); labels here are the original ground-truth labels, IsTargeted=False ######
###### with IsTargeted=True, random labels differing from the ground truth are generated instead, usable for targeted attacks; users may also define their own rules for generating target labels #####
save_numpy(
Xte,
Yte,
"../Datasets/CIFAR_cln_data/",
1500,
shuff="random_equally",
datasetType="cifar10",
IsTargeted=False,
)
#
# example call for cifar100
# numbertest=10000
# Xte100, Yte100=load_CIFAR100('../Datasets/CIFAR10/cifar-100-python','test',numbertest)
# save_numpy( Xte100, Yte100,'../Datasets/cln_data/',300,shuff="random_equally",datasetType="cifar100",IsTargeted=False)
# load and inspect the format of the saved dataset
# image_origin_path="../Datasets/cln_data/cifar10_100_origin_inputs.npy"
# label_origin_path="../Datasets/cln_data/cifar10_100_origin_labels.npy"
# origin_nature_samples = np.load(image_origin_path)
# origin_labels_samples = np.load(label_origin_path)
#
# print("sample_shape,label_shape",origin_nature_samples.shape,origin_labels_samples.shape)
|
11564095
|
import numpy as np
from os.path import join
def plot_weight_scatter(harn):
"""
Draw a scatter plot of the initial weights versus the final weights of a
network.
Example:
>>> import netharn as nh
>>> harn = nh.FitHarn.demo()
>>> harn.run()
Ignore:
>>> from netharn.plots.weight_scatter import * # NOQA
>>> from netharn.examples import mnist
>>> import kwplot
>>> harn = mnist.setup_harn()
>>> harn.preferences['timeout'] = 60 * 1
>>> kwplot.autompl(force='agg')
>>> harn.run()
>>> kwplot.autompl(force='auto')
>>> plot_weight_scatter(harn)
"""
import netharn as nh
cpu = nh.XPU.coerce('cpu')
path1 = join(harn.train_dpath, 'initial_state', 'initial_state.pt')
state1 = cpu.load(path1)
weights1 = state1['model_state_dict']
path2 = harn.best_snapshot()
state2 = cpu.load(path2)
weights2 = state2['model_state_dict']
keys1 = set(weights1.keys())
keys2 = set(weights2.keys())
keys = keys1 & keys2
assert keys == keys2
accum1 = []
accum2 = []
for key in keys:
w1 = weights1[key]
w2 = weights2[key]
accum1.append(w1.numpy().ravel())
accum2.append(w2.numpy().ravel())
points1 = np.hstack(accum1)
points2 = np.hstack(accum2)
    # Cosine distance between the flattened weight vectors (1 - cosine of the angle)
    from scipy.spatial import distance
    cosangle = distance.cosine(points1, points2)
    print('cosangle = {!r}'.format(cosangle))
import kwplot
import seaborn
seaborn.set()
plt = kwplot.autoplt()
plt.clf()
x = points1[::1]
y = points2[::1]
ax = plt.gca()
ax.figure.clf()
# seaborn.kdeplot(x, y, shade=True, gridsize=50)
ax = plt.gca()
ax.scatter(x, y, s=1, alpha=0.1, c='blue')
ax.set_xlabel('initial weights')
ax.set_ylabel('trained weights')
|
11564143
|
import tensorflow as tf
from awesome_gans.data import TFDatasets
from awesome_gans.utils import initialize, set_seed
from awesome_gans.wgan.config import get_config
from awesome_gans.wgan.model import WGAN
def main():
config = get_config()
# initial tf settings
initialize()
# reproducibility
set_seed(config.seed)
# load the data
dataset: tf.data.Dataset = TFDatasets(config).load_dataset(use_label=False)
if config.mode == 'train':
model = WGAN(config)
model.train(dataset)
elif config.mode == 'inference':
pass
    else:
        raise ValueError('unknown mode: {}'.format(config.mode))
if __name__ == '__main__':
    main()
|
11564151
|
import sys
__version__ = '0.1.2'
try:
__NLG_SETUP__
except NameError:
__NLG_SETUP__ = False
if __NLG_SETUP__:
sys.stderr.write('Partial import of nlg during the build process.\n')
else:
from .search import templatize # NOQA: F401
from .grammar import get_gramopts
grammar_options = get_gramopts()
__all__ = ['templatize', 'grammar_options']
|
11564165
|
from . import base_api_core
class LogCenter(base_api_core.Core):
def __init__(self, ip_address, port, username, password, secure=False, cert_verify=False, dsm_version=7,
debug=True, otp_code=None):
super(LogCenter, self).__init__(ip_address, port, username, password, secure, cert_verify, dsm_version, debug, otp_code)
def logcenter(self):
api_name = 'SYNO.LogCenter.RecvRule'
info = self.gen_list[api_name]
api_path = info['path']
req_param = {'version': info['maxVersion'], 'method': 'list'}
return self.request_data(api_name, api_path, req_param)
def client_status_cnt(self):
api_name = 'SYNO.Core.SyslogClient.Status'
info = self.gen_list[api_name]
api_path = info['path']
req_param = {'version': info['maxVersion'], 'method': 'cnt_get'}
return self.request_data(api_name, api_path, req_param)
def client_status_eps(self):
api_name = 'SYNO.Core.SyslogClient.Status'
info = self.gen_list[api_name]
api_path = info['path']
req_param = {'version': info['maxVersion'], 'method': 'eps_get'}
return self.request_data(api_name, api_path, req_param)
def remote_log_archives(self):
api_name = 'SYNO.LogCenter.Log'
info = self.gen_list[api_name]
api_path = info['path']
req_param = {'version': info['maxVersion'], 'method': 'get_remotearch_subfolder'}
return self.request_data(api_name, api_path, req_param)
def display_logs(self):
api_name = 'SYNO.Core.SyslogClient.Log'
info = self.gen_list[api_name]
api_path = info['path']
req_param = {'version': info['maxVersion'], 'method': 'list'}
return self.request_data(api_name, api_path, req_param)
def setting_storage_list(self):
api_name = 'SYNO.LogCenter.Setting.Storage'
info = self.gen_list[api_name]
api_path = info['path']
req_param = {'version': info['maxVersion'], 'method': 'get'}
return self.request_data(api_name, api_path, req_param)
def registry_send_list(self):
api_name = 'SYNO.LogCenter.Client'
info = self.gen_list[api_name]
api_path = info['path']
req_param = {'version': info['maxVersion'], 'method': 'get'}
return self.request_data(api_name, api_path, req_param)
def history(self):
api_name = 'SYNO.LogCenter.History'
info = self.gen_list[api_name]
api_path = info['path']
req_param = {'version': info['maxVersion'], 'method': 'list'}
return self.request_data(api_name, api_path, req_param)
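# Usage sketch (hedged): the host, port and credentials below are placeholders
# for a reachable Synology DSM instance.
#
#   lc = LogCenter('192.168.1.100', '5001', 'admin', 'password', secure=True)
#   print(lc.logcenter())          # list of receiving rules
#   print(lc.client_status_eps())  # events-per-second counters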
|
11564168
|
from django.template import Library
from restthumbnails.helpers import get_thumbnail_proxy
register = Library()
@register.assignment_tag(takes_context=True)
def thumbnail(context, source, size, method, extension):
if source:
return get_thumbnail_proxy(source, size, method, extension)
return None
|
11564169
|
from typing import List, Union
from pysbr.queries.query import Query
import pysbr.utils as utils
class EventsByEventIds(Query):
"""Get events from a list of event ids.
All event queries return information about matching events including date and time,
location, participants, and associated ids.
Args:
event_ids: SBR event id or list of event ids.
"""
@Query.typecheck
def __init__(self, event_ids: Union[List[int], int]):
super().__init__()
        # normalize a single id into a list (the return value was previously discarded)
        event_ids = utils.make_list(event_ids)
self.name = "eventsV2"
self.arg_str = self._get_args("event_ids")
self.args = {"eids": event_ids}
self.fields = self._get_fields("event")
self._raw = self._build_and_execute_query(
self.name, self.fields, self.arg_str, self.args
)
self._subpath_keys = ["events"]
self._sublist_keys = ["participants", "scores"]
self._id_key = "event id"
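# Usage sketch (hedged): the event ids below are placeholders, the query hits
# the live SBR endpoint on construction, and a `list()` accessor on Query is
# assumed.
#
#   q = EventsByEventIds([4143517, 4143518])
#   print(q.list())  # matching events with date/time, location, participants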
|
11564172
|
import json
import enum
import base64
from dataclasses import dataclass, field as dataclass_field, replace
from collections import defaultdict
from typing import Any, Union, Optional
from unittest.mock import MagicMock
from aws_lambda_api_event_utils.aws_lambda_api_event_utils import (
FormatVersion,
EVENT_FORMAT_VERSION_CACHE_KEY,
)
class IntegrationType(enum.Enum):
"""Event format identifiers"""
APIGW_HTTP_10 = ("API Gateway HTTP 1.0", FormatVersion.APIGW_10)
APIGW_REST = ("API Gateway REST", FormatVersion.APIGW_10)
APIGW_HTTP_20 = ("API Gateway HTTP 2.0", FormatVersion.APIGW_20)
# ALB_10 = "ALB 1.0"
def __init__(self, description, format_version) -> None:
self.description = description
self.format_version = format_version
def event_with_version(self, *args):
"""For setting the version on test events that don't match the full format spec."""
event = {}
for arg in args:
if arg:
event.update(arg)
event[EVENT_FORMAT_VERSION_CACHE_KEY] = self.format_version.name
return event
def create_context():
context = MagicMock()
del context.api_response
return context
def _merge_dicts(a: dict, b: dict, path: str = "") -> dict:
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
_merge_dicts(a[key], b[key], f"{path}.{key}")
elif a[key] == b[key]:
pass # same leaf value
else:
raise Exception(f"Conflict at {path}.{key}: {a[key]} {b[key]}")
else:
a[key] = b[key]
return a
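# Example (sketch): nested keys merge, identical leaves are tolerated, and
# conflicting leaves raise:
#   _merge_dicts({"a": {"x": 1}}, {"a": {"y": 2}})  -> {"a": {"x": 1, "y": 2}}
#   _merge_dicts({"a": 1}, {"a": 2})                -> Exception("Conflict at .a: 1 2")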
@dataclass
class Path:
stage: str
path: str
resource: str
path_parameters: dict = None
route_method: str = None
def _get_path_data(self, integration_type: IntegrationType) -> dict:
path_with_stage = f"/{self.stage}{self.path}"
path_data = {}
if integration_type in [
IntegrationType.APIGW_HTTP_10,
IntegrationType.APIGW_REST,
]:
path_data["requestContext"] = {}
path_data["requestContext"]["stage"] = self.stage
path_data["resource"] = self.resource
path_data["requestContext"]["resourcePath"] = self.resource
path_data["path"] = path_with_stage
path_data["requestContext"]["path"] = path_with_stage
path_data["pathParameters"] = self.path_parameters
elif integration_type == IntegrationType.APIGW_HTTP_20:
path_data["requestContext"] = {"http": {}}
path_data["requestContext"]["stage"] = self.stage
# resource
path_data["rawPath"] = path_with_stage
path_data["requestContext"]["http"]["path"] = path_with_stage
if self.path_parameters:
path_data["pathParameters"] = self.path_parameters
return path_data
@dataclass
class Body:
value: str
is_base64_encoded: bool
@classmethod
def from_str(cls, s: str):
return cls(s, False)
@classmethod
def from_bytes(cls, b: str):
return cls(str(base64.b64encode(b), "ascii"), True)
@classmethod
def empty(cls):
return cls(None, False)
def _get_body_data(self, integration_type: IntegrationType) -> dict:
body_data = {}
if integration_type in [
IntegrationType.APIGW_HTTP_10,
IntegrationType.APIGW_REST,
]:
body_data["body"] = self.value
body_data["isBase64Encoded"] = self.is_base64_encoded
elif integration_type == IntegrationType.APIGW_HTTP_20:
if self.value:
body_data["body"] = self.value
body_data["isBase64Encoded"] = self.is_base64_encoded
return body_data
@dataclass
class Event:
integration_type: IntegrationType
method: str
path: Path
headers: dict = None
content_type: str = None
query_params: dict = None
body: Union[str, bytes, Body] = None
def to_string(self):
return json.dumps(self.get_event(), indent=2)
def with_(
self,
integration_type: IntegrationType = None,
method: str = None,
path: Path = None,
headers: dict = None,
content_type: str = None,
query_params: dict = None,
body: Union[str, bytes, Body] = None,
):
kwargs = {}
for field in [
"integration_type",
"method",
"path",
"headers",
"content_type",
"query_params",
"body",
]:
if locals()[field] is not None:
kwargs[field] = locals()[field]
return replace(self, **kwargs)
def get_event(self) -> dict:
event = {}
for method in [
self._get_integration_type_data,
self._get_method_data,
self._get_headers_data,
self._get_query_params_data,
]:
data = method()
_merge_dicts(event, data)
if self.path is None:
raise ValueError("path not set")
if not isinstance(self.path, Path):
            raise TypeError("path must be a Path")
data = self.path._get_path_data(self.integration_type)
_merge_dicts(event, data)
body = self.body
if not body:
body = Body.empty()
else:
if isinstance(body, str):
body = Body.from_str(body)
elif isinstance(body, bytes):
body = Body.from_bytes(body)
elif not isinstance(body, Body):
raise TypeError(f"unknown body type {type(body)}")
data = body._get_body_data(self.integration_type)
_merge_dicts(event, data)
return event
def _get_integration_type_data(self) -> dict:
if self.integration_type is None:
raise ValueError("integration_type not set")
data = {}
if self.integration_type == IntegrationType.APIGW_REST:
pass
elif self.integration_type == IntegrationType.APIGW_HTTP_10:
data["version"] = "1.0"
elif self.integration_type == IntegrationType.APIGW_HTTP_20:
data["version"] = "2.0"
else:
raise ValueError(self.integration_type)
return data
def _get_method_data(self) -> dict:
if self.method is None:
raise ValueError("method not set")
method_data = {}
if self.integration_type in [
IntegrationType.APIGW_HTTP_10,
IntegrationType.APIGW_REST,
]:
method_data["httpMethod"] = self.method
method_data["requestContext"] = {"httpMethod": self.method}
elif self.integration_type == IntegrationType.APIGW_HTTP_20:
method_data["requestContext"] = {"http": {"method": self.method}}
else:
raise ValueError(self.integration_type)
return method_data
def _get_headers_data(self) -> dict:
headers_data = {}
if self.integration_type in [
IntegrationType.APIGW_HTTP_10,
IntegrationType.APIGW_REST,
]:
headers_data["headers"] = {}
headers_data["multiValueHeaders"] = {}
elif self.integration_type == IntegrationType.APIGW_HTTP_20:
headers_data["headers"] = {}
else:
raise ValueError(self.integration_type)
if self.headers:
for key, value in self.headers.items():
if self.integration_type in [
IntegrationType.APIGW_HTTP_10,
IntegrationType.APIGW_REST,
]:
if not isinstance(value, str):
headers_data["headers"][key] = value[-1]
headers_data["multiValueHeaders"][key] = value
else:
headers_data["headers"][key] = value
headers_data["multiValueHeaders"][key] = [value]
elif self.integration_type == IntegrationType.APIGW_HTTP_20:
if not isinstance(value, str):
headers_data["headers"][key] = ",".join(value)
else:
headers_data["headers"][key] = value
else:
raise ValueError(self.integration_type)
if self.content_type:
if self.integration_type in [
IntegrationType.APIGW_HTTP_10,
IntegrationType.APIGW_REST,
]:
headers_data["headers"]["content-type"] = self.content_type
headers_data["multiValueHeaders"]["content-type"] = [self.content_type]
elif self.integration_type == IntegrationType.APIGW_HTTP_20:
headers_data["headers"]["content-type"] = self.content_type
else:
raise ValueError(self.integration_type)
return headers_data
def _get_query_params_data(self) -> dict:
query_params_data = {}
if self.integration_type in [
IntegrationType.APIGW_HTTP_10,
IntegrationType.APIGW_REST,
]:
query_params_data["queryStringParameters"] = {}
query_params_data["multiValueQueryStringParameters"] = {}
elif self.integration_type == IntegrationType.APIGW_HTTP_20:
query_params_data["rawQueryString"] = ""
else:
raise ValueError(self.integration_type)
if self.query_params:
raw_params_data = []
if self.integration_type == IntegrationType.APIGW_HTTP_20:
query_params_data["queryStringParameters"] = {}
for key, value in self.query_params.items():
if self.integration_type in [
IntegrationType.APIGW_HTTP_10,
IntegrationType.APIGW_REST,
]:
if not isinstance(value, str):
query_params_data["queryStringParameters"][key] = value[0]
query_params_data["multiValueQueryStringParameters"][
key
] = value
else:
query_params_data["queryStringParameters"][key] = value
query_params_data["multiValueQueryStringParameters"][key] = [
value
]
elif self.integration_type == IntegrationType.APIGW_HTTP_20:
if not isinstance(value, str):
raw_params_data.extend((key, v) for v in value)
query_params_data["queryStringParameters"][key] = ",".join(
value
)
else:
raw_params_data.append((key, value))
query_params_data["queryStringParameters"][key] = value
else:
raise ValueError(self.integration_type)
if self.integration_type == IntegrationType.APIGW_HTTP_20:
query_params_data["rawQueryString"] = "&".join(
f"{k}={v}" for k, v in raw_params_data
)
return query_params_data
|
11564174
|
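# Plot fragment (hedged): assumes `argo` is an xarray.Dataset with a
# 'temperature' DataArray (e.g. an Argo float section loaded via
# xarray.open_dataset) and a matplotlib backend; depth increases downward
# because of yincrease=False.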
argo.temperature.plot.pcolormesh(yincrease=False,
cbar_kwargs={'label': 'Temperature (°C)'},
cmap='Reds',
levels=[5, 15]
);
|
11564197
|
import os
import sys
import pickle
import numpy as np
import tensorflow as tf
import cv2
class YOLO2_TINY_TF(object):
def __init__(self, in_shape, weight_pickle, proc="cpu"):
self.g = tf.Graph()
self.sess = tf.Session(graph=self.g)
self.proc = proc
self.weight_pickle = weight_pickle
self.inp, self.nodes = self.build_graph(in_shape)
def get_y2t_w(self):
with open(self.weight_pickle, "rb") as h:
if "2.7" in sys.version:
y2t_w = pickle.load(h)
elif "3.5" in sys.version:
y2t_w = pickle.load(h, encoding='latin1')
else:
raise Exception("Unknown python version")
return y2t_w
def build_graph(self, in_shape):
y2t_w = self.get_y2t_w()
nodes = []
with self.g.as_default():
with tf.device('/' + self.proc + ':0'):
inp = tf.placeholder(tf.float32, shape=in_shape, name="input")
conv0 = tf.nn.conv2d(inp, y2t_w[0]["kernel"], strides=[1, 1, 1, 1], padding='SAME') ; nodes.append(conv0)
bias0 = tf.nn.bias_add(conv0, y2t_w[0]["biases"]) ; nodes.append(bias0)
bn0 = tf.nn.batch_normalization(bias0, y2t_w[0]["moving_mean"], y2t_w[0]["moving_variance"], None, y2t_w[0]["gamma"], 1e-5) ; nodes.append(bn0)
l1 = tf.maximum(bn0, .1 * bn0) ; nodes.append(l1)
p2 = tf.nn.max_pool(l1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') ; nodes.append(p2)
conv3 = tf.nn.conv2d(p2, y2t_w[1]["kernel"], strides=[1, 1, 1, 1], padding='SAME') ; nodes.append(conv3)
bias3 = tf.nn.bias_add(conv3, y2t_w[1]["biases"]) ; nodes.append(bias3)
bn3 = tf.nn.batch_normalization(bias3, y2t_w[1]["moving_mean"], y2t_w[1]["moving_variance"], None, y2t_w[1]["gamma"], 1e-5) ; nodes.append(bn3)
l4 = tf.maximum(bn3, .1 * bn3) ; nodes.append(l4)
p5 = tf.nn.max_pool(l4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') ; nodes.append(p5)
conv6 = tf.nn.conv2d(p5, y2t_w[2]["kernel"], strides=[1, 1, 1, 1], padding='SAME') ; nodes.append(conv6)
bias6 = tf.nn.bias_add(conv6, y2t_w[2]["biases"]) ; nodes.append(bias6)
bn6 = tf.nn.batch_normalization(bias6, y2t_w[2]["moving_mean"], y2t_w[2]["moving_variance"], None, y2t_w[2]["gamma"], 1e-5) ; nodes.append(bn6)
l7 = tf.maximum(bn6, .1 * bn6) ; nodes.append(l7)
p8 = tf.nn.max_pool(l7, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') ; nodes.append(p8)
conv9 = tf.nn.conv2d(p8, y2t_w[3]["kernel"], strides=[1, 1, 1, 1], padding='SAME') ; nodes.append(conv9)
bias9 = tf.nn.bias_add(conv9, y2t_w[3]["biases"]) ; nodes.append(bias9)
bn9 = tf.nn.batch_normalization(bias9, y2t_w[3]["moving_mean"], y2t_w[3]["moving_variance"], None, y2t_w[3]["gamma"], 1e-5) ; nodes.append(bn9)
l10 = tf.maximum(bn9, .1 * bn9) ; nodes.append(l10)
p11 = tf.nn.max_pool(l10, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') ; nodes.append(p11)
conv12 = tf.nn.conv2d(p11, y2t_w[4]["kernel"], strides=[1, 1, 1, 1], padding='SAME') ; nodes.append(conv12)
bias12 = tf.nn.bias_add(conv12, y2t_w[4]["biases"]) ; nodes.append(bias12)
bn12 = tf.nn.batch_normalization(bias12, y2t_w[4]["moving_mean"], y2t_w[4]["moving_variance"], None, y2t_w[4]["gamma"], 1e-5) ; nodes.append(bn12)
l13 = tf.maximum(bn12, .1 * bn12) ; nodes.append(l13)
p14 = tf.nn.max_pool(l13, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') ; nodes.append(p14)
conv15 = tf.nn.conv2d(p14, y2t_w[5]["kernel"], strides=[1, 1, 1, 1], padding='SAME') ; nodes.append(conv15)
bias15 = tf.nn.bias_add(conv15, y2t_w[5]["biases"]) ; nodes.append(bias15)
bn15 = tf.nn.batch_normalization(bias15, y2t_w[5]["moving_mean"], y2t_w[5]["moving_variance"], None, y2t_w[5]["gamma"], 1e-5) ; nodes.append(bn15)
l16 = tf.maximum(bn15, .1 * bn15) ; nodes.append(l16)
p17 = tf.nn.max_pool(l16, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding='SAME') ; nodes.append(p17)
conv18 = tf.nn.conv2d(p17, y2t_w[6]["kernel"], strides=[1, 1, 1, 1], padding='SAME') ; nodes.append(conv18)
bias18 = tf.nn.bias_add(conv18, y2t_w[6]["biases"]) ; nodes.append(bias18)
bn18 = tf.nn.batch_normalization(bias18, y2t_w[6]["moving_mean"], y2t_w[6]["moving_variance"], None, y2t_w[6]["gamma"], 1e-5) ; nodes.append(bn18)
l19 = tf.maximum(bn18, .1 * bn18) ; nodes.append(l19)
conv20 = tf.nn.conv2d(l19, y2t_w[7]["kernel"], strides=[1, 1, 1, 1], padding='SAME') ; nodes.append(conv20)
bias20 = tf.nn.bias_add(conv20, y2t_w[7]["biases"]) ; nodes.append(bias20)
bn20 = tf.nn.batch_normalization(bias20, y2t_w[7]["moving_mean"], y2t_w[7]["moving_variance"], None, y2t_w[7]["gamma"], 1e-5) ; nodes.append(bn20)
l21 = tf.maximum(bn20, .1 * bn20) ; nodes.append(l21)
conv22 = tf.nn.conv2d(l21, y2t_w[8]["kernel"], strides=[1, 1, 1, 1], padding='SAME') ; nodes.append(conv22)
out = tf.nn.bias_add(conv22, y2t_w[8]["biases"]) ; nodes.append(out)
init = tf.global_variables_initializer()
self.sess.run(init)
return inp, nodes
def _inference(self, im):
feed_dict = {self.inp: im}
out_tensors = self.sess.run(self.nodes, feed_dict)
return self.nodes, out_tensors
def inference(self, im, out_f=None):
feed_dict = {self.inp: im}
out_tensors = self.sess.run(self.nodes, feed_dict)
out_tensors_d = {}
for node_i in range(len(self.nodes)):
nodename = self.nodes[node_i].name
nodename = (nodename.split(":")[0]).split("/")[0]
out_tensors_d[nodename] = out_tensors[node_i]
        if out_f is not None:
with open(out_f, "wb") as h:
pickle.dump(out_tensors_d, h, protocol=2)
return out_tensors[len(out_tensors) - 1]
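# Usage sketch (hedged): the pickle path and input shape are assumptions; the
# network expects NHWC float input matching `in_shape`.
#
#   net = YOLO2_TINY_TF([1, 416, 416, 3], "y2t_weights.pickle", proc="cpu")
#   dummy = np.zeros((1, 416, 416, 3), dtype=np.float32)
#   preds = net.inference(dummy)  # final conv output of the last layer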
|
11564226
|
import re
import os.path
import urllib.request
import sublime
from ..emmet.html_matcher import AttributeToken
from ..emmet.action_utils import CSSProperty
pairs = {
'{': '}',
'[': ']',
'(': ')'
}
known_tags = [
'a', 'abbr', 'acronym', 'address', 'applet', 'area', 'article', 'aside', 'audio',
'b', 'base', 'basefont', 'bdi', 'bdo', 'bgsound', 'big', 'blink', 'blockquote', 'body', 'br', 'button',
'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup', 'command', 'content',
'data', 'datalist', 'dd', 'del', 'details', 'dfn', 'dialog', 'dir', 'div', 'dl', 'dt',
'element', 'em', 'embed',
'fieldset', 'figcaption', 'figure', 'font', 'footer', 'form', 'frame', 'frameset',
'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'head', 'header', 'hgroup', 'hr', 'html',
'i', 'iframe', 'image', 'img', 'input', 'ins', 'isindex',
'kbd', 'keygen',
'label', 'legend', 'li', 'link', 'listing',
    'main', 'map', 'mark', 'marquee', 'menu', 'menuitem', 'meta', 'meter', 'multicol',
'nav', 'nextid', 'nobr', 'noembed', 'noframes', 'noscript',
'object', 'ol', 'optgroup', 'option', 'output',
'p', 'param', 'picture', 'plaintext', 'pre', 'progress',
'q',
'rb', 'rp', 'rt', 'rtc', 'ruby',
's', 'samp', 'script', 'section', 'select', 'shadow', 'slot', 'small', 'source', 'spacer', 'span', 'strike', 'strong', 'style', 'sub', 'summary', 'sup',
'table', 'tbody', 'td', 'template', 'textarea', 'tfoot', 'th', 'thead', 'time', 'title', 'tr', 'track', 'tt', 'u', 'ul', 'var', 'video', 'wbr', 'xmp'
]
pairs_end = {}
for k, v in pairs.items():
pairs_end[v] = k
NON_SPACE_LEFT = 1
NON_SPACE_RIGHT = 2
def narrow_to_non_space(view: sublime.View, region: sublime.Region, direction = NON_SPACE_LEFT | NON_SPACE_RIGHT) -> sublime.Region:
"Returns copy of region which starts and ends at non-space character"
begin = region.begin()
end = region.end()
if direction & NON_SPACE_LEFT:
while begin < end:
if not view.substr(begin).isspace():
break
begin += 1
if (direction & NON_SPACE_RIGHT):
while end > begin:
if not view.substr(end - 1).isspace():
break
end -= 1
return sublime.Region(begin, end)
def replace_with_snippet(view: sublime.View, edit: sublime.Edit, region: sublime.Region, snippet: str):
"Replaces given region view with snippet contents"
sel = view.sel()
sel.clear()
sel.add(sublime.Region(region.begin(), region.begin()))
view.replace(edit, region, '')
view.run_command('insert_snippet', {
'contents': preprocess_snippet(snippet)
})
def multicursor_replace_with_snippet(view: sublime.View, edit: sublime.Edit, payload: list):
"Replaces multiple regions with snippets, maintaining final caret positions"
sels = []
doc_size = view.size()
for region, snippet in reversed(list(payload)):
replace_with_snippet(view, edit, region, snippet)
# Update locations of existing regions
next_size = view.size()
delta = next_size - doc_size
for r in sels:
r.a += delta
r.b += delta
doc_size = next_size
sels += list(view.sel())
s = view.sel()
s.clear()
s.add_all(sels)
def get_caret(view: sublime.View) -> int:
"Returns current caret position for single selection"
sel = view.sel()
return sel[0].begin() if len(sel) else 0
def get_content(view: sublime.View) -> str:
"Returns contents of given view"
return view.substr(sublime.Region(0, view.size()))
def go_to_pos(view: sublime.View, pos: int):
"Scroll editor to given position in code"
sel = view.sel()
sel.clear()
sel.add(sublime.Region(pos, pos))
view.show(pos)
def is_url(file_path: str):
"Check if given file path is an URL"
return re.match(r'^\w+?://', file_path)
def read_file(file_path: str, size=-1):
"Reads content of given file. If `size` if given, reads up to `size` bytes"
if is_url(file_path):
with urllib.request.urlopen(file_path, timeout=5) as req:
return req.read(size)
with open(file_path, 'rb') as fp:
return fp.read(size)
def locate_file(editor_file: str, file_name: str):
"""
Locate `file_name` file relative to `editor_file`.
If `file_name` is absolute, will traverse up to folder structure looking for
matching file.
"""
previous_parent = ''
parent = os.path.dirname(editor_file)
while parent and os.path.exists(parent) and parent != previous_parent:
tmp = create_path(parent, file_name)
if os.path.exists(tmp):
return tmp
previous_parent = parent
parent = os.path.dirname(parent)
def create_path(parent: str, file_name: str):
"""
Creates absolute path by concatenating `parent` and `file_name`.
If `parent` points to file, its parent directory is used
"""
result = ''
file_name = file_name.lstrip('/')
if os.path.exists(parent):
if os.path.isfile(parent):
parent = os.path.dirname(parent)
result = os.path.normpath(os.path.join(parent, file_name))
return result
def attribute_value(attr: AttributeToken):
"Returns value of given attribute, parsed by Emmet HTML matcher"
value = attr.value
if is_quoted(value):
return value[1:-1]
return value
def patch_attribute(attr: AttributeToken, value: str, name: str=None):
"Returns patched version of given HTML attribute, parsed by Emmet HTML matcher"
if name is None:
name = attr.name
before = ''
after = ''
if attr.value is not None:
if is_quoted(attr.value):
# Quoted value or React-like expression
before = attr.value[0]
after = attr.value[-1]
else:
# Attribute without value (boolean)
before = after = '"'
return '%s=%s%s%s' % (name, before, value, after)
def patch_property(view: sublime.View, prop: CSSProperty, value: str, name=None):
"Returns patched version of given CSS property, parsed by Emmet CSS matcher"
if name is None:
name = view.substr(prop.name)
before = view.substr(sublime.Region(prop.before, prop.name.begin()))
between = view.substr(sublime.Region(prop.name.end(), prop.value.begin()))
after = view.substr(sublime.Region(prop.value.end(), prop.after))
return ''.join((before, name, between, value, after))
def is_quoted(value: str):
"Check if given value is either quoted or written as expression"
return value and ((value[0] in '"\'' and value[0] == value[-1]) or \
(value[0] == '{' and value[-1] == '}'))
def attribute_region(attr: AttributeToken):
"Returns region that covers entire attribute"
end = attr.value_end if attr.value is not None else attr.name_end
return sublime.Region(attr.name_start, end)
def has_new_line(text: str) -> bool:
"Check if given text contains newline character"
return '\n' in text or '\r' in text
def to_region(rng: list) -> sublime.Region:
"Converts given list range to Sublime region"
return sublime.Region(rng[0], rng[1])
def escape_snippet(text: str) -> str:
"Escapes given text for snippet insertion"
return text.replace('$', '\\$')
def preprocess_snippet(text: str) -> str:
"Preprocess given text before inserting into document: escapes $ charaters where required"
result = ''
i = 0
l = len(text)
while i < l:
ch = text[i]
next_ch = text[i + 1] if i + 1 < l else ''
i += 1
if ch == '\\':
# Escape sequence
result += ch + next_ch
i += 1
elif ch == '$' and next_ch != '{':
# Non-field $ character
result += '\\' + ch
else:
result += ch
return result
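# Example (sketch): a bare `$` is escaped while a `${...}` snippet field is kept:
#   preprocess_snippet('costs $5 in ${1:field}')  ->  'costs \\$5 in ${1:field}'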
|
11564250
|
import unittest
from pyformlang.finite_automaton.symbol import Symbol
from pyformlang.regular_expression import Regex
from pyformlang.cfg import CFG
from pyformlang.rsa.recursive_automaton import RecursiveAutomaton
from pyformlang.rsa.box import Box
class TestRSA(unittest.TestCase):
def test_creation(self):
# S -> a S b | a b
enfa = Regex("a S b | a b").to_epsilon_nfa()
dfa = enfa.minimize()
box = Box(dfa, Symbol("S"))
rsa_1 = RecursiveAutomaton({Symbol("S")}, Symbol("S"), {box})
self.assertEqual(rsa_1.get_number_of_boxes(), 1)
self.assertEqual(box, rsa_1.get_box(Symbol("S")))
self.assertEqual(rsa_1.labels, {Symbol("S")})
self.assertEqual(rsa_1.initial_label, Symbol("S"))
rsa_2 = RecursiveAutomaton()
rsa_2.add_box(box)
rsa_2.change_initial_label(Symbol("S"))
self.assertEqual(rsa_2, rsa_1)
        # Check that the initial label is added to the label set automatically
rsa_3 = RecursiveAutomaton(set(), Symbol("S"), {box})
self.assertEqual(rsa_3.labels, {Symbol("S")})
        with self.assertRaises(ValueError):
            RecursiveAutomaton({Symbol("S"), Symbol("v")}, Symbol("S"), {box})
def test_from_regex(self):
# S -> a*
rsa_2 = RecursiveAutomaton.from_regex(Regex("a*"), Symbol("S"))
enfa = Regex("a*").to_epsilon_nfa()
dfa = enfa.minimize()
box = Box(dfa, Symbol("S"))
rsa_1 = RecursiveAutomaton({Symbol("S")}, Symbol("S"), {box})
self.assertEqual(rsa_2, rsa_1)
def test_is_equivalent_to(self):
# S -> a* b*
rsa_1 = RecursiveAutomaton.from_regex(Regex("a* b*"), Symbol("S"))
# S -> a+ b+
rsa_2 = RecursiveAutomaton.from_regex(Regex("a a* b b*"), Symbol("S"))
self.assertNotEqual(rsa_1, rsa_2)
def test_add_box(self):
rsa_1 = RecursiveAutomaton.from_regex(Regex("a* b*"), Symbol("S"))
new_box = Box(Regex("a*").to_epsilon_nfa().minimize(), Symbol("S"))
rsa_1.add_box(new_box)
self.assertEqual(new_box.dfa, rsa_1.get_box(Symbol("S")).dfa)
self.assertEqual(rsa_1.labels, {Symbol("S")})
def test_from_cfg(self):
# g1: S -> a S b | a b
rsa1_g1 = RecursiveAutomaton.from_cfg(CFG.from_text("S -> a S b | a b"))
rsa2_g1 = RecursiveAutomaton.from_regex(Regex("a S b | a b"), Symbol("S"))
self.assertEqual(rsa1_g1, rsa2_g1)
# g2: S -> a V b
# V -> c S d | c d
rsa1_g2 = RecursiveAutomaton.from_cfg(CFG.from_text("S -> a V b\nV -> c S d | c d"))
self.assertEqual(rsa1_g2.get_number_of_boxes(), 2)
self.assertEqual(rsa1_g2.labels, {Symbol("S"), Symbol("V")})
dfa_S = Regex("a V b").to_epsilon_nfa().minimize()
self.assertEqual(rsa1_g2.get_box(Symbol("S")), Box(dfa_S, Symbol("S")))
dfa_V = Regex("c S d | c d").to_epsilon_nfa().minimize()
self.assertEqual(rsa1_g2.get_box(Symbol("V")), Box(dfa_V, Symbol("V")))
|
11564272
|
import torch
import torch.nn as nn
def Conv1x1BnRelu(in_channels,out_channels):
return nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU6(inplace=True),
)
def upSampling1(in_channels,out_channels):
return nn.Sequential(
nn.Conv2d(in_channels=in_channels,out_channels=out_channels,kernel_size=1,stride=1,padding=0,bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU6(inplace=True),
nn.Upsample(scale_factor=2, mode='nearest')
)
def upSampling2(in_channels,out_channels):
return nn.Sequential(
upSampling1(in_channels,out_channels),
nn.Upsample(scale_factor=2, mode='nearest'),
)
def downSampling1(in_channels,out_channels):
return nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU6(inplace=True),
)
def downSampling2(in_channels,out_channels):
return nn.Sequential(
nn.MaxPool2d(kernel_size=3, stride=2,padding=1),
downSampling1(in_channels=in_channels, out_channels=out_channels),
)
class ASFF(nn.Module):
def __init__(self, level, channel1, channel2, channel3, out_channel):
super(ASFF, self).__init__()
self.level = level
funsed_channel = 8
if self.level == 1:
            # level 1: bring levels 2 and 3 down to level 1's resolution
self.level2_1 = downSampling1(channel2,channel1)
self.level3_1 = downSampling2(channel3,channel1)
self.weight1 = Conv1x1BnRelu(channel1, funsed_channel)
self.weight2 = Conv1x1BnRelu(channel1, funsed_channel)
self.weight3 = Conv1x1BnRelu(channel1, funsed_channel)
self.expand_conv = Conv1x1BnRelu(channel1,out_channel)
if self.level == 2:
            # level 2: upsample level 1 and downsample level 3 to level 2's resolution
self.level1_2 = upSampling1(channel1,channel2)
self.level3_2 = downSampling1(channel3,channel2)
self.weight1 = Conv1x1BnRelu(channel2, funsed_channel)
self.weight2 = Conv1x1BnRelu(channel2, funsed_channel)
self.weight3 = Conv1x1BnRelu(channel2, funsed_channel)
self.expand_conv = Conv1x1BnRelu(channel2, out_channel)
if self.level == 3:
            # level 3: upsample levels 1 and 2 to level 3's resolution
self.level1_3 = upSampling2(channel1,channel3)
self.level2_3 = upSampling1(channel2,channel3)
self.weight1 = Conv1x1BnRelu(channel3, funsed_channel)
self.weight2 = Conv1x1BnRelu(channel3, funsed_channel)
self.weight3 = Conv1x1BnRelu(channel3, funsed_channel)
self.expand_conv = Conv1x1BnRelu(channel3, out_channel)
self.weight_level = nn.Conv2d(funsed_channel * 3, 3, kernel_size=1, stride=1, padding=0)
self.softmax = nn.Softmax(dim=1)
def forward(self, x, y, z):
if self.level == 1:
level_x = x
level_y = self.level2_1(y)
level_z = self.level3_1(z)
if self.level == 2:
level_x = self.level1_2(x)
level_y = y
level_z = self.level3_2(z)
if self.level == 3:
level_x = self.level1_3(x)
level_y = self.level2_3(y)
level_z = z
weight1 = self.weight1(level_x)
weight2 = self.weight2(level_y)
weight3 = self.weight3(level_z)
level_weight = torch.cat((weight1, weight2, weight3), 1)
weight_level = self.weight_level(level_weight)
weight_level = self.softmax(weight_level)
        # keep the channel dim (0:1, 1:2, 2:3) so the (N,1,H,W) weights broadcast over (N,C,H,W)
        fused_level = level_x * weight_level[:, 0:1, :, :] + level_y * weight_level[:, 1:2, :, :] + level_z * weight_level[:, 2:3, :, :]
out = self.expand_conv(fused_level)
return out
if __name__ == '__main__':
model = ASFF(level=3, channel1=512, channel2=256, channel3=128, out_channel=128)
print(model)
x = torch.randn(1, 512, 16, 16)
y = torch.randn(1, 256, 32, 32)
z = torch.randn(1, 128, 64, 64)
out = model(x,y,z)
print(out.shape)
|
11564277
|
from kivy.uix.floatlayout import FloatLayout
from kivymd.uix.tab import MDTabsBase
import utils
utils.load_kv("tab_two.kv")
class TabTwo(FloatLayout, MDTabsBase):
"""
Tab Item Two.
"""
|
11564364
|
import numpy as np
import pytest
from steppy.adapter import Adapter, E
@pytest.fixture
def data():
return {
'input_1': {
'features': np.array([
[1, 6],
[2, 5],
[3, 4]
]),
'labels': np.array([2, 5, 3])
},
'input_2': {
'extra_features': np.array([
[5, 7, 3],
[67, 4, 5],
[6, 13, 14]
])
},
'input_3': {
'images': np.array([
[[0, 255], [255, 0]],
[[255, 0], [0, 255]],
[[255, 255], [0, 0]],
]),
'labels': np.array([1, 1, 0])
}
}
def test_adapter_creates_defined_keys(data):
adapter = Adapter({
'X': [E('input_1', 'features')],
'Y': [E('input_2', 'extra_features')]
})
res = adapter.adapt(data)
assert {'X', 'Y'} == set(res.keys())
def test_recipe_with_single_item(data):
adapter = Adapter({
'X': E('input_1', 'labels'),
'Y': E('input_3', 'labels'),
})
res = adapter.adapt(data)
assert np.array_equal(res['X'], data['input_1']['labels'])
assert np.array_equal(res['Y'], data['input_3']['labels'])
def test_recipe_with_list(data):
adapter = Adapter({
'X': [],
'Y': [E('input_1', 'features')],
'Z': [E('input_1', 'features'),
E('input_2', 'extra_features')]
})
res = adapter.adapt(data)
for i, key in enumerate(('X', 'Y', 'Z')):
assert isinstance(res[key], list)
assert len(res[key]) == i
assert res['X'] == []
assert np.array_equal(res['Y'][0], data['input_1']['features'])
assert np.array_equal(res['Z'][0], data['input_1']['features'])
assert np.array_equal(res['Z'][1], data['input_2']['extra_features'])
def test_recipe_with_tuple(data):
adapter = Adapter({
'X': (),
'Y': (E('input_1', 'features'),),
'Z': (E('input_1', 'features'), E('input_2', 'extra_features'))
})
res = adapter.adapt(data)
for i, key in enumerate(('X', 'Y', 'Z')):
assert isinstance(res[key], tuple)
assert len(res[key]) == i
assert res['X'] == ()
assert np.array_equal(res['Y'][0], data['input_1']['features'])
assert np.array_equal(res['Z'][0], data['input_1']['features'])
assert np.array_equal(res['Z'][1], data['input_2']['extra_features'])
def test_recipe_with_dictionary(data):
adapter = Adapter({
'X': {},
'Y': {'a': E('input_1', 'features')},
'Z': {'a': E('input_1', 'features'),
'b': E('input_2', 'extra_features')}
})
res = adapter.adapt(data)
for i, key in enumerate(('X', 'Y', 'Z')):
assert isinstance(res[key], dict)
assert len(res[key]) == i
assert res['X'] == {}
assert np.array_equal(res['Y']['a'], data['input_1']['features'])
assert np.array_equal(res['Z']['a'], data['input_1']['features'])
assert np.array_equal(res['Z']['b'], data['input_2']['extra_features'])
def test_recipe_with_constants(data):
adapter = Adapter({
'A': 112358,
'B': 3.14,
'C': "lorem ipsum",
'D': ('input_1', 'features'),
'E': {112358: 112358, 'a': 'a', 3.14: 3.14},
'F': [112358, 3.14, "lorem ipsum", ('input_1', 'features')]
})
res = adapter.adapt(data)
assert res['A'] == 112358
assert res['B'] == 3.14
assert res['C'] == "lorem ipsum"
assert res['D'] == ('input_1', 'features')
assert res['E'] == {112358: 112358, 'a': 'a', 3.14: 3.14}
assert res['F'] == [112358, 3.14, "lorem ipsum", ('input_1', 'features')]
def test_nested_recipes(data):
adapter = Adapter({
'X': [{'a': [E('input_1', 'features')]}],
'Y': {'a': [{'b': E('input_2', 'extra_features')}]}
})
res = adapter.adapt(data)
assert res['X'] == [{'a': [data['input_1']['features']]}]
assert res['Y'] == {'a': [{'b': data['input_2']['extra_features']}]}
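# Minimal usage sketch, not part of the test suite: an Adapter maps named
# inputs into the layout a downstream step expects, as the tests above show.
if __name__ == '__main__':
    example = {'input_1': {'features': np.array([[1, 2]])}}
    adapter = Adapter({'X': E('input_1', 'features')})
    print(adapter.adapt(example))  # -> {'X': array([[1, 2]])}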
|
11564429
|
from __future__ import annotations
import pytest
from testing.runner import and_exit
from testing.runner import trigger_command_mode
@pytest.mark.parametrize('setting', ('tabsize', 'tabstop'))
def test_set_tabstop(run, setting):
with run() as h, and_exit(h):
h.press('a')
h.press('Left')
trigger_command_mode(h)
h.press_and_enter(f':{setting} 2')
h.await_text('updated!')
h.press('Tab')
h.await_text('\n a')
h.await_cursor_position(x=2, y=1)
@pytest.mark.parametrize('tabsize', ('-1', '0', 'wat'))
def test_set_invalid_tabstop(run, tabsize):
with run() as h, and_exit(h):
h.press('a')
h.press('Left')
trigger_command_mode(h)
h.press_and_enter(f':tabstop {tabsize}')
h.await_text(f'invalid size: {tabsize}')
h.press('Tab')
h.await_text(' a')
h.await_cursor_position(x=4, y=1)
|
11564434
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.init as init
# Local imports
from utils.utils import count_parameters
def create_model(params):
    feat_ext = str(params['feat_ext'])
    if feat_ext == 'b0_lite':
        print('Using effnet-lite b0 as feature extractor.')
        return effnetb0_lite_rnn(params)
    elif feat_ext == 'b0':
        print('Using effnet b0 as feature extractor.')
        return effnetb0_rnn(params)
    else:
        print('No feature extraction backbone selected.')
        return None
# EfficientNet-Lite-B0
class effnetb0_lite_rnn(torch.nn.Module):
def __init__(self, params):
super(effnetb0_lite_rnn, self).__init__()
self.img_size = params['img_size']
self.seq_len = params['seq_len']
self.hidden_size = params['hidden_size']
self.num_classes = params['num_classes']
# Feature extraction (no 1x1 conv layer, global pooling, dropout and fc head)
effnetb0 = torch.hub.load(
"rwightman/gen-efficientnet-pytorch",
"efficientnet_lite0",
pretrained=True,
exportable=True)
self.feat = torch.nn.Sequential(*list(effnetb0.children())[:-4])
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.rnn = nn.GRU(input_size=1280, hidden_size=self.hidden_size, num_layers=1, batch_first=True)
# Prediction structure
self.pred = nn.Sequential(
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(self.hidden_size, self.num_classes))
# Initialize rnn weights
init.xavier_normal_(self.rnn.all_weights[0][0])
init.xavier_normal_(self.rnn.all_weights[0][1])
print('count_parameters(self.feat):', count_parameters(self.feat))
print('count_parameters(self.rnn):', count_parameters(self.rnn))
print('count_parameters(self.pred):', count_parameters(self.pred))
def forward(self, x):
x = x.view(-1, 3, self.img_size[0], self.img_size[1])
        x = self.feat(x)
x = self.avgpool(x)
x = x.view(-1, self.seq_len, 1280)
self.rnn.flatten_parameters()
y, _ = self.rnn(x)
y = y.contiguous().view(-1, self.hidden_size)
y = self.pred(y)
return y
# EfficientNet-B0
class effnetb0_rnn(torch.nn.Module):
def __init__(self, params):
super(effnetb0_rnn, self).__init__()
self.img_size = params['img_size']
self.seq_len = params['seq_len']
self.hidden_size = params['hidden_size']
self.num_classes = params['num_classes']
# Feature extraction (no 1x1 conv layer, global pooling, dropout and fc head)
effnetb0 = torch.hub.load(
"rwightman/gen-efficientnet-pytorch",
"efficientnet_b0",
pretrained=True,
exportable=True)
self.feat = torch.nn.Sequential(*list(effnetb0.children())[:-4])
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.rnn = nn.GRU(input_size=1280, hidden_size=self.hidden_size, num_layers=1, batch_first=True)
# Prediction structure
self.pred = nn.Sequential(
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(self.hidden_size, self.num_classes))
# Initialize rnn weights
init.xavier_normal_(self.rnn.all_weights[0][0])
init.xavier_normal_(self.rnn.all_weights[0][1])
print('count_parameters(self.feat):', count_parameters(self.feat))
print('count_parameters(self.rnn):', count_parameters(self.rnn))
print('count_parameters(self.pred):', count_parameters(self.pred))
def forward(self, x):
x = x.view(-1, 3, self.img_size[0], self.img_size[1])
        x = self.feat(x)
x = self.avgpool(x)
x = x.view(-1, self.seq_len, 1280)
self.rnn.flatten_parameters()
y, _ = self.rnn(x)
y = y.contiguous().view(-1, self.hidden_size)
y = self.pred(y)
return y
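# Minimal usage sketch (assumes network access for torch.hub and that
# utils.utils.count_parameters is importable); the parameter values here are
# illustrative, not prescribed by this module.
if __name__ == '__main__':
    params = {
        'feat_ext': 'b0_lite',
        'img_size': (224, 224),
        'seq_len': 8,
        'hidden_size': 256,
        'num_classes': 4,
    }
    model = create_model(params)
    # forward() reshapes its input to (batch * seq_len, 3, H, W), so a
    # (batch, seq_len, 3, H, W) tensor flattens cleanly
    clips = torch.randn(2, 8, 3, 224, 224)
    out = model(clips)
    print(out.shape)  # (batch * seq_len, num_classes)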
|
11564441
|
import os
import argparse
import pandas as pd
import datetime
import random
from string import ascii_lowercase
def combine_qc_matrices(library_list):
"""
Combines the QC matrices from multiple ChIA-PET libraries.
Args:
library_list (str):
The name of the file containing the list of libraries
for which to combine the QC matrices.
"""
libs = []
with open(library_list, 'r') as f:
for line in f:
entry = line.strip().split()[0]
libs.append(entry)
# Initialize the data frame with the QC matrix
# of the first library
    matrix_file = '%s/%s.final_stats.tsv' % (libs[0], libs[0])
    d = pd.read_csv(
        matrix_file, header=None, sep='\t', index_col=0, names=[libs[0]])
    # Load the QC matrices for the subsequent libraries one-by-one
    for i in range(1, len(libs)):
        matrix_file = '%s/%s.final_stats.tsv' % (libs[i], libs[i])
        mat = pd.read_csv(
            matrix_file, header=None, sep='\t', index_col=0, names=[libs[i]])
        # Concatenate the QC matrix for each additional library; join_axes
        # was removed in pandas 1.0, so align to the existing index instead
        d = pd.concat([d, mat.reindex(d.index)], axis=1)
# Get date and letters tag
date = str(datetime.date.today())
#letters = ''
#idxs = random.sample(range(25), 2)
#for idx in idxs:
# letters += ascii_lowercase[idx]
# Get reference genome
genome = d.loc['Reference_genome'][d.columns[0]]
if '/' in genome:
genome = genome.split('/')[-1]
if '.' in genome:
genome = genome.split('.')[0]
# Write
out_file = "combined_qc_table_%s_%s.tsv" % (genome, date)
d.to_csv(out_file, sep="\t", header=False, index=True)
def parse_command_line_args():
"""
Parse command-line arguments.
Returns:
args (class 'argparse.Namespace'):
An object containing the parsed command-line arguments.
For every command-line option, the values are stored as follows:
args.{option}
"""
# Initiate the argument parser
parser = argparse.ArgumentParser()
required = parser.add_argument_group('required arguments')
# Indicate the required arguments
required.add_argument(
'-l', '--library_list', required=True,
help=('A file of the library IDs for which to combine the QC '
'matrices (one ID per line).'))
# Parse the arguments from the command-line input
args = parser.parse_args()
return args
if __name__ == '__main__':
# Parse command-line arguments
args = parse_command_line_args()
    # Combine the QC matrices for the listed libraries
combine_qc_matrices(args.library_list)
|
11564550
|
import pomagma.examples.solve
from pomagma.examples.testing import ADDRESS, SKJA, WORLD, serve
from pomagma.util.testing import for_each
WORLD_EXAMPLES = [
(name, WORLD)
for name in pomagma.examples.solve.theories
if name.endswith('_test')
]
SKJA_EXAMPLES = [
(name, SKJA)
for name in pomagma.examples.solve.theories
]
@for_each(WORLD_EXAMPLES + SKJA_EXAMPLES)
def test_define(name, theory):
with serve(theory):
pomagma.examples.solve.define(name, address=ADDRESS)
def test_rs_pairs():
with serve(SKJA):
pomagma.examples.solve.rs_pairs(address=ADDRESS)
|
11564552
|
import itertools
import os
import unittest
from unittest.mock import patch, MagicMock
from iota_notebook_containers.kernel_image_creator import KernelImageCreator
class TestKernelImageCreator(unittest.TestCase):
    def test_GIVEN_empty_list_WHEN_get_message_if_space_insufficient_THEN_none(self):
        self.assertEqual(None, KernelImageCreator._get_message_if_space_insufficient([]))
    def test_GIVEN_sufficient_space_WHEN_get_message_if_space_insufficient_THEN_none(self):
        with patch("os.path.getsize", return_value=1):
            self.assertEqual(None,
                KernelImageCreator._get_message_if_space_insufficient(["file", "other_file"]))
    def test_GIVEN_insufficient_space_WHEN_get_message_if_space_insufficient_THEN_msg(self):
# GIVEN
size_of_each_file = 100
free_space = 10
files = ["file", "other_file"]
# WHEN
with patch("os.path.getsize", return_value=size_of_each_file):
with patch("shutil.disk_usage", return_value=(None, None, free_space)):
observed = KernelImageCreator._get_message_if_space_insufficient(files)
# THEN
additional_required_bytes = int(KernelImageCreator.REQUIRED_SPACE_PER_FILES_SPACE * len(files)
* size_of_each_file) + KernelImageCreator.SPACE_REQUIREMENT_FUDGE_BYTES - free_space
expected = "There is insufficient space remaining on this instance to containerize this notebook. " + \
"Containerization would require {} bytes (10M) of additional space.".format(additional_required_bytes)
        self.assertEqual(expected, observed)
    def test_remove_prefix_with_prefix_present(self):
        self.assertEqual("text", KernelImageCreator._remove_prefix("prefix_text", "prefix_"))
    def test_remove_prefix_without_prefix_present(self):
        self.assertEqual("text", KernelImageCreator._remove_prefix("text", "prefix_"))
def test_generate_files_to_copy(self):
# GIVEN
filenames = ["file1", "a_file2", "file3.pyc"]
input_paths = [os.path.realpath(f) for f in filenames]
# WHEN
with patch("os.walk", return_value=[("/", None, input_paths)]):
with patch("os.path.exists", return_value=True):
observed_output = [f for f in KernelImageCreator._generate_files_to_copy(["irrelevant"], KernelImageCreator.EXCLUDE_FROM_CP)]
# THEN
filtered_filenames = ["a_file2", "file1"]
expected_output = ["/home/ec2-user/iota_run_nb.py"] + [os.path.realpath(f) for f in filtered_filenames]
self.assertEquals(expected_output, observed_output)
def test_get_child_path_only_one_distinct(self):
# GIVEN
root = "/root/folder"
folder1 = "/root/folder/subfolder/folder1"
folder2 = "/root/folder/folder2"
folders = [root, folder1, folder2]
# WHEN/THEN
for permutation in itertools.permutations(folders):
self.assertCountEqual([folder1, folder2], KernelImageCreator._get_child_paths(permutation))
def test_get_child_path_two_distinct(self):
# GIVEN
root1 = "/root/folder"
root2 = "/root/other_folder"
folder1 = "/root/folder/subfolder/folder1"
folder2 = "/root/other_folder/folder2"
folders = [root1, root2, folder1, folder2]
# WHEN/THEN
for permutation in itertools.permutations(folders):
self.assertCountEqual([folder1, folder2], KernelImageCreator._get_child_paths(permutation))
def test_split_into_batches_1_batch(self):
# GIVEN
files = ["file1", "file2", "file3"]
# WHEN
with patch("os.path.getsize", return_value=KernelImageCreator.MAX_FILEBATCH_SIZE/4):
batches = [batch for batch in KernelImageCreator._split_into_batches(files)]
# THEN
        self.assertEqual(1, len(batches))
self.assertCountEqual(files, batches[0])
def test_split_into_batches_2_batches(self):
# GIVEN
files = ["file1", "file2", "file3"]
# WHEN
with patch("os.path.getsize", return_value=KernelImageCreator.MAX_FILEBATCH_SIZE/2):
batches = [batch for batch in KernelImageCreator._split_into_batches(files)]
# THEN
        self.assertEqual(2, len(batches))
self.assertCountEqual(files[:2], batches[0])
self.assertCountEqual([files[2]], batches[1])
def test_copy_onto_containers(self):
# GIVEN
files = ["folder1/file1", "folder2/file2", "folder3/file3"]
interim_container = MagicMock()
tarstream = MagicMock()
# WHEN
with patch("tarfile.TarFile.__enter__", return_value=tarstream):
KernelImageCreator._copy_onto_container(interim_container, files)
# THEN
for f in files:
tarstream.add.assert_any_call(f)
        self.assertEqual(len(files), tarstream.add.call_count)
if __name__ == '__main__':
unittest.main()
|
11564595
|
import unittest
import logging
from lumbermill.utils.Decorators import ModuleDocstringParser
@ModuleDocstringParser
class DocStringExample:
"""
- module: DocStringExample
string: # <default: "TestString"; type: string; is: optional>
int: # <default: 1; type: integer; is: optional>
    dict: # <default: {'field1': 'value1'}; type: dict; is: optional>
list: # <default: ['field2', 'field3']; type: list; is: optional>
none: # <default: None; type: None||string; is: optional>
bool: # <default: True; type: boolean; is: optional>
...
receivers:
- ModuleName
- ModuleAlias
"""
def __init__(self):
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
filename=None,
filemode='w')
self.logger = logging.getLogger(self.__class__.__name__)
self.configuration_data = {}
class TestModuleDocStringParserDecorator(unittest.TestCase):
def testConfigurationData(self):
self.doc_string_example = DocStringExample()
doc_string_config_data = self.doc_string_example.configuration_data
self.assertEqual(doc_string_config_data['string'], "TestString")
self.assertEqual(doc_string_config_data['int'], 1)
        self.assertEqual(doc_string_config_data['dict'], {'field1': 'value1'})
self.assertEqual(doc_string_config_data['list'], ['field2', 'field3'])
self.assertEqual(doc_string_config_data['none'], None)
self.assertEqual(doc_string_config_data['bool'], True)
|
11564629
|
from pathlib import Path
from unittest.mock import Mock
from lightkube.config import kubeconfig, client_adapter
from lightkube.config import models
from lightkube import ConfigError
import pytest
import httpx
import asyncio
BASEDIR = Path("tests")
def single_conf(cluster=None, user=None, fname=None):
return kubeconfig.SingleConfig(
context=models.Context(cluster="x"), context_name="x",
cluster=cluster, user=user, fname=fname
)
def test_verify_cluster_insecure():
cfg = single_conf(cluster=models.Cluster(insecure=True))
verify = client_adapter.verify_cluster(cfg.cluster, cfg.abs_file)
assert verify is False
def test_verify_cluster_secure():
cfg = single_conf(cluster=models.Cluster())
verify = client_adapter.verify_cluster(cfg.cluster, cfg.abs_file)
assert verify is True
def test_verify_cluster_ca(tmpdir):
tmpdir = Path(tmpdir)
cluster = models.Cluster(certificate_auth="ca.pem")
cfg = single_conf(cluster=cluster, fname=tmpdir.joinpath("kubeconf"))
verify = client_adapter.verify_cluster(cfg.cluster, cfg.abs_file)
assert verify == tmpdir.joinpath("ca.pem")
# fname not provided
cfg = single_conf(cluster=cluster)
with pytest.raises(ConfigError):
client_adapter.verify_cluster(cfg.cluster, cfg.abs_file)
# cert path absolute
cluster.certificate_auth = tmpdir.joinpath("ca.pem")
verify = client_adapter.verify_cluster(cfg.cluster, cfg.abs_file)
assert verify == tmpdir.joinpath("ca.pem")
def test_verify_cluster_ca_data():
cluster = models.Cluster(certificate_auth_data="dGVzdCBkZWNvZGluZw==")
cfg = single_conf(cluster=cluster)
verify = client_adapter.verify_cluster(cfg.cluster, cfg.abs_file)
assert Path(verify).read_text() == "test decoding"
def test_user_cert_missing():
cfg = single_conf(user=models.User())
assert client_adapter.user_cert(cfg.user, cfg.abs_file) is None
def test_user_cert(tmpdir):
tmpdir = Path(tmpdir)
cfg = single_conf(user=models.User(client_cert="a.crt", client_key="a.key"), fname=tmpdir.joinpath("conf"))
certs = client_adapter.user_cert(cfg.user, cfg.abs_file)
assert certs == (tmpdir.joinpath("a.crt"), tmpdir.joinpath("a.key"))
def test_user_cert_data():
cfg = single_conf(user=models.User(client_cert_data="Y2VydA==", client_key_data="a2V5"))
certs = client_adapter.user_cert(cfg.user, cfg.abs_file)
assert Path(certs[0]).read_text() == "cert"
assert Path(certs[1]).read_text() == "key"
def test_user_auth_missing():
assert client_adapter.user_auth(None) is None
def test_user_auth_empty():
assert client_adapter.user_auth(models.User()) is None
def test_user_auth_basic():
    # password recovered from the Basic header asserted below:
    # base64("user:psw") == "dXNlcjpwc3c="
    auth = client_adapter.user_auth(models.User(username="user", password="psw"))
assert isinstance(auth, httpx.BasicAuth)
m = Mock(headers={})
next(auth.auth_flow(m))
assert m.headers["Authorization"] == "Basic dXNlcjpwc3c="
def test_user_auth_bearer():
auth = client_adapter.user_auth(models.User(token="abcd"))
assert isinstance(auth, client_adapter.BearerAuth)
m = Mock(headers={})
next(auth.auth_flow(m))
assert m.headers["Authorization"] == "Bearer abcd"
def test_user_auth_provider():
"""Auth provider not supported"""
with pytest.raises(ConfigError):
client_adapter.user_auth(models.User(auth_provider={'x': 1}))
def test_user_auth_exec_sync():
auth_script = str(Path(__file__).parent.joinpath('data', 'auth_script.sh'))
auth = client_adapter.user_auth(models.User(exec=models.UserExec(
apiVersion="client.authentication.k8s.io/v1beta1",
command=auth_script,
)))
assert isinstance(auth, client_adapter.ExecAuth)
m = Mock(headers={})
next(auth.sync_auth_flow(m))
assert m.headers["Authorization"] == "Bearer my-bearer-token"
# call again should cache
m = Mock(headers={})
flow = auth.sync_auth_flow(m)
next(flow)
assert m.headers["Authorization"] == "Bearer my-bearer-token"
m.headers["Authorization"] = None
# we pretend the cache is old
flow.send(httpx.Response(status_code=401, request=m))
assert m.headers["Authorization"] == "Bearer my-bearer-token"
def test_user_auth_exec_sync_with_args():
auth = client_adapter.user_auth(models.User(exec=models.UserExec(
apiVersion="client.authentication.k8s.io/v1beta1",
args=['{"apiVersion":"client.authentication.k8s.io/v1beta1",'
'"kind":"ExecCredential","status":{"token":"my-bearer-token"}}'],
command='echo',
)))
assert isinstance(auth, client_adapter.ExecAuth)
m = Mock(headers={})
next(auth.sync_auth_flow(m))
assert m.headers["Authorization"] == "Bearer my-bearer-token"
def test_user_auth_exec_sync_fail():
auth = client_adapter.user_auth(models.User(exec=models.UserExec(
apiVersion="client.authentication.k8s.io/v1beta1",
command="cp"
)))
with pytest.raises(ConfigError, match="cp"):
next(auth.sync_auth_flow(Mock(headers={})))
@pytest.mark.asyncio
async def test_user_auth_exec_async():
auth_script = str(Path(__file__).parent.joinpath('data', 'auth_script.sh'))
auth = client_adapter.user_auth(models.User(exec=models.UserExec(
apiVersion="client.authentication.k8s.io/v1beta1",
command=auth_script,
)))
assert isinstance(auth, client_adapter.ExecAuth)
m = Mock(headers={})
await auth.async_auth_flow(m).__anext__()
assert m.headers["Authorization"] == "Bearer my-bearer-token"
# call again should cache
m = Mock(headers={})
flow = auth.async_auth_flow(m)
await flow.__anext__()
assert m.headers["Authorization"] == "Bearer my-bearer-token"
m.headers["Authorization"] = None
# we pretend the cache is old
await flow.asend(httpx.Response(status_code=401, request=m))
assert m.headers["Authorization"] == "Bearer my-bearer-token"
with pytest.raises(StopAsyncIteration):
await flow.__anext__()
@pytest.mark.asyncio
async def test_user_auth_exec_async_fail():
auth = client_adapter.user_auth(models.User(exec=models.UserExec(
apiVersion="client.authentication.k8s.io/v1beta1",
command="cp"
)))
with pytest.raises(ConfigError, match="cp"):
await auth.async_auth_flow(Mock(headers={})).__anext__()
|
11564639
|
import numpy as np
from .. import igm
def test_igm():
"""
Test IGM module (Inoue14)
"""
igm_obj = igm.Inoue14()
# Test against result from a particular version
# (db97f839cf8afe4a22c31c5d6195fd707ba4de32)
z = 3.0
rest_wave = np.arange(850, 1251, 50)
igm_val = np.array([0.33537573, 0.54634578, 0.74207249, 0.74194787,
0.79182545, 0.75792504, 0.72135181, 0.68233589, 1.0])
assert(np.allclose(igm_obj.full_IGM(z, rest_wave*(1+z)), igm_val, rtol=1.e-2))
# With scaling
scale_tau = 2.
igm_obj.scale_tau = scale_tau
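    # doubling tau squares the transmission: exp(-2*tau) == exp(-tau)**2,
    # which is what np.exp(2*np.log(igm_val)) computes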
igm_scaled = np.exp(2*np.log(igm_val))
assert(np.allclose(igm_obj.full_IGM(z, rest_wave*(1+z)), igm_scaled,
rtol=1.e-2))
igm_obj = igm.Inoue14(scale_tau=scale_tau)
assert(np.allclose(igm_obj.full_IGM(z, rest_wave*(1+z)), igm_scaled,
rtol=1.e-2))
|
11564646
|
import boto3
import json
import unittest
from aws_ir_plugins import revokests_key
from moto import mock_iam
from unittest.mock import patch
class RevokeSTSTest(unittest.TestCase):
@mock_iam
def test_jinja_rendering(self):
self.iam = boto3.client('iam', region_name='us-west-2')
self.user = self.iam.create_user(
UserName='bobert'
)
self.access_key = self.iam.create_access_key(
UserName='bobert'
)
self.access_key_id = self.access_key['AccessKey']['AccessKeyId']
self.compromised_resource = {
'case_number': '123456',
'access_key_id': self.access_key_id,
'compromise_type': 'key'
}
session = None
with patch.object(
revokests_key.Plugin, '_get_client', return_value=self.iam
) as mock_client:
mock_client.return_value = self.iam
plugin = revokests_key.Plugin(
boto_session=session,
compromised_resource=self.compromised_resource,
dry_run=True
)
assert json.loads(plugin.template)
@mock_iam
@patch('aws_ir_plugins.revokests_key.Plugin')
def test_plugin(self, mock_revokests):
self.iam = boto3.client('iam', region_name='us-west-2')
mock_revokests._get_username_for_key.return_value = 'bobert'
mock_revokests.validate.return_value = 'True'
self.user = self.iam.create_user(
UserName='bobert'
)
self.access_key = self.iam.create_access_key(
UserName='bobert'
)
self.access_key_id = self.access_key['AccessKey']['AccessKeyId']
self.compromised_resource = {
'case_number': '123456',
'access_key_id': self.access_key_id,
'compromise_type': 'key'
}
with patch.object(
revokests_key.Plugin, '_get_client', return_value=self.iam
) as mock_client:
mock_client.return_value = self.iam
plugin = revokests_key.Plugin(
client=self.iam,
compromised_resource=self.compromised_resource,
dry_run=False
)
res1 = plugin.setup()
res2 = plugin.validate()
self.policies = self.iam.list_user_policies(
UserName='bobert'
)
assert res1 is not None
assert res2 is not None
|
11564651
|
from uniplot.axis_labels.extended_talbot_labels import extended_talbot_labels
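# Stress-test label placement: grow the axis range geometrically and print
# the rendered label row at each step.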
x_min = 6.5
x_max = 7.5
for i in range(150):
x_max = x_max * 1.05
space = 60
ls = extended_talbot_labels(x_min=x_min, x_max=x_max, available_space=space, vertical_direction=False)
print(ls.render()[0])
|
11564671
|
from django.apps import AppConfig
from baserow.core.registries import plugin_registry
from .plugins import PluginNamePlugin
class PluginNameConfig(AppConfig):
name = '{{ cookiecutter.project_module }}'
def ready(self):
plugin_registry.register(PluginNamePlugin())
|
11564702
|
import time
import dweepy
from gpiozero import LED
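# Flash an LED for ten seconds whenever a new dweet arrives on the key below.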
KEY = 'tweet_about_me'
led = LED(18)
while True:
try:
for dweet in dweepy.listen_for_dweets_from(KEY):
print('Tweet: ' + dweet['content']['text'])
led.on()
time.sleep(10)
led.off()
    except Exception:
        # the long-poll stream can drop; swallow the error and reconnect
        pass
|
11564706
|
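# Public re-exports for the segmentation subpackage: clustering, feature
# computation, (lifted) multicut solvers, watersheds, and workflows.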
from .clustering import (agglomerative_clustering,
cluster_segmentation, cluster_segmentation_mala,
mala_clustering)
from .features import (compute_affinity_features, compute_boundary_features, compute_boundary_mean_and_length,
compute_grid_graph, compute_grid_graph_affinity_features, compute_grid_graph_image_features,
compute_rag, compute_region_features,
project_node_labels_to_pixels)
from .lifted_multicut import get_lifted_multicut_solver
from .multicut import get_multicut_solver, compute_edge_costs
from .watershed import distance_transform_watershed, stacked_watershed
from .workflows import edge_training, multicut_segmentation, multicut_workflow, simple_multicut_workflow
|