| id | content |
|---|---|
11457852
|
import matplotlib as mpl
import matplotlib.pyplot as plt
import networkx as nx
from networkx.drawing.nx_pydot import graphviz_layout
mpl.rcParams["figure.dpi"] = 300
def simple_nx_plot(outnodes, innodes, node_labels):
"""Plot graph for debugging purposes."""
labels = dict(enumerate(node_labels))  # node index -> label
G = nx.DiGraph(
list(zip(outnodes, innodes)),
)
G.add_nodes_from(range(len(node_labels)))
options = {
"font_size": 6,
"node_size": 300,
"node_color": "white",
"edgecolors": "black",
"linewidths": 0.5,
"width": 0.5,
"labels": labels,
"with_labels": True,
}
pos = graphviz_layout(G, prog="dot")
nx.draw_networkx(G, pos, **options)
# Set margins for the axes so that nodes aren't clipped
ax = plt.gca()
ax.margins(0.10)
plt.axis("off")
plt.show()
def simple_dgl_plot(dglgraph):
"""Plot DGL graph simple."""
G = dglgraph.to_networkx()
options = {
"font_size": 6,
"node_size": 300,
"node_color": "white",
"edgecolors": "black",
"linewidths": 0.5,
"width": 0.5,
}
pos = graphviz_layout(G, prog="dot")
nx.draw_networkx(G, pos, **options)
# Set margins for the axes so that nodes aren't clipped
ax = plt.gca()
ax.margins(0.10)
plt.axis("off")
plt.show()
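# --- Hedged usage sketch (not in the original file) ---
# Assumes Graphviz and pydot are installed so graphviz_layout(prog="dot") works.
# A tiny 4-node tree: edges 0->1, 0->2, 1->3, labelled by position in the list.
example_outnodes = [0, 0, 1]
example_innodes = [1, 2, 3]
example_labels = ["root", "a", "b", "leaf"]
simple_nx_plot(example_outnodes, example_innodes, example_labels)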
|
11457875
|
from django.shortcuts import resolve_url as r
from django.test import TestCase
from .test_base import BaseProposalTest
class ProposalListGet(BaseProposalTest, TestCase):
def setUp(self):
self.resp = self.client.get(r('proposal:proposal_list'))
def test_get(self):
self.assertEqual(200, self.resp.status_code)
def test_template(self):
self.assertTemplateUsed(self.resp, 'proposal/proposal_list.html')
def test_context(self):
variables = ['proposal_list']
for key in variables:
with self.subTest():
self.assertIn(key, self.resp.context)
class ProposalListGetEmpty(TestCase):
def test_get_empty(self):
response = self.client.get(r('proposal:proposal_list'))
self.assertContains(response, 'Sem itens na lista.')
|
11457924
|
import time
from typing import Dict, List
from instagrapi.exceptions import ClientError, MediaError, UserError
from instagrapi.utils import json_value
POST_TYPES = ("ALL", "CAROUSEL_V2", "IMAGE", "SHOPPING", "VIDEO")
TIME_FRAMES = (
"ONE_WEEK", "ONE_MONTH", "THREE_MONTHS", "SIX_MONTHS",
"ONE_YEAR", "TWO_YEARS"
)
DATA_ORDERS = (
"REACH_COUNT", "LIKE_COUNT", "FOLLOW", "SHARE_COUNT",
"BIO_LINK_CLICK", "COMMENT_COUNT", "IMPRESSION_COUNT",
"PROFILE_VIEW", "VIDEO_VIEW_COUNT", "SAVE_COUNT"
)
try:
from typing import Literal
POST_TYPE = Literal[POST_TYPES]
TIME_FRAME = Literal[TIME_FRAMES]
DATA_ORDERING = Literal[DATA_ORDERS]
except ImportError:
# Python < 3.8: typing.Literal is unavailable, fall back to plain str
POST_TYPE = TIME_FRAME = DATA_ORDERING = str
class InsightsMixin:
"""
Helper class to get insights
"""
def insights_media_feed_all(
self,
post_type: POST_TYPE = "ALL",
time_frame: TIME_FRAME = "TWO_YEARS",
data_ordering: DATA_ORDERING = "REACH_COUNT",
count: int = 0,
sleep: int = 2,
) -> List[Dict]:
"""
Get insights for all feed media, iterating over pages with a cursor and sleeping between requests
Parameters
----------
post_type: str, optional
Types of posts, default is "ALL"
time_frame: str, optional
Time frame to pull media insights, default is "TWO_YEARS"
data_ordering: str, optional
Ordering strategy for the data, default is "REACH_COUNT"
count: int, optional
Maximum number of media to retrieve; 0 (default) means no limit
sleep: int, optional
Sleep in seconds between page iterations, default is 2
Returns
-------
List[Dict]
List of dictionaries of response from the call
"""
assert post_type in POST_TYPES, \
f'Unsupported post_type="{post_type}" {POST_TYPES}'
assert time_frame in TIME_FRAMES, \
f'Unsupported time_frame="{time_frame}" {TIME_FRAMES}'
assert data_ordering in DATA_ORDERS, \
f'Unsupported data_ordering="{data_ordering}" {DATA_ORDERS}'
assert self.user_id, "Login required"
medias = []
cursor = None
data = {
"surface": "post_grid",
"doc_id": 2345520318892697,
"locale": "en_US",
"vc_policy": "insights_policy",
"strip_nulls": False,
"strip_defaults": False,
}
query_params = {
"IgInsightsGridMediaImage_SIZE": 480,
"count": 200, # TODO Try to detect max allowed value
# "cursor": "0",
"dataOrdering": data_ordering,
"postType": post_type,
"timeframe": time_frame,
"search_base": "USER",
"is_user": "true",
"queryParams": {"access_token": "", "id": self.user_id},
}
while True:
if cursor:
query_params["cursor"] = cursor
result = self.private_request(
"ads/graphql/",
self.with_query_params(data, query_params),
)
if not json_value(
result,
"data",
"shadow_instagram_user",
"business_manager",
default=None,
):
raise UserError("Account is not business account", **self.last_json)
stats = json_value(
result, "data", "shadow_instagram_user",
"business_manager", "top_posts_unit", "top_posts"
)
cursor = stats["page_info"]["end_cursor"]
medias.extend(stats["edges"])
if not stats["page_info"]["has_next_page"]:
break
if count and len(medias) >= count:
break
time.sleep(sleep)
if count:
medias = medias[:count]
return medias
"""
Helpers for getting insights for media
"""
def insights_account(self) -> Dict:
"""
Get insights for account
Returns
-------
Dict
A dictionary of response from the call
"""
assert self.user_id, "Login required"
data = {
"surface": "account",
"doc_id": 2449243051851783,
"locale": "en_US",
"vc_policy": "insights_policy",
"strip_nulls": False,
"strip_defaults": False,
}
query_params = {
"IgInsightsGridMediaImage_SIZE": 360,
"activityTab": True,
"audienceTab": True,
"contentTab": True,
"query_params": {"access_token": "", "id": self.user_id},
}
result = self.private_request(
"ads/graphql/",
self.with_query_params(data, query_params),
)
res = json_value(result, "data", "shadow_instagram_user", "business_manager")
if not res:
raise UserError("Account is not business account", **self.last_json)
return res
def insights_media(self, media_pk: int) -> Dict:
"""
Get insights data for media
Parameters
----------
media_pk: int
PK for the album you want to download
Returns
-------
Dict
A dictionary with insights data
"""
assert self.user_id, "Login required"
media_pk = self.media_pk(media_pk)
data = {
"surface": "post",
"doc_id": 3221905377882880,
"locale": "en_US",
"vc_policy": "insights_policy",
"strip_nulls": False,
"strip_defaults": False,
}
query_params = {
"query_params": {"access_token": "", "id": media_pk},
}
try:
result = self.private_request(
"ads/graphql/",
self.with_query_params(data, query_params),
)
return result["data"]["instagram_post_by_igid"]
except ClientError as e:
raise MediaError(e.message, media_pk=media_pk, **self.last_json)
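# --- Hedged usage sketch (not in the original file) ---
# InsightsMixin is mixed into instagrapi's Client; the calls below assume a
# logged-in business/creator account, and the credentials are placeholders.
from instagrapi import Client
cl = Client()
cl.login("your_username", "your_password")
feed_insights = cl.insights_media_feed_all(post_type="ALL", time_frame="ONE_MONTH", count=50)
account_insights = cl.insights_account()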
|
11457946
|
from gevent.testing import six
import sys
import os
import errno
from gevent import select, socket
import gevent.core
import gevent.testing as greentest
import gevent.testing.timing
import unittest
class TestSelect(gevent.testing.timing.AbstractGenericWaitTestCase):
def wait(self, timeout):
select.select([], [], [], timeout)
@greentest.skipOnWindows("Cant select on files")
class TestSelectRead(gevent.testing.timing.AbstractGenericWaitTestCase):
def wait(self, timeout):
r, w = os.pipe()
try:
select.select([r], [], [], timeout)
finally:
os.close(r)
os.close(w)
# Issue #12367: http://www.freebsd.org/cgi/query-pr.cgi?pr=kern/155606
@unittest.skipIf(sys.platform.startswith('freebsd'),
'skip because of a FreeBSD bug: kern/155606')
def test_errno(self):
# Backported from test_select.py in 3.4
with open(__file__, 'rb') as fp:
fd = fp.fileno()
fp.close()
try:
select.select([fd], [], [], 0)
except OSError as err:
# Python 3
self.assertEqual(err.errno, errno.EBADF)
except select.error as err: # pylint:disable=duplicate-except
# Python 2 (select.error is OSError on py3)
self.assertEqual(err.args[0], errno.EBADF)
else:
self.fail("exception not raised")
@unittest.skipUnless(hasattr(select, 'poll'), "Needs poll")
@greentest.skipOnWindows("Cant poll on files")
class TestPollRead(gevent.testing.timing.AbstractGenericWaitTestCase):
def wait(self, timeout):
# On darwin, the read pipe is reported as writable
# immediately, for some reason. So we carefully register
# it only for read events (the default is read and write)
r, w = os.pipe()
try:
poll = select.poll()
poll.register(r, select.POLLIN)
poll.poll(timeout * 1000)
finally:
poll.unregister(r)
os.close(r)
os.close(w)
def test_unregister_never_registered(self):
# "Attempting to remove a file descriptor that was
# never registered causes a KeyError exception to be
# raised."
poll = select.poll()
self.assertRaises(KeyError, poll.unregister, 5)
def test_poll_invalid(self):
self.skipTest(
"libev >= 4.27 aborts the process if built with EV_VERIFY >= 2. "
"For libuv, depending on whether the fileno is reused or not "
"this either crashes or does nothing.")
with open(__file__, 'rb') as fp:
fd = fp.fileno()
poll = select.poll()
poll.register(fd, select.POLLIN)
# Close after registering; libuv refuses to even
# create a watcher if it would get EBADF (so this turns into
# a test of whether or not we successfully initted the watcher).
fp.close()
result = poll.poll(0)
self.assertEqual(result, [(fd, select.POLLNVAL)]) # pylint:disable=no-member
class TestSelectTypes(greentest.TestCase):
def test_int(self):
sock = socket.socket()
try:
select.select([int(sock.fileno())], [], [], 0.001)
finally:
sock.close()
if hasattr(six.builtins, 'long'):
def test_long(self):
sock = socket.socket()
try:
select.select(
[six.builtins.long(sock.fileno())], [], [], 0.001)
finally:
sock.close()
def test_string(self):
self.switch_expected = False
self.assertRaises(TypeError, select.select, ['hello'], [], [], 0.001)
if __name__ == '__main__':
greentest.main()
|
11457966
|
import string
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.hashers import check_password, make_password
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.core.exceptions import ValidationError
from django.http import HttpResponse
from django.test import TestCase
from django.urls import reverse
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from django.views.generic import View
from phone_auth import app_settings
from phone_auth.app_settings import AuthenticationMethod
from phone_auth.decorators import (
anonymous_required,
verified_email_required,
verified_phone_required,
)
from phone_auth.mixins import (
AnonymousRequiredMixin,
VerifiedEmailRequiredMixin,
VerifiedPhoneRequiredMixin,
)
from phone_auth.models import EmailAddress, PhoneNumber
from phone_auth.tokens import phone_token_generator
from phone_auth.validators import validate_username
class AccountTests(TestCase):
data = {
"phone": "+919876543210",
"username": "test",
"email": "<EMAIL>",
"first_name": "first",
"last_name": "last",
"password": "<PASSWORD>",
}
@classmethod
def setUpTestData(cls):
user_data = dict(cls.data)
phone = user_data.pop("phone")
user_data["password"] = make_password(user_data["password"])
cls.user = User.objects.create(**user_data)
cls.phone_obj = PhoneNumber.objects.create(user=cls.user, phone=phone)
cls.email_obj = EmailAddress.objects.create(
user=cls.user, email=user_data["email"]
)
def test_phone_signup_view(self):
url = reverse("phone_auth:phone_signup")
data = {
"phone": "+919999999999",
"username": "test1",
"email": "<EMAIL>",
"first_name": "first",
"last_name": "last",
"password": "<PASSWORD>",
"confirm_password": "<PASSWORD>",
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 302)
user = User.objects.latest("id")
phone = user.phonenumber_set.latest("id").phone
email = user.emailaddress_set.latest("id").email
self.assertEqual(phone.__str__(), data["phone"])
self.assertEqual(email, data["email"])
self.assertTrue(check_password(data["password"], user.password))
self.assertEqual(user.username, data["username"])
self.assertEqual(user.email, data["email"])
self.assertEqual(user.first_name, data["first_name"])
self.assertEqual(user.last_name, data["last_name"])
def test_phone_login_view(self):
url = reverse("phone_auth:phone_login")
authentication_methods = app_settings.AUTHENTICATION_METHODS
# With correct password
for auth_method in authentication_methods:
credentials = {
"login": self.data[auth_method],
"password": <PASSWORD>["password"],
}
response = self.client.post(url, credentials)
self.assertEqual(response.status_code, 302)
self.assertTrue(response.wsgi_request.user.is_authenticated)
# Logout
self.client.logout()
# With incorrect password
for auth_method in authentication_methods:
credentials = {
"login": self.data[auth_method],
"password": "<PASSWORD>",
}
response = self.client.post(url, credentials)
self.assertEqual(response.status_code, 400)
self.assertFalse(response.wsgi_request.user.is_authenticated)
def test_phone_logout_view(self):
# Login
self.client.login(login=self.data["email"], password=self.data["password"])
# Logout
url = reverse("phone_auth:phone_logout")
response = self.client.post(url)
self.assertEqual(response.status_code, 302)
self.assertFalse(response.wsgi_request.user.is_authenticated)
def test_phone_password_reset_view(self):
url = reverse("phone_auth:phone_password_reset")
for method in ["phone", "email"]:
data = {"login": self.data[method]}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 302)
def test_phone_password_reset_done_view(self):
url = reverse("phone_auth:phone_password_reset_done")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_phone_password_reset_confirm_view(self):
# generate `uidb64` and `token`
credentials = {
"uidb64": urlsafe_base64_encode(force_bytes(self.user.pk)),
"token": default_token_generator.make_token(self.user),
}
url = reverse("phone_auth:phone_password_reset_confirm", kwargs=credentials)
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
# self.assertEqual(response.url[-14:], '/set-password/')
def test_phone_password_reset_complete_view(self):
url = reverse("phone_auth:phone_password_reset_complete")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_phone_change_password_view(self):
url = reverse("phone_auth:phone_change_password")
data = {
"old_password": self.data.get("password"),
"new_password": "<PASSWORD>",
"confirm_password": "<PASSWORD>",
}
response = self.client.post(url, data)
user = User.objects.get(email=self.data["email"])
self.assertEqual(response.status_code, 302)
self.assertTrue(check_password(data["new_password"], user.password))
def test_phone_token_generator(self):
# Test with email
email_obj = self.user.emailaddress_set.all()[0]
token_generator = phone_token_generator(
email_address_obj=email_obj, phone_number_obj=None
)
token = token_generator.make_token(self.user)
self.assertIsNotNone(token)
self.assertTrue(token_generator.check_token(self.user, token))
# Test with phone
phone_obj = self.user.phonenumber_set.all()[0]
token_generator = phone_token_generator(
email_address_obj=None, phone_number_obj=phone_obj
)
token = token_generator.make_token(self.user)
self.assertIsNotNone(token)
self.assertTrue(token_generator.check_token(self.user, token))
def test_phone_email_verification_view(self):
# Login
self.client.login(login=self.data["email"], password=self.data["password"])
# Test
url = reverse("phone_auth:phone_email_verification")
# GET request
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# POST request (email)
data = {"method": "email", "pk": self.email_obj.pk}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200)
# POST request (phone)
data = {"method": "phone", "pk": self.phone_obj.pk}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200)
def test_phone_email_verification_confirm_view(self):
# Email verification
self.assertFalse(self.email_obj.is_verified)
credentials = {
"idb64": urlsafe_base64_encode(force_bytes(f"email{self.email_obj.pk}")),
"token": phone_token_generator(
email_address_obj=self.email_obj, phone_number_obj=None
).make_token(self.user),
}
url = reverse("phone_auth:phone_email_verification_confirm", kwargs=credentials)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTrue(EmailAddress.objects.get(pk=self.email_obj.pk).is_verified)
# Phone Verification
self.assertFalse(self.phone_obj.is_verified)
credentials = {
"idb64": urlsafe_base64_encode(force_bytes(f"phone{self.phone_obj.pk}")),
"token": phone_token_generator(
email_address_obj=None, phone_number_obj=self.phone_obj
).make_token(self.user),
}
url = reverse("phone_auth:phone_email_verification_confirm", kwargs=credentials)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTrue(PhoneNumber.objects.get(pk=self.phone_obj.pk).is_verified)
def test_anonymous_required_decorator(self):
@anonymous_required
def tview(request):
return HttpResponse()
request = self.client.get("/").wsgi_request
self.assertTrue(request.user.is_anonymous)
response = tview(request)
self.assertEqual(response.status_code, 200)
# checking for logged in user
self.client.login(login=self.data["email"], password=self.data["password"])
request = self.client.get("/").wsgi_request
self.assertTrue(request.user.is_authenticated)
response = tview(request)
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url, f"{app_settings.LOGIN_REDIRECT_URL}?{REDIRECT_FIELD_NAME}=/"
)
def test_verified_email_required_decorator(self):
if AuthenticationMethod.EMAIL in app_settings.AUTHENTICATION_METHODS:
@verified_email_required
def tview(request):
return HttpResponse()
self.client.login(login=self.data["email"], password=self.data["password"])
request = self.client.get("/").wsgi_request
# check with not verified email
response = tview(request)
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url, reverse("phone_auth:phone_email_verification")
)
# check with verified email
self.email_obj.is_verified = True
self.email_obj.save()
response = tview(request)
self.assertEqual(response.status_code, 200)
def test_verified_phone_required_decorator(self):
if AuthenticationMethod.PHONE in app_settings.AUTHENTICATION_METHODS:
@verified_phone_required
def tview(request):
return HttpResponse()
self.client.login(login=self.data["phone"], password=self.data["password"])
request = self.client.get("/").wsgi_request
# check with not verified phone
response = tview(request)
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url, reverse("phone_auth:phone_email_verification")
)
# check with verified email
self.phone_obj.is_verified = True
self.phone_obj.save()
response = tview(request)
self.assertEqual(response.status_code, 200)
def test_anonymous_required_mixin(self):
class Tview(AnonymousRequiredMixin, View):
def get(self, request, *args, **kwargs):
return HttpResponse()
request = self.client.get("/").wsgi_request
response = Tview.as_view()(request)
self.assertEqual(response.status_code, 200)
# checking for logged in user
self.client.login(login=self.data["email"], password=self.data["password"])
request = self.client.get("/").wsgi_request
response = Tview.as_view()(request)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, app_settings.LOGIN_REDIRECT_URL)
def test_verified_phone_required_mixin(self):
if AuthenticationMethod.PHONE in app_settings.AUTHENTICATION_METHODS:
class Tview(VerifiedPhoneRequiredMixin, View):
def get(self, request, *args, **kwargs):
return HttpResponse()
self.client.login(login=self.data["phone"], password=self.data["password"])
request = self.client.get("/").wsgi_request
# check with not verified phone
response = Tview.as_view()(request)
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url, reverse("phone_auth:phone_email_verification")
)
# check with verified phone
self.phone_obj.is_verified = True
self.phone_obj.save()
response = Tview.as_view()(request)
self.assertEqual(response.status_code, 200)
def test_verified_email_required_mixin(self):
if AuthenticationMethod.EMAIL in app_settings.AUTHENTICATION_METHODS:
class Tview(VerifiedEmailRequiredMixin, View):
def get(self, request, *args, **kwargs):
return HttpResponse()
self.client.login(login=self.data["email"], password=self.data["password"])
request = self.client.get("/").wsgi_request
# check with not verified email
response = Tview.as_view()(request)
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url, reverse("phone_auth:phone_email_verification")
)
# check with verified email
self.email_obj.is_verified = True
self.email_obj.save()
response = Tview.as_view()(request)
self.assertEqual(response.status_code, 200)
def test_username_validator(self):
self.assertIsNone(validate_username("a"))
self.assertIsNone(validate_username("A"))
self.assertIsNone(validate_username("a" * 150))
self.assertIsNone(validate_username(string.digits))
self.assertIsNone(validate_username(string.ascii_lowercase))
self.assertIsNone(validate_username(string.ascii_uppercase))
self.assertIsNone(validate_username("."))
self.assertIsNone(validate_username("-"))
self.assertIsNone(validate_username("_"))
try:
self.assertIsNone(validate_username(""))
# If exception not raised, test case failed.
self.assertTrue(False)
except ValidationError:
pass
try:
self.assertIsNone(validate_username("+"))
self.assertTrue(False)
except ValidationError:
pass
try:
self.assertIsNone(validate_username("@"))
self.assertTrue(False)
except ValidationError:
pass
def test_add_email_view(self):
# Login
self.client.login(login=self.data["email"], password=self.data["password"])
url = reverse("phone_auth:add_email")
# test with different mail should pass
data = {"email": "<EMAIL>"}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 302)
self.assertTrue(EmailAddress.objects.filter(email=data["email"]).exists())
def test_add_phone_view(self):
# Login
self.client.login(login=self.data["phone"], password=self.data["password"])
url = reverse("phone_auth:add_phone")
# test with different phone no should pass
data = {"phone": "+919876543211"}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 302)
self.assertTrue(PhoneNumber.objects.filter(phone=data["phone"]).exists())
|
11457969
|
from __future__ import print_function
import os
import pytest
from fasttrips import Run
"""
Run just the tests labeled basic using `pytest -v -m basic`
"""
demand_options = ["backward_bunnies","forward_bunnies"]
network_options = ["bunny_hop","many_bunny_hops"]
@pytest.mark.parametrize("demand", demand_options)
@pytest.mark.parametrize("network", network_options)
@pytest.mark.basic
@pytest.mark.travis
def test_bunny(demand, network):
"""
Test to ensure that the most simple of networks and demand is working.
"""
EXAMPLE_DIR = os.path.join(os.getcwd(), "fasttrips", "Examples","Bunny_Hop")
INPUT_NETWORK = os.path.join(EXAMPLE_DIR, "networks", network)
INPUT_DEMAND = os.path.join(EXAMPLE_DIR, "demand" , demand)
INPUT_CONFIG = os.path.join(EXAMPLE_DIR, "configs","base")
OUTPUT_DIR = os.path.join(EXAMPLE_DIR, "output")
Run.run_fasttrips(
input_network_dir = INPUT_NETWORK,
input_demand_dir = INPUT_DEMAND,
run_config = os.path.join(INPUT_CONFIG, "config_ft.txt"),
input_functions = os.path.join(INPUT_CONFIG, 'config_ft.py'),
input_weights = os.path.join(INPUT_CONFIG, "pathweight_ft.txt"),
output_dir = OUTPUT_DIR,
output_folder = demand+"-"+network,
pathfinding_type = "stochastic",
capacity = False,
iters = 1,
OVERLAP = "None",
dispersion = 0.5
)
if __name__ == '__main__':
import itertools
for demand,network in list(itertools.product(demand_options, network_options)):
print("running %s %s" % (demand,network))
test_bunny(demand, network)
|
11457982
|
from django.views.generic import DetailView
from django.views.generic.detail import SingleObjectMixin
from ..core.views import PaginatedListView
from .models import Account, Tweet, User
class HomeView(PaginatedListView):
template_name = "twitter/home.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["account_list"] = Account.objects.all()
return context
def get_queryset(self):
"Get Tweets by all of the Accounts that have Users."
# Use select_related to fetch user details too. Could be nasty...
return Tweet.public_tweet_objects.all().prefetch_related("user")
class FavoriteListView(PaginatedListView):
template_name = "twitter/favorite_list.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["account_list"] = Account.objects.all()
return context
def get_queryset(self):
"Get Tweets by all of the Accounts that have Users."
return Tweet.public_favorite_objects.all().prefetch_related("user")
class SingleUserMixin(SingleObjectMixin):
"""Used for views that need data about a User based on screen_name in
the URL, and its Account if it has one.
"""
slug_field = "screen_name"
slug_url_kwarg = "screen_name"
def get(self, request, *args, **kwargs):
self.object = self.get_object(queryset=User.objects.all())
return super().get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["twitter_user"] = self.object
try:
context["account"] = Account.objects.get(user=self.object)
except Account.DoesNotExist:
context["account"] = None
context["public_accounts"] = Account.objects.filter(user__is_private=False)
return context
class UserDetailView(SingleUserMixin, PaginatedListView):
"""A single Twitter User and its Tweets.
The user might have an Account associated with it, or might not.
"""
template_name = "twitter/user_detail.html"
def get_queryset(self):
"All public tweets from this Account."
return Tweet.public_objects.filter(user=self.object).prefetch_related("user")
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["tweet_list"] = context["object_list"]
return context
class AccountFavoriteListView(SingleUserMixin, PaginatedListView):
"A single Twitter User associated with an Account, and its Favorites."
template_name = "twitter/account_favorite_list.html"
def get_queryset(self):
"All public favorites from this Account."
return Tweet.public_favorite_objects.filter(
favoriting_users__in=[self.object]
).prefetch_related("user")
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["tweet_list"] = context["object_list"]
return context
class TweetDetailView(DetailView):
"""Show a single tweet. It might be posted by one of the Accounts, or might
be a tweet by someone else, favorited.
"""
model = Tweet
slug_field = "twitter_id"
slug_url_kwarg = "twitter_id"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["twitter_user"] = context["tweet"].user
if context["twitter_user"].is_private:
# If private, we don't even send the Tweet to the template.
context["tweet"] = None
# We can show favorited tweets; they won't have an associated Account.
try:
context["account"] = Account.objects.get(user=context["twitter_user"])
except Account.DoesNotExist:
context["account"] = None
return context
|
11458002
|
from learnergy.models.bernoulli import HybridDiscriminativeRBM
# Creates a HybridDiscriminativeRBM instance
model = HybridDiscriminativeRBM(n_visible=784, n_hidden=128, learning_rate=0.1,
alpha=0.01, momentum=0, decay=0, use_gpu=False)
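# --- Hedged training sketch (not in the original snippet) ---
# Assumes the usual learnergy interface where fit(dataset, batch_size, epochs)
# trains on a torch-style dataset; MNIST via torchvision is used for illustration.
import torchvision
train_dataset = torchvision.datasets.MNIST(
root="./data", train=True, download=True,
transform=torchvision.transforms.ToTensor())
model.fit(train_dataset, batch_size=128, epochs=1)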
|
11458012
|
import os
os.environ["PMD_CMD"] = "/opt/pmd-bin/bin/run.sh pmd"
os.environ["APP_SRC_DIR"] = "/usr/local/src"
|
11458091
|
from gnas.search_space.operation_space import RnnNodeConfig, RnnInputNodeConfig, CnnNodeConfig
from gnas.modules.node_module import RnnNodeModule, RnnInputNodeModule, ConvNodeModule
__module_dict__ = {RnnNodeConfig: RnnNodeModule,
RnnInputNodeConfig: RnnInputNodeModule,
CnnNodeConfig: ConvNodeModule}
def get_module(node_config, config_dict):
m = __module_dict__.get(type(node_config))
if m is None:
raise Exception("Can't find module for node config: " + str(node_config))
return m(node_config, config_dict)
|
11458117
|
import tensorflow as tf
from termcolor import colored
from deeptrain.util._backend import TF_KERAS
WARN = colored('WARNING:', 'red')
NOTE = colored('NOTE:', 'blue')
#### Env flags & Keras backend ###############################################
tf_eager = tf.executing_eagerly
TF_2 = bool(tf.__version__[0] == '2')
if TF_KERAS:
import tensorflow.keras.backend as K
else:
import keras.backend as K
#### Subpackage imports ######################################################
from . import model_utils
from .model_utils import *
|
11458126
|
import warnings
from typing import Tuple, Union
import torch
import numpy as np
from sklearn.metrics import fbeta_score, roc_auc_score, cohen_kappa_score
from sklearn.exceptions import UndefinedMetricWarning
class Metric:
name = "metric"
def __call__(self, truth: torch.Tensor, pred: torch.Tensor) -> Tuple[float, str]:
"""Calculate the metric from truth and prediction tensors
Parameters
----------
truth : torch.Tensor
pred : torch.Tensor
Returns
-------
Tuple[float, str]
(metric value(to be minimized), formatted string)
"""
raise NotImplementedError()
class FBeta(Metric):
"""FBeta for binary targets"""
name = "fbeta"
def __init__(self, step, beta=2, average="binary"):
self.step = step
self.beta = beta
self.average = average
def __call__(self, truth: torch.Tensor, pred: torch.Tensor) -> Tuple[float, str]:
best_fbeta, best_thres = self.find_best_fbeta_threshold(
truth.numpy(), torch.sigmoid(pred).numpy(),
step=self.step, beta=self.beta)
return best_fbeta * -1, f"{best_fbeta:.4f} @ {best_thres:.2f}"
def find_best_fbeta_threshold(self, truth, probs, beta=2, step=0.05):
best, best_thres = 0, -1
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=UndefinedMetricWarning)
for thres in np.arange(step, 1, step):
current = fbeta_score(
truth, (probs >= thres).astype("int8"),
beta=beta, average=self.average)
if current > best:
best = current
best_thres = thres
return best, best_thres
class AUC(Metric):
"""AUC for binary targets"""
name = "auc"
def __call__(self, truth: torch.Tensor, pred: torch.Tensor) -> Tuple[float, str]:
auc_score = roc_auc_score(
truth.long().numpy(), torch.sigmoid(pred).numpy())
return auc_score * -1, f"{auc_score * 100:.2f}"
class BinaryAccuracy(Metric):
"""Accuracy for binary classification"""
name = "accuracy"
def __init__(self, threshold: Union[Tuple[float, float, float], float], logits: bool = False):
super().__init__()
self.threshold = threshold
self.logits = logits
def __call__(self, truth: torch.Tensor, pred: torch.Tensor) -> Tuple[float, str]:
truth = truth.long()
if self.logits:
pred = torch.sigmoid(pred)
if isinstance(self.threshold, float):
threshold = self.threshold
acc = (
truth == (pred > threshold).long()
).sum() * 1.0 / len(truth)
else:
best_thres, best_acc = -1, -1
for thres in np.arange(self.threshold[0], self.threshold[1], self.threshold[2]):
acc_tmp = (
truth == (pred > thres).long()
).sum() * 1.0 / len(truth)
if acc_tmp > best_acc:
best_thres = thres
best_acc = acc_tmp
threshold = best_thres
acc = best_acc
return acc * -1, f"{acc * 100:.2f}% @ {threshold:.2f}"
class Top1Accuracy(Metric):
"""Accuracy for Multi-class classification"""
name = "accuracy"
def __call__(self, truth: torch.Tensor, pred: torch.Tensor) -> Tuple[float, str]:
correct = torch.sum(
truth.view(-1) == torch.argmax(pred, dim=-1).view(-1)).item()
total = truth.view(-1).size(0)
accuracy = (correct / total)
return accuracy * -1, f"{accuracy * 100:.2f}%"
class TopKAccuracy(Metric):
"""Top K Accuracy for Multi-class classification"""
def __init__(self, k=1):
super().__init__()
self.name = f"top_{k}_accuracy"
self.k = k
def __call__(self, truth: torch.Tensor, pred: torch.Tensor) -> Tuple[float, str]:
with torch.no_grad():
_, pred = pred.topk(self.k, dim=1, largest=True, sorted=True)
pred = pred.t()
correct = pred.eq(
truth.view(1, -1).expand_as(pred)
).view(-1).float().sum(0, keepdim=True)
accuracy = correct.mul_(100.0 / truth.size(0)).item()
return accuracy * -1, f"{accuracy:.2f}%"
class CohenKappaScore(Metric):
name = "kappa"
def __call__(self, truth: torch.Tensor, pred: torch.Tensor) -> Tuple[float, str]:
# Single-column predictions are treated as regression output
regression = len(pred.size()) == 1 or pred.size(1) == 1
if regression:
score = cohen_kappa_score(
torch.round(pred.clamp(0, 4)).cpu().numpy(),
truth.cpu().numpy(),
weights='quadratic'
)
else:
score = cohen_kappa_score(
torch.argmax(pred, dim=1).cpu().numpy(),
truth.cpu().numpy(),
weights='quadratic'
)
return score * -1, f"{score * 100:.2f}"
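# --- Hedged usage sketch (not in the original file) ---
# Each Metric returns (value_to_minimize, formatted_string); shown on random
# binary data with raw logits as predictions.
torch.manual_seed(0)
logits = torch.randn(100)
labels = (torch.rand(100) > 0.5).float()
print(AUC()(labels, logits))                     # negated AUC, formatted string
print(FBeta(step=0.05, beta=2)(labels, logits))  # negated best F-beta and its threshold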
|
11458128
|
import sklearn.neighbors as skl_neighbors
from Orange.base import KNNBase
from Orange.classification import SklLearner
__all__ = ["KNNLearner"]
class KNNLearner(KNNBase, SklLearner):
__wraps__ = skl_neighbors.KNeighborsClassifier
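# --- Hedged usage sketch (not in the original file) ---
# Standard Orange learner workflow: fit on a Table, then predict; the bundled
# "iris" dataset is used for illustration.
from Orange.data import Table
data = Table("iris")
model = KNNLearner()(data)
print(model(data[:5]))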
|
11458133
|
from __future__ import division, absolute_import, print_function
import sys
import time
from integration_test import *
class BadgeSpeedTestCase(IntegrationTest):
def __init__(self, device_addr, test_duration_minutes=5):
self.test_duration_minutes = test_duration_minutes
super(BadgeSpeedTestCase, self).__init__(device_addr)
def testCase(self, badge, logger):
"""
Record five minutes worth of microphone data,
make sure it pulls it at a reasonable speed.
"""
# sync time
badge.get_status()
test_start_time = time.time()
badge.start_microphone()
time.sleep(self.test_duration_minutes * 60)
badge.stop_microphone()
pull_start_time = time.time()
badge_data = badge.get_microphone_data(t=test_start_time)
pull_end_time = time.time()
pull_duration = pull_end_time - pull_start_time # seconds
print("It took {} seconds to pull {} minutes worth of data"
.format(pull_duration, self.test_duration_minutes))
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Please enter badge MAC address")
exit(1)
device_addr = sys.argv[1]
if len(sys.argv) == 3:
test_duration = int(sys.argv[2])  # CLI argument (minutes) must be an int for the sleep arithmetic
print("starting speed test w/ length {} minutes".format(test_duration))
testCase = BadgeSpeedTestCase(device_addr, test_duration_minutes=test_duration)
testCase.runTest()
else:
print("starting speed test w/ length 5 minutes")
testCase = BadgeSpeedTestCase(device_addr)
testCase.runTest()
|
11458136
|
import unittest
from unittest import TestCase
from metadata.ingestion.api.source import SourceStatus
from metadata.ingestion.source.sql_source import SQLSourceStatus
from metadata.utils.column_helpers import get_column_type
SQLTYPES = [
"ARRAY",
"BIGINT",
"BIGNUMERIC",
"BIGSERIAL",
"BINARY",
"BIT",
"BLOB",
"BOOL",
"BOOLEAN",
"BPCHAR",
"BYTEINT",
"BYTES",
"CHAR",
"CHARACTER VARYING",
"CHARACTER",
"CURSOR",
"DATE",
"DATETIME",
"DATETIME2",
"DATETIMEOFFSET",
"DECIMAL",
"DOUBLE PRECISION",
"DOUBLE",
"ENUM",
"FLOAT",
"FLOAT4",
"FLOAT64",
"FLOAT8",
"GEOGRAPHY",
"HYPERLOGLOG",
"IMAGE",
"INT",
"INT2",
"INT4",
"INT64",
"INT8",
"INTEGER",
"INTERVAL DAY TO SECOND",
"INTERVAL YEAR TO MONTH",
"INTERVAL",
"JSON",
"LONG RAW",
"LONG VARCHAR",
"LONG",
"LONGBLOB",
"MAP",
"MEDIUMBLOB",
"MEDIUMINT",
"MEDIUMTEXT",
"MONEY",
"NCHAR",
"NTEXT",
"NUMBER",
"NUMERIC",
"NVARCHAR",
"OBJECT",
"RAW",
"REAL",
"ROWID",
"ROWVERSION",
"SET",
"SMALLDATETIME",
"SMALLINT",
"SMALLMONEY",
"SMALLSERIAL",
"SQL_VARIANT",
"STRING",
"STRUCT",
"TABLE",
"TEXT",
"TIME",
"TIMESTAMP WITHOUT TIME ZONE",
"TIMESTAMP",
"TIMESTAMPTZ",
"TIMETZ",
"TINYINT",
"UNION",
"UROWID",
"VARBINARY",
"VARCHAR",
"VARIANT",
"XML",
"XMLTYPE",
"YEAR",
]
class DataTypeTest(TestCase):
def test_check_datatype_support(self):
status = SQLSourceStatus()
for types in SQLTYPES:
with self.subTest(line=types):
col_type = get_column_type(status, "Unit Test", types)
col_type = col_type != "NULL"
self.assertTrue(col_type, msg=types)
|
11458137
|
import json
import sys
import os
import random
import ruamel.yaml as yaml
from hurry.filesize import size
from ldt.helpers.exceptions import ResourceError
from ldt.load_config import config
def get_object_size(obj, seen=None):
"""
A function that recursively finds size of objects,
from https://goshippo.com/blog/measure-real-size-any-python-object/
Object sizes in Python should really not be that hard.
Warning: loading the same file into memory may result in slightly
different object sizes.
Args:
obj: the object for which the size is to be calculated
seen: helper variable
Returns:
(int): the size of the object in bytes.
"""
size = sys.getsizeof(obj)
if seen is None:
seen = set()
obj_id = id(obj)
if obj_id in seen:
return 0
# Important mark as seen *before* entering recursion to gracefully handle
# self-referential objects
seen.add(obj_id)
if isinstance(obj, dict):
size += sum([get_object_size(v, seen) for v in obj.values()])
size += sum([get_object_size(k, seen) for k in obj.keys()])
elif hasattr(obj, '__dict__'):
size += get_object_size(obj.__dict__, seen)
elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):
size += sum([get_object_size(i, seen) for i in obj])
return size
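# --- Hedged usage sketch (not in the original file) ---
# Demonstrates the recursive size calculation on a small nested structure.
_sample = {"words": ["alpha", "beta"], "counts": {"alpha": 3, "beta": 5}}
print(get_object_size(_sample), "bytes")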
#todo add lowercasing
def load_resource(path, format="infer", lowercasing=config["lowercasing"],
silent=True, wordlist=None):
"""
A helper function for loading various files formats, optionally
lowercasing them, and displaying the sizes of the resulting objects
(for monitoring huge resources).
Args:
path (str): path to file with the resource
format (str): the format of the file. By default it is inferred from
the file extension, but can also be specified directly. The
following formats are supported:
:type freqdict: for tab-separated [Word <tab> Number] file
:type tsv_dict: for [Word1 <tab> Word2,Word3,Word4...] or [Word1 <tab> Word2]
:type vocab: for one-word-per-line vocab file
:type json: a json dictionary
:type yaml: a yaml dictionary
:type json_freqdict: a json dictionary with frequency
dictionaries as entries
:type jsonl: a jsonlines file that will be read
line-by-line and optionally filtered by the provided wordlist
wordlist (list of str): the wordlist by which to filter the contents
of a jsonl resource
Returns:
(set, dict): a set object for vocab files, a dictionary for
everything else
Todo:
repackage to functions per format
lowercasing for json freqdict and jsonl
"""
if format == "infer":
format = path.split(".")[-1]
if format in ["freqdict", "tsv_dict", "json", "yaml", "json_freqdict",
"jsonl"]:
res = {}
if format == "freqdict":
with open(path, "r", encoding="utf8") as f:
for line in f:
line = line.strip().split("\t")
try:
res[line[0]] = int(line[1])
except IndexError:
print("Wrong file format. [Word <tab> Number] per line expected.")
break
if format == "tsv_dict":
with open(path, "r", encoding="utf8") as f:
# figure out whether it's a list or one-word entry format
lines = f.readlines()
commas_present = 0
for line in lines[:5]:
line = line.strip().split("\t")
try:
commas = line[1].count(",")
except IndexError:
print("Wrong file format. [Word1 <tab> Word2] or ["
"Word1 <tab> Word2, Word3,Word4...] per line "
"expected.")
break
if commas > 0:
commas_present += 1
if commas_present > 3:
for line in lines:
try:
line = line.strip().split("\t")
words = line[1].split(",")
res[str(line[0])] = set(words)
except IndexError:
print("Wrong file format. [Word1 <tab> Word2] or ["
"Word1 <tab> Word2, Word3,Word4...] per line "
"expected.")
break
else:
for line in lines:
try:
line = line.strip().split("\t")
res[str(line[0])] = str(line[1])
except IndexError:
print("Wrong file format. [Word1 <tab> Word2] or ["
"Word1 <tab> Word2, Word3,Word4...] per line "
"expected.")
break
if format == "json":
with open(path, "r", encoding="utf8") as f:
res = json.load(f)
if format == "yaml":
# # res = yaml.load(open(path))
# yaml = YAML(typ='safe')
with open(path) as stream:
try:
res = yaml.safe_load(stream)
except yaml.YAMLError:
raise ResourceError(
"Something is wrong with the .yaml file "
"for this language.")
if "cu" in res:
if res["cu"] == "Old Church Slavonic":
silent=True
if format == "json_freqdict":
with open(path, "r", encoding="utf8") as f:
res = json.load(f)
return res
if format == "jsonl":
res = load_jsonl_with_filtering(path, wordlist=wordlist)
return res
if lowercasing:
# test if the dict keys are lists or not
random_key = random.choice(list(res))
if not isinstance(res[random_key], str):
new_res = {}
for k in res.keys():
l = str(k).lower()
if format == "freqdict":
#if lowercasing a frequency dictionary, add the
# frequencies for any merged words
if l in new_res:
new_res[l] = new_res[l] + res[k]
else:
new_res[l] = res[k]
elif format in ["json", "yaml", "csv_dict", "tsv_dict"]:
if not l in new_res.keys():
new_res[l] = set(str(w).lower() for w in res[k])
else:
for w in res[k]:
new_res[l].add(str(w))
res = new_res
else:
res = dict((str(k).lower(), str(v).lower()) for k, v in
res.items())
elif format == "vocab":
with open(path, "r", encoding="utf8") as f:
res = f.read().splitlines()
if lowercasing:
res = [x.lower() for x in res]
res = frozenset(res)
else:
print("Unknown format. The following formats are supported: \n"
"* [.freqdict] for tab-separated [Word <tab> Number] files;\n"
"* [.tsv_dict] for [Word1 <tab> Word2,Word3,Word4...] or [Word1 <tab> Word2];\n"
"* [.json] or [.jsonl] for json dictionaries;\n"
"* [.yaml] for yaml dictionaries;\n"
"* [.vocab] for one-word-per-line vocab files.\n")
return None
if res:
if not silent:
print(path, " loaded as ", size(get_object_size(res)))
return res
def load_language_file(resources_path, language):
if not os.path.isfile(resources_path):
raise ResourceError(language + ".yaml not found.")
with open(resources_path) as stream:
try:
resources = yaml.safe_load(stream)
return resources
except yaml.YAMLError:
raise ResourceError("Something is wrong with the .yaml file "
"for this language.")
def load_jsonl_with_filtering(path, wordlist=None):
"""Loading large jsonl files line-by-line, optionally only storing
results that are in a provided wordlist"""
res = {}
if wordlist:
with open(path, "r") as f:
for line in f:
line = json.loads(line)
for i in line:
if i in wordlist:
res[i] = line[i]
else:
with open(path, "r") as f:
for line in f:
line = json.loads(line)
for i in line:
res[i] = line[i]
return res
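# --- Hedged usage sketch (not in the original file; the paths are hypothetical) ---
# The format is inferred from the file extension unless passed explicitly.
# vocab = load_resource("/tmp/wordlist.vocab")            # frozenset of words
# freqs = load_resource("/tmp/counts.freqdict")           # {word: count}
# subset = load_jsonl_with_filtering("/tmp/big.jsonl", wordlist=vocab)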
|
11458205
|
import scipy as sp
import numpy as np
import scipy.ndimage as spim
from skimage.segmentation import relabel_sequential
from edt import edt
from loguru import logger
from skimage.morphology import ball, disk
from ._utils import Results
from ._unpad import unpad
try:
from skimage.measure import marching_cubes
except ImportError:
from skimage.measure import marching_cubes_lewiner as marching_cubes
def isolate_object(region, i, s=None):
r"""
Given an image containing labels, removes all labels except the specified
one.
Parameters
----------
region : ndarray
An image containing labelled regions, as returned by
``scipy.ndimage.label``.
i : int
The integer value
s : tuple of slice objects, optional
If provided, then a subsection of ``region`` will be extracted and the
function will be applied to this subsection only.
Returns
-------
label : ndarray
An ndarray the same size as ``region`` containing *only* the objects
with the given value ``i``. If ``s`` is provided, the returned image
will be a subsection of ``region``.
"""
if s is not None:
region = region[s]
im = (region == i)*i
return im
def marching_map(path, start):
r"""
Use the fast marching method to find distance of each voxel from a starting
point
Parameters
----------
path : ndarray
A boolean image with ``True`` values demarcating the path along which
the march will occur
start : ndarray
A boolean image with ``True`` values indicating where the march should
start.
Returns
-------
distance : ndarray
An array the same size as ``path`` with numerical values in each voxel
indicating its distance from the start point(s) along the given path.
Notes
-----
This function assumes ``scikit-fmm`` is installed.
"""
try:
import skfmm
except ModuleNotFoundError:
raise ModuleNotFoundError('scikit-fmm must be installed to use this ' +
'function')
phi = start*2.0 - 1.0
speed = path*1.0
t = skfmm.travel_time(phi, speed)
return t.data
def align_image_with_openpnm(im):
r"""
Rotates an image to agree with the coordinates used in OpenPNM.
This is necessary for overlaying the image and the network in Paraview.
Parameters
----------
im : ndarray
The image to be rotated. Can be the Boolean image of the pore space
or any other image of interest.
Returns
-------
image : ndarray
Returns a copy of ``im`` rotated accordingly.
Examples
--------
`Click here
<https://porespy.org/examples/tools/howtos/align_image_with_openpnm.html>`_
to view online example.
"""
_check_for_singleton_axes(im)
im = np.copy(im)
if im.ndim == 2:
im = (np.swapaxes(im, 1, 0))
im = im[-1::-1, :]
elif im.ndim == 3:
im = (np.swapaxes(im, 2, 0))
im = im[:, -1::-1, :]
return im
def subdivide(im, divs=2, overlap=0):
r"""
Returns slices into an image describing the specified number of sub-arrays.
This function is useful for performing operations on smaller images for
memory or speed. Note that for most typical operations this will NOT work,
since the image borders would cause artifacts (e.g. ``distance_transform``)
Parameters
----------
im : ndarray
The image of the porous media
divs : scalar or array_like
The number of sub-divisions to create in each axis of the image. If a
scalar is given it is assumed this value applies in all dimensions.
overlap : scalar or array_like
The amount of overlap to use when dividing along each axis. If a
scalar is given it is assumed this value applies in all dimensions.
Returns
-------
slices : list
A list of tuples of slice objects for indexing into ``im`` that
extract the requested subdivisions of the image. The list is
flattened, so it can be iterated over directly regardless of the
number of dimensions of ``im``.
See Also
--------
chunked_func
Examples
--------
>>> import porespy as ps
>>> import matplotlib.pyplot as plt
>>> im = ps.generators.blobs(shape=[200, 200])
>>> s = ps.tools.subdivide(im, divs=[2, 2])
>>> print(len(s))
4
`Click here
<https://porespy.org/examples/tools/howtos/subdivide.html>`_
to view online example.
"""
divs = np.ones((im.ndim,), dtype=int) * np.array(divs)
overlap = overlap * (divs > 1)
s = np.zeros(shape=divs, dtype=object)
spacing = np.round(np.array(im.shape)/divs, decimals=0).astype(int)
for i in range(s.shape[0]):
x = spacing[0]
sx = slice(x*i, min(im.shape[0], x*(i+1)), None)
for j in range(s.shape[1]):
y = spacing[1]
sy = slice(y*j, min(im.shape[1], y*(j+1)), None)
if im.ndim == 3:
for k in range(s.shape[2]):
z = spacing[2]
sz = slice(z*k, min(im.shape[2], z*(k+1)), None)
s[i, j, k] = tuple([sx, sy, sz])
else:
s[i, j] = tuple([sx, sy])
s = s.flatten().tolist()
for i, item in enumerate(s):
s[i] = extend_slice(slices=item, shape=im.shape, pad=overlap)
return s
def recombine(ims, slices, overlap):
r"""
Recombines image chunks back into full image of original shape
Parameters
----------
ims : list of ndarrays
The chunks of the original image, which may or may not have been
processed.
slices : list of slice objects
The slice objects which were used to obtain the chunks in ``ims``
overlap : int of list ints
The amount of overlap used when creating chunks
Returns
-------
im : ndarray
An image constituted from the chunks in ``ims`` of the same shape
as the original image.
See Also
--------
chunked_func, subdivide
"""
shape = [0]*ims[0].ndim
for s in slices:
for dim in range(len(slices[0])):
shape[dim] = max(shape[dim], s[dim].stop)
if isinstance(overlap, int):
overlap = [overlap]*len(shape)
im = np.zeros(shape, dtype=ims[0].dtype)
for i, s in enumerate(slices):
# Prepare new slice objects into main and sub-sliced image
a = [] # Slices into original image
b = [] # Slices into chunked image
for dim in range(im.ndim):
if s[dim].start == 0:
ax = 0
bx = 0
else:
ax = s[dim].start + overlap[dim]
bx = overlap[dim]
if s[dim].stop == im.shape[dim]:
ay = im.shape[dim]
by = im.shape[dim]
else:
ay = s[dim].stop - overlap[dim]
by = s[dim].stop - s[dim].start - overlap[dim]
a.append(slice(ax, ay, None))
b.append(slice(bx, by, None))
# Convert lists of slices to tuples
a = tuple(a)
b = tuple(b)
# Insert image chunk into main image
try:
im[a] = ims[i][b]
except ValueError:
raise IndexError('The applied filter seems to have returned a '
+ 'larger image than it was sent.')
return im
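# --- Hedged usage sketch (not in the original file) ---
# Round-trips an image through subdivide -> per-chunk processing -> recombine,
# using porespy's public API; the "processing" step is just a copy here.
import porespy as ps
im_demo = ps.generators.blobs(shape=[100, 100])
chunk_slices = ps.tools.subdivide(im_demo, divs=2, overlap=5)
chunks = [im_demo[s].copy() for s in chunk_slices]
im_back = ps.tools.recombine(chunks, chunk_slices, overlap=5)
assert im_back.shape == im_demo.shape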
def bbox_to_slices(bbox):
r"""
Given a tuple containing bounding box coordinates, return a tuple of slice
objects.
A bounding box in the form of a straight list is returned by several
functions in skimage, but these cannot be used to index directly into an
image. This function returns a tuple of slices that can be, such as:
``im[bbox_to_slices([xmin, ymin, xmax, ymax])]``.
Parameters
----------
bbox : tuple of ints
The bounding box indices in the form (``xmin``, ``ymin``, ``zmin``,
``xmax``, ``ymax``, ``zmax``). For a 2D image, simply omit the
``zmin`` and ``zmax`` entries.
Returns
-------
slices : tuple
A tuple of slice objects that can be used to directly index into a
larger image.
Examples
--------
`Click here
<https://porespy.org/examples/tools/howtos/bbox_to_slices.html>`_
to view online example.
"""
if len(bbox) == 4:
ret = (slice(bbox[0], bbox[2]),
slice(bbox[1], bbox[3]))
else:
ret = (slice(bbox[0], bbox[3]),
slice(bbox[1], bbox[4]),
slice(bbox[2], bbox[5]))
return ret
def find_outer_region(im, r=None):
r"""
Find regions of the image that are outside of the solid matrix.
Parameters
----------
im : ndarray
Image of the porous material with 1's for void and 0's for solid
r : scalar
The radius of the rolling ball to use. If not specified then a value
is calculated as twice maximum of the distance transform. The image
size is padded by this amount in all directions, so the image can
become quite large and unwieldy if too large a value is given.
Returns
-------
image : ndarray
A boolean mask the same shape as ``im``, containing True in all voxels
identified as *outside* the sample.
Notes
-----
This function uses the rolling ball method to define where the outer region
ends and the void space begins.
This is particularly useful for samples that do not fill the
entire rectangular image, such as cylindrical cores or samples with non-
parallel faces.
"""
if r is None:
dt = edt(im)
r = int(np.amax(dt)) * 2
im_padded = np.pad(array=im, pad_width=r, mode='constant',
constant_values=True)
dt = edt(im_padded)
seeds = (dt >= r) + get_border(shape=im_padded.shape)
# Remove seeds not connected to edges
labels = spim.label(seeds)[0]
mask = labels == 1 # Assume label of 1 on edges, assured by adding border
dt = edt(~mask)
outer_region = dt < r
outer_region = extract_subsection(im=outer_region, shape=im.shape)
return outer_region
def extract_cylinder(im, r=None, axis=0):
r"""
Returns a cylindrical section of the image of specified radius.
This is useful for making square images look like cylindrical cores such
as those obtained from X-ray tomography.
Parameters
----------
im : ndarray
The image of the porous material. Can be any data type.
r : scalar
The radius of the cylinder to extract. If ``None`` is given then the
default is the largest cylinder that can fit inside the specified
plane.
axis : scalar
The axis along with the cylinder will be oriented.
Returns
-------
image : ndarray
A copy of ``im`` with values outside the cylindrical area set to 0 or
``False``.
Examples
--------
`Click here
<https://porespy.org/examples/tools/howtos/extract_cylinder.html>`_
to view online example.
"""
# This needs to be imported here since the tools module is imported
# before the generators module, so placing it at the top of the file
# causes an error since the generators module does not exist yet.
# Strangely, if I import the ENTIRE package at the top of the file then
# things work ok, but this seems quite silly compared to just importing
# the function on demand. This is explained in the following
# stackoverflow answer: https://stackoverflow.com/a/129810.
from porespy.generators import cylindrical_plug
mask = cylindrical_plug(shape=im.shape, r=r, axis=axis)
im_temp = im * mask
return im_temp
def extract_subsection(im, shape):
r"""
Extracts the middle section of a image
Parameters
----------
im : ndarray
Image from which to extract the subsection
shape : array_like
Can either specify the size of the extracted section or the fractional
size of the image to extact.
Returns
-------
image : ndarray
An ndarray of size given by the ``shape`` argument, taken from the
center of the image.
See Also
--------
unpad
Examples
--------
>>> import scipy as sp
>>> from porespy.tools import extract_subsection
>>> im = np.array([[1, 1, 1, 1], [1, 2, 2, 2], [1, 2, 3, 3], [1, 2, 3, 4]])
>>> print(im)
[[1 1 1 1]
[1 2 2 2]
[1 2 3 3]
[1 2 3 4]]
>>> im = extract_subsection(im=im, shape=[2, 2])
>>> print(im)
[[2 2]
[2 3]]
`Click here
<https://porespy.org/examples/tools/howtos/extract_subsection.html>`_
to view online example.
"""
# Check if shape was given as a fraction
shape = np.array(shape)
if shape[0] < 1:
shape = np.array(im.shape) * shape
center = np.array(im.shape) / 2
s_im = []
for dim in range(im.ndim):
r = shape[dim] / 2
lower_im = np.amax((center[dim] - r, 0))
upper_im = np.amin((center[dim] + r, im.shape[dim]))
s_im.append(slice(int(lower_im), int(upper_im)))
return im[tuple(s_im)]
def get_planes(im, squeeze=True):
r"""
Extracts three planar images from the volumetric image, one for each
principal axis. The planes are taken from the middle of the domain.
Parameters
----------
im : ndarray
The volumetric image from which the 3 planar images are to be obtained
squeeze : boolean, optional
If True (default) the returned images are 2D (i.e. squeezed). If
False, the images are 1 element deep along the axis where the slice
was obtained.
Returns
-------
planes : list
A list of 2D-images
Examples
--------
`Click here
<https://porespy.org/examples/tools/get_planes.html>`_
to view online example.
"""
x, y, z = (np.array(im.shape) / 2).astype(int)
planes = [im[x, :, :], im[:, y, :], im[:, :, z]]
if not squeeze:
imx = planes[0]
planes[0] = np.reshape(imx, [1, imx.shape[0], imx.shape[1]])
imy = planes[1]
planes[1] = np.reshape(imy, [imy.shape[0], 1, imy.shape[1]])
imz = planes[2]
planes[2] = np.reshape(imz, [imz.shape[0], imz.shape[1], 1])
return planes
def extend_slice(slices, shape, pad=1):
r"""
Adjust slice indices to include additional voxels around the slice.
This function does bounds checking to ensure the indices don't extend
outside the image.
Parameters
----------
slices : list of slice objects
A list (or tuple) of N slice objects, where N is the number of
dimensions in the image.
shape : array_like
The shape of the image into which the slice objects apply. This is
used to check the bounds to prevent indexing beyond the image.
pad : int or list of ints
The number of voxels to expand in each direction.
Returns
-------
slices : list of slice objects
A list of slice objects with the start and stop attributes extended
by ``pad``, without extending beyond the image boundaries.
Examples
--------
>>> from scipy.ndimage import label, find_objects
>>> from porespy.tools import extend_slice
>>> im = np.array([[1, 0, 0], [1, 0, 0], [0, 0, 1]])
>>> labels = label(im)[0]
>>> s = find_objects(labels)
Using the slices returned by ``find_objects``, set the first label to 3
>>> labels[s[0]] = 3
>>> print(labels)
[[3 0 0]
[3 0 0]
[0 0 2]]
Next extend the slice, and use it to set the values to 4
>>> s_ext = extend_slice(s[0], shape=im.shape, pad=1)
>>> labels[s_ext] = 4
>>> print(labels)
[[4 4 0]
[4 4 0]
[4 4 2]]
As can be seen by the location of the 4s, the slice was extended by 1, and
also handled the extension beyond the boundary correctly.
"""
shape = np.array(shape)
pad = np.array(pad).astype(int)*(shape > 0)
a = []
for i, s in enumerate(slices):
start = 0
stop = shape[i]
start = max(s.start - pad[i], 0)
stop = min(s.stop + pad[i], shape[i])
a.append(slice(start, stop, None))
return tuple(a)
def randomize_colors(im, keep_vals=[0]):
r'''
Takes a greyscale image and randomly shuffles the greyscale values, so that
all voxels labeled X will be labelled Y, and all voxels labeled Y will be
labeled Z, where X, Y, Z and so on are randomly selected from the values
in the input image.
This function is useful for improving the visibility of images with
neighboring regions that are only incrementally different from each other,
such as that returned by `scipy.ndimage.label`.
Parameters
----------
im : array_like
An ND image of greyscale values.
keep_vals : array_like
Indicate which voxel values should NOT be altered. The default is
`[0]` which is useful for leaving the background of the image
untouched.
Returns
-------
image : ndarray
An image the same size and type as ``im`` but with the greyscale values
reassigned. The unique values in both the input and output images will
be identical.
Notes
-----
If the greyscale values in the input image are not contiguous then
neither will they be in the output.
Examples
--------
>>> import porespy as ps
>>> import numpy as np
>>> np.random.seed(0)
>>> im = np.random.randint(low=0, high=5, size=[4, 4])
>>> print(im)
[[4 0 3 3]
[3 1 3 2]
[4 0 0 4]
[2 1 0 1]]
>>> im_rand = ps.tools.randomize_colors(im)
>>> print(im_rand)
[[2 0 4 4]
[4 1 4 3]
[2 0 0 2]
[3 1 0 1]]
As can be seen, the 2's have become 3, 3's have become 4, and 4's have
become 2. 1's remained 1 by random accident. 0's remain zeros by default,
but this can be controlled using the `keep_vals` argument.
'''
im_flat = im.flatten()
keep_vals = np.array(keep_vals)
swap_vals = ~np.in1d(im_flat, keep_vals)
im_vals = np.unique(im_flat[swap_vals])
new_vals = np.random.permutation(im_vals)
im_map = np.zeros(shape=[np.amax(im_vals) + 1, ], dtype=int)
im_map[im_vals] = new_vals
im_new = im_map[im_flat]
im_new = np.reshape(im_new, newshape=np.shape(im))
return im_new
def make_contiguous(im, mode='keep_zeros'):
r"""
Take an image with arbitrary greyscale values and adjust them to ensure
all values fall in a contiguous range starting at 0.
Parameters
----------
im : array_like
An ND array containing greyscale values
mode : string
Controls how the ranking is applied in the presence of numbers less
than or equal to 0.
'keep_zeros'
(default) Voxels equal to 0 remain 0, and all other
numbers are ranked starting at 1, including negative numbers,
so [-1, 0, 4] becomes [1, 0, 2]
'symmetric'
Negative and positive voxels are ranked based on their
respective distances to 0, so [-4, -1, 0, 5] becomes
[-2, -1, 0, 1]
'clipped'
Voxels less than or equal to 0 are set to 0, while
all other numbers are ranked starting at 1, so [-3, 0, 2]
becomes [0, 0, 1].
'none'
Voxels are ranked such that the smallest or most
negative number becomes 1, so [-4, 2, 0] becomes [1, 3, 2].
This is equivalent to calling ``scipy.stats.rankdata`` directly,
and reshaping the result to match ``im``.
Returns
-------
image : ndarray
An ndarray the same size as ``im`` but with all values in contiguous
order.
Examples
--------
>>> import porespy as ps
>>> import numpy as np
>>> im = np.array([[0, 2, 9], [6, 8, 3]])
>>> im = ps.tools.make_contiguous(im)
>>> print(im)
[[0 1 5]
[3 4 2]]
`Click here
<https://porespy.org/examples/tools/howtos/make_contiguous.html>`_
to view online example.
"""
# This is a very simple version using relabel_sequential
im = np.array(im)
if mode == 'none':
im = im + np.abs(np.min(im)) + 1
im_new = relabel_sequential(im)[0]
if mode == 'keep_zeros':
mask = im == 0
im = im + np.abs(np.min(im)) + 1
im[mask] = 0
im_new = relabel_sequential(im)[0]
if mode == 'clipped':
mask = im <= 0
im[mask] = 0
im_new = relabel_sequential(im)[0]
if mode == 'symmetric':
mask = im < 0
im_neg = relabel_sequential(-im*mask)[0]
mask = im >= 0
im_pos = relabel_sequential(im*mask)[0]
im_new = im_pos - im_neg
return im_new
def get_border(shape, thickness=1, mode='edges'):
r"""
Create an array with corners, edges or faces labelled as ``True``.
This can be used as mask to manipulate values laying on the perimeter of
an image.
Parameters
----------
shape : array_like
The shape of the array to return. Can be either 2D or 3D.
thickness : scalar (default is 1)
The number of pixels/voxels to place along perimeter.
mode : string
The type of border to create. Options are 'faces', 'edges' (default)
and 'corners'. In 2D 'faces' and 'edges' give the same result.
Returns
-------
image : ndarray
An ndarray of specified shape with ``True`` values at the perimeter
and ``False`` elsewhere.
Notes
-----
The indices of the ``True`` values can be found using ``numpy.where``.
Examples
--------
>>> import porespy as ps
>>> mask = ps.tools.get_border(shape=[3, 3], mode='corners')
>>> print(mask)
[[ True False True]
[False False False]
[ True False True]]
>>> mask = ps.tools.get_border(shape=[3, 3], mode='edges')
>>> print(mask)
[[ True True True]
[ True False True]
[ True True True]]
`Click here
<https://porespy.org/examples/tools/howtos/get_border.html>`_
to view online example.
"""
from porespy.generators import borders
return borders(shape=shape, thickness=thickness, mode=mode)
def in_hull(points, hull):
"""
Test if a list of coordinates are inside a given convex hull
Parameters
----------
points : array_like (N x ndims)
The spatial coordinates of the points to check
hull : scipy.spatial.ConvexHull object **OR** array_like
Can be either a convex hull object as returned by
``scipy.spatial.ConvexHull`` or simply the coordinates of the points
that define the convex hull.
Returns
-------
result : 1D-array
A 1D Boolean array of length *N* indicating whether or not each of the
given points in ``points`` lies within the provided ``hull``.
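Examples
--------
A minimal sketch, assuming this function is exposed as
``porespy.tools.in_hull``, using the corners of the unit square as the
hull:
>>> import numpy as np
>>> import porespy as ps
>>> hull = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
>>> ps.tools.in_hull(np.array([[0.5, 0.5], [2.0, 2.0]]), hull)
array([ True, False])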
"""
from scipy.spatial import Delaunay, ConvexHull
if isinstance(hull, ConvexHull):
hull = hull.points
hull = Delaunay(hull)
return hull.find_simplex(points) >= 0
def norm_to_uniform(im, scale=None):
r"""
Take an image with normally distributed greyscale values and convert it to
a uniform (i.e. flat) distribution.
Parameters
----------
im : ndarray
The image containing the normally distributed scalar field
scale : [low, high]
A list or array indicating the lower and upper bounds for the new
randomly distributed data. The default is ``None``, which uses the
``max`` and ``min`` of the original image as the lower and upper
bounds, but another common option might be [0, 1].
Returns
-------
image : ndarray
A copy of ``im`` with uniformly distributed greyscale values spanning
the specified range, if given.
Examples
--------
`Click here
<https://porespy.org/examples/tools/howtos/norm_to_uniform.html>`_
to view online example.
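A minimal sketch showing that the output spans the requested ``scale``:
>>> import numpy as np
>>> import porespy as ps
>>> np.random.seed(0)
>>> im = np.random.randn(50, 50)
>>> out = ps.tools.norm_to_uniform(im, scale=[0, 1])
>>> float(out.min()), float(out.max())
(0.0, 1.0)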
"""
if scale is None:
scale = [im.min(), im.max()]
im = (im - np.mean(im)) / np.std(im)
im = 1 / 2 * sp.special.erfc(-im / np.sqrt(2))
im = (im - im.min()) / (im.max() - im.min())
im = im * (scale[1] - scale[0]) + scale[0]
return im
def _functions_to_table(mod, colwidth=[27, 48]):
r"""
Given a module of functions, returns a ReST formatted text string that
outputs a table when printed.
Parameters
----------
mod : module
The module containing the functions to be included in the table, such
as 'porespy.filters'.
colwidth : list of ints
The widths of the first and second columns. Note that because of the
vertical lines separating the columns and defining the edges of the
table, the total table width will be 3 characters wider than the sum
of the specified column widths.
"""
temp = mod.__dir__()
funcs = [i for i in temp if not i[0].startswith('_')]
funcs.sort()
row = '+' + '-' * colwidth[0] + '+' + '-' * colwidth[1] + '+'
fmt = '{0:1s} {1:' + str(colwidth[0] - 2) + 's} {2:1s} {3:' \
+ str(colwidth[1] - 2) + 's} {4:1s}'
lines = []
lines.append(row)
lines.append(fmt.format('|', 'Method', '|', 'Description', '|'))
lines.append(row.replace('-', '='))
for i, item in enumerate(funcs):
try:
s = getattr(mod, item).__doc__.strip()
end = s.find('\n')
if end > colwidth[1] - 2:
s = s[:colwidth[1] - 5] + '...'
lines.append(fmt.format('|', item, '|', s[:end], '|'))
lines.append(row)
except AttributeError:
pass
s = '\n'.join(lines)
return s
def mesh_region(region: bool, strel=None):
r"""
Creates a tri-mesh of the provided region using the marching cubes
algorithm
Parameters
----------
region : ndarray
A boolean image with ``True`` values indicating the region of interest
strel : ndarray
The structuring element to use when blurring the region. The blur is
performed using a simple convolution filter. The point is to create a
greyscale region to allow the marching cubes algorithm some freedom
to conform the mesh to the surface. As the size of ``strel`` increases
the region will become increasingly blurred and inaccurate. The default
is a spherical (or circular, in 2D) element with a radius of 1.
Returns
-------
mesh : tuple
A named-tuple containing ``verts``, ``faces``, ``norm``, and ``val``
as returned by the ``skimage.measure.marching_cubes`` function.
"""
im = region
_check_for_singleton_axes(im)
if strel is None:
if region.ndim == 3:
strel = ball(1)
if region.ndim == 2:
strel = disk(1)
pad_width = np.amax(strel.shape)
if im.ndim == 3:
padded_mask = np.pad(im, pad_width=pad_width, mode='constant')
padded_mask = spim.convolve(padded_mask * 1.0,
weights=strel) / np.sum(strel)
else:
padded_mask = np.reshape(im, (1,) + im.shape)
padded_mask = np.pad(padded_mask, pad_width=pad_width, mode='constant')
verts, faces, norm, val = marching_cubes(padded_mask)
result = Results()
result.verts = verts - pad_width
result.faces = faces
result.norm = norm
result.val = val
return result
def ps_disk(r, smooth=True):
r"""
Creates circular disk structuring element for morphological operations
Parameters
----------
r : float or int
The desired radius of the structuring element
smooth : boolean
If ``True`` (default) the single-voxel nibs on the perimeter of the
disk are removed, giving a smoother shape; if ``False`` they are kept.
Returns
-------
disk : ndarray
A 2D numpy bool array of the structuring element
Examples
--------
`Click here
<https://porespy.org/examples/tools/howtos/ps_disk.html>`_
to view online example.
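A minimal sketch; the smooth element is a strict subset of the
non-smooth one (the difference being the little nibs):
>>> import porespy as ps
>>> disk = ps.tools.ps_disk(3)
>>> disk.shape
(7, 7)
>>> bool(disk.sum() < ps.tools.ps_disk(3, smooth=False).sum())
True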
"""
disk = ps_round(r=r, ndim=2, smooth=smooth)
return disk
def ps_ball(r, smooth=True):
r"""
Creates spherical ball structuring element for morphological operations
Parameters
----------
r : scalar
The desired radius of the structuring element
smooth : boolean
If ``True`` (default) the single-voxel nibs on the faces of the ball
are removed, giving a smoother shape; if ``False`` they are kept.
Returns
-------
ball : ndarray
A 3D numpy array of the structuring element
Examples
--------
`Click here
<https://porespy.org/examples/tools/howtos/ps_ball.html>`_
to view online example.
"""
ball = ps_round(r=r, ndim=3, smooth=smooth)
return ball
def ps_round(r, ndim, smooth=True):
r"""
Creates round structuring element with the given radius and dimensionality
Parameters
----------
r : scalar
The desired radius of the structuring element
ndim : int
The dimensionality of the element, either 2 or 3.
smooth : boolean
If ``True`` (default) the single-voxel nibs on the faces of the
element are removed, giving a smoother shape; if ``False`` they are kept.
Returns
-------
strel : ndarray
A numpy array of the structuring element with the requested dimensionality
Examples
--------
`Click here
<https://porespy.org/examples/tools/howtos/ps_round.html>`_
to view online example.
"""
rad = int(np.ceil(r))
other = np.ones([2*rad + 1 for i in range(ndim)], dtype=bool)
other[tuple(rad for i in range(ndim))] = False
if smooth:
ball = edt(other) < r
else:
ball = edt(other) <= r
return ball
def ps_rect(w, ndim):
r"""
Creates rectilinear structuring element with the given size and
dimensionality
Parameters
----------
w : scalar
The desired width of the structuring element
ndim : int
The dimensionality of the element, either 2 or 3.
Returns
-------
strel : ndarray
A numpy array of the structuring element
Examples
--------
`Click here
<https://porespy.org/examples/tools/howtos/ps_rect.html>`_
to view online example.
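A minimal sketch showing the resulting shapes:
>>> import porespy as ps
>>> ps.tools.ps_rect(w=3, ndim=2).shape
(3, 3)
>>> ps.tools.ps_rect(w=3, ndim=3).shape
(3, 3, 3)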
"""
if ndim == 2:
from skimage.morphology import square
strel = square(w)
if ndim == 3:
from skimage.morphology import cube
strel = cube(w)
return strel
def overlay(im1, im2, c):
r"""
Overlays ``im2`` onto ``im1``, given voxel coords of center of ``im2``
in ``im1``.
Parameters
----------
im1 : ndarray
Original voxelated image
im2 : ndarray
Template voxelated image
c : array_like
[x, y, z] coordinates in ``im1`` where ``im2`` will be centered
Returns
-------
image : ndarray
A modified version of ``im1``, with ``im2`` overlaid at the specified
location
Examples
--------
`Click here
<https://porespy.org/examples/tools/howtos/overlay.html>`_
to view online example.
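A minimal sketch with a small integer image and an all-ones template:
>>> import numpy as np
>>> import porespy as ps
>>> im1 = np.zeros([5, 5, 5], dtype=int)
>>> im2 = np.ones([3, 3, 3], dtype=int)
>>> im1 = ps.tools.overlay(im1, im2, c=[2, 2, 2])
>>> int(im1.sum())
27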
"""
shape = im2.shape
for ni in shape:
if ni % 2 == 0:
raise Exception("Structuring element must be odd-voxeled...")
nx, ny, nz = [(ni - 1) // 2 for ni in shape]
cx, cy, cz = c
im1[cx - nx:cx + nx + 1, cy - ny:cy + ny + 1, cz - nz:cz + nz + 1] += im2
return im1
def insert_sphere(im, c, r, v=True, overwrite=True):
r"""
Inserts a sphere of a specified radius into a given image
Parameters
----------
im : array_like
Image into which the sphere should be inserted
c : array_like
The [x, y, z] coordinate indicating the center of the sphere
r : int
The radius of sphere to insert
v : int
The value to put into the sphere voxels. The default is ``True``
which corresponds to inserting spheres into a Boolean image. If
a numerical value is given, ``im`` is converted to the same type as
``v``.
overwrite : boolean
If ``True`` (default) then the sphere overwrites whatever values are
present in ``im``. If ``False`` then the sphere values are only
inserted into locations that are 0 or ``False``.
Returns
-------
image : ndarray
The original image with a sphere inserted at the specified location
Examples
--------
`Click here
<https://porespy.org/examples/tools/howtos/insert_sphere.html>`_
to view online example.
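A minimal sketch inserting a sphere into a boolean image:
>>> import numpy as np
>>> import porespy as ps
>>> im = np.zeros([9, 9, 9], dtype=bool)
>>> im = ps.tools.insert_sphere(im, c=[4, 4, 4], r=3)
>>> bool(im[4, 4, 4]), bool(im[0, 0, 0])
(True, False)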
"""
# Convert image to same type as v for eventual insertion
if im.dtype != type(v):
im = im.astype(type(v))
# Parse the arguments
r = int(np.around(r, decimals=0))
if r == 0:
return im
c = np.array(c, dtype=int)
if c.size != im.ndim:
raise Exception('Coordinates do not match dimensionality of image')
# Define a bounding box around inserted sphere, minding image boundaries
bbox = []
[bbox.append(np.clip(c[i] - r, 0, im.shape[i])) for i in range(im.ndim)]
[bbox.append(np.clip(c[i] + r, 0, im.shape[i])) for i in range(im.ndim)]
bbox = np.ravel(bbox)
# Obtain slices into image
s = bbox_to_slices(bbox)
# Generate sphere template within image boundaries
blank = np.ones_like(im[s], dtype=float)
blank[tuple(c - bbox[0:im.ndim])] = 0.0
sph = spim.distance_transform_edt(blank) < r
if overwrite: # Clear voxels under sphere to be zero
temp = im[s] * sph > 0
im[s][temp] = 0
else: # Clear portions of sphere to prevent overwriting
sph *= im[s] == 0
im[s] = im[s] + sph * v
return im
def insert_cylinder(im, xyz0, xyz1, r):
r"""
Inserts a cylinder of given radius onto an image
Parameters
----------
im : array_like
Original voxelated image
xyz0, xyz1 : 3-by-1 array_like
Voxel coordinates of the two end points of the cylinder
r : int
Radius of the cylinder
Returns
-------
im : ndarray
Original voxelated image overlayed with the cylinder
Notes
-----
This function is only implemented for 3D images
Examples
--------
`Click here
<https://porespy.org/examples/tools/howtos/insert_cylinder.html>`_
to view online example.
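A minimal sketch inserting a short axis-aligned cylinder:
>>> import numpy as np
>>> import porespy as ps
>>> im = np.zeros([10, 10, 10], dtype=int)
>>> im = ps.tools.insert_cylinder(im, xyz0=[2, 5, 5], xyz1=[7, 5, 5], r=2)
>>> int(im.max())
1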
"""
if im.ndim != 3:
raise Exception('This function is only implemented for 3D images')
# Converting coordinates to numpy array
xyz0, xyz1 = [np.array(xyz).astype(int) for xyz in (xyz0, xyz1)]
r = int(r)
L = np.absolute(xyz0 - xyz1).max() + 1
xyz_line = [np.linspace(xyz0[i], xyz1[i], L).astype(int) for i in range(3)]
for i, c in enumerate(xyz_line):
if c.min() < 0:
raise Exception('Given endpoint coordinates lie outside image')
if c.max() > im.shape[i]:
raise Exception('Given endpoint coordinates lie outside image')
c += r
im = np.pad(im, r)
xyz_min = np.amin(xyz_line, axis=1) - r
xyz_max = np.amax(xyz_line, axis=1) + r
shape_template = xyz_max - xyz_min + 1
template = np.zeros(shape=shape_template)
# Shortcut for orthogonal cylinders
if (xyz0 == xyz1).sum() == 2:
unique_dim = [xyz0[i] != xyz1[i] for i in range(3)].index(True)
shape_template[unique_dim] = 1
template_2D = disk(radius=r).reshape(shape_template)
template = np.repeat(template_2D, repeats=L, axis=unique_dim)
xyz_min[unique_dim] += r
xyz_max[unique_dim] += -r
else:
xyz_line_in_template_coords = [xyz_line[i] - xyz_min[i] for i in range(3)]
template[tuple(xyz_line_in_template_coords)] = 1
template = edt(template == 0) <= r
im[xyz_min[0]: xyz_max[0] + 1,
xyz_min[1]: xyz_max[1] + 1,
xyz_min[2]: xyz_max[2] + 1] += template
im = unpad(im, r)
return im
def extract_regions(regions, labels: list, trim=True):
r"""
Combine given regions into a single boolean mask
Parameters
----------
regions : ndarray
An image containing an arbitrary number of labeled regions
labels : array_like or scalar
A list of labels indicating which region or regions to extract
trim : bool
If ``True`` then the image shape will be trimmed to a bounding box
around the given regions.
Returns
-------
im : ndarray
A boolean mask with ``True`` values indicating where the given labels
exist
Examples
--------
`Click here
<https://porespy.org/examples/tools/howtos/extract_regions.html>`_
to view online example.
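A minimal sketch with a tiny labelled image:
>>> import numpy as np
>>> import porespy as ps
>>> regions = np.array([[1, 1, 0], [0, 2, 2], [0, 0, 3]])
>>> mask = ps.tools.extract_regions(regions, labels=2, trim=False)
>>> print(mask.astype(int))
[[0 0 0]
 [0 1 1]
 [0 0 0]]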
"""
if type(labels) is int:
labels = [labels]
s = spim.find_objects(regions)
im_new = np.zeros_like(regions)
x_min, y_min, z_min = np.inf, np.inf, np.inf
x_max, y_max, z_max = 0, 0, 0
for i in labels:
im_new[s[i - 1]] = regions[s[i - 1]] == i
x_min, x_max = min(s[i - 1][0].start, x_min), max(s[i - 1][0].stop, x_max)
y_min, y_max = min(s[i - 1][1].start, y_min), max(s[i - 1][1].stop, y_max)
if regions.ndim == 3:
z_min, z_max = min(s[i - 1][2].start, z_min), max(s[i - 1][2].stop, z_max)
if trim:
if regions.ndim == 3:
bbox = bbox_to_slices([x_min, y_min, z_min, x_max, y_max, z_max])
else:
bbox = bbox_to_slices([x_min, y_min, x_max, y_max])
im_new = im_new[bbox]
return im_new
def _check_for_singleton_axes(im): # pragma: no cover
r"""
Checks for whether the input image contains singleton axes and logs
a proper warning in case found.
Parameters
----------
im : ndarray
Input image.
"""
if im.ndim != im.squeeze().ndim:
logger.warning("Input image conains a singleton axis. Reduce"
" dimensionality with np.squeeze(im) to avoid"
" unexpected behavior.")
|
11458303
|
import pytest
import jira.resources
MOCK_URL = "http://customized-jira.com/rest/"
def url_test_case(example_url: str):
return f"{MOCK_URL}{example_url}"
class TestResource:
@pytest.mark.parametrize(
["example_url", "expected_class"],
# fmt: off
[
(url_test_case("api/latest/issue/JRA-1330"), jira.resources.Issue),
(url_test_case("api/latest/project/BULK"), jira.resources.Project),
(url_test_case("api/latest/project/IMG/role/10002"), jira.resources.Role),
(url_test_case("plugin-resource/4.5/json/getMyObject"), jira.resources.UnknownResource),
(url_test_case("group?groupname=bla"), jira.resources.Group),
(url_test_case("user?username=bla"), jira.resources.User), # Jira Server / Data Center
(url_test_case("user?accountId=bla"), jira.resources.User), # Jira Cloud
],
# fmt: on
ids=[
"issue",
"project",
"role",
"unknown_resource",
"group",
"user",
"user_cloud",
],
)
def test_cls_for_resource(self, example_url, expected_class):
"""Test the regex recognizes the right class for a given URL."""
assert jira.resources.cls_for_resource(example_url) == expected_class
|
11458334
|
from .task import Task
from .cpupool import pin, CPUPool, is_runtime_ext_enabled
from .multi_stream import MultiStreamModule
from .runtime_utils import get_core_list_of_node_id
|
11458337
|
self.description = "a package conflicts with itself"
sp1 = pmpkg("pkg1")
sp1.conflicts = ["pkg1"]
self.addpkg2db("sync", sp1);
sp2 = pmpkg("pkg2", "1.0-2")
self.addpkg2db("sync", sp2)
self.args = "-S %s" % " ".join([p.name for p in (sp1, sp2)])
self.addrule("PACMAN_RETCODE=0")
self.addrule("PKG_EXIST=pkg1")
self.addrule("PKG_EXIST=pkg2")
self.addrule("PKG_VERSION=pkg2|1.0-2")
|
11458342
|
from datetime import datetime, timedelta
from gremlin_python.process.graph_traversal import select, has, unfold
from gremlin_python.process.traversal import P
def get_timerange_condition(g, start_hour=16, end_hour=18, limit=1000):
dates = (
g.E()
.hasLabel("visited")
.limit(limit)
.values("ts")
.fold()
.as_("timestamps")
.project("start", "end")
.by(select("timestamps").unfold().min())
.by(select("timestamps").unfold().max())
).next()
start = dates["start"].replace(hour=start_hour, minute=0, second=0)
end = dates["end"].replace(hour=start_hour, minute=0, second=0)
return [
has(
'ts',
P.between(
start + timedelta(days=days),
start + timedelta(days=days) + timedelta(hours=end_hour - start_hour)
)
)
for days in range((end - start).days)
]
def get_user_device_statistics(g, dt_conditions, limit=10000):
return (
g.E().hasLabel("visited").or_(*dt_conditions)
.limit(limit).outV().fold()
.project("type", "device", "browser")
.by(
unfold().unfold().groupCount().by("type")
)
.by(
unfold().unfold().groupCount().by("device")
)
.by(
unfold().unfold().groupCount().by("browser")
)
)
|
11458343
|
import numpy as np
from fastai.text import SortSampler, SortishSampler
def test_sort_sampler_sorts_all_descending():
bs = 4
n = bs*100
data = 2 * np.arange(n)
samp = list(SortSampler(data, lambda i: data[i]))
# The sample is a permutation of the indices.
assert sorted(samp) == list(range(n))
# And that "permutation" is for descending data order.
assert all(s1 > s2 for s1, s2 in zip(samp, samp[1:]))
def test_sortish_sampler_sorts_each_batch_descending():
bs = 4
n = bs*100
data = 2 * np.arange(n)
samp = list(SortishSampler(data, lambda i: data[i], bs))
# The sample is a permutation of the indices.
assert sorted(samp) == list(range(n))
# And that permutation is kind of reverse sorted.
assert all(
s1 > s2 or (i+1) % bs == 0 # don't check batch boundaries
for i, (s1, s2) in enumerate(zip(samp, samp[1:]))
)
assert samp[0] == max(samp)
|
11458375
|
from torch.utils.data import Dataset
import torch
import numpy as np
from sklearn.utils import check_array
class FastTensorDataLoader:
"""
A DataLoader-like object for a set of tensors that can be much faster than
TensorDataset + DataLoader because dataloader grabs individual indices of
the dataset and calls cat (slow).
Source: https://discuss.pytorch.org/t/dataloader-much-slower-than-manual-batching/27014/6
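Example (a minimal sketch; the tensor shapes below are illustrative
assumptions, not part of the original API):
>>> x = torch.randn(100, 8)
>>> y = torch.randint(0, 2, (100,))
>>> loader = FastTensorDataLoader(x, y, batch_size=32, shuffle=True)
>>> len(loader)  # number of batches, including the final partial one
4
>>> xb, yb = next(iter(loader))
>>> xb.shape
torch.Size([32, 8])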
"""
def __init__(self, *tensors, batch_size=32, shuffle=False):
"""
Initialize a FastTensorDataLoader.
:param *tensors: tensors to store. Must have the same length @ dim 0.
:param batch_size: batch size to load.
:param shuffle: if True, shuffle the data *in-place* whenever an
iterator is created out of this object.
:returns: A FastTensorDataLoader.
"""
assert all(t.shape[0] == tensors[0].shape[0] for t in tensors)
self.tensors = tensors
self.dataset_len = self.tensors[0].shape[0]
self.batch_size = batch_size
self.shuffle = shuffle
# Calculate # batches
n_batches, remainder = divmod(self.dataset_len, self.batch_size)
if remainder > 0:
n_batches += 1
self.n_batches = n_batches
def __iter__(self):
if self.shuffle:
r = torch.randperm(self.dataset_len)
self.tensors = [t[r] for t in self.tensors]
self.i = 0
return self
def __next__(self):
if self.i >= self.dataset_len:
raise StopIteration
batch = tuple(t[self.i:self.i+self.batch_size] for t in self.tensors)
self.i += self.batch_size
return batch
def __len__(self):
return self.n_batches
class PredictDataset(Dataset):
"""
Format for numpy array
Parameters
----------
X : 2D array
The input matrix
"""
def __init__(self, x):
self.x = x
def __len__(self):
return len(self.x)
def __getitem__(self, index):
x = self.x[index]
return x
def create_dataloaders(X_train, y_train, eval_set, batch_size):
"""
Create dataloaders with or without subsampling depending on weights and balanced.
Parameters
----------
X_train : np.ndarray
Training data
y_train : np.array
Mapped Training targets
eval_set : list of tuple
List of eval tuple set (X, y)
batch_size : int
how many samples per batch to load
Returns
-------
train_dataloader, valid_dataloader : torch.DataLoader, torch.DataLoader
Training and validation dataloaders
"""
X_train = torch.from_numpy(X_train).float()
y_train = torch.from_numpy(y_train)
train_dataloader = FastTensorDataLoader(X_train, y_train, batch_size=batch_size, shuffle=True)
valid_dataloaders = []
for X, y in eval_set:
X = torch.from_numpy(X).float()
y = torch.from_numpy(y)
valid_dataloaders.append(FastTensorDataLoader(X, y, batch_size=batch_size, shuffle=False))
return train_dataloader, valid_dataloaders
def validate_eval_set(eval_set, eval_name, X_train, y_train):
"""Check if the shapes of eval_set are compatible with (X_train, y_train).
Parameters
----------
eval_set : list of tuple
List of eval tuple set (X, y).
The last one is used for early stopping
eval_name : list of str
List of eval set names.
X_train : np.ndarray
Train owned products
y_train : np.array
Train targeted products
Returns
-------
eval_names : list of str
Validated list of eval_names.
eval_set : list of tuple
Validated list of eval_set.
"""
eval_name = eval_name or [f"val_{i}" for i in range(len(eval_set))]
assert len(eval_set) == len(
eval_name
), "eval_set and eval_name have not the same length"
if len(eval_set) > 0:
assert all(
len(elem) == 2 for elem in eval_set
), "Each tuple of eval_set need to have two elements"
for name, (X, y) in zip(eval_name, eval_set):
check_array(X)
msg = (
f"Dimension mismatch between X_{name} "
+ f"{X.shape} and X_train {X_train.shape}"
)
assert len(X.shape) == len(X_train.shape), msg
msg = (
f"Dimension mismatch between y_{name} "
+ f"{y.shape} and y_train {y_train.shape}"
)
assert len(y.shape) == len(y_train.shape), msg
msg = (
f"Number of columns is different between X_{name} "
+ f"({X.shape[1]}) and X_train ({X_train.shape[1]})"
)
assert X.shape[1] == X_train.shape[1], msg
if len(y_train.shape) == 2:
msg = (
f"Number of columns is different between y_{name} "
+ f"({y.shape[1]}) and y_train ({y_train.shape[1]})"
)
assert y.shape[1] == y_train.shape[1], msg
msg = (
f"You need the same number of rows between X_{name} "
+ f"({X.shape[0]}) and y_{name} ({y.shape[0]})"
)
assert X.shape[0] == y.shape[0], msg
return eval_name, eval_set
def define_device(device_name):
"""
Define the device to use during training and inference.
If auto it will detect automatically whether to use cuda or cpu
Parameters
----------
device_name : str
Either "auto", "cpu" or "cuda"
Returns
-------
str
Either "cpu" or "cuda"
"""
if device_name == "auto":
if torch.cuda.is_available():
return "cuda"
else:
return "cpu"
elif device_name == "cuda" and not torch.cuda.is_available():
return "cpu"
else:
return device_name
def normalize_reg_label(label, mu, std):
norm_label = ((label - mu) / std).astype(np.float32)
norm_label = norm_label.reshape(-1, 1)
return norm_label
|
11458382
|
from configs import PruneCartpoleConfig as student_config
from configs import CartpoleConfig as dense_config
from model import CartPoleDQNTarget, StudentCartpole
from utils.plot_utils import plot_graph
from utils.logger_utils import get_logger
from Cartpole.evaluate_cartpole import evaluate_cartepole as evaluate
from train import fit_supervised
from Cartpole.accumulate_experience_cartpole import accumulate_experience_cartpole
from prune import iterative_pruning_policy_distilliation
from argparse import ArgumentParser
from utils.tensorflow_utils import calculate_redundancy
from collections import deque
from Cartpole.copy_weights_cartpole import copy_weights
FLAGS = 0
def check_convergence(info):
diff_temp = []
for i in range(len(info) - 1):
diff_temp.append(info[i] - info[i+1])
mean_diff = sum(diff_temp) / len(diff_temp)
if mean_diff < 0.05: # a mean change of less than 0.05 percentage points in size counts as a converged model
return True
else:
return False
def main():
# ----------------- Setting initial variables Section -----------------
logger = get_logger(FLAGS.PoPS_dir + "/PoPS_ITERATIVE")
logger.info(" ------------- START: -------------")
logger.info("Setting initial data structures")
accuracy_vs_size = [[], []]
logger.info("Loading models")
teacher = CartPoleDQNTarget(input_size=dense_config.input_size, output_size=dense_config.output_size)
teacher.load_model(path=FLAGS.teacher_path) # load teacher
logger.info("----- evaluating teacher -----")
print("----- evaluating teacher -----")
teacher_score = evaluate(agent=teacher, n_epoch=FLAGS.eval_epochs)
logger.info("----- teacher evaluated with {} ------".format(teacher_score))
print("----- teacher evaluated with {} -----".format(teacher_score))
prune_step_path = FLAGS.PoPS_dir + "/prune_step_"
policy_step_path = FLAGS.PoPS_dir + "/policy_step_"
initial_path = policy_step_path + "0"
logger.info("creating policy step 0 model, which is identical in size to the original model")
copy_weights(output_path=initial_path, teacher_path=FLAGS.teacher_path) # in order to create the initial model
compressed_agent = StudentCartpole(input_size=student_config.input_size,
output_size=student_config.output_size,
model_path=initial_path,
tau=student_config.tau,
pruning_freq=student_config.pruning_freq,
sparsity_end=student_config.sparsity_end,
target_sparsity=student_config.target_sparsity)
compressed_agent.load_model()
initial_size = compressed_agent.get_number_of_nnz_params()
accuracy_vs_size[0].append(initial_size)
accuracy_vs_size[1].append(teacher_score)
initial_number_of_params_at_each_layer = compressed_agent.get_number_of_nnz_params_per_layer()
initial_number_of_nnz = sum(initial_number_of_params_at_each_layer)
converge = False
iteration = 0
convergence_information = deque(maxlen=2)
convergence_information.append(100)
precent = 100
arch_type = 0
last_measure = initial_size
while not converge:
iteration += 1
print("----- Pruning Step {} -----".format(iteration))
logger.info(" ----- Pruning Step {} -----".format(iteration))
path_to_save_pruned_model = prune_step_path + str(iteration)
# ----------------- Pruning Section -----------------
if arch_type == 2:
arch_type = 3 # special arch_type for prune-oriented learning rate
sparsity_vs_accuracy = iterative_pruning_policy_distilliation(logger=logger, agent=compressed_agent,
target_agent=teacher,
iterations=FLAGS.iterations,
config=student_config,
best_path=path_to_save_pruned_model,
arch_type=arch_type,
lower_bound=student_config.LOWER_BOUND,
accumulate_experience_fn=accumulate_experience_cartpole,
evaluate_fn=evaluate,
objective_score=student_config.OBJECTIVE_SCORE)
plot_graph(data=sparsity_vs_accuracy, name=FLAGS.PoPS_dir + "/initial size {}%, Pruning_step number {}"
.format(precent, iteration), figure_num=iteration)
# loading model which has reasonable score with the highest sparsity
compressed_agent.load_model(path_to_save_pruned_model)
# ----------------- Measuring redundancy Section -----------------
# the amount of parameters that are not zero at each layer
nnz_params_at_each_layer = compressed_agent.get_number_of_nnz_params_per_layer()
# the amount of parameters that are not zero
nnz_params = sum(nnz_params_at_each_layer)
# redundancy is the fraction of parameters we don't need; nnz_params / initial_number_of_nnz is the fraction we keep, so take the complement
redundancy = (1 - nnz_params / initial_number_of_nnz) * 100
print("----- Pruning Step {} finished, got {}% redundancy in net params -----"
.format(iteration, redundancy))
logger.info("----- Pruning Step {} finished , got {}% redundancy in net params -----"
.format(iteration, redundancy))
logger.info("----- Pruning Step {} finished with {} NNZ params at each layer"
.format(iteration, nnz_params_at_each_layer))
print(" ----- Evaluating redundancy at each layer Step {}-----".format(iteration))
logger.info(" ----- Evaluating redundancy at each layer Step {} -----".format(iteration))
redundancy_at_each_layer = calculate_redundancy(initial_nnz_params=initial_number_of_params_at_each_layer,
next_nnz_params=nnz_params_at_each_layer)
logger.info("----- redundancy for each layer at step {} is {} -----".format(iteration, redundancy_at_each_layer))
if iteration == 1:
redundancy_at_each_layer = [0.83984375, 0.8346405029296875, 0.83795166015625, 0.83984375]
# ----------------- Policy distillation Section -----------------
print(" ----- Creating Model with size according to the redundancy at each layer ----- ")
logger.info("----- Creating Model with size according to the redundancy at each layer -----")
policy_distilled_path = policy_step_path + str(iteration)
# creating the compact model where every layer size is determined by the redundancy measure
compressed_agent = StudentCartpole(input_size=student_config.input_size,
output_size=student_config.output_size,
model_path=policy_distilled_path,
tau=student_config.tau,
redundancy=redundancy_at_each_layer,
pruning_freq=student_config.pruning_freq,
sparsity_end=student_config.sparsity_end,
target_sparsity=student_config.target_sparsity,
last_measure=last_measure)
nnz_params_at_each_layer = compressed_agent.get_number_of_nnz_params_per_layer()
logger.info("----- Step {} ,Created Model with {} NNZ params at each layer"
.format(iteration, nnz_params_at_each_layer))
iterative_size = compressed_agent.get_number_of_nnz_params()
last_measure = iterative_size
precent = (iterative_size / initial_size) * 100
convergence_information.append(precent)
print(" ----- Step {}, Created Model with size {} which is {}% from original size ----- "
.format(iteration, iterative_size, precent))
logger.info("----- Created Model with size {} which is {}% from original size -----"
.format(iterative_size, precent))
# scheduling the right learning rate for the size of the model
if precent > 40:
arch_type = 0
elif 10 <= precent <= 40:
arch_type = 1
else:
arch_type = 2
print(" ----- policy distilling Step {} ----- ".format(iteration))
logger.info("----- policy distilling Step {} -----".format(iteration))
fit_supervised(logger=logger, arch_type=arch_type, student=compressed_agent, teacher=teacher,
n_epochs=FLAGS.n_epoch, evaluate_fn=evaluate,
accumulate_experience_fn=accumulate_experience_cartpole,
lower_score_bound=student_config.LOWER_BOUND, objective_score=student_config.OBJECTIVE_SCORE)
policy_distilled_score = evaluate(agent=compressed_agent, n_epoch=FLAGS.eval_epochs)
compressed_agent.reset_global_step()
print(" ----- policy distilling Step {} finished with score {} ----- "
.format(iteration, policy_distilled_score))
logger.info("----- policy distilling Step {} finished with score {} -----"
.format(iteration, policy_distilled_score))
# checking convergence
converge = check_convergence(convergence_information)
# for debugging purposes
accuracy_vs_size[0].append(iterative_size)
accuracy_vs_size[1].append(policy_distilled_score)
plot_graph(data=accuracy_vs_size, name=FLAGS.PoPS_dir + "/accuracy_vs_size", figure_num=iteration + 1, xaxis='NNZ params', yaxis='Accuracy')
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument(
'--teacher_path',
type=str,
default=dense_config.ready_path,
help=' path where to load initial model.')
parser.add_argument(
'--PoPS_dir',
type=str,
default=student_config.iterative_PoPS,
help='Results Directory.')
parser.add_argument(
'--model_path',
type=str,
default=dense_config.model_path,
help=' Directory where to load initial model.')
parser.add_argument(
'--n_epoch',
type=int,
default=student_config.n_epoch,
help='number of epochs to do policy distillation')
parser.add_argument(
'--iterations',
type=int,
default=student_config.n_epoch,
help='number of iterations to do pruning')
parser.add_argument(
'--batch_size',
type=int,
default=dense_config.n_epoch,
help='batch size')
parser.add_argument(
'--eval_epochs',
type=int,
default=100,
help='number of epochs to evaluate the models during the process')
FLAGS, unparsed = parser.parse_known_args()
main()
|
11458440
|
import time, pytest
import numpy as np
from scipy import linalg
from scipy.sparse import linalg as sparselinalg
def test_eigs():
N = 1000
k = 5
ncv = 200
A = np.random.randn(N, N)
print("\n----- test_eigs -----")
print("----- Dimension of matrix A: %d -----" % N)
print("scipy.sparse.linalg.eigs time: ")
start = time.time()
righteigvals, righteigvectors = sparselinalg.eigs(A, k, ncv=ncv)
end = time.time()
print("right eigens: ", end - start)
sortidx = (righteigvals.conj() * righteigvals).real.argsort()
righteigvals = righteigvals[sortidx]
righteigvectors = righteigvectors[:, sortidx]
start = time.time()
lefteigvals, lefteigvectors = sparselinalg.eigs(A.T, k, ncv=ncv)
end = time.time()
sortidx = (lefteigvals.conj() * lefteigvals).real.argsort()
lefteigvals = lefteigvals[sortidx]
lefteigvectors = lefteigvectors[:, sortidx]
print("left eigens: ", end - start)
assert np.allclose(lefteigvals, righteigvals)
orthogonals = lefteigvectors.T.dot(righteigvectors)
mask = np.ones((k, k)) - np.eye(k)
assert np.allclose(orthogonals * mask, np.zeros((k, k)))
assert np.allclose((righteigvectors.conj() * righteigvectors).sum(axis=0),
np.ones(k))
assert np.allclose((lefteigvectors.conj() * lefteigvectors).sum(axis=0),
np.ones(k))
def test_Gong():
D = 20
d = 2
#A = np.random.randn(d, D, D) + 1.j * np.random.randn(d, D, D)
A = np.random.randn(d, D, D)
Gong = np.einsum("kij,kmn->imjn", A, A.conj()).reshape(D**2, D**2)
righteigvals, righteigvectors = sparselinalg.eigs(Gong, k=5, ncv=100)
print("\n", righteigvals)
print(righteigvals.conj() * righteigvals)
maxidx = (righteigvals.conj() * righteigvals).real.argmax()
print("maxidx =", maxidx)
#print(righteigvectors[:, maxidx])
assert np.allclose(righteigvals[maxidx].imag, 0.0)
assert np.allclose(righteigvectors[:, maxidx].imag, np.zeros(D**2))
lefteigvals, lefteigvectors = sparselinalg.eigs(Gong.T, k=5, ncv=100)
print("\n", lefteigvals)
print(lefteigvals.conj() * lefteigvals)
maxidx = (lefteigvals.conj() * lefteigvals).real.argmax()
print("maxidx =", maxidx)
#print(lefteigvectors[:, maxidx])
assert np.allclose(lefteigvals[maxidx].imag, 0.0)
assert np.allclose(lefteigvectors[:, maxidx].imag, np.zeros(D**2))
def test_eigsh():
N = 1000
k = 5
ncv = 100
A = np.random.randn(N, N)
A = 0.5 * (A + A.T)
print("\n----- test_eigsh -----")
print("----- Dimension of real symmetric matrix A: %d -----" % N)
start = time.time()
eigvals_full, eigvectors_full = linalg.eigh(A)
end = time.time()
print("scipy.linalg.eigh time: ", end - start)
start = time.time()
eigvals, eigvectors = sparselinalg.eigsh(A, k, which="SA", ncv=ncv)
end = time.time()
print("scipy.sparse.linalg.eigsh time: ", end - start)
assert np.allclose(eigvals, eigvals_full[:k])
for i in range(k):
assert np.allclose(eigvectors[:, i], eigvectors_full[:, i]) or \
np.allclose(eigvectors[:, i], -eigvectors_full[:, i])
@pytest.mark.skip(reason="Incorrect behavior of the scipy sparse linear system "
"solvers when the matrix dimension is large.")
def test_linsys_fullrank():
#from krypy.linsys import LinearSystem, Gmres
N = 20
A = np.random.randn(N, N)
b = np.random.randn(N)
print("\n----- test_linsys_fullrank -----")
print("----- Dimension of matrix A: %d -----" % N)
#linear_system = LinearSystem(A, b)
#solver = Gmres(linear_system)
#x_krypy = solver.xk[:, 0]
#print("Krypy gmres time: ", end - start)
#assert np.allclose(A.dot(x_krypy), b)
start = time.time()
x, code = sparselinalg.gmres(A, b, tol=1e-12, atol=1e-12)
end = time.time()
print("code: ", code)
print("scipy gmres time: ", end - start)
assert np.allclose(A.dot(x), b)
def test_linsys_lowrank():
N = 1000
A = np.random.randn(N, N)
ncv = 100
print("\n----- test_linsys_lowrank -----")
print("----- Dimension of matrix A: %d -----" % N)
righteigval, righteigvector = sparselinalg.eigs(A, k=1, ncv=ncv, which="LM")
lefteigval, lefteigvector = sparselinalg.eigs(A.T, k=1, ncv=ncv, which="LM")
assert np.allclose(righteigval, lefteigval)
eigval = righteigval
righteigvector = righteigvector[:, 0]
lefteigvector = lefteigvector[:, 0]
lefteigvector /= np.dot(lefteigvector, righteigvector)
print("scipy gmres time: ")
Aprime = A - eigval * np.eye(N)
b = np.random.randn(N)
b = b - righteigvector * np.dot(lefteigvector, b)
start = time.time()
x, code = sparselinalg.gmres(Aprime, b, tol=1e-12, atol=1e-12)
end = time.time()
print(end - start)
assert np.allclose(Aprime.dot(x), b)
assert np.allclose(np.dot(lefteigvector, x), 0.0)
ATprime = A.T - eigval * np.eye(N)
b = np.random.randn(N)
b = b - lefteigvector * np.dot(righteigvector, b)
start = time.time()
x, code = sparselinalg.gmres(ATprime, b, tol=1e-12, atol=1e-12)
end = time.time()
print(end - start)
assert np.allclose(ATprime.dot(x), b)
assert np.allclose(np.dot(righteigvector, x), 0.0)
|
11458462
|
import __builtin__
import compileall
import os
import py_compile
import shutil
import subprocess
import sys
import textwrap
import unittest
from test.test_support import TESTFN, is_jython, run_unittest, temp_cwd
class TestMtime(unittest.TestCase):
def test_mtime_compile(self):
"""
This test exercises the mtime annotation that is now stored in Jython
compiled files. CPython already stores an mtime in its pyc files. To
exercise this functionality, I am writing a py file, compiling it,
setting the os modified time to a very low value on the compiled file,
then changing the py file after a small sleep. On CPython, this would
still cause a re-compile. In Jython before this fix it would not.
See http://bugs.jython.org/issue1024
"""
import time
os.mkdir(TESTFN)
try:
mod = "mod1"
source_path = os.path.join(TESTFN, "%s.py" % mod)
if is_jython:
compiled_path = os.path.join(TESTFN, "%s$py.class" % mod)
else:
compiled_path = os.path.join(TESTFN, "%s.pyc" % mod)
fp = open(source_path, "w")
fp.write("def foo(): return 'first'\n")
fp.close()
py_compile.compile(source_path)
#sleep so that the internal mtime is older for the next source write.
time.sleep(1)
fp = open(source_path, "w")
fp.write("def foo(): return 'second'\n")
fp.close()
# make sure the source file's mtime is artificially younger than
# the compiled path's mtime.
os.utime(source_path, (1,1))
sys.path.append(TESTFN)
import mod1
self.assertEquals(mod1.foo(), 'second')
finally:
shutil.rmtree(TESTFN)
class TestCompileall(unittest.TestCase):
def write_code(self, package, name, code):
with open(os.path.join(package, name), "w") as f:
f.write(textwrap.dedent(code))
def test_compileall(self):
with temp_cwd():
PACKAGE = os.path.realpath("./greetings")
PYC_GREETER = os.path.join(PACKAGE, "greeter.pyc")
PYCLASS_GREETER = os.path.join(PACKAGE, "greeter$py.class")
PYCLASS_TEST = os.path.join(PACKAGE, "test$py.class")
os.mkdir(PACKAGE)
self.write_code(
PACKAGE, "greeter.py",
"""
def greet():
print 'Hello world!'
""")
self.write_code(
PACKAGE, "test.py",
"""
from greeter import greet
greet()
""")
# pretend we have a Python bytecode compiler by touching this file
open(PYC_GREETER, "a").close()
compileall.compile_dir(PACKAGE, quiet=True)
self.assertTrue(os.path.exists(PYC_GREETER)) # still exists
self.assertTrue(os.path.exists(PYCLASS_TEST)) # along with these new compiled files
self.assertTrue(os.path.exists(PYCLASS_GREETER))
# verify we can work with just compiled files
os.unlink(os.path.join(PACKAGE, "greeter.py"))
self.assertEqual(
subprocess.check_output([sys.executable, os.path.join(PACKAGE, "test.py")]).rstrip(),
"Hello world!")
def test_main():
run_unittest(TestMtime, TestCompileall)
if __name__ == "__main__":
test_main()
|
11458479
|
import uuid
import aioredis # type: ignore
import pytest
from pytest_mock import MockFixture
from requests import Response # type: ignore
from starlette.testclient import TestClient
from example.context import app
from fast_tools.context import ContextBaseModel, HeaderHelper
from .conftest import AnyStringWith # type: ignore
class TestContext:
def test_context(self, mocker: MockFixture) -> None:
with TestClient(app) as client:
response: Response = client.get("/")
resp_dict: dict = response.json()
message_dict: dict = resp_dict["message"]
for key in ["request_id", "ip", "user_agent"]:
assert key in message_dict
with pytest.raises(RuntimeError) as e:
class NewContextModel(ContextBaseModel):
request_id: str = HeaderHelper.i("X-Request-Id", default_func=lambda request: str(uuid.uuid4()))
assert e.value.args[0] == "key:HeaderHelper:X-Request-Id already exists"
|
11458515
|
import logging
from fastapi import BackgroundTasks, FastAPI
from pubnub.pnconfiguration import PNConfiguration
from pubnub.pubnub_asyncio import PubNubAsyncio
import pubnub as pn
app = FastAPI()
pnconfig = PNConfiguration()
pnconfig.publish_key = "demo"
pnconfig.subscribe_key = "demo"
pnconfig.uuid = "UUID-PUB"
CHANNEL = "the_guide"
pubnub = PubNubAsyncio(pnconfig)
pn.set_stream_logger('pubnub', logging.DEBUG)
async def write_notification(email: str, message=""):
with open("/tmp/log.txt", mode="w") as email_file:
content = f"notification for {email}: {message}"
email_file.write(content)
await pubnub.publish().channel(CHANNEL).message(email).future()
@app.get("/send-notification/{email}")
async def send_notification(email: str, background_tasks: BackgroundTasks):
background_tasks.add_task(write_notification, email, message="some notification")
return {"message": "Notification sent in the background"}
@app.on_event("shutdown")
async def stop_pubnub():
print("Closing Application")
await pubnub.stop()
|
11458527
|
import os
import pickle
import h5py
import torch
import torch_em
import torch_em.shallow2deep as shallow2deep
from torch_em.shallow2deep.prepare_shallow2deep import _get_filters, _apply_filters
from torch_em.util.util import get_trainer
from tqdm import trange
TEST_OUT = "./test_data"
def require_rf(path):
rf_path = os.path.join(TEST_OUT, "rf_0.pkl")
if os.path.exists(rf_path):
return rf_path
raw_trafo = torch_em.transform.raw.normalize
label_trafo = shallow2deep.BoundaryTransform(ndim=2)
shallow2deep.prepare_shallow2deep(path, "volumes/raw", path, "volumes/labels/neuron_ids",
patch_shape_min=(1, 1000, 1000), patch_shape_max=(1, 1024, 1024), n_forests=1,
n_threads=1, output_folder=TEST_OUT, raw_transform=raw_trafo,
label_transform=label_trafo, is_seg_dataset=True, ndim=2)
return rf_path
def _predict_rf(path, rf_path):
out_path = os.path.join(TEST_OUT, "data.h5")
with h5py.File(out_path, "a") as f:
if "rf_pred" in f:
return out_path
print("Run prediction with rf...")
filters_and_sigmas = _get_filters(ndim=2, filters_and_sigmas=None)
with h5py.File(path, "r") as f:
raw = f["volumes/raw"][:]
with open(rf_path, "rb") as f:
rf = pickle.load(f)
with h5py.File(out_path, "a") as f:
ds_out = f.create_dataset("rf_pred", shape=raw.shape, dtype="float32", chunks=(1, 512, 512))
for z in trange(raw.shape[0], desc="Predict rf"):
inp = raw[z].astype("float") / raw[z].max()
feats = _apply_filters(inp, filters_and_sigmas)
pred = rf.predict_proba(feats)[:, 1].reshape(inp.shape)
ds_out[z] = pred
return out_path
def _predict_enhancer(path):
with h5py.File(path, "r") as f:
if "enhancer_pred" in f:
return
with torch.no_grad():
model = get_trainer("./checkpoints/isbi2d").model
model.eval()
model.to("cpu")
with h5py.File(path, "a") as f:
assert "rf_pred" in f
ds_rf = f["rf_pred"]
ds_out = f.require_dataset("enhancer_pred", shape=ds_rf.shape, dtype="float32", chunks=(1, 512, 512))
for z in trange(ds_rf.shape[0], desc="Predict enhancer"):
inp = ds_rf[z][:1024, :1024]
inp = torch.from_numpy(inp[None, None])
pred = model(inp)
ds_out[z, :1024, :1024] = pred
def predict_s2d(path, rf_path):
test_path = _predict_rf(path, rf_path)
_predict_enhancer(test_path)
return test_path
def check_prediction(path, test_path):
with h5py.File(path, "r") as f:
raw = f["volumes/raw"][:]
with h5py.File(test_path, "r") as f:
rf = f["rf_pred"][:]
enhancer = f["enhancer_pred"][:]
import napari
v = napari.Viewer()
v.add_image(raw)
v.add_image(rf)
v.add_image(enhancer)
napari.run()
def main():
os.makedirs(TEST_OUT, exist_ok=True)
# path = "/scratch/pape/cremi/sampleA.h5"
path = "/home/pape/Work/data/cremi/sample_A_20160501.hdf"
rf_path = require_rf(path)
test_path = predict_s2d(path, rf_path)
check_prediction(path, test_path)
if __name__ == "__main__":
main()
|
11458534
|
import logging
from pgdrive.component.algorithm.BIG import BIG
from pgdrive.component.road.road_network import RoadNetwork
from pgdrive.tests.vis_block.vis_block_base import TestBlock
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
test = TestBlock(True)
global_network = RoadNetwork()
big = BIG(2, 5, global_network, test.render, test.world, 1010)
# Since some change to generate function, specify the block num to the big
big.block_num = len("CrTRXOS")
big._block_sequence = "CrTRXOS"
test.vis_big(big)
# big.generate(BigGenerateMethod.BLOCK_NUM, 10)
test.run()
|
11458549
|
import numpy as np
import math
import torch
from torch import nn
from torch.nn import functional as F
from models.utils import loss_functions as lf, modules
from models.conv.nets import ConvLayers,DeconvLayers
from models.fc.nets import MLP, MLP_gates
from models.fc.layers import fc_layer,fc_layer_split, fc_layer_fixed_gates
from utils import get_data_loader
from models.cl.continual_learner import ContinualLearner
class AutoEncoder(ContinualLearner):
"""Class for variational auto-encoder (VAE) models with classifier added to top of encoder."""
def __init__(self, image_size, image_channels, classes,
# -conv-layers
conv_type="standard", depth=0, start_channels=64, reducing_layers=3, conv_bn=True, conv_nl="relu",
num_blocks=2, global_pooling=False, no_fnl=True, convE=None, conv_gated=False,
# -fc-layers
fc_layers=3, fc_units=1000, h_dim=400, fc_drop=0, fc_bn=False, fc_nl="relu", excit_buffer=False,
fc_gated=False,
# -prior
z_dim=20, prior="standard", n_modes=1, per_class=True,
# -decoder
recon_loss='BCE', network_output="sigmoid", deconv_type="standard",
dg_gates=False, dg_prop=0., device='cuda',
# -classifer
classifier=True, classify_opt="beforeZ", lamda_pl=1., neg_samples="all-so-far"):
# Set configurations for setting up the model
super().__init__()
self.label = "VAE_classifier"
self.image_size = image_size
self.image_channels = image_channels
self.fc_layers = fc_layers
self.z_dim = z_dim
self.h_dim = h_dim
self.fc_units = fc_units
self.fc_drop = fc_drop
self.depth = depth if convE is None else convE.depth
# -type of loss to be used for reconstruction
self.recon_loss = recon_loss # options: BCE|MSE
self.network_output = network_output
# Classifier
self.classes = classes
self.classify_opt = classify_opt
self.neg_samples = neg_samples
self.lamda_pl = lamda_pl # weight of classification-loss
# Settings for class-specific gates in fully-connected hidden layers of decoder
self.dg_prop = dg_prop
self.dg_gates = dg_gates if dg_prop>0. else False
self.gate_size = classes if self.dg_gates else 0
# Optimizer (needs to be set before training starts)
self.optimizer = None
self.optim_list = []
# Prior-related parameters (for "vamp-prior" / "GMM")
self.prior = prior
self.per_class = per_class
self.n_modes = n_modes*classes if self.per_class else n_modes
self.modes_per_class = n_modes if self.per_class else None
# Check whether there is at least 1 fc-layer
if fc_layers<1:
raise ValueError("VAE cannot have 0 fully-connected layers!")
######------SPECIFY MODEL------######
##>----Encoder (= q[z|x])----<##
self.convE = ConvLayers(conv_type=conv_type, block_type="basic", num_blocks=num_blocks,
image_channels=image_channels, depth=self.depth, start_channels=start_channels,
reducing_layers=reducing_layers, batch_norm=conv_bn, nl=conv_nl,
output="none" if no_fnl else "normal", global_pooling=global_pooling,
gated=conv_gated) if (convE is None) else convE
self.flatten = modules.Flatten()
#------------------------------calculate input/output-sizes--------------------------------#
self.conv_out_units = self.convE.out_units(image_size)
self.conv_out_size = self.convE.out_size(image_size)
self.conv_out_channels = self.convE.out_channels
if fc_layers<2:
self.fc_layer_sizes = [self.conv_out_units] #--> this results in self.fcE = modules.Identity()
elif fc_layers==2:
self.fc_layer_sizes = [self.conv_out_units, h_dim]
else:
self.fc_layer_sizes = [self.conv_out_units]+[int(x) for x in np.linspace(fc_units, h_dim, num=fc_layers-1)]
real_h_dim = h_dim if fc_layers>1 else self.conv_out_units
#------------------------------------------------------------------------------------------#
self.fcE = MLP(size_per_layer=self.fc_layer_sizes, drop=fc_drop, batch_norm=fc_bn, nl=fc_nl,
excit_buffer=excit_buffer, gated=fc_gated)
# to z
self.toZ = fc_layer_split(real_h_dim, z_dim, nl_mean='none', nl_logvar='none')#, drop=fc_drop)
##>----Classifier----<##
if classifier:
self.units_before_classifier = real_h_dim if self.classify_opt=='beforeZ' else z_dim
self.classifier = fc_layer(self.units_before_classifier, classes, excit_buffer=True, nl='none')
##>----Decoder (= p[x|z])----<##
out_nl = True if fc_layers > 1 else (True if (self.depth > 0 and not no_fnl) else False)
real_h_dim_down = h_dim if fc_layers > 1 else self.convE.out_units(image_size, ignore_gp=True)
if self.dg_gates:
self.fromZ = fc_layer_fixed_gates(
z_dim, real_h_dim_down, batch_norm=(out_nl and fc_bn), nl=fc_nl if out_nl else "none",
gate_size=self.gate_size, gating_prop=dg_prop, device=device
)
else:
self.fromZ = fc_layer(z_dim, real_h_dim_down, batch_norm=(out_nl and fc_bn), nl=fc_nl if out_nl else "none")
fc_layer_sizes_down = self.fc_layer_sizes
fc_layer_sizes_down[0] = self.convE.out_units(image_size, ignore_gp=True)
# -> if 'gp' is used in forward pass, size of first/final hidden layer differs between forward and backward pass
if self.dg_gates:
self.fcD = MLP_gates(
size_per_layer=[x for x in reversed(fc_layer_sizes_down)], drop=fc_drop, batch_norm=fc_bn, nl=fc_nl,
gate_size=self.gate_size, gating_prop=dg_prop, device=device,
output=self.network_output if self.depth==0 else 'normal',
)
else:
self.fcD = MLP(
size_per_layer=[x for x in reversed(fc_layer_sizes_down)], drop=fc_drop, batch_norm=fc_bn, nl=fc_nl,
gated=fc_gated, output=self.network_output if self.depth==0 else 'normal',
)
# to image-shape
self.to_image = modules.Reshape(image_channels=self.convE.out_channels if self.depth>0 else image_channels)
# through deconv-layers
self.convD = DeconvLayers(
image_channels=image_channels, final_channels=start_channels, depth=self.depth,
reducing_layers=reducing_layers, batch_norm=conv_bn, nl=conv_nl, gated=conv_gated,
output=self.network_output, deconv_type=deconv_type,
)
##>----Prior----<##
# -if using the GMM-prior, add its parameters
if self.prior=="GMM":
# -create
self.z_class_means = nn.Parameter(torch.Tensor(self.n_modes, self.z_dim))
self.z_class_logvars = nn.Parameter(torch.Tensor(self.n_modes, self.z_dim))
# -initialize
self.z_class_means.data.normal_()
self.z_class_logvars.data.normal_()
# Flags whether parts of the network are frozen (so they can be set to evaluation mode during training)
self.convE.frozen = False
self.fcE.frozen = False
##------ NAMES --------##
def get_name(self):
convE_label = "{}_".format(self.convE.name) if self.depth>0 else ""
fcE_label = "{}_".format(self.fcE.name) if self.fc_layers>1 else "{}{}_".format("h" if self.depth>0 else "i",
self.conv_out_units)
z_label = "z{}{}".format(self.z_dim, "" if self.prior=="standard" else "-{}{}{}".format(
self.prior, self.n_modes, "pc" if self.per_class else ""
))
class_label = "_c{}{}".format(
self.classes, "" if self.classify_opt=="beforeZ" else self.classify_opt
) if hasattr(self, "classifier") else ""
decoder_label = "_cg{}".format(self.dg_prop) if self.dg_gates else ""
return "{}={}{}{}{}{}".format(self.label, convE_label, fcE_label, z_label, class_label, decoder_label)
@property
def name(self):
return self.get_name()
##------ UTILITIES --------##
def _device(self):
return next(self.parameters()).device
def _is_on_cuda(self):
return next(self.parameters()).is_cuda
##------ LAYERS --------##
def list_init_layers(self):
'''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''
list = []
list += self.convE.list_init_layers()
list += self.fcE.list_init_layers()
if hasattr(self, "classifier"):
list += self.classifier.list_init_layers()
list += self.toZ.list_init_layers()
list += self.fromZ.list_init_layers()
list += self.fcD.list_init_layers()
list += self.convD.list_init_layers()
return list
def layer_info(self):
'''Return list with shape of all hidden layers.'''
# create list with hidden convolutional layers
layer_list = self.convE.layer_info(image_size=self.image_size)
# add output of final convolutional layer (if there was at least one conv-layer and there's fc-layers after)
if (self.fc_layers>0 and self.depth>0):
layer_list.append([self.conv_out_channels, self.conv_out_size, self.conv_out_size])
# add layers of the MLP
if self.fc_layers>1:
for layer_id in range(1, self.fc_layers):
layer_list.append([self.fc_layer_sizes[layer_id]])
return layer_list
##------ FORWARD FUNCTIONS --------##
def encode(self, x):
'''Pass input through feed-forward connections, to get [z_mean], [z_logvar] and [hE].'''
# Forward-pass through conv-layers
image_features = self.flatten(self.convE(x))
# Forward-pass through fc-layers
hE = self.fcE(image_features)
# Get parameters for reparametrization
(z_mean, z_logvar) = self.toZ(hE)
return z_mean, z_logvar, hE
def classify(self, x, reparameterize=True, **kwargs):
'''For input [x] (image or extracted "internal" image features), return all predicted "scores"/"logits".'''
if hasattr(self, "classifier"):
image_features = self.flatten(self.convE(x))
hE = self.fcE(image_features)
if self.classify_opt=="beforeZ":
return self.classifier(hE)
else:
(mu, logvar) = self.toZ(hE)
z = mu if (self.classify_opt=="fromZ" or (not reparameterize)) else self.reparameterize(mu, logvar)
return self.classifier(z)
else:
return None
def reparameterize(self, mu, logvar):
'''Perform "reparametrization trick" to make these stochastic variables differentiable.'''
std = logvar.mul(0.5).exp_()
eps = std.new(std.size()).normal_()#.requires_grad_()
return eps.mul(std).add_(mu)
def decode(self, z, gate_input=None):
'''Decode latent variable activations.
INPUT: - [z] <2D-tensor>; latent variables to be decoded
- [gate_input] <1D-tensor> or <np.ndarray>; for each batch-element in [x] its class-/taskID ---OR---
<2D-tensor>; for each batch-element in [x] a probability for every class-/task-ID
OUTPUT: - [image_recon] <4D-tensor>'''
# -if needed, convert [gate_input] to one-hot vector
if self.dg_gates and (gate_input is not None) and (type(gate_input)==np.ndarray or gate_input.dim()<2):
gate_input = lf.to_one_hot(gate_input, classes=self.gate_size, device=self._device())
# -put inputs through decoder
hD = self.fromZ(z, gate_input=gate_input) if self.dg_gates else self.fromZ(z)
image_features = self.fcD(hD, gate_input=gate_input) if self.dg_gates else self.fcD(hD)
image_recon = self.convD(self.to_image(image_features))
return image_recon
def forward(self, x, gate_input=None, full=False, reparameterize=True, **kwargs):
'''Forward function to propagate [x] through the encoder, reparametrization and decoder.
Input: - [x] <4D-tensor> of shape [batch_size]x[channels]x[image_size]x[image_size]
- [gate_input] <1D-tensor> or <np.ndarray>; for each batch-element in [x] its class-ID (eg, [y]) ---OR---
<2D-tensor>; for each batch-element in [x] a probability for each class-ID (eg, [y_hat])
If [full] is True, output should be a <tuple> consisting of:
- [x_recon] <4D-tensor> reconstructed image (features) in same shape as [x] (or 2 of those: mean & logvar)
- [y_hat] <2D-tensor> with predicted logits for each class
- [mu] <2D-tensor> with either [z] or the estimated mean of [z]
- [logvar] None or <2D-tensor> estimated log(SD^2) of [z]
- [z] <2D-tensor> reparameterized [z] used for reconstruction
If [full] is False, output is simply the predicted logits (i.e., [y_hat]).'''
if full:
# -encode (forward), reparameterize and decode (backward)
mu, logvar, hE = self.encode(x)
z = self.reparameterize(mu, logvar) if reparameterize else mu
gate_input = gate_input if self.dg_gates else None
x_recon = self.decode(z, gate_input=gate_input)
# -classify
if hasattr(self, "classifier"):
if self.classify_opt in ["beforeZ", "fromZ"]:
y_hat = self.classifier(hE) if self.classify_opt=="beforeZ" else self.classifier(mu)
else:
raise NotImplementedError("Classification-option {} not implemented.".format(self.classify_opt))
else:
y_hat = None
# -return
return (x_recon, y_hat, mu, logvar, z)
else:
return self.classify(x, reparameterize=reparameterize) #-> if [full]=False, only forward pass for prediction
def feature_extractor(self, images):
'''Extract "final features" (i.e., after both conv- and fc-layers of forward pass) from provided images.'''
return self.fcE(self.flatten(self.convE(images)))
##------ SAMPLE FUNCTIONS --------##
def sample(self, size, allowed_classes=None, class_probs=None, sample_mode=None, only_x=False, **kwargs):
'''Generate [size] samples from the model. Outputs are tensors (not "requiring grad"), on same device as <self>.
INPUT: - [allowed_classes] <list> of [class_ids] from which to sample
- [class_probs] <list> with for each class the probability it is sampled from it
- [sample_mode] <int> to sample from specific mode of [z]-distr'n, overwrites [allowed_classes]
OUTPUT: - [X] <4D-tensor> generated images / image-features
- [y_used] <ndarray> labels of classes intended to be sampled (using <class_ids>)'''
# set model to eval()-mode
self.eval()
# pick for each sample the prior-mode to be used
if self.prior=="GMM":
if sample_mode is None:
if (allowed_classes is None and class_probs is None) or (not self.per_class):
# -randomly sample modes from all possible modes (and find their corresponding class, if applicable)
sampled_modes = np.random.randint(0, self.n_modes, size)
y_used = np.array(
[int(mode / self.modes_per_class) for mode in sampled_modes]
) if self.per_class else None
else:
if allowed_classes is None:
allowed_classes = [i for i in range(len(class_probs))]
# -sample from modes belonging to [allowed_classes], possibly weighted according to [class_probs]
allowed_modes = [] # -collect all allowed modes
unweighted_probs = [] # -collect unweighted sample-probabilities of those modes
for index, class_id in enumerate(allowed_classes):
allowed_modes += list(range(class_id * self.modes_per_class, (class_id+1)*self.modes_per_class))
if class_probs is not None:
for i in range(self.modes_per_class):
unweighted_probs.append(class_probs[index].item())
mode_probs = None if class_probs is None else [p / sum(unweighted_probs) for p in unweighted_probs]
sampled_modes = np.random.choice(allowed_modes, size, p=mode_probs, replace=True)
y_used = np.array([int(mode / self.modes_per_class) for mode in sampled_modes])
else:
# -always sample from the provided mode
sampled_modes = np.repeat(sample_mode, size)
y_used = np.repeat(int(sample_mode / self.modes_per_class), size) if self.per_class else None
else:
y_used = None
# sample z
if self.prior=="GMM":
prior_means = self.z_class_means
prior_logvars = self.z_class_logvars
# -for each sample to be generated, select the previously sampled mode
z_means = prior_means[sampled_modes, :]
z_logvars = prior_logvars[sampled_modes, :]
with torch.no_grad():
z = self.reparameterize(z_means, z_logvars)
else:
z = torch.randn(size, self.z_dim).to(self._device())
# if no classes are selected yet, but they are needed for the "decoder-gates", select classes to be sampled
if (y_used is None) and (self.dg_gates):
if allowed_classes is None and class_probs is None:
y_used = np.random.randint(0, self.classes, size)
else:
if allowed_classes is None:
allowed_classes = [i for i in range(len(class_probs))]
y_used = np.random.choice(allowed_classes, size, p=class_probs, replace=True)
# decode z into image X
with torch.no_grad():
X = self.decode(z, gate_input=y_used if self.dg_gates else None)
# return samples as [batch_size]x[channels]x[image_size]x[image_size] tensor, plus requested additional info
return X if only_x else (X, y_used)
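    # Usage sketch (arguments are illustrative): calling self.sample(64, allowed_classes=[0, 1, 2])
    # returns 64 generated inputs plus an array with the class-IDs they were intended to be sampled from.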
##------ LOSS FUNCTIONS --------##
def calculate_recon_loss(self, x, x_recon, average=False):
'''Calculate reconstruction loss for each element in the batch.
INPUT: - [x] <tensor> with original input (1st dimension (ie, dim=0) is "batch-dimension")
- [x_recon] (tuple of 2x) <tensor> with reconstructed input in same shape as [x]
- [average] <bool>, if True, loss is average over all pixels; otherwise it is summed
OUTPUT: - [reconL] <1D-tensor> of length [batch_size]'''
batch_size = x.size(0)
if self.recon_loss=="MSE":
# reconL = F.mse_loss(input=x_recon.view(batch_size, -1), target=x.view(batch_size, -1), reduction='none')
# reconL = torch.mean(reconL, dim=1) if average else torch.sum(reconL, dim=1)
reconL = -lf.log_Normal_standard(x=x, mean=x_recon, average=average, dim=-1)
elif self.recon_loss=="BCE":
reconL = F.binary_cross_entropy(input=x_recon.view(batch_size, -1), target=x.view(batch_size, -1),
reduction='none')
reconL = torch.mean(reconL, dim=1) if average else torch.sum(reconL, dim=1)
else:
raise NotImplementedError("Wrong choice for type of reconstruction-loss!")
# --> if [average]=True, reconstruction loss is averaged over all pixels/elements (otherwise it is summed)
# (averaging over all elements in the batch will be done later)
return reconL
def calculate_log_p_z(self, z, y=None, y_prob=None, allowed_classes=None):
        '''Calculate log-likelihood of sampled [z] under the prior distribution.
INPUT: - [z] <2D-tensor> with sampled latent variables (1st dimension (ie, dim=0) is "batch-dimension")
OPTIONS THAT ARE RELEVANT ONLY IF self.per_class IS TRUE:
- [y] None or <1D-tensor> with target-classes (as integers)
- [y_prob] None or <2D-tensor> with probabilities for each class (in [allowed_classes])
- [allowed_classes] None or <list> with class-IDs to use for selecting prior-mode(s)
OUTPUT: - [log_p_z] <1D-tensor> of length [batch_size]'''
if self.prior == "standard":
log_p_z = lf.log_Normal_standard(z, average=False, dim=1) # [batch_size]
if self.prior == "GMM":
## Get [means] and [logvars] of all (possible) modes
allowed_modes = list(range(self.n_modes))
# -if we don't use the specific modes of a target, we could select modes based on list of classes
if (y is None) and (allowed_classes is not None) and self.per_class:
allowed_modes = []
for class_id in allowed_classes:
allowed_modes += list(range(class_id * self.modes_per_class, (class_id + 1) * self.modes_per_class))
            # -calculate/retrieve the means and logvars for the selected modes
prior_means = self.z_class_means[allowed_modes, :]
prior_logvars = self.z_class_logvars[allowed_modes, :]
# -rearrange / select for each batch prior-modes to be used
z_expand = z.unsqueeze(1) # [batch_size] x 1 x [z_dim]
means = prior_means.unsqueeze(0) # 1 x [n_modes] x [z_dim]
logvars = prior_logvars.unsqueeze(0) # 1 x [n_modes] x [z_dim]
## Calculate "log_p_z" (log-likelihood of "reparameterized" [z] based on selected priors)
n_modes = self.modes_per_class if (
((y is not None) or (y_prob is not None)) and self.per_class
) else len(allowed_modes)
a = lf.log_Normal_diag(z_expand, mean=means, log_var=logvars, average=False, dim=2) - math.log(n_modes)
# --> for each element in batch, calculate log-likelihood for all modes: [batch_size] x [n_modes]
if (y is not None) and self.per_class:
modes_list = list()
for i in range(len(y)):
target = y[i].item()
modes_list.append(list(range(target * self.modes_per_class, (target + 1) * self.modes_per_class)))
modes_tensor = torch.LongTensor(modes_list).to(self._device())
a = a.gather(dim=1, index=modes_tensor)
# --> reduce [a] to size [batch_size]x[modes_per_class] (ie, per batch only keep modes of [y])
# but within the batch, elements can have different [y], so this reduction couldn't be done before
a_max, _ = torch.max(a, dim=1) # [batch_size]
# --> for each element in batch, take highest log-likelihood over all modes
# this is calculated and used to avoid underflow in the below computation
a_exp = torch.exp(a - a_max.unsqueeze(1)) # [batch_size] x [n_modes]
if (y is None) and (y_prob is not None) and self.per_class:
batch_size = y_prob.size(0)
y_prob = y_prob.view(-1, 1).repeat(1, self.modes_per_class).view(batch_size, -1)
# ----> extend probabilities per class to probabilities per mode; y_prob: [batch_size] x [n_modes]
a_logsum = torch.log(torch.clamp(torch.sum(y_prob * a_exp, dim=1), min=1e-40))
else:
a_logsum = torch.log(torch.clamp(torch.sum(a_exp, dim=1), min=1e-40)) # -> sum over modes: [batch_size]
log_p_z = a_logsum + a_max # [batch_size]
return log_p_z
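    # For the GMM prior this evaluates log p(z) = log( (1/K) * sum_k N(z; mu_k, sigma_k^2) ) over
    # the K selected modes, using the max-shift (log-sum-exp) trick via [a_max] to avoid underflow.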
def calculate_variat_loss(self, z, mu, logvar, y=None, y_prob=None, allowed_classes=None):
        '''Calculate variational (KL-divergence) loss for each element in the batch.
INPUT: - [z] <2D-tensor> with sampled latent variables (1st dimension (ie, dim=0) is "batch-dimension")
- [mu] <2D-tensor> by encoder predicted mean for [z]
- [logvar] <2D-tensor> by encoder predicted logvar for [z]
OPTIONS THAT ARE RELEVANT ONLY IF self.per_class IS TRUE:
- [y] None or <1D-tensor> with target-classes (as integers)
- [y_prob] None or <2D-tensor> with probabilities for each class (in [allowed_classes])
- [allowed_classes] None or <list> with class-IDs to use for selecting prior-mode(s)
OUTPUT: - [variatL] <1D-tensor> of length [batch_size]'''
if self.prior == "standard":
# --> calculate analytically
# ---- see Appendix B from: Kingma & Welling (2014) Auto-Encoding Variational Bayes, ICLR ----#
variatL = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1)
elif self.prior=="GMM":
# --> calculate "by estimation"
## Calculate "log_p_z" (log-likelihood of "reparameterized" [z] based on selected priors)
log_p_z = self.calculate_log_p_z(z, y=y, y_prob=y_prob, allowed_classes=allowed_classes)
# -----> log_p_z: [batch_size]
## Calculate "log_q_z_x" (entropy of "reparameterized" [z] given [x])
log_q_z_x = lf.log_Normal_diag(z, mean=mu, log_var=logvar, average=False, dim=1)
# -----> mu: [batch_size] x [z_dim]; logvar: [batch_size] x [z_dim]; z: [batch_size] x [z_dim]
# -----> log_q_z_x: [batch_size]
## Combine
variatL = -(log_p_z - log_q_z_x)
return variatL
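    # With the standard prior the KL-term has the closed form -0.5 * sum(1 + logvar - mu^2 - exp(logvar));
    # with the GMM prior it is estimated from a single sample as log q(z|x) - log p(z).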
def loss_function(self, x, y, x_recon, y_hat, scores, mu, z, logvar=None, allowed_classes=None, batch_weights=None):
'''Calculate and return various losses that could be used for training and/or evaluating the model.
INPUT: - [x] <4D-tensor> original image
- [y] <1D-tensor> with target-classes (as integers, corresponding to [allowed_classes])
- [x_recon] (tuple of 2x) <4D-tensor> reconstructed image in same shape as [x]
- [y_hat] <2D-tensor> with predicted "logits" for each class (corresponding to [allowed_classes])
- [scores] <2D-tensor> with target "logits" for each class (corresponding to [allowed_classes])
(if len(scores)<len(y_hat), 0 probs are added during distillation step at the end)
- [mu] <2D-tensor> with either [z] or the estimated mean of [z]
- [z] <2D-tensor> with reparameterized [z]
- [logvar] None or <2D-tensor> with estimated log(SD^2) of [z]
- [batch_weights] <1D-tensor> with a weight for each batch-element (if None, normal average over batch)
               - [allowed_classes] None or <list> with class-IDs to use for selecting prior-mode(s)
OUTPUT: - [reconL] reconstruction loss indicating how well [x] and [x_recon] match
                - [variatL] variational (KL-divergence) loss "indicating how close distribution [z] is to prior"
- [predL] prediction loss indicating how well targets [y] are predicted
- [distilL] knowledge distillation (KD) loss indicating how well the predicted "logits" ([y_hat])
match the target "logits" ([scores])'''
###-----Reconstruction loss-----###
batch_size = x.size(0)
reconL = self.calculate_recon_loss(x=x.view(batch_size, -1), average=True,
x_recon=x_recon.view(batch_size, -1)) # -> average over pixels
reconL = lf.weighted_average(reconL, weights=batch_weights, dim=0) # -> average over batch
###-----Variational loss-----###
if logvar is not None:
actual_y = torch.tensor([allowed_classes[i.item()] for i in y]).to(self._device()) if (
(allowed_classes is not None) and (y is not None)
) else y
if (y is None and scores is not None):
y_prob = F.softmax(scores / self.KD_temp, dim=1)
if allowed_classes is not None and len(allowed_classes) > y_prob.size(1):
n_batch = y_prob.size(0)
zeros_to_add = torch.zeros(n_batch, len(allowed_classes) - y_prob.size(1))
zeros_to_add = zeros_to_add.to(self._device())
y_prob = torch.cat([y_prob, zeros_to_add], dim=1)
else:
y_prob = None
# ---> if [y] is not provided but [scores] is, calculate variational loss using weighted sum of prior-modes
variatL = self.calculate_variat_loss(z=z, mu=mu, logvar=logvar, y=actual_y, y_prob=y_prob,
allowed_classes=allowed_classes)
variatL = lf.weighted_average(variatL, weights=batch_weights, dim=0) # -> average over batch
variatL /= (self.image_channels * self.image_size ** 2) # -> divide by # of input-pixels
else:
variatL = torch.tensor(0., device=self._device())
###-----Prediction loss-----###
if y is not None and y_hat is not None:
predL = F.cross_entropy(input=y_hat, target=y, reduction='none')
#--> no reduction needed, summing over classes is "implicit"
predL = lf.weighted_average(predL, weights=batch_weights, dim=0) # -> average over batch
else:
predL = torch.tensor(0., device=self._device())
        ###-----Distillation loss-----###
if scores is not None and y_hat is not None:
# n_classes_to_consider = scores.size(1) #--> with this version, no zeroes would be added to [scores]!
n_classes_to_consider = y_hat.size(1) #--> zeros will be added to [scores] to make it this size!
distilL = lf.loss_fn_kd(scores=y_hat[:, :n_classes_to_consider], target_scores=scores, T=self.KD_temp,
weights=batch_weights) #--> summing over classes & averaging over batch in function
else:
distilL = torch.tensor(0., device=self._device())
# Return a tuple of the calculated losses
return reconL, variatL, predL, distilL
##------ EVALUATION FUNCTIONS --------##
def calculate_recon_error(self, dataset, batch_size=128, max_batches=None, average=False, feature_extractor=None):
'''Calculate reconstruction error of the model for each datapoint in [dataset].
[average] <bool>, if True, reconstruction-error is averaged over all pixels/units; otherwise it is summed'''
# Create data-loader
data_loader = get_data_loader(dataset, batch_size=batch_size, cuda=self._is_on_cuda())
# Break loop if max number of batches has been reached
for index, (x, _) in enumerate(data_loader):
if max_batches is not None and index >= max_batches:
break
# Move [x] to correct device
x = x.to(self._device())
# Preprocess, if required
if feature_extractor is not None:
with torch.no_grad():
x = feature_extractor(x)
# Run forward pass of model to get [z_mean]
with torch.no_grad():
z_mean, _, _ = self.encode(x)
# Run backward pass of model to reconstruct input
with torch.no_grad():
x_recon = self.decode(z_mean)
# Calculate reconstruction error
recon_error = self.calculate_recon_loss(x.view(x.size(0), -1), x_recon.view(x.size(0), -1), average=average)
            # Concatenate the calculated reconstruction errors for all evaluated samples
all_res = torch.cat([all_res, recon_error]) if index > 0 else recon_error
# Convert to <np-array> (with one entry for each evaluated sample in [dataset]) and return
return all_res.cpu().numpy()
def estimate_loglikelihood_single(self, x, S=5000, batch_size=128):
'''Estimate average marginal log-likelihood for [x] using [S] importance samples.'''
# Move [x] to correct device
x = x.to(self._device())
# Run forward pass of model to get [z_mu] and [z_logvar]
with torch.no_grad():
z_mu, z_logvar, _ = self.encode(x)
        # Importance samples will be calculated in batches, get number of required batches
repeats = int(np.ceil(S / batch_size))
# For each importance sample, calculate log_likelihood
for rep in range(repeats):
batch_size_current = (S % batch_size) if rep==(repeats-1) else batch_size
# Reparameterize (i.e., sample z_s)
z = self.reparameterize(z_mu.expand(batch_size_current, -1), z_logvar.expand(batch_size_current, -1))
# Calculate log_p_z
with torch.no_grad():
log_p_z = self.calculate_log_p_z(z)
# Calculate log_q_z_x
log_q_z_x = lf.log_Normal_diag(z, mean=z_mu, log_var=z_logvar, average=False, dim=1)
            # Calculate log_p_x_z
# -reconstruct input
with torch.no_grad():
x_recon = self.decode(z)
# -calculate p_x_z (under Gaussian observation model with unit variance)
log_p_x_z = lf.log_Normal_standard(x=x, mean=x_recon, average=False, dim=-1)
# Calculate log-likelihood for each importance sample
log_likelihoods = log_p_x_z + log_p_z - log_q_z_x
            # Concatenate the log-likelihoods of all importance samples
all_lls = torch.cat([all_lls, log_likelihoods]) if rep > 0 else log_likelihoods
# Calculate average log-likelihood over all importance samples for this test sample
# (for this, convert log-likelihoods back to likelihoods before summing them!)
log_likelihood = all_lls.logsumexp(dim=0) - np.log(S)
return log_likelihood
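    # This is the usual importance-sampling estimate:
    #   log p(x) ~= logsumexp_s[ log p(x|z_s) + log p(z_s) - log q(z_s|x) ] - log S,
    # with the S samples z_s drawn from the encoder's posterior q(z|x).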
def estimate_loglikelihood(self, dataset, S=5000, batch_size=128, max_n=None, feature_extractor=None):
        '''Estimate average marginal log-likelihood of the model on [dataset] using [S] importance samples per datapoint.'''
# Create data-loader to give batches of size 1
data_loader = get_data_loader(dataset, batch_size=1, cuda=self._is_on_cuda())
# List to store estimated log-likelihood for each datapoint
ll_per_datapoint = []
# Break loop if max number of samples has been reached
for index, (x, _) in enumerate(data_loader):
if max_n is not None and index >= max_n:
break
# Preprocess, if required
if feature_extractor is not None:
x = x.to(self._device())
with torch.no_grad():
x = feature_extractor(x)
# Estimate log-likelihood for the input-output pair (x,y)
log_likelihood = self.estimate_loglikelihood_single(x, S=S, batch_size=batch_size)
# Add it to list
ll_per_datapoint.append(log_likelihood.cpu().numpy())
return ll_per_datapoint
##------ TRAINING FUNCTIONS --------##
def train_a_batch(self, x, y=None, x_=None, y_=None, scores_=None, rnt=0.5, classes_so_far=None, **kwargs):
'''Train model for one batch ([x],[y]), possibly supplemented with replayed data ([x_],[y_]).
[x] <tensor> batch of inputs
[y] None or <tensor> batch of corresponding labels
[x_] None or <tensor> batch of replayed inputs
[y_] None or <1Dtensor>:[batch] of corresponding "replayed" labels
[scores_] None or <2Dtensor>:[batch]x[classes] target "scores"/"logits" for [x_]
[rnt] <number> in [0,1], relative importance of new task
[classes_so_far] None or (<list> of) <list> with all classes seen so far'''
# Set model to training-mode
self.train()
        # -however, if some layers are frozen, they should be set to eval() to prevent batch-norm layers from changing
if self.convE.frozen:
self.convE.eval()
if self.fcE.frozen:
self.fcE.eval()
# Reset optimizer
self.optimizer.zero_grad()
##-- CURRENT DATA --##
# Run the model
recon_batch, y_hat, mu, logvar, z = self(x, gate_input=y if self.dg_gates else None, full=True,
reparameterize=True)
# Remove predictions for classes not to be trained on
if self.neg_samples == "all-so-far":
# -train on all classes so far
class_entries = classes_so_far
elif self.neg_samples == "all":
# -train on all classes (also those not yet seen)
class_entries = None
y_hat = y_hat[:, class_entries] if class_entries is not None else y_hat
# Calculate all losses
reconL, variatL, predL, _ = self.loss_function(
x=x, y=y, x_recon=recon_batch, y_hat=y_hat, scores=None, mu=mu, z=z, logvar=logvar,
allowed_classes=class_entries if classes_so_far is not None else None
) # --> [allowed_classes] will be used only if [y] is not provided
# Weigh losses as requested
loss_cur = reconL + variatL + self.lamda_pl * predL
        # Calculate training-accuracy
        accuracy = None
        if y is not None and y_hat is not None:
            _, predicted = y_hat.max(1)
            accuracy = (y == predicted).sum().item() / x.size(0)
##-- REPLAYED DATA --##
if x_ is not None:
# -if needed in the decoder-gates, find class-tensor [y_predicted]
y_predicted = None
if self.dg_gates:
if y_ is not None:
y_predicted = y_
else:
y_predicted = F.softmax(scores_ / self.KD_temp, dim=1)
if y_predicted.size(1) < self.classes:
# in case of Class-IL, add zeros at the end:
n_batch = y_predicted.size(0)
zeros_to_add = torch.zeros(n_batch, self.classes - y_predicted.size(1))
zeros_to_add = zeros_to_add.to(self._device())
y_predicted = torch.cat([y_predicted, zeros_to_add], dim=1)
# -run the full model
gate_input = y_predicted if self.dg_gates else None
recon_batch, y_hat, mu, logvar, z = self(x_, gate_input=gate_input, full=True, reparameterize=True)
# -remove predictions for classes not to be trained on
if self.neg_samples=="all-so-far":
class_entries = classes_so_far
elif self.neg_samples=="all":
class_entries = None
y_hat = y_hat[:, class_entries] if class_entries is not None else y_hat
# Calculate all losses
reconL_r, variatL_r, predL_r, distilL_r = self.loss_function(
x=x_, y=y_ if (y_ is not None) else None, x_recon=recon_batch, y_hat=y_hat,
scores=scores_ if (scores_ is not None) else None, mu=mu, z=z, logvar=logvar,
allowed_classes=classes_so_far if classes_so_far is not None else None,
)
# Weigh losses as requested
loss_replay = reconL_r + variatL_r
if self.replay_targets == "hard":
loss_replay += self.lamda_pl * predL_r
elif self.replay_targets == "soft":
loss_replay += self.lamda_pl * distilL_r
# Calculate total loss
loss = loss_cur if x_ is None else rnt*loss_cur + (1-rnt)*loss_replay
##--(3)-- ALLOCATION LOSSES --##
# Add SI-loss
surrogate_loss = self.surrogate_loss()
if self.si_c>0:
loss += self.si_c * surrogate_loss
# Add EWC-loss
ewc_loss = self.ewc_loss()
if self.ewc_lambda>0:
loss += self.ewc_lambda * ewc_loss
# Backpropagate gradients
loss.backward()
# Take optimization-step
self.optimizer.step()
# Return the dictionary with different training-loss split in categories
return {
'loss_total': loss.item(),
'pred': predL.item(),
'ewc': ewc_loss.item(),
'si_loss': surrogate_loss.item(),
'accuracy': accuracy if accuracy is not None else 0.,
'recon': reconL.item() if x is not None else 0,
'variat': variatL.item() if x is not None else 0,
}
|
11458561
|
import json
import posixpath
import re
import time
def test_simple(pyscript):
script = pyscript("""
import click
from daemonocle.cli import DaemonCLI
@click.command(cls=DaemonCLI,
daemon_params={'name': 'foo', 'pid_file': 'foo.pid'})
def main():
\"\"\"My awesome daemon\"\"\"
pass
if __name__ == '__main__':
main()
""")
result = script.run('--help')
assert result.returncode == 0
assert b'My awesome daemon' in result.stdout
assert re.search((
br'\s*start\s+Start the daemon\.\n'
br'\s*stop\s+Stop the daemon\.\n'
br'\s*restart\s+Stop then start the daemon\.\n'
br'\s*status\s+Get the status of the daemon\.\n'),
result.stdout)
result = script.run('start', '--help')
assert result.returncode == 0
assert re.search(
br'\s*--debug\s+Do NOT detach and run in the background\.\n',
result.stdout)
assert script.run('stop', '--help').returncode == 0
assert script.run('restart', '--help').returncode == 0
assert script.run('status', '--help').returncode == 0
def test_debug(pyscript):
script = pyscript("""
import click
from daemonocle.cli import DaemonCLI
@click.command(cls=DaemonCLI, daemon_params={'name': 'foo'})
def main():
\"\"\"My awesome daemon\"\"\"
print('hello world')
if __name__ == '__main__':
main()
""")
result = script.run('start', '--debug')
assert result.returncode == 0
assert result.stdout == (
b'Starting foo ... OK\n'
b'hello world\n'
b'All children are gone. Parent is exiting...\n')
assert result.stderr == b''
def test_shorthand_1(pyscript):
script = pyscript("""
import click
from daemonocle.cli import cli
@cli(name='foo')
def main():
\"\"\"My awesome daemon\"\"\"
print('hello world')
if __name__ == '__main__':
main()
""")
result = script.run('start', '--debug')
assert result.returncode == 0
assert result.stdout == (
b'Starting foo ... OK\n'
b'hello world\n'
b'All children are gone. Parent is exiting...\n')
assert result.stderr == b''
def test_shorthand_2(pyscript):
script = pyscript("""
from daemonocle import Daemon
from daemonocle.cli import DaemonCLI
def main():
\"\"\"My awesome daemon\"\"\"
print('hello world')
if __name__ == '__main__':
cli = DaemonCLI(daemon=Daemon(name='foo', worker=main))
cli()
""")
result = script.run('--help')
assert result.returncode == 0
assert b'My awesome daemon' in result.stdout
result = script.run('start', '--debug')
assert result.returncode == 0
assert result.stdout == (
b'Starting foo ... OK\n'
b'hello world\n'
b'All children are gone. Parent is exiting...\n')
assert result.stderr == b''
def test_force_stop(pyscript):
script = pyscript("""
import signal
import sys
import time
from daemonocle import Daemon, DaemonCLI
def worker():
def handle_sigterm(*args, **kwargs):
time.sleep(10)
signal.signal(signal.SIGTERM, handle_sigterm)
time.sleep(10)
if __name__ == '__main__':
cli = DaemonCLI(daemon=Daemon(
worker=worker, name='foo', pid_file='foo.pid', stop_timeout=1))
cli()
""")
pid_file = posixpath.realpath(posixpath.join(script.dirname, 'foo.pid'))
script.run('start')
with open(pid_file, 'rb') as f:
pid = int(f.read())
t1 = time.monotonic()
result = script.run('stop', '--force')
t2 = time.monotonic()
assert result.returncode == 0
assert result.stdout == b'Stopping foo ... FAILED\nKilling foo ... OK\n'
assert result.stderr == ('ERROR: Timed out while waiting for process '
'(PID {pid}) to terminate\n').format(
pid=pid).encode('utf-8')
assert 1.0 <= (t2 - t1) < 2.0
def test_force_stop_custom_timeout(pyscript):
script = pyscript("""
import signal
import sys
import time
from daemonocle import Daemon, DaemonCLI
def worker():
def handle_sigterm(*args, **kwargs):
time.sleep(10)
signal.signal(signal.SIGTERM, handle_sigterm)
time.sleep(10)
if __name__ == '__main__':
cli = DaemonCLI(daemon=Daemon(
worker=worker, name='foo', pid_file='foo.pid', stop_timeout=5))
cli()
""")
pid_file = posixpath.realpath(posixpath.join(script.dirname, 'foo.pid'))
script.run('start')
with open(pid_file, 'rb') as f:
pid = int(f.read())
t1 = time.monotonic()
result = script.run('stop', '--force', '--timeout=1')
t2 = time.monotonic()
assert result.returncode == 0
assert result.stdout == b'Stopping foo ... FAILED\nKilling foo ... OK\n'
assert result.stderr == ('ERROR: Timed out while waiting for process '
'(PID {pid}) to terminate\n').format(
pid=pid).encode('utf-8')
assert 1.0 <= (t2 - t1) < 2.0
def test_status_json(pyscript):
script = pyscript("""
import time
from daemonocle.cli import cli
@cli(name='foo', pid_file='foo.pid')
def main():
time.sleep(10)
if __name__ == '__main__':
main()
""")
pid_file = posixpath.realpath(posixpath.join(script.dirname, 'foo.pid'))
script.run('start')
with open(pid_file, 'rb') as f:
pid = int(f.read())
result = script.run('status', '--json')
assert result.returncode == 0
status = json.loads(result.stdout.decode('ascii').rstrip('\n'))
assert status['name'] == 'foo'
assert status['pid'] == pid
assert status['status'] in {'running', 'sleeping'}
assert isinstance(status['uptime'], float)
assert isinstance(status['cpu_percent'], float)
assert isinstance(status['memory_percent'], float)
script.run('stop')
result = script.run('status', '--json')
assert result.returncode == 1
status = json.loads(result.stdout.decode('ascii').rstrip('\n'))
assert status['name'] == 'foo'
assert status['status'] == 'dead'
def test_status_fields(pyscript):
script = pyscript("""
import subprocess
from daemonocle.cli import cli
@cli(name='foo', pid_file='foo.pid')
def main():
subprocess.check_call(['sleep', '10'])
if __name__ == '__main__':
main()
""")
pid_file = posixpath.realpath(posixpath.join(script.dirname, 'foo.pid'))
script.run('start')
result = script.run(
'status', '--json', '--fields=group_num_procs,open_files')
assert result.returncode == 0
status = json.loads(result.stdout.decode('ascii').rstrip('\n'))
assert status['group_num_procs'] == 2
open_file_paths = set()
for item in status['open_files']:
try:
path = posixpath.realpath(item[0])
except OSError:
continue
else:
open_file_paths.add(path)
assert pid_file in open_file_paths
script.run('stop')
def test_custom_actions(pyscript):
script = pyscript("""
import click
from daemonocle import Daemon, expose_action
from daemonocle.cli import DaemonCLI
class BananaDaemon(Daemon):
@expose_action
def banana(self):
\"\"\"Go bananas.\"\"\"
pass
def plantain(self):
pass
@click.command(cls=DaemonCLI, daemon_class=BananaDaemon,
daemon_params={'name': 'foo', 'pid_file': 'foo.pid'})
def main():
\"\"\"The banana daemon\"\"\"
pass
if __name__ == '__main__':
main()
""")
result = script.run('--help')
assert result.returncode == 0
assert b'The banana daemon' in result.stdout
assert re.search(br'\s*banana\s+Go bananas\.\n', result.stdout)
assert script.run('banana', '--help').returncode == 0
result = script.run('plantain', '--help')
assert result.returncode != 0
assert b'No such command' in result.stderr
def test_custom_actions_with_options(pyscript):
script = pyscript("""
import daemonocle
class MyDaemon(daemonocle.Daemon):
name = 'my_daemon'
@daemonocle.expose_action
def foo(self, wibble: int,
wobble: str = '1ErrJ5QgasJKkcMdRBrEQHtyGqkWLa1sSJS'):
\"\"\"2ScD2S4w44jivwVNAamYdCVUU8afdDqTsGU\"\"\"
print(wibble * 24369)
print(sum(map(ord, wobble)))
@daemonocle.expose_action
def bar(self, wubble=False, flub=True, **kwargs):
\"\"\"1R3YQRaAMU2inZ7mhtZC96MTiaykPYGCqC9\"\"\"
print(repr(wubble))
print(repr(flub))
print(repr(kwargs))
def worker(self):
\"\"\"2PfZ4gSZaghZXK3VuDqbD82ZGqpqDLAKPpj\"\"\"
pass
if __name__ == '__main__':
MyDaemon(pid_file='foo.pid').cli()
""")
result = script.run('--help')
assert result.returncode == 0
assert b'2PfZ4gSZaghZXK3VuDqbD82ZGqpqDLAKPpj' in result.stdout
assert re.search(
br'\s*foo\s+2ScD2S4w44jivwVNAamYdCVUU8afdDqTsGU\n', result.stdout)
assert re.search(
br'\s*bar\s+1R3YQRaAMU2inZ7mhtZC96MTiaykPYGCqC9\n', result.stdout)
result = script.run('foo', '--help')
assert result.returncode == 0
assert b'2ScD2S4w44jivwVNAamYdCVUU8afdDqTsGU' in result.stdout
assert b'--wibble' in result.stdout
assert b'--wobble' in result.stdout
result = script.run('foo')
assert result.returncode == 2
assert b'Missing option \'--wibble\'' in result.stderr
result = script.run('foo', '--wibble=9055')
assert result.returncode == 0
assert result.stdout == b'220661295\n3077\n'
result = script.run('foo', '--wibble=1850',
'--wobble=26hevLhGzeeX7dNqAtdXjKBtmevsxgBvWNG')
assert result.returncode == 0
assert result.stdout == b'45082650\n3254\n'
result = script.run('bar')
assert result.returncode == 0
assert result.stdout == b'False\nTrue\n{}\n'
result = script.run('bar', '--flub')
assert result.returncode == 0
assert result.stdout == b'False\nTrue\n{}\n'
result = script.run('bar', '--wubble', '--no-flub')
assert result.returncode == 0
assert result.stdout == b'True\nFalse\n{}\n'
|
11458576
|
from pymoo.core.problem import calc_constr
from pymoo.problems.meta import MetaProblem
from pymoo.util.misc import from_dict
class ConstraintViolationAsObjective(MetaProblem):
def __init__(self, problem, eps=1e-6):
super().__init__(problem)
self.n_obj = 1
self.n_constr = 0
self.eps = eps
def do(self, x, out, *args, **kwargs):
super().do(x, out, *args, **kwargs)
F, G = from_dict(out, "F", "G")
        assert G is not None, "To convert a problem's constraints into an objective, G must be set!"
out["__F__"] = out["F"]
out["__G__"] = out["G"]
out["F"] = calc_constr(G, eps=self.eps, beta=1.0)
del out["G"]
|
11458583
|
from __future__ import print_function, division
import torch
import numpy as np
import torch.nn as nn
import os
import shutil
from sklearn.metrics.pairwise import euclidean_distances
import torch.nn.functional as F
from Options import Config
config = Config().parse()
def to_np(x):
return x.data.cpu().numpy()
def save_checkpoint(state, epoch, is_best, filename=config.name + '_checkpoint.pth.tar'):
if not os.path.exists(config.checkpoints_dir):
os.mkdir(config.checkpoints_dir)
torch.save(state, os.path.join(config.checkpoints_dir, str(epoch) + "_" + filename))
if is_best:
shutil.copyfile(os.path.join(config.checkpoints_dir, str(epoch) + "_" + filename), config.name + '_model_best.pth.tar')
def copy_state_dict(state_dict, model, strip=None):
tgt_state = model.state_dict()
copied_names = set()
for name, param in state_dict.items():
if strip is not None and name.startswith(strip):
name = name[len(strip):]
if name not in tgt_state:
continue
if isinstance(param, nn.Parameter):
param = param.data
if param.size() != tgt_state[name].size():
print('mismatch:', name, param.size(), tgt_state[name].size())
continue
tgt_state[name].copy_(param)
copied_names.add(name)
missing = set(tgt_state.keys()) - copied_names
if len(missing) > 0:
print("missing keys in state_dict:", missing)
return model
def load_checkpoint(resume_path, model):
if os.path.isfile(resume_path):
print("=> loading checkpoint '{}'".format(resume_path))
checkpoint = torch.load(resume_path)
model.start_step = checkpoint['step']
epoch = checkpoint['epoch']
model.best_acc = checkpoint['best_acc']
model.min_loss = checkpoint['min_loss']
model.mfcc_encoder = copy_state_dict(checkpoint['mfcc_encoder'], model.mfcc_encoder)
model.model_fusion = copy_state_dict(checkpoint['model_fusion'], model.model_fusion)
model.face_encoder = copy_state_dict(checkpoint['face_encoder'], model.face_encoder)
model.face_fusion = copy_state_dict(checkpoint['face_fusion'], model.face_fusion)
model.discriminator_audio = copy_state_dict(checkpoint['discriminator_audio'], model.discriminator_audio)
# model.discriminator_image = copy_state_dict(checkpoint['discriminator_image'], model.discriminator_image)
model.optimizer_G.load_state_dict(checkpoint['optimizer_G'])
model.optimizer_D.load_state_dict(checkpoint['optimizer_D'])
print("=> loaded checkpoint '{}' (step {})"
.format(resume_path, checkpoint['step']))
return model, epoch
else:
print("=> no checkpoint found at '{}'".format(resume_path))
def load_ini(resume_path1, resume_path2, model):
print("=> loading checkpoint '{}'".format(resume_path1))
checkpoint1 = torch.load(resume_path1)
print("=> loading checkpoint '{}'".format(resume_path2))
checkpoint2 = torch.load(resume_path2)
model.mfcc_encoder = copy_state_dict(checkpoint1['image_model'], model.mfcc_encoder)
model.model_fusion = copy_state_dict(checkpoint2['mfcc_fusion'], model.model_fusion)
return model
def adjust_learning_rate(audio_model, config, loss):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
if loss < config.loss_buffer:
# config.lr1 = config.lr1 + 1e-8
pass
else:
config.lr = config.lr * 0.5
config.loss_buffer = loss
for param_group in audio_model.optimizer.param_groups:
param_group['lr'] = config.lr
def load_synthesis_checkpoint(resume_path, model):
if os.path.isfile(resume_path):
print("=> loading checkpoint '{}'".format(resume_path))
checkpoint = torch.load(resume_path)
model.mfcc_encoder = copy_state_dict(checkpoint['mfcc_encoder'], model.mfcc_encoder)
return model
else:
print("=> no checkpoint found at '{}'".format(resume_path))
def l2_sim(feature1, feature2):
Feature = feature1.expand(feature1.size(0), feature1.size(0), feature1.size(1)).transpose(0, 1)
return torch.norm(Feature - feature2, p=2, dim=2)
def l2_norm(x):
x_norm = F.normalize(x, p=2, dim=1)
return x_norm
def sim(feature1, feature2):
"""Cosine similarity between all the image and sentence pairs
"""
return feature1.mm(feature2.t())
def sentence_to_video(clips_embed, captions_embed, return_ranks = False):
captions_num = captions_embed.shape[0]
#index_list = []
ranks = np.zeros(captions_num)
top1 = np.zeros(captions_num)
for i in range(captions_num):
# caption dim : 1 * embed_size; clips_embed dim: num * embed_size
# d : 1 * num : represent the similarity between this caption and each clip
caption = captions_embed[i]
d = np.dot(caption, clips_embed.T).flatten()
inds = np.argsort(d)[::-1]
rank = np.where(inds == i)[0][0]
ranks[i] = rank
top1[i] = inds[0]
r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
r50 = 100.0 * len(np.where(ranks < 50)[0]) / len(ranks)
# r100 = 100.0 * len(np.where(ranks < 100)[0]) / len(ranks)
#plus 1 because the index starts from 0
medr = np.floor(np.median(ranks)) + 1
meanr = ranks.mean() + 1
if return_ranks:
return (r1, r5, r10, r50, medr, meanr), (ranks, top1)
else:
return (r1, r5, r10, r50, medr, meanr)
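# r1/r5/r10/r50 give the percentage of captions whose matching clip is ranked within the
# top-1/5/10/50 by dot-product similarity; medr/meanr are the median/mean rank (1-indexed).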
def L2retrieval(clips_embed, captions_embed, return_ranks = False):
captions_num = captions_embed.shape[0]
#index_list = []
ranks = np.zeros(captions_num)
top1 = np.zeros(captions_num)
import time
t1 = time.time()
d = euclidean_distances(captions_embed, clips_embed)
inds = np.argsort(d)
num = np.arange(captions_num).reshape(captions_num, 1)
ranks = np.where(inds == num)[1]
top1 = inds[:, 0]
t2 = time.time()
print((t2 - t1))
r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
r50 = 100.0 * len(np.where(ranks < 50)[0]) / len(ranks)
# r100 = 100.0 * len(np.where(ranks < 100)[0]) / len(ranks)
#plus 1 because the index starts from 0
medr = np.floor(np.median(ranks)) + 1
meanr = ranks.mean() + 1
if return_ranks:
return (r1, r5, r10, r50, medr, meanr), (ranks, top1)
else:
return (r1, r5, r10, r50, medr, meanr)
|
11458586
|
import textwrap
import pytest
from mock import patch
from conans.errors import ConanException
from conans.util.files import save
from conans.test.utils.tools import TestClient
@pytest.fixture
def client():
client = TestClient()
conanfile = textwrap.dedent("""
from conans import ConanFile
class Pkg(ConanFile):
def generate(self):
for k, v in self.conf.items():
self.output.info("{}${}".format(k, v))
""")
client.save({"conanfile.py": conanfile})
return client
def test_basic_composition(client):
profile1 = textwrap.dedent("""\
[conf]
tools.microsoft.msbuild:verbosity=Quiet
tools.microsoft.msbuild:performance=Slow
tools.cmake.cmake:verbosity=Extra
""")
profile2 = textwrap.dedent("""\
[conf]
tools.microsoft.msbuild:verbosity=Minimal
tools.microsoft.msbuild:robustness=High
tools.meson.meson:verbosity=Super
""")
client.save({"profile1": profile1,
"profile2": profile2})
client.run("install . -pr=profile1")
assert "tools.microsoft.msbuild:verbosity$Quiet" in client.out
assert "tools.microsoft.msbuild:performance$Slow" in client.out
assert "tools.cmake.cmake:verbosity$Extra" in client.out
client.run("install . -pr=profile1 -pr=profile2")
assert "tools.microsoft.msbuild:verbosity$Minimal" in client.out
assert "tools.microsoft.msbuild:performance$Slow" in client.out
assert "tools.microsoft.msbuild:robustness$High" in client.out
assert "tools.cmake.cmake:verbosity$Extra" in client.out
assert "tools.meson.meson:verbosity$Super" in client.out
client.run("install . -pr=profile2 -pr=profile1")
assert "tools.microsoft.msbuild:verbosity$Quiet" in client.out
assert "tools.microsoft.msbuild:performance$Slow" in client.out
assert "tools.microsoft.msbuild:robustness$High" in client.out
assert "tools.cmake.cmake:verbosity$Extra" in client.out
assert "tools.meson.meson:verbosity$Super" in client.out
def test_basic_inclusion(client):
profile1 = textwrap.dedent("""\
[conf]
tools.microsoft.msbuild:verbosity=Quiet
tools.microsoft.msbuild:performance=Slow
tools.cmake.cmake:verbosity=Extra
""")
profile2 = textwrap.dedent("""\
include(profile1)
[conf]
tools.microsoft.msbuild:verbosity=Minimal
tools.microsoft.msbuild:robustness=High
tools.meson.meson:verbosity=Super
""")
client.save({"profile1": profile1,
"profile2": profile2})
client.run("install . -pr=profile2")
assert "tools.microsoft.msbuild:verbosity$Minimal" in client.out
assert "tools.microsoft.msbuild:performance$Slow" in client.out
assert "tools.microsoft.msbuild:robustness$High" in client.out
assert "tools.cmake.cmake:verbosity$Extra" in client.out
assert "tools.meson.meson:verbosity$Super" in client.out
def test_composition_conan_conf(client):
conf = textwrap.dedent("""\
tools.microsoft.msbuild:verbosity=Quiet
tools.microsoft.msbuild:performance=Slow
tools.cmake.cmake:verbosity=Extra
""")
save(client.cache.new_config_path, conf)
profile = textwrap.dedent("""\
[conf]
tools.microsoft.msbuild:verbosity=Minimal
tools.microsoft.msbuild:robustness=High
tools.meson.meson:verbosity=Super
""")
client.save({"profile": profile})
client.run("install . -pr=profile")
assert "tools.microsoft.msbuild:verbosity$Minimal" in client.out
assert "tools.microsoft.msbuild:performance$Slow" in client.out
assert "tools.microsoft.msbuild:robustness$High" in client.out
assert "tools.cmake.cmake:verbosity$Extra" in client.out
assert "tools.meson.meson:verbosity$Super" in client.out
def test_new_config_file(client):
conf = textwrap.dedent("""\
tools.microsoft.msbuild:verbosity=Minimal
user.mycompany.myhelper:myconfig=myvalue
*:tools.cmake.cmake:generator=X
cache:no_locks=True
cache:read_only=True
""")
save(client.cache.new_config_path, conf)
client.run("install .")
assert "tools.microsoft.msbuild:verbosity$Minimal" in client.out
assert "user.mycompany.myhelper:myconfig$myvalue" in client.out
assert "tools.cmake.cmake:generator$X" in client.out
assert "no_locks" not in client.out
assert "read_only" not in client.out
@patch("conans.client.conf.required_version.client_version", "1.26.0")
def test_new_config_file_required_version():
client = TestClient()
conf = textwrap.dedent("""\
core:required_conan_version=>=2.0
""")
save(client.cache.new_config_path, conf)
with pytest.raises(ConanException) as excinfo:
client.run("install .")
assert ("Current Conan version (1.26.0) does not satisfy the defined one (>=2.0)"
in str(excinfo.value))
def test_composition_conan_conf_overwritten_by_cli_arg(client):
conf = textwrap.dedent("""\
tools.microsoft.msbuild:verbosity=Quiet
tools.microsoft.msbuild:performance=Slow
""")
save(client.cache.new_config_path, conf)
profile = textwrap.dedent("""\
[conf]
tools.microsoft.msbuild:verbosity=Minimal
tools.microsoft.msbuild:robustness=High
""")
client.save({"profile": profile})
client.run("install . -pr=profile -c tools.microsoft.msbuild:verbosity=Detailed "
"-c tools.meson.meson:verbosity=Super")
assert "tools.microsoft.msbuild:verbosity$Detailed" in client.out
assert "tools.microsoft.msbuild:performance$Slow" in client.out
assert "tools.microsoft.msbuild:robustness$High" in client.out
assert "tools.meson.meson:verbosity$Super" in client.out
|
11458617
|
import django_filters
from django.db.models import Q
from . import models
class CharInFilter(django_filters.BaseInFilter, django_filters.CharFilter):
pass
class CityFilter(django_filters.rest_framework.FilterSet):
provinceNames = CharInFilter(
field_name='provinceName', lookup_expr='in')
provinceCodes = CharInFilter(
field_name='provinceCode', lookup_expr='in')
cityNames = CharInFilter(
field_name='cityName', lookup_expr='in')
class Meta:
model = models.City
fields = ['provinceName', 'provinceCode', 'cityName']
class ProvinceFilter(django_filters.rest_framework.FilterSet):
provinceNames = CharInFilter(
field_name='provinceName', lookup_expr='in')
provinceCodes = CharInFilter(
field_name='provinceCode', lookup_expr='in')
class Meta:
model = models.Province
fields = ['provinceName', 'provinceCode']
class CountryFilter(django_filters.rest_framework.FilterSet):
continents = CharInFilter(
field_name='continents', lookup_expr='in')
countryCodes = CharInFilter(
field_name='countryCode', lookup_expr='in')
countryNames = CharInFilter(
field_name='countryName', lookup_expr='in')
class Meta:
model = models.Country
fields = [
'continents', 'countryCode', 'countryName'
]
class CountryCodeFilter(django_filters.rest_framework.FilterSet):
numericCodes = CharInFilter(
field_name='numericCode', lookup_expr='in')
countryCodes = CharInFilter(
field_name='countryCode', lookup_expr='in')
shortCountryCodes = CharInFilter(
field_name='shortCountryCode', lookup_expr='in')
class Meta:
model = models.CountryCode
fields = [
'numericCode', 'countryCode', 'shortCountryCode'
]
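# Usage note: the CharInFilter fields accept comma-separated values and translate them into an
# __in lookup, e.g. a (hypothetical) request like ?countryCodes=US,CN filters on
# countryCode__in=['US', 'CN'].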
|
11458672
|
import pytest
from oslash.either import Right
from jsonrpcserver.async_main import (
dispatch_to_response,
dispatch_to_serializable,
dispatch_to_json,
)
from jsonrpcserver.response import SuccessResponse
from jsonrpcserver.result import Result, Success
async def ping() -> Result:
return Success("pong")
@pytest.mark.asyncio
async def test_dispatch_to_response():
assert await dispatch_to_response(
'{"jsonrpc": "2.0", "method": "ping", "id": 1}', {"ping": ping}
) == Right(SuccessResponse("pong", 1))
@pytest.mark.asyncio
async def test_dispatch_to_serializable():
assert await dispatch_to_serializable(
'{"jsonrpc": "2.0", "method": "ping", "id": 1}', {"ping": ping}
) == {"jsonrpc": "2.0", "result": "pong", "id": 1}
@pytest.mark.asyncio
async def test_dispatch_to_json():
assert (
await dispatch_to_json(
'{"jsonrpc": "2.0", "method": "ping", "id": 1}', {"ping": ping}
)
== '{"jsonrpc": "2.0", "result": "pong", "id": 1}'
)
@pytest.mark.asyncio
async def test_dispatch_to_json_notification():
assert (
await dispatch_to_json('{"jsonrpc": "2.0", "method": "ping"}', {"ping": ping})
== ""
)
|
11458677
|
import balanced
balanced.configure('ak-test-2eKlj1ZDfAcZSARMf3NMhBHywDej0avSY')
card = balanced.Card.fetch('/cards/CC4zyuNpxY0A0eAf87SeULCR')
card.meta = {
'twitter.id': '1234987650',
'facebook.user_id': '0192837465',
'my-own-customer-id': '12345'
}
card.save()
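# The meta dict holds arbitrary key/value metadata for the card; save() sends the update to the
# Balanced API so the metadata is persisted with the card resource.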
|
11458681
|
import re
from alexa.utils.config import LocalConfig
from apiclient.discovery import build
config = LocalConfig()
class YoutubeVideoInformation:
def __init__(self, video=None):
if video is not None:
self.__id = str(video['id']['videoId'])
self.__title = video['snippet']['title']
@property
def id(self):
return self.__id
@id.setter
def id(self, val):
self.__id = val
@property
def title(self):
return self.__title
@title.setter
def title(self, title):
        pattern = re.compile(r'[\W_]+')
self.__title = pattern.sub(' ', title)
@property
def stream_url(self):
return '%s/stream/%s.mp3' % (config.general['url'], self.id)
def __str__(self):
return self.title
def __repr__(self):
return self.__str__()
def to_list(self):
return {
'id': self.id,
'title': self.__str__()
}
class Youtube:
def __init__(self, items=None, session=None):
if items is None:
items = []
self.__items = items
self.__current = 0
self.__y = None
self.__session = session
def to_list(self):
item_list = []
for item in self.__items:
item_list.append(item.to_list())
return item_list
@property
def current_index(self):
return self.__current
@property
def __length(self):
# TODO: maybe cache this value?
return len(self.__items)
def __rebuild(self):
if self.__session is None or self.__length > 0:
return
for item in self.__session.attributes['playlist']:
video = YoutubeVideoInformation()
video.id = item.id
video.title = item.title
self.__items.append(video)
def current(self):
self.__rebuild()
if self.__length == 0:
return None
return self.__items[self.__current]
def next(self):
self.__rebuild()
self.__current += 1
if self.__current >= self.__length:
return None
return self.current()
def prev(self):
self.__rebuild()
self.__current -= 1
if self.__current <= 0:
return None
return self.current()
def clear(self):
self.__items = []
self.__current = 0
self.__session = None
@property
def y(self):
if self.__y is not None:
return self.__y
api_key = config.youtube['api_key']
api_service_name = config.youtube['api_service_name']
api_version = config.youtube['api_version']
self.__y = build(api_service_name, api_version,
developerKey=api_key)
return self.__y
def save_session(self):
if self.__session is not None:
self.__session.attributes['playlist'] = self.to_list()
self.__session.attributes['current'] = self.current_index
def search(self, query):
search_response = self.y.search().list(
q=query,
part="id,snippet",
maxResults=1,
type="video",
fields="items(id(videoId),snippet(title))"
).execute()
result_list = search_response.get("items", [])
self.__items.append(YoutubeVideoInformation(result_list[0]))
search_response = self.y.search().list(
relatedToVideoId=self.current().id,
part="id,snippet",
maxResults=9,
type="video",
fields="items(id(videoId),snippet(title))"
).execute()
result_list = search_response.get("items", [])
for item in result_list:
self.__items.append(YoutubeVideoInformation(item))
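    # Usage sketch (the session object is assumed to be an Alexa/Flask-Ask-style session):
    #   yt = Youtube(session=session)
    #   yt.search('some artist')        # fills the playlist with 1 match + 9 related videos
    #   url = yt.current().stream_url   # proxied MP3 stream URL for the current item
    #   yt.save_session()               # persist playlist + position into the session attributes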
|
11458704
|
import tensorflow as tf
import numpy as np
import os
from keras import backend as K
from util import *
from constants import *
# Visualize using:
# http://projector.tensorflow.org/
def main():
models = build_or_load()
style_layer = models[0].get_layer('style')
print('Creating input')
style_in = tf.placeholder(tf.float32, shape=(NUM_STYLES, NUM_STYLES))
style_out = style_layer(style_in)
# All possible styles
all_styles = np.identity(NUM_STYLES)
with K.get_session() as sess:
embedding = sess.run(style_out, { style_in: all_styles })
print('Writing to out directory')
np.savetxt(os.path.join(OUT_DIR, 'style_embedding_vec.tsv'), embedding, delimiter='\t')
labels = [[g] * len(styles[i]) for i, g in enumerate(genre)]
# Flatten
labels = [y for x in labels for y in x]
    # Retrieve specific artists
styles_labels = [y for x in styles for y in x]
styles_labels = np.reshape(styles_labels, [-1, 1])
labels = np.reshape(labels, [-1, 1])
labels = np.hstack([labels, styles_labels])
# Add metadata header
header = ['Genre', 'Artist']
labels = np.vstack([header, labels])
np.savetxt(os.path.join(OUT_DIR, 'style_embedding_labels.tsv'), labels, delimiter='\t', fmt='%s')
if __name__ == '__main__':
main()
|
11458707
|
from django.core.exceptions import ValidationError
from django.forms import ModelForm
from django.forms.fields import ChoiceField, MultipleChoiceField
from django.forms.widgets import CheckboxSelectMultiple, Textarea
from export.forms import ExportImageStatsForm
from upload.forms import CsvFileField
from .models import CalcifyRateTable
from .utils import get_default_calcify_tables
def get_calcify_table_choices(source):
# Source's tables
choices = [
(table.pk, table.name)
for table in source.calcifyratetable_set.order_by('name')
]
# Default tables
default_tables = get_default_calcify_tables()
choices.extend([
(table.pk, table.name)
for table in default_tables
])
return choices
class ExportCalcifyStatsForm(ExportImageStatsForm):
rate_table_id = ChoiceField(
label="Label rates to use")
optional_columns = MultipleChoiceField(
widget=CheckboxSelectMultiple, required=False)
field_order = ['rate_table_id', 'optional_columns', 'label_display']
def __init__(self, source, *args, **kwargs):
super().__init__(*args, **kwargs)
# TODO: The best thing to do is to remove all possibility of
# initializing this form without a labelset, and then remove this
# conditional.
if source.labelset:
labelset_size = source.labelset.get_labels().count()
else:
labelset_size = 0
self.fields['rate_table_id'].choices = \
get_calcify_table_choices(source)
self.fields['optional_columns'].choices = (
('per_label_mean',
"Per-label contributions to mean rate"
f" (adds {labelset_size} columns)"),
('per_label_bounds',
"Per-label contributions to confidence bounds"
f" (adds {labelset_size * 2} columns)"),
)
class CalcifyRateTableForm(ModelForm):
csv_file = CsvFileField(label='CSV file')
class Meta:
model = CalcifyRateTable
fields = ['name', 'description']
widgets = {
'description': Textarea(),
}
def __init__(self, source, *args, **kwargs):
self.source = source
super().__init__(*args, **kwargs)
def clean_name(self):
"""
Check for uniqueness within the source. The ModelForm doesn't
validate this automatically because the source isn't a field in the
form.
"""
name = self.cleaned_data['name']
try:
CalcifyRateTable.objects.get(source=self.source, name=name)
raise ValidationError(
"This source already has a rate table with the same name.")
except CalcifyRateTable.DoesNotExist:
# This name isn't taken yet, so it's valid.
return name
|
11458744
|
from __future__ import print_function, absolute_import
from .reid_loaders import ReIDLoaders
from .incremental_reid_loaders import IncrementalReIDLoaders
from .customed_loaders import CustomedLoaders
from .transforms2 import RandomErasing
from .incremental_datasets import IncrementalReIDDataSet, Incremental_combine_train_samples, Incremental_combine_test_samples
|
11458746
|
import logging
from flask import request
from flask_restplus import Resource, inputs
from biolink.datamodel.serializers import association, association_results
from biolink.api.restplus import api
from ontobio.golr.golr_associations import get_association, search_associations
from biolink import USER_AGENT
log = logging.getLogger(__name__)
core_parser = api.parser()
core_parser.add_argument('rows', type=int, required=False, default=100, help='number of rows')
core_parser.add_argument('start', type=int, required=False, help='beginning row')
core_parser.add_argument('evidence', help='Object id, e.g. ECO:0000501 (for IEA; Includes inferred by default) or a specific publication or other supporting object, e.g. ZFIN:ZDB-PUB-060503-2')
core_parser.add_argument('unselect_evidence', type=inputs.boolean, default=False, help='If true, excludes evidence objects in response')
core_parser.add_argument('exclude_automatic_assertions', type=inputs.boolean, default=False, help='If true, excludes associations that involve IEAs (ECO:0000501)')
core_parser.add_argument('use_compact_associations', type=inputs.boolean, default=False, help='If true, returns results in compact associations format')
@api.doc(params={'subject': 'Return associations emanating from this node, e.g. NCBIGene:84570, ZFIN:ZDB-GENE-050417-357 (If ID is from an ontology then results would include inferred associations, by default)'})
class AssociationsFrom(Resource):
parser = core_parser.copy()
parser.add_argument('object_taxon', help='Object taxon ID, e.g. NCBITaxon:10090 (Includes inferred associations, by default)')
parser.add_argument('relation', help='Filter by relation CURIE, e.g. RO:0002200 (has_phenotype), RO:0002607 (is marker for), RO:HOM0000017 (orthologous to), etc.')
@api.expect(parser)
@api.marshal_list_with(association_results)
def get(self, subject):
"""
Returns list of matching associations starting from a given subject (source)
"""
args = self.parser.parse_args()
return search_associations(subject=subject, user_agent=USER_AGENT, **args)
@api.doc(params={'object': 'Return associations pointing to this node, e.g. specifying MP:0013765 will return all genes, variants, strains, etc. annotated with this term. Can also be a biological entity such as a gene'})
class AssociationsTo(Resource):
parser = core_parser.copy()
parser.add_argument('subject_taxon', help='Subject taxon ID, e.g. NCBITaxon:9606 (Includes inferred associations, by default)')
parser.add_argument('relation', help='Filter by relation CURIE, e.g. RO:0002200 (has_phenotype), RO:0002607 (is marker for), RO:HOM0000017 (orthologous to), etc.')
@api.expect(core_parser)
@api.marshal_list_with(association_results)
def get(self, object):
"""
Returns list of matching associations pointing to a given object (target)
"""
args = self.parser.parse_args()
return search_associations(object=object, user_agent=USER_AGENT, **args)
@api.doc(params={'subject': 'Return associations emanating from this node, e.g. MGI:1342287 (If ID is from an ontology then results would include inferred associations, by default)'})
@api.doc(params={'object': 'Return associations pointing to this node, e.g. MP:0013765. Can also be a biological entity such as a gene'})
class AssociationsBetween(Resource):
@api.expect(core_parser)
@api.marshal_list_with(association_results)
def get(self, subject, object):
"""
Returns associations connecting two entities
Given two entities (e.g. a particular gene and a particular disease), if these two entities
are connected (directly or indirectly), then return the association objects describing
the connection.
"""
args = core_parser.parse_args()
return search_associations(subject=subject, object=object, user_agent=USER_AGENT, **args)
@api.doc(params={'association_type': 'Association type, eg gene_phenotype'})
class AssociationBySubjectAndAssocType(Resource):
parser = core_parser.copy()
parser.add_argument('subject', help='Subject CURIE')
parser.add_argument('object', help='Object CURIE')
@api.expect(parser)
@api.marshal_list_with(association_results)
def get(self, association_type):
"""
Returns list of matching associations of a given type
"""
args = self.parser.parse_args()
return search_associations(
association_type=association_type,
sort="source_count desc",
user_agent=USER_AGENT,
**args
)
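# Hedged usage sketch (not part of the resource classes above): every endpoint here
# delegates to ontobio's search_associations(), so the call below mirrors what
# AssociationsFrom.get() does for a single subject. The example CURIE and row count are
# illustrative assumptions, and running this performs a live golr query.
if __name__ == "__main__":
    demo = search_associations(subject="NCBIGene:84570", rows=5, user_agent=USER_AGENT)
    for assoc in demo.get("associations", [])[:5]:
        print(assoc.get("subject"), "->", assoc.get("object"))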
|
11458763
|
import copy
import functools
from .compat import build_opener, HTTPCookieProcessor, URLError, \
urlencode, CookieJar, HTTPError, BadStatusLine
from .utils import parse_content_type, NOT_A_PAGE_CONTENT_TYPES
import gzip
import zlib
import webvulnscan.log
from .page import Page
from .request import Request
class NotAPage(Exception):
""" The content at the URL in question is not a webpage, but something
static (image, text, etc.) """
class Client(object):
""" Client provides a easy interface for accessing web content. """
def __init__(self, log=webvulnscan.log):
self.cookie_jar = CookieJar()
self.opener = self.setup_opener()
self.additional_headers = {}
self.log = log
def setup_opener(self):
""" Builds the opener for the class. """
cookie_handler = HTTPCookieProcessor(self.cookie_jar)
opener = build_opener(cookie_handler)
return opener
def _download(self, request):
self.log('info', request.url, "request", "Trying to request")
try:
response = self.opener.open(request)
except HTTPError as error:
response = error
except URLError as error:
if hasattr(self.log, 'warn'):
                self.log.warn(request.url, "unreachable")
raise URLError(request.url + ' is unreachable: {0}'.format(error))
except BadStatusLine as e:
self.log('warn', request.url, 'Bad status line sent')
return (request, 0, "", {})
status_code = response.code
headers = response.info()
if headers.get('Content-Encoding') == "gzip":
sim_file = gzip.GzipFile(fileobj=response)
response_data = sim_file.read()
elif headers.get('Content-Encoding') == "deflate":
response_data = zlib.decompress(response.read())
else:
response_data = response.read()
return (request, status_code, response_data, headers)
def download(self, url_or_request, parameters=None, headers=None):
"""
Downloads a URL, returns (request, status_code, response_data, headers)
"""
if isinstance(url_or_request, Request):
assert parameters is None
assert headers is None
request = url_or_request.copy()
else:
request = Request(url_or_request, parameters, headers)
for header, value in self.additional_headers.items():
request.add_header(header, value)
msg = ('Requesting with parameters %s' % (request.parameters,)
if request.parameters else
'Requesting')
self.log('info', request.url, 'client status', msg)
return self._download(request)
def download_page(self, url_or_request, parameters=None, req_headers=None):
""" Downloads the content of a site, returns it as page.
Throws NotAPage if the content is not a webpage.
"""
request, status_code, html_bytes, headers = self.download(
url_or_request, parameters, req_headers)
content_type, charset = parse_content_type(
headers.get('Content-Type'),
logfunc=functools.partial(self.log, 'warn', request.url))
if content_type in NOT_A_PAGE_CONTENT_TYPES:
raise NotAPage()
try:
html = html_bytes.decode(charset, 'strict')
except UnicodeDecodeError as ude:
self.log('warn', request.url, 'Incorrect encoding', str(ude))
html = html_bytes.decode(charset, 'replace')
return Page(self.log, request, html, headers, status_code)
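# Hedged usage sketch: download() returns the (request, status_code, body, headers) tuple
# built above, while download_page() additionally parses the body into a Page. The URL and
# the no-op log callable are illustrative assumptions; this only runs when the module is
# executed directly (from within the webvulnscan package, since it uses relative imports).
if __name__ == "__main__":
    client = Client(log=lambda *args, **kwargs: None)
    request, status_code, body, headers = client.download("http://example.com/")
    print(status_code, len(body))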
|
11458788
|
import json
from core.redis import rds
from core.triage import Triage
from core.parser import ScanParser
class Rule:
def __init__(self):
self.rule = 'CFG_ESTR'
self.rule_severity = 4
        self.rule_description = 'This rule checks for NodeJS Server.js file exposures'
self.rule_confirm = 'Remote NodeJS Server is leaking server.js'
self.rule_details = ''
self.rule_mitigation = '''NodeJS has been configured to serve server.js which may allow attackers access to backend code.'''
self.rule_match_string = [
"require('http')",
"module.exports",
"server.listen",
"http-proxy-middleware"
]
self.intensity = 1
def check_rule(self, ip, port, values, conf):
t = Triage()
p = ScanParser(port, values)
module = p.get_module()
domain = p.get_domain()
if 'http' in module:
resp = t.http_request(ip, port, uri='/server.js', follow_redirects=False)
if resp is None:
return
for i in self.rule_match_string:
if i in resp.text:
self.rule_details = 'Identified a NodeJS Leakage at {} Indicator: {}'.format(resp.url, i)
rds.store_vuln({
'ip':ip,
'port':port,
'domain':domain,
'rule_id':self.rule,
'rule_sev':self.rule_severity,
'rule_desc':self.rule_description,
'rule_confirm':self.rule_confirm,
'rule_details':self.rule_details,
'rule_mitigation':self.rule_mitigation
})
return
|
11458794
|
import utils.logging_data as LOG
import cv2
from imutils import face_utils
import dlib
from keras.models import load_model
from scipy.spatial import distance as dist
import imutils
import os
import sys
import threading
import numpy as np
import re
import time
import datetime
'''
Dlib detection
This file contains a dlib detection implementation
Make sure the 68 face landmark model file is reachable!
'''
#Detection
# Class that handle detection in own thread
class Detection(threading.Thread):
pnet = None
rnet = None
onet = None
landmarks_model_path = '../../model/shape_predictor_68_face_landmarks.dat'
face_detector = None
landmarks_predictor = None
#face_cascade_path = 'model/haarcascade_frontalface_alt.xml'
# Flipp testing camera
flipp_test_nr = 1
flipp_test_degree = 90
do_flipp_test = False
flipp_test_long_intervall = 6
# Calculate time
start_time = None
end_time = None
# Thread sleep times
sleep_time = 0.1
LONG_SLEEP = 2
SHORT_SLEEP = 0.5
    # Number of detection failures before energy-save mode starts
no_face_count = 0
NO_FACE_MAX = 4
Loaded_model = False
index = 0
# Initiate thread
# parameters name, and shared_variables reference
def __init__(self, name=None, shared_variables = None):
threading.Thread.__init__(self)
self.name = name
self.shared_variables = shared_variables
self.sleep_time = self.SHORT_SLEEP
self.index = int(name)
LOG.info("Create dlib detection" + str(self.index), "SYSTEM-"+self.shared_variables.name)
# Convert_dlib_box_to_OpenCV_box(box)
# @param takes in a dlib box
# @return returns a box for OpenCV
def convert_dlib_box_to_openCV_box(self, box):
return (int(box.left()), int(box.top()), int(box.right() - box.left()),
int(box.bottom() - box.top()) )
# Object_detection
    # @ returns True if detection is successful
# @ returns False if no face found
#
# This function uses dlib to make a face detection.
# Then transform the result to OpenCV
#
def object_detection(self, frame):
#image = imutils.resize(image, width=500)
# gray = cv2.cvtColor(self.shared_variables.frame, cv2.COLOR_BGR2GRAY)
# detect faces in the grayscale image
box_arr = self.face_detector(frame, 1)
# No face
if(not len(box_arr) ):
face_found = False
return face_found, None, None, None
# determine the facial landmarks for the face region
shape = self.landmarks_predictor(frame, box_arr[0])
landmarks = face_utils.shape_to_np(shape)
# convert box
face_box = self.convert_dlib_box_to_openCV_box(box_arr[0])
face_found = True
# Fix score later!
score = 100
#success, face_box, landmarks, score
return face_found, face_box, landmarks, score
#Run
#Detection function
def run(self):
if not self.Loaded_model:
LOG.info("Loading Dlib model" + str(self.index),"SYSTEM-"+self.shared_variables.name)
# Load model
self.face_detector = dlib.get_frontal_face_detector()
self.landmarks_predictor = dlib.shape_predictor(self.landmarks_model_path)
#face_cascade = cv2.CascadeClassifier(face_cascade_path)
self.Loaded_model = True
LOG.info("Start dlib detections" + str(self.index),"SYSTEM-"+self.shared_variables.name)
#wait for first cam frame
while self.shared_variables.frame[self.index] is None:
pass
# Start Loop
while self.shared_variables.system_running:
self.start_time = datetime.datetime.now()
frame = self.shared_variables.frame[self.index]
if self.do_flipp_test:
frame = imutils.rotate(frame, self.flipp_test_degree*self.flipp_test_nr)
# Do detection
success, face_box, landmarks, score = self.object_detection(frame)
# if found faces
if success:
self.shared_variables.detection_score[self.index] = score
self.no_face_count = 0
# Save landmark
#self.shared_variables.landmarks[self.index] = landmarks
self.shared_variables.set_landmarks(landmarks, self.index)
# Save boxes
self.shared_variables.face_box[self.index] = [face_box]
self.shared_variables.set_detection_box([face_box], self.index)
self.shared_variables.face_found[self.index] = True
# Do flipp test on detection
if self.shared_variables.flipp_test[self.index] and self.do_flipp_test:
# save flipp as success
degree = self.shared_variables.flipp_test_degree[self.index] + self.flipp_test_nr*self.flipp_test_degree
                    degree = degree % 360  # wrap accumulated rotation into [0, 360)
self.shared_variables.flipp_test_degree[self.index] = degree
# log frame change
LOG.info("Flipp test successful add degree :" + str(self.flipp_test_nr*self.flipp_test_degree),self.shared_variables.name)
# end flipp test
self.do_flipp_test = False
self.flipp_test_nr = 1
# Wake tracking thread
#if not self.shared_variables.tracking_running[self.index]:
# self.sleep_time = self.SHORT_SLEEP
else:
# No face
self.shared_variables.face_found[self.index] = False
# if max face misses has been done, do less detections
if self.no_face_count >= self.NO_FACE_MAX:
# do flipp test
if self.shared_variables.flipp_test[self.index]:
# doing flipp test
if self.do_flipp_test:
self.flipp_test_nr = self.flipp_test_nr + 1
# flipp test did not find anything
if self.flipp_test_nr*self.flipp_test_degree >= 360:
self.do_flipp_test = False
self.flipp_test_nr = 1
if self.sleep_time == self.SHORT_SLEEP:
#LOG.log("Initiate energy save",self.shared_variables.name)
#self.sleep_time = self.LONG_SLEEP
pass
else:
self.do_flipp_test = True
else:
#self.sleep_time = self.LONG_SLEEP
#self.shared_variables.tracking_running[self.index] = False
#LOG.log("Initiate energy save",self.shared_variables.name)
pass
else:
self.no_face_count = self.no_face_count + 1
if self.no_face_count >= self.flipp_test_long_intervall and self.shared_variables.flipp_test[self.index]:
self.no_face_count = 0
self.end_time = datetime.datetime.now()
# Debug detection time
if self.shared_variables.debug:
LOG.debug('Dlib Detection time:' + str(self.end_time - self.start_time),self.shared_variables.name)
time.sleep(self.sleep_time) # sleep if wanted
LOG.info("Ending dlib detection " + str(self.index), "SYSTEM-"+self.shared_variables.name )
|
11458803
|
import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
process = cms.Process("OccupancyPlotsTest")
#prepare options
options = VarParsing.VarParsing("analysis")
options.register ('globalTag',
"DONOTEXIST",
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.string, # string, int, or float
"GlobalTag")
options.register ('HLTprocess',
"HLT",
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.string, # string, int, or float
"HLTProcess")
options.register ('triggerPath',
"HLT_*",
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.string, # string, int, or float
"list of HLT paths")
options.parseArguments()
#
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True),
fileMode = cms.untracked.string("FULLMERGE")
)
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.cout.enable = cms.untracked.bool(True)
process.MessageLogger.cout.threshold = cms.untracked.string("WARNING")
process.MessageLogger.cout.default = cms.untracked.PSet(
limit = cms.untracked.int32(10000000)
)
process.MessageLogger.cout.FwkReport = cms.untracked.PSet(
reportEvery = cms.untracked.int32(10000)
)
process.MessageLogger.cerr.enable = cms.untracked.bool(True)
process.MessageLogger.cerr.threshold = cms.untracked.string("WARNING")
process.MessageLogger.cerr.default = cms.untracked.PSet(
limit = cms.untracked.int32(10000000)
)
process.MessageLogger.cerr.FwkReport = cms.untracked.PSet(
reportEvery = cms.untracked.int32(100000)
)
#------------------------------------------------------------------
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(options.inputFiles),
# skipBadFiles = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring("keep *", "drop *_MEtoEDMConverter_*_*")
)
# HLT Selection ------------------------------------------------------------
process.load("HLTrigger.HLTfilters.triggerResultsFilter_cfi")
process.triggerResultsFilter.triggerConditions = cms.vstring(options.triggerPath)
process.triggerResultsFilter.hltResults = cms.InputTag( "TriggerResults", "", options.HLTprocess )
process.triggerResultsFilter.l1tResults = cms.InputTag( "" )
process.triggerResultsFilter.throw = cms.bool(False)
process.seqHLTSelection = cms.Sequence(process.triggerResultsFilter)
#--------------------------------------
#from DPGAnalysis.SiStripTools.occupancyplotsselections_cff import *
from DPGAnalysis.SiStripTools.occupancyplotsselections_simplified_cff import *
process.ssclusmultprod = cms.EDProducer("SiStripClusterMultiplicityProducer",
clusterdigiCollection = cms.InputTag("siStripClusters"),
wantedSubDets = cms.VPSet()
)
process.ssclusmultprod.wantedSubDets.extend(OccupancyPlotsStripWantedSubDets)
process.ssclusoccuprod = cms.EDProducer("SiStripClusterMultiplicityProducer",
clusterdigiCollection = cms.InputTag("siStripClusters"),
withClusterSize = cms.untracked.bool(True),
wantedSubDets = cms.VPSet()
)
process.ssclusoccuprod.wantedSubDets.extend(OccupancyPlotsStripWantedSubDets)
process.spclusmultprod = cms.EDProducer("SiPixelClusterMultiplicityProducer",
clusterdigiCollection = cms.InputTag("siPixelClusters"),
wantedSubDets = cms.VPSet()
)
process.spclusmultprod.wantedSubDets.extend(OccupancyPlotsPixelWantedSubDets)
process.spclusmultprod.wantedSubDets.extend(cms.VPSet(
cms.PSet(detSelection = cms.uint32(0),detLabel = cms.string("Pixel")),
cms.PSet(detSelection = cms.uint32(1),detLabel = cms.string("BPIX")),
cms.PSet(detSelection = cms.uint32(2),detLabel = cms.string("FPIX")),
cms.PSet(detSelection=cms.uint32(11),detLabel=cms.string("BPIX_L1"),selection=cms.untracked.vstring("0x1e0f0000-0x12010000")),
cms.PSet(detSelection=cms.uint32(12),detLabel=cms.string("BPIX_L2"),selection=cms.untracked.vstring("0x1e0f0000-0x12020000")),
cms.PSet(detSelection=cms.uint32(13),detLabel=cms.string("BPIX_L3"),selection=cms.untracked.vstring("0x1e0f0000-0x12030000")),
cms.PSet(detSelection=cms.uint32(21),detLabel=cms.string("FPIX_m"),selection=cms.untracked.vstring("0x1f800000-0x14800000")),
cms.PSet(detSelection=cms.uint32(22),detLabel=cms.string("FPIX_p"),selection=cms.untracked.vstring("0x1f800000-0x15000000")),
cms.PSet(detSelection=cms.uint32(99),detLabel=cms.string("Lumi"),selection=cms.untracked.vstring("0x1e0f0000-0x12020000",
"0x1e0f0000-0x12030000",
"0x1f800000-0x14800000",
"0x1f800000-0x15000000"))
))
process.spclusoccuprod = cms.EDProducer("SiPixelClusterMultiplicityProducer",
clusterdigiCollection = cms.InputTag("siPixelClusters"),
withClusterSize = cms.untracked.bool(True),
wantedSubDets = cms.VPSet()
)
process.spclusoccuprod.wantedSubDets.extend(OccupancyPlotsPixelWantedSubDets)
process.seqMultProd = cms.Sequence(process.ssclusmultprod + process.ssclusoccuprod +
process.spclusmultprod + process.spclusoccuprod)
process.load("DPGAnalysis.SiStripTools.occupancyplots_cfi")
process.occupancyplots.wantedSubDets = OccupancyPlotsStripWantedSubDets
process.pixeloccupancyplots = process.occupancyplots.clone()
process.pixeloccupancyplots.wantedSubDets = cms.VPSet()
process.pixeloccupancyplots.wantedSubDets.extend(OccupancyPlotsPixelWantedSubDets)
process.pixeloccupancyplots.multiplicityMaps = cms.VInputTag(cms.InputTag("spclusmultprod"))
process.pixeloccupancyplots.occupancyMaps = cms.VInputTag(cms.InputTag("spclusoccuprod"))
process.alloccupancyplots = process.occupancyplots.clone()
process.alloccupancyplots.wantedSubDets = cms.VPSet()
process.alloccupancyplots.wantedSubDets.extend(OccupancyPlotsPixelWantedSubDets)
process.alloccupancyplots.wantedSubDets.extend(OccupancyPlotsStripWantedSubDets)
process.alloccupancyplots.multiplicityMaps = cms.VInputTag(cms.InputTag("spclusmultprod"),cms.InputTag("ssclusmultprod"))
process.alloccupancyplots.occupancyMaps = cms.VInputTag(cms.InputTag("spclusoccuprod"),cms.InputTag("ssclusoccuprod"))
process.load("DPGAnalysis.SiStripTools.spclusmultvtxposcorr_cfi")
process.spclusmultvtxposcorr.multiplicityMap = cms.InputTag("spclusmultprod")
process.spclusmultvtxposcorr.digiVtxPosCorrConfig.wantedSubDets = cms.untracked.VPSet(
cms.PSet(detSelection = cms.uint32(0),detLabel = cms.string("Pixel"), binMax = cms.int32(200000)),
cms.PSet(detSelection = cms.uint32(1),detLabel = cms.string("BPIX"), binMax = cms.int32(200000)),
cms.PSet(detSelection = cms.uint32(11),detLabel = cms.string("BPIX_L1"), binMax = cms.int32(200000)),
cms.PSet(detSelection = cms.uint32(12),detLabel = cms.string("BPIX_L2"), binMax = cms.int32(200000)),
cms.PSet(detSelection = cms.uint32(13),detLabel = cms.string("BPIX_L3"), binMax = cms.int32(200000)),
cms.PSet(detSelection = cms.uint32(2),detLabel = cms.string("FPIX"), binMax = cms.int32(200000)),
cms.PSet(detSelection = cms.uint32(21),detLabel = cms.string("FPIX_m"), binMax = cms.int32(200000)),
cms.PSet(detSelection = cms.uint32(22),detLabel = cms.string("FPIX_p"), binMax = cms.int32(200000)),
cms.PSet(detSelection = cms.uint32(99),detLabel = cms.string("Lumi"), binMax = cms.int32(200000)),
cms.PSet(detSelection = cms.uint32(111),detLabel = cms.string("BPIX_L1_mod_1"), binMax = cms.int32(200000)),
cms.PSet(detSelection = cms.uint32(112),detLabel = cms.string("BPIX_L1_mod_2"), binMax = cms.int32(200000)),
cms.PSet(detSelection = cms.uint32(113),detLabel = cms.string("BPIX_L1_mod_3"), binMax = cms.int32(200000)),
cms.PSet(detSelection = cms.uint32(114),detLabel = cms.string("BPIX_L1_mod_4"), binMax = cms.int32(200000)),
cms.PSet(detSelection = cms.uint32(115),detLabel = cms.string("BPIX_L1_mod_5"), binMax = cms.int32(200000)),
cms.PSet(detSelection = cms.uint32(116),detLabel = cms.string("BPIX_L1_mod_6"), binMax = cms.int32(200000)),
cms.PSet(detSelection = cms.uint32(117),detLabel = cms.string("BPIX_L1_mod_7"), binMax = cms.int32(200000)),
cms.PSet(detSelection = cms.uint32(118),detLabel = cms.string("BPIX_L1_mod_8"), binMax = cms.int32(200000))
)
process.load("TrackingPFG.Utilities.bxlumianalyzer_cfi")
process.load("Validation.RecoVertex.mcverticesanalyzer_cfi")
process.goodVertices = cms.EDFilter("VertexSelector",
src = cms.InputTag("offlinePrimaryVertices"),
cut = cms.string("!isFake && ndof > 4 && abs(z) <= 24 && position.Rho <= 2"),
filter = cms.bool(False), # otherwise it won't filter the events, just produce an empty vertex collection.
)
process.load("Validation.RecoVertex.anotherprimaryvertexanalyzer_cfi")
process.primaryvertexanalyzer.pvCollection=cms.InputTag("goodVertices")
process.primaryvertexanalyzer.vHistogramMakerPSet.runHisto=cms.untracked.bool(False)
process.primaryvertexanalyzer.vHistogramMakerPSet.runHistoProfile=cms.untracked.bool(False)
process.primaryvertexanalyzer.vHistogramMakerPSet.runHistoBXProfile=cms.untracked.bool(False)
process.seqAnalyzers = cms.Sequence(process.bxlumianalyzer + process.goodVertices + process.primaryvertexanalyzer +
process.occupancyplots + process.pixeloccupancyplots + process.alloccupancyplots +
process.spclusmultvtxposcorr + process.mcverticesanalyzer )
#-------------------------------------------------------------------------------------------
process.seqProducers = cms.Sequence(process.seqMultProd)
process.p0 = cms.Path(
process.seqHLTSelection +
process.seqProducers +
process.seqAnalyzers
)
#----GlobalTag ------------------------
process.load("Configuration.StandardSequences.GeometryDB_cff")
process.load("Configuration.StandardSequences.Reconstruction_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, options.globalTag, '')
process.siStripQualityESProducer.ListOfRecordToMerge=cms.VPSet(
# cms.PSet( record = cms.string("SiStripDetVOffRcd"), tag = cms.string("") ),
cms.PSet( record = cms.string("SiStripDetCablingRcd"), tag = cms.string("") ),
cms.PSet( record = cms.string("RunInfoRcd"), tag = cms.string("") ),
cms.PSet( record = cms.string("SiStripBadChannelRcd"), tag = cms.string("") ),
cms.PSet( record = cms.string("SiStripBadFiberRcd"), tag = cms.string("") ),
cms.PSet( record = cms.string("SiStripBadModuleRcd"), tag = cms.string("") )
)
process.TFileService = cms.Service('TFileService',
# fileName = cms.string('OccupancyPlotsTest_newschema.root')
fileName = cms.string('OccupancyPlotsTest_vtxpos.root')
)
#print process.dumpPython()
|
11458821
|
import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Phase2C11I13_cff import Phase2C11I13
from Configuration.Eras.Modifier_phase2_brickedPixels_cff import phase2_brickedPixels
from Configuration.Eras.Modifier_phase2_GE0_cff import phase2_GE0
Phase2C11I13T27M9 = cms.ModifierChain(Phase2C11I13, phase2_brickedPixels, phase2_GE0)
|
11458823
|
from .asset import Asset
from .collection import Collection
from .item import Item
collection_store = {}
item_store = {}
asset_store = {}
def get_collection(title: str) -> Collection:
if title not in collection_store:
collection = Collection(title)
collection_store[title] = collection
return collection_store[title]
def get_asset(source_path: str) -> Asset:
if source_path not in asset_store:
asset = Asset(source_path)
asset_store[source_path] = asset
return asset_store[source_path]
def get_item(item_id: str) -> Item:
if item_id not in item_store:
item = Item(item_id)
item_store[item_id] = item
return item_store[item_id]
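# Hedged usage sketch: each getter memoizes on its key, so repeated lookups return the
# same object instead of constructing a new one. The titles, paths and ids below are
# illustrative assumptions; because this module uses relative imports it has to run from
# within its parent package (e.g. via python -m).
if __name__ == "__main__":
    assert get_collection("landsat") is get_collection("landsat")
    assert get_item("item-1") is get_item("item-1")
    assert get_asset("/data/scene.tif") is get_asset("/data/scene.tif")
    print("stores created:", len(collection_store), len(item_store), len(asset_store))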
|
11458825
|
from django.forms.utils import flatatt
from django.utils.html import format_html
from django_icons.css import merge_css_list, merge_css_text
class IconRenderer(object):
"""Render an icon as an HTML element."""
tag = "i"
format_string = "<{tag}{attrs}>{content}</{tag}>"
def __init__(self, name, **kwargs):
"""Set name and kwargs."""
super(IconRenderer, self).__init__()
self.name = name
self.content = ""
self.kwargs = kwargs
def get_tag(self):
"""Return default tag for HTML builder."""
return self.tag
def get_class(self):
"""Return primary CSS class for this icon."""
return self.name
def get_extra_classes(self):
"""Return list of other classes for this icon."""
return merge_css_list(self.kwargs.get("extra_classes", None))
def get_css_classes(self):
"""Return list of all CSS classes for this icon."""
return merge_css_list(self.get_class(), self.get_extra_classes())
def get_attrs(self):
"""Return HTML attributes for this icon."""
attrs = {}
title = self.kwargs.get("title")
if title:
attrs["title"] = title
css_classes = merge_css_text(self.get_css_classes())
if css_classes:
attrs["class"] = css_classes
return attrs
def get_content(self):
"""Return content for the HTML element."""
return self.content or ""
def get_format_string(self):
"""Return format string for HTML output."""
return self.format_string
def get_format_context(self):
"""Return context for HTML output."""
return {
"tag": self.get_tag(),
"attrs": flatatt(self.get_attrs()),
"content": self.get_content(),
}
def render(self):
"""Return HTML output for icon."""
return format_html(self.get_format_string(), **self.get_format_context())
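# Hedged usage sketch: with the defaults above, rendering a Font Awesome-style name plus a
# title and an extra class should yield something along the lines of
# <i class="fa fa-user fa-lg" title="User"></i>. The class names are illustrative assumptions.
if __name__ == "__main__":
    icon = IconRenderer("fa fa-user", title="User", extra_classes="fa-lg")
    print(icon.render())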
|
11458831
|
from datetime import datetime, timedelta
from urllib.parse import urlencode
import colander
import kinto.core
from cornice.validators import colander_validator
from kinto.authorization import RouteFactory
from kinto.core import resource
from kinto.core import utils as core_utils
from kinto.core.storage import Filter, Sort
from kinto.core.storage import exceptions as storage_exceptions
from kinto.core.storage.memory import extract_object_set
from kinto.core.utils import COMPARISON, instance_uri
from pyramid import httpexceptions
from pyramid.security import IAuthorizationPolicy
from zope.interface import implementer
from . import (
CHANGES_COLLECTION,
CHANGES_COLLECTION_PATH,
CHANGES_RECORDS_PATH,
CHANGESET_PATH,
MONITOR_BUCKET,
)
from .utils import changes_object, monitored_collections
class ChangesModel(object):
id_field = "id"
modified_field = "last_modified"
deleted_field = "deleted"
permissions_field = "__permissions__"
def __init__(self, request):
self.request = request
self.storage = request.registry.storage
self.__entries = None
def timestamp(self):
if not self._entries():
return core_utils.msec_time()
max_value = max([e["last_modified"] for e in self._entries()])
return max_value
def get_objects(
self,
filters=None,
sorting=None,
pagination_rules=None,
limit=None,
include_deleted=False,
parent_id=None,
):
objs, _ = extract_object_set(
objects=self._entries(),
filters=filters,
sorting=sorting,
pagination_rules=pagination_rules,
limit=limit,
)
return objs
def _entries(self):
if self.__entries is None:
self.__entries = {}
for (bucket_id, collection_id) in monitored_collections(
self.request.registry
):
collection_uri = core_utils.instance_uri(
self.request, "collection", bucket_id=bucket_id, id=collection_id
)
timestamp = self.storage.resource_timestamp(
parent_id=collection_uri, resource_name="record"
)
entry = changes_object(
self.request, bucket_id, collection_id, timestamp
)
self.__entries[entry[self.id_field]] = entry
return self.__entries.values()
class ChangesSchema(resource.ResourceSchema):
host = colander.SchemaNode(colander.String())
bucket = colander.SchemaNode(colander.String())
collection = colander.SchemaNode(colander.String())
class Options:
preserve_unknown = False
@implementer(IAuthorizationPolicy)
class AnonymousRoute(RouteFactory):
def check_permission(self, principals, bound_perms):
# Bypass permissions check on /buckets/monitor.
return True
@resource.register(
name="changes",
description="List of changes",
plural_path=CHANGES_RECORDS_PATH,
object_path=None,
plural_methods=("GET",),
factory=AnonymousRoute,
)
class Changes(resource.Resource):
schema = ChangesSchema
def __init__(self, request, context=None):
# Bypass call to storage if _since is too old.
_handle_old_since_redirect(request)
# Inject custom model.
self.model = ChangesModel(request)
super(Changes, self).__init__(request, context)
def plural_get(self):
result = super().plural_get()
_handle_cache_expires(self.request, MONITOR_BUCKET, CHANGES_COLLECTION)
return result
def _handle_cache_expires(request, bid, cid):
# If the client sends cache busting query parameters, then we can cache more
# aggressively.
settings = request.registry.settings
prefix = f"{bid}.{cid}.record_cache"
default_expires = settings.get(f"{prefix}_expires_seconds")
maximum_expires = settings.get(f"{prefix}_maximum_expires_seconds", default_expires)
has_cache_busting = "_expected" in request.GET
cache_expires = maximum_expires if has_cache_busting else default_expires
if cache_expires is not None:
request.response.cache_expires(seconds=int(cache_expires))
def _handle_old_since_redirect(request):
"""
In order to limit the number of possible combinations
of `_since` and `_expected` querystring parameters,
and thus maximize the effect of caching, we redirect the clients
that arrive here with a very old `_since` value.
This simply means that these clients will have to iterate
and compare the local timestamps of the whole list of changes
instead of a filtered subset.
https://searchfox.org/mozilla-central/rev/b58ca450/services/settings/remote-settings.js#299
See https://bugzilla.mozilla.org/show_bug.cgi?id=1529685
and https://bugzilla.mozilla.org/show_bug.cgi?id=1665319#c2
"""
try:
# request.validated is not populated yet (resource was not instantiated yet,
# we want to bypass storage).
qs_since_str = request.GET.get("_since", "")
qs_since = int(qs_since_str.strip('"'))
except ValueError:
# Will fail later during resource querystring validation.
return
settings = request.registry.settings
max_age_since = int(settings.get("changes.since_max_age_days", 21))
if max_age_since < 0:
# Redirect is disabled.
return
min_since_dt = datetime.now() - timedelta(days=max_age_since)
min_since = min_since_dt.timestamp() * 1000
if qs_since >= min_since:
# Since value is recent. No redirect.
return
http_scheme = settings.get("http_scheme") or "https"
http_host = settings.get(
"changes.http_host", request.registry.settings.get("http_host")
)
host_uri = f"{http_scheme}://{http_host}"
redirect = host_uri + request.matched_route.generate(request.matchdict)
queryparams = request.GET.copy()
del queryparams["_since"]
if queryparams:
redirect += "?" + urlencode(queryparams)
# Serve a redirection, with optional cache control headers.
response = httpexceptions.HTTPTemporaryRedirect(redirect)
cache_seconds = int(
settings.get("changes.since_max_age_redirect_ttl_seconds", 86400)
)
if cache_seconds >= 0:
response.cache_expires(cache_seconds)
raise response
@implementer(IAuthorizationPolicy)
class ChangeSetRoute(RouteFactory):
"""The changeset endpoint should have the same permissions as the collection
metadata.
The permission to read records is implicit when metadata are readable.
"""
def __init__(self, request):
super().__init__(request)
bid = request.matchdict["bid"]
cid = request.matchdict["cid"]
collection_uri = instance_uri(request, "collection", bucket_id=bid, id=cid)
# This route context will be the same as when reaching the collection URI.
self.permission_object_id = collection_uri
self.required_permission = "read"
def check_permission(self, principals, bound_perms):
        # The monitor/changes changeset endpoint is publicly accessible.
if self.permission_object_id == CHANGES_COLLECTION_PATH:
return True
# Otherwise rely on the collection permissions.
return super().check_permission(principals, bound_perms)
changeset = kinto.core.Service(
name="collection-changeset", path=CHANGESET_PATH, factory=ChangeSetRoute
)
class QuotedTimestamp(colander.SchemaNode):
"""Integer between "" used in _since querystring."""
schema_type = colander.String
error_message = "The value should be integer between double quotes."
validator = colander.Regex('^"([0-9]+?)"$', msg=error_message)
def deserialize(self, cstruct=colander.null):
param = super(QuotedTimestamp, self).deserialize(cstruct)
if param is colander.drop:
return param
return int(param.strip('"'))
class ChangeSetQuerystring(colander.MappingSchema):
_since = QuotedTimestamp(missing=colander.drop)
_expected = colander.SchemaNode(colander.String())
_limit = colander.SchemaNode(colander.Integer(), missing=colander.drop)
# Query parameters used on monitor/changes endpoint.
bucket = colander.SchemaNode(colander.String(), missing=colander.drop)
collection = colander.SchemaNode(colander.String(), missing=colander.drop)
class ChangeSetSchema(colander.MappingSchema):
querystring = ChangeSetQuerystring()
@changeset.get(
schema=ChangeSetSchema(), permission="read", validators=(colander_validator,)
)
def get_changeset(request):
bid = request.matchdict["bid"]
cid = request.matchdict["cid"]
storage = request.registry.storage
queryparams = request.validated["querystring"]
limit = queryparams.get("_limit")
filters = []
include_deleted = False
if "_since" in queryparams:
filters = [Filter("last_modified", queryparams["_since"], COMPARISON.GT)]
# Include tombstones when querying with _since
include_deleted = True
if (bid, cid) == (MONITOR_BUCKET, CHANGES_COLLECTION):
# Redirect old since, on monitor/changes only.
_handle_old_since_redirect(request)
if "bucket" in queryparams:
filters.append(Filter("bucket", queryparams["bucket"], COMPARISON.EQ))
if "collection" in queryparams:
filters.append(
Filter("collection", queryparams["collection"], COMPARISON.EQ)
)
model = ChangesModel(request)
metadata = {}
timestamp = model.timestamp()
changes = model.get_objects(
filters=filters, limit=limit, include_deleted=include_deleted
)
else:
bucket_uri = instance_uri(request, "bucket", id=bid)
collection_uri = instance_uri(request, "collection", bucket_id=bid, id=cid)
try:
# We'll make sure that data isn't changed while we read metadata, changes,
# etc.
before = storage.resource_timestamp(
resource_name="record", parent_id=collection_uri
)
# Fetch collection metadata.
metadata = storage.get(
resource_name="collection", parent_id=bucket_uri, object_id=cid
)
except storage_exceptions.ObjectNotFoundError:
raise httpexceptions.HTTPNotFound()
except storage_exceptions.BackendError as e:
            # The call to `resource_timestamp()` on an empty collection will try to
            # initialize it. If the instance is read-only, it fails with a backend
            # error. Raise 404 in this case, otherwise raise the original backend error.
if "when running in readonly" in str(e):
raise httpexceptions.HTTPNotFound()
raise
# Fetch list of changes.
changes = storage.list_all(
resource_name="record",
parent_id=collection_uri,
filters=filters,
limit=limit,
id_field="id",
modified_field="last_modified",
deleted_field="deleted",
sorting=[Sort("last_modified", -1)],
include_deleted=include_deleted,
)
# Fetch current collection timestamp.
timestamp = storage.resource_timestamp(
resource_name="record", parent_id=collection_uri
)
# Do not serve inconsistent data.
if before != timestamp: # pragma: no cover
raise storage_exceptions.IntegrityError(message="Inconsistent data. Retry.")
# Cache control.
_handle_cache_expires(request, bid, cid)
data = {
"metadata": metadata,
"timestamp": timestamp,
"changes": changes,
}
return data
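# Hedged client-side sketch of the _since convention enforced by QuotedTimestamp above: the
# querystring value is an integer wrapped in double quotes, and the server strips the quotes
# before comparing timestamps. The timestamp and _expected values are illustrative assumptions.
if __name__ == "__main__":
    raw_since = '"1573086234731"'
    assert int(raw_since.strip('"')) == 1573086234731
    print("example changeset query: ?_expected=42&_since=" + raw_since)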
|
11458839
|
import brownie
from brownie import ZERO_ADDRESS, Contract, Settler, accounts, chain
from brownie.test import strategy
from brownie_tokens import MintableForkToken
TOKENS = [
(
"0x57ab1ec28d129707052df4df418d58a2d46d5f51", # sUSD
"0xdac17f958d2ee523a2206206994597c13d831ec7", # DAI
"0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48", # USDC
"0x6b175474e89094c44da98b954eedeac495271d0f", # USDT
),
(
"0xfe18be6b3bd88a2d2a7f928d00292e7a9963cfc6", # sBTC
"0xeb4c2781e4eba804ce9a9803c67d0893436bb27d", # renBTC
"0x2260fac5e5542a773aa44fbcfedf7c193bc2c599", # wBTC
),
(
"0xD71eCFF9342A5Ced620049e616c5035F1dB98620", # sEUR
"0xdB25f211AB05b1c97D595516F45794528a807ad8", # EURS
),
]
class StateMachine:
st_acct = strategy("address", length=5)
st_acct2 = strategy("address", length=5)
st_token = strategy("uint", max_value=2)
st_synth = strategy("uint", max_value=2)
st_idx = strategy("decimal", min_value=0, max_value="0.99", places=2)
st_amount = strategy("decimal", min_value=1, max_value=10, places=3)
def __init__(cls, swap):
cls.swap = swap
def setup(self):
self.settlers = [i["addr"] for i in self.swap.tx.events["NewSettler"]]
self.used_token_ids = []
self.active_token_ids = {}
# "Marty - you gotta come back with me!"
# we're doing this because SNX oracle rates expire in 25 hours
# it's weird and hacky but it works ¯\_(ツ)_/¯
chain.mine(timestamp=1600000000)
def _mint(self, acct, token, amount):
token = MintableForkToken(token)
amount = int(amount * 10 ** token.decimals())
if not token.allowance(acct, self.swap):
token.approve(self.swap, 2 ** 256 - 1, {"from": acct})
balance = token.balanceOf(acct)
if balance < amount:
token._mint_for_testing(acct, amount - balance)
return amount
def _all_token_ids(self):
return (
[x for v in self.active_token_ids.values() for x in v]
+ self.used_token_ids
+ [int(i, 16) for i in self.settlers]
)
def rule_swap_into(self, st_acct, st_token, st_synth, st_idx, st_amount):
"""
Generate a new NFT via a cross-asset swap.
"""
idx = int(st_idx * len(TOKENS[st_token]))
initial = TOKENS[st_token][idx]
synth = TOKENS[st_synth][0]
amount = self._mint(st_acct, initial, st_amount)
if st_token == st_synth:
# initial token and target synth come from the same asset class
# no cross-asset swap is possible
with brownie.reverts():
self.swap.swap_into_synth(initial, synth, amount, 0, {"from": st_acct})
else:
tx = self.swap.swap_into_synth(initial, synth, amount, 0, {"from": st_acct})
token_id = tx.events["Transfer"][-1]["token_id"]
assert token_id != 0
if "NewSettler" in tx.events:
settler = tx.events["NewSettler"]["addr"]
assert settler not in self.settlers
self.settlers.append(settler)
# make sure `token_id` isn't previously assigned
            assert (
                token_id
                not in [x for v in self.active_token_ids.values() for x in v] + self.used_token_ids
            )
self.active_token_ids.setdefault(st_acct, []).append(token_id)
chain.mine(timedelta=600)
def rule_swap_into_existing(self, st_acct, st_token, st_amount, st_idx):
"""
        Increase the underlying balance of an existing NFT via a cross-asset swap.
"""
if self.active_token_ids.get(st_acct):
idx = int(st_idx * len(self.active_token_ids[st_acct]))
token_id = self.active_token_ids[st_acct][idx]
else:
token_ids = self._all_token_ids()
idx = int(st_idx * len(token_ids))
token_id = token_ids[idx]
synth = Settler.at(hex(token_id % 2 ** 160)).synth()
idx = int(st_idx * len(TOKENS[st_token]))
initial = TOKENS[st_token][idx]
amount = self._mint(st_acct, initial, st_amount)
if self.active_token_ids.get(st_acct) and TOKENS[st_token][0] != synth:
self.swap.swap_into_synth(
initial, synth, amount, 0, st_acct, token_id, {"from": st_acct}
)
chain.mine(timedelta=600)
else:
with brownie.reverts():
self.swap.swap_into_synth(
initial, synth, amount, 0, st_acct, token_id, {"from": st_acct}
)
def rule_transfer(self, st_acct, st_acct2, st_idx):
"""
Transfer ownership of an NFT.
"""
if self.active_token_ids.get(st_acct):
# choose from the caller's valid NFT token IDs, if there are any
idx = int(st_idx * len(self.active_token_ids[st_acct]))
token_id = self.active_token_ids[st_acct][idx]
self.swap.transferFrom(st_acct, st_acct2, token_id, {"from": st_acct})
self.active_token_ids[st_acct].remove(token_id)
self.active_token_ids.setdefault(st_acct2, []).append(token_id)
else:
# if the caller does not own any NFTs, choose from any token ID
token_ids = self._all_token_ids()
idx = int(st_idx * len(token_ids))
token_id = token_ids[idx]
with brownie.reverts():
self.swap.transferFrom(st_acct, st_acct2, token_id, {"from": st_acct})
def rule_withdraw(self, st_acct, st_amount, st_idx):
"""
Withdraw a synth from an NFT.
"""
if self.active_token_ids.get(st_acct):
# choose from the caller's valid NFT token IDs, if there are any
idx = int(st_idx * len(self.active_token_ids[st_acct]))
token_id = self.active_token_ids[st_acct][idx]
else:
# if the caller does not own any NFTs, choose from any token ID
token_ids = self._all_token_ids()
idx = int(st_idx * len(token_ids))
token_id = token_ids[idx]
amount = int(st_amount * 10 ** 18)
if self.active_token_ids.get(st_acct):
# when the action is possible, don't exceed the max underlying balance
balance = self.swap.token_info(token_id)["underlying_balance"]
amount = min(amount, balance)
if self.active_token_ids.get(st_acct):
self.swap.withdraw(token_id, amount, {"from": st_acct})
if balance == amount:
self.active_token_ids[st_acct].remove(token_id)
self.used_token_ids.append(token_id)
else:
with brownie.reverts():
self.swap.withdraw(token_id, amount, {"from": st_acct})
def rule_swap_from(self, st_acct, st_token, st_amount, st_idx):
"""
Swap a synth out of an NFT.
"""
if self.active_token_ids.get(st_acct):
# choose from the caller's valid NFT token IDs, if there are any
idx = int(st_idx * len(self.active_token_ids[st_acct]))
token_id = self.active_token_ids[st_acct][idx]
else:
# if the caller does not own any NFTs, choose from any token ID
token_ids = self._all_token_ids()
idx = int(st_idx * len(token_ids))
token_id = token_ids[idx]
# choose a target coin for the swap
synth = Settler.at(hex(token_id % 2 ** 160)).synth()
if synth == ZERO_ADDRESS:
# if the token ID is not active, choose from any possible token - all should fail
token_list = [x for v in TOKENS for x in v]
else:
# if the token ID is active, choose from the list of possible targets
token_list = next(i for i in TOKENS if i[0] == synth)
idx = int(st_idx * len(token_list))
target = token_list[idx]
amount = int(st_amount * 10 ** 18)
if self.active_token_ids.get(st_acct):
# when the action is possible, don't exceed the max underlying balance
balance = self.swap.token_info(token_id)["underlying_balance"]
amount = min(amount, balance)
if self.active_token_ids.get(st_acct) and synth != target:
            # sender owns the NFT, target is not the same as the underlying synth
self.swap.swap_from_synth(token_id, target, amount, 0, {"from": st_acct})
if balance == amount:
self.active_token_ids[st_acct].remove(token_id)
self.used_token_ids.append(token_id)
else:
with brownie.reverts():
self.swap.swap_from_synth(
token_id, target, amount, 0, {"from": st_acct}
)
def teardown(self):
"""
Verify balances and ownership of active and burned NFTs.
"""
for acct, token_id in [
(k, x) for k, v in self.active_token_ids.items() for x in v
]:
token_info = self.swap.token_info(token_id)
synth = Contract(token_info["synth"])
settler = hex(token_id % 2 ** 160)
assert self.swap.ownerOf(token_id) == acct == token_info["owner"]
assert synth.balanceOf(settler) == token_info["underlying_balance"]
assert len(self.used_token_ids) == len(set(self.used_token_ids))
for token_id in self.used_token_ids:
with brownie.reverts():
self.swap.ownerOf(token_id)
for acct in accounts[:5]:
assert self.swap.balanceOf(acct) == len(self.active_token_ids.get(acct, []))
def test_stateful(state_machine, swap, add_synths):
state_machine(StateMachine, swap, settings={"stateful_step_count": 30})
|
11458847
|
from typing import List
import collections


class Solution:
    def luckyNumbers(self, matrix: List[List[int]]) -> List[int]:
        if len(matrix) == 0 or len(matrix[0]) == 0:
            return []
l = collections.defaultdict(int)
n, m = len(matrix), len(matrix[0])
def findMax(j):
if j in l:
return l[j]
maxn = matrix[0][j]
for i in range(1,n):
if matrix[i][j] > maxn:
maxn = matrix[i][j]
l[j] = maxn
return maxn
res = []
for r in matrix:
minn = min(r)
ind = r.index(minn)
if findMax(ind) == minn:
res.append(minn)
return res
"""
We can create a set of the minimum row values using set comprehension.
To create a set of the maximum column values we can use the unpacking operator (*) on the matrix
and use zip to iterate over the unpacked rows in parallel.
We can then get a set of our lucky numbers by intersecting those two sets using the intersection operator (&).
Then all that is left to do is to convert the set back into a list.
"""
class Solution:
def luckyNumbers(self, matrix: List[List[int]]) -> List[int]:
return list({min(row) for row in matrix} & {max(col) for col in zip(*matrix)})
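# Quick worked example of the set-intersection idea on an illustrative matrix: the row
# minimums are {3, 9, 15}, the column maximums are {15, 16, 17}, and their intersection
# {15} is the lucky number (15 is the minimum of its row and the maximum of its column).
if __name__ == "__main__":
    demo = [[3, 7, 8], [9, 11, 13], [15, 16, 17]]
    print(list({min(row) for row in demo} & {max(col) for col in zip(*demo)}))  # [15]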
|
11458902
|
import pytest
import boost_histogram as bh
@pytest.fixture(params=(False, True), ids=("no_growth", "growth"))
def growth(request):
return request.param
@pytest.fixture(params=(False, True), ids=("no_overflow", "overflow"))
def overflow(request):
return request.param
@pytest.fixture(params=(False, True), ids=("no_underflow", "underflow"))
def underflow(request):
return request.param
@pytest.fixture(params=(False, True), ids=("no_flow", "flow"))
def flow(request):
return request.param
@pytest.fixture(
params=(None, "str", 1, {"a": 1}),
ids=("no_metadata", "str_metadata", "int_metadata", "dict_metadata"),
)
def metadata(request):
return request.param
@pytest.fixture(
params=(
bh.storage.Double,
bh.storage.Int64,
bh.storage.AtomicInt64,
bh.storage.Weight,
bh.storage.Unlimited,
),
ids=("Double", "Int64", "AtomicInt64", "Weight", "Unlimited"),
)
def count_storage(request):
return request.param
@pytest.fixture(
params=(
bh.storage.Double,
bh.storage.Int64,
bh.storage.AtomicInt64,
bh.storage.Unlimited,
),
ids=("Double", "Int64", "AtomicInt64", "Unlimited"),
)
def count_single_storage(request):
return request.param
|
11458909
|
import collections
import logging
from django.db import models
from django.db.migrations.topological_sort import stable_topological_sort
from django.utils.lru_cache import lru_cache
import requests
from . import SupportedServices
logger = logging.getLogger(__name__)
Coordinates = collections.namedtuple('Coordinates', ('latitude', 'longitude'))
FieldInfo = collections.namedtuple('FieldInfo', 'fields fields_required extra_fields_required')
class GeoIpException(Exception):
pass
def get_coordinates_by_ip(ip_address):
url = 'http://freegeoip.net/json/{}'.format(ip_address)
try:
response = requests.get(url)
except requests.exceptions.RequestException as e:
raise GeoIpException("Request to geoip API %s failed: %s" % (url, e))
if response.ok:
data = response.json()
return Coordinates(latitude=data['latitude'],
longitude=data['longitude'])
else:
params = (url, response.status_code, response.text)
raise GeoIpException("Request to geoip API %s failed: %s %s" % params)
@lru_cache(maxsize=1)
def get_sorted_dependencies(service_model):
"""
Returns list of application models in topological order.
It is used in order to correctly delete dependent resources.
"""
app_models = list(service_model._meta.app_config.get_models())
dependencies = {model: set() for model in app_models}
relations = (
relation
for model in app_models
for relation in model._meta.related_objects
if relation.on_delete in (models.PROTECT, models.CASCADE)
)
for rel in relations:
dependencies[rel.model].add(rel.related_model)
return stable_topological_sort(app_models, dependencies)
def sort_dependencies(service_model, resources):
ordering = get_sorted_dependencies(service_model)
resources.sort(key=lambda resource: ordering.index(resource._meta.model))
return resources
@lru_cache(maxsize=1)
def get_all_services_field_info():
services_fields = dict()
services_fields_required = dict()
services_extra_fields_required = dict()
service_models = SupportedServices.get_service_models()
for service_name in service_models:
service_model = service_models[service_name]['service']
service_serializer = SupportedServices.get_service_serializer(service_model)
fields = service_serializer.SERVICE_ACCOUNT_FIELDS.keys() \
if service_serializer.SERVICE_ACCOUNT_FIELDS is not NotImplemented else []
fields_extra = service_serializer.SERVICE_ACCOUNT_EXTRA_FIELDS.keys() \
if service_serializer.SERVICE_ACCOUNT_EXTRA_FIELDS is not NotImplemented else []
fields_required = service_serializer.Meta.required_fields \
if hasattr(service_serializer.Meta, 'required_fields') else []
services_fields[service_name] = list(fields)
services_fields_required[service_name] = list(set(fields) & set(fields_required))
services_extra_fields_required[service_name] = list(set(fields_extra) & set(fields_required))
return FieldInfo(fields=services_fields,
fields_required=services_fields_required,
extra_fields_required=services_extra_fields_required)
def update_pulled_fields(instance, imported_instance, fields):
"""
    Update instance fields based on data imported from the backend.
    Save changes to the DB only if one or more fields were changed.
"""
modified = False
for field in fields:
pulled_value = getattr(imported_instance, field)
current_value = getattr(instance, field)
if current_value != pulled_value:
setattr(instance, field, pulled_value)
logger.info("%s's with PK %s %s field updated from value '%s' to value '%s'",
instance.__class__.__name__, instance.pk, field, current_value, pulled_value)
modified = True
error_message = getattr(imported_instance, 'error_message', '') or getattr(instance, 'error_message', '')
if error_message and instance.error_message != error_message:
instance.error_message = imported_instance.error_message
modified = True
if modified:
instance.save()
def handle_resource_not_found(resource):
"""
Set resource state to ERRED and append/create "not found" error message.
"""
resource.set_erred()
resource.runtime_state = ''
message = 'Does not exist at backend.'
if message not in resource.error_message:
if not resource.error_message:
resource.error_message = message
else:
resource.error_message += ' (%s)' % message
resource.save()
logger.warning('%s %s (PK: %s) does not exist at backend.' % (
resource.__class__.__name__, resource, resource.pk))
def handle_resource_update_success(resource):
"""
Recover resource if its state is ERRED and clear error message.
"""
update_fields = []
if resource.state == resource.States.ERRED:
resource.recover()
update_fields.append('state')
if resource.state in (resource.States.UPDATING, resource.States.CREATING):
resource.set_ok()
update_fields.append('state')
if resource.error_message:
resource.error_message = ''
update_fields.append('error_message')
if update_fields:
resource.save(update_fields=update_fields)
logger.warning('%s %s (PK: %s) was successfully updated.' % (
resource.__class__.__name__, resource, resource.pk))
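# Hedged sketch of the update_pulled_fields() contract using plain stand-in objects. The
# _Stub class below is hypothetical; real callers pass Django model instances with a pk,
# an error_message and a save() method, which is all the helper relies on here.
if __name__ == "__main__":
    class _Stub:
        pk = 1
        error_message = ''

        def __init__(self, **kwargs):
            self.__dict__.update(kwargs)

        def save(self):
            print('saved with', self.__dict__)

    local = _Stub(name='vm-1', cores=2)
    remote = _Stub(name='vm-1', cores=4)
    update_pulled_fields(local, remote, ['name', 'cores'])  # only "cores" differs, so save() runs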
|
11458985
|
import os
os.environ['CUDA_VISIBLE_DEVICES'] = ''
os.environ['MALAYA_USE_HUGGINGFACE'] = 'true'
import sys
import malaya
import logging
logging.basicConfig(level=logging.DEBUG)
text = 'Jabatan Penjara Malaysia diperuntukkan RM20 juta laksana program pembangunan Insan kepada banduan. Majikan yang menggaji bekas banduan, bekas penagih dadah diberi potongan cukai tambahan sehingga 2025.'
def test_transformer():
models = malaya.relevancy.available_transformer()
for m in models.index:
print(m)
model = malaya.relevancy.transformer(model=m, gpu_limit=0.3)
print(model.predict_proba([text]))
try:
print(model.predict_words(text, visualization=False))
print(model.vectorize([text]))
except Exception as e:
print(m, e)
os.system('rm -f ~/.cache/huggingface/hub/*')
del model
|
11459012
|
from typing import Callable, Iterable, TypeVar
from expression.collections import seq
from expression.core import Builder, identity
TSource = TypeVar("TSource")
TResult = TypeVar("TResult")
TState = TypeVar("TState")
class SeqBuilder(Builder[Iterable[TSource], TSource]):
def bind(self, xs: Iterable[TSource], fn: Callable[[TSource], Iterable[TResult]]) -> Iterable[TResult]:
return list(seq.collect(fn)(xs))
def return_(self, x: TSource) -> Iterable[TSource]:
return seq.singleton(x)
def return_from(self, xs: Iterable[TSource]) -> Iterable[TSource]:
return xs
def combine(self, xs: Iterable[TSource], ys: Iterable[TSource]) -> Iterable[TSource]:
return list(seq.concat(xs, ys))
def zero(self) -> Iterable[TSource]:
return seq.empty
# seq_builder: SeqBuilder[Any] = SeqBuilder()
seq_effect = identity # For now
__all__ = ["seq"]
|
11459017
|
import cv2
from frame import Frame
class Video:
    videoPath = ""
    framesDirectory = "/var/src/output/frames"
    video = None
    frameCount = 0
    cols = 0
    rows = 0

    def __init__(self, videoPath, slowInport=False):
        self.videoPath = videoPath
        self.video = cv2.VideoCapture(videoPath)
        self.frameCount = int(self.video.get(cv2.CAP_PROP_FRAME_COUNT))
        self.cols = int(self.video.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.rows = int(self.video.get(cv2.CAP_PROP_FRAME_HEIGHT))
        # Keep the frame cache per instance instead of sharing it across all videos.
        self.frames = {}
        if slowInport:
            self.loadInFrames()

    def buildFramePath(self, key):
        return self.framesDirectory + "/" + str(key) + ".jpg"

    def getFrame(self, key):
        # Lazily read the frame from the video the first time it is requested.
        if self.frames.get(key) is None:
            self.loadInFrame(key)
        return self.frames[key]

    def loadInFrame(self, key):
        # Seek to the requested frame, read and cache it, then rewind to the start.
        self.video.set(cv2.CAP_PROP_POS_FRAMES, key)
        image = self.video.read()[1]
        framePath = self.buildFramePath(key)
        self.frames[key] = Frame(key, framePath, image)
        self.video.set(cv2.CAP_PROP_POS_FRAMES, 0)

    def loadInFrames(self, endFrameKey=None):
        # Read frames sequentially up to endFrameKey (defaults to the last frame).
        if endFrameKey is None:
            endFrameKey = self.frameCount - 1
        for key in range(endFrameKey):
            image = self.video.read()[1]
            framePath = self.buildFramePath(key)
            self.frames[key] = Frame(key, framePath, image)
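# Hedged usage sketch: the video path and frame index are illustrative assumptions, and
# Frame comes from the local frame module imported above.
if __name__ == "__main__":
    video = Video("/var/src/input/example.mp4")
    print(video.frameCount, video.cols, video.rows)
    first_frame = video.getFrame(0)  # lazily reads and caches frame 0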
|
11459104
|
import os
import sys
from pathlib import Path
import os.path as osp
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch import nn
from torchvision.utils import make_grid
from tqdm import tqdm
from advent.model.discriminator import get_fc_discriminator
from advent.utils.func import adjust_learning_rate, adjust_learning_rate_discriminator
from advent.utils.func import loss_calc, bce_loss
from advent.utils.loss import entropy_loss
from advent.utils.func import prob_2_entropy
from advent.utils.viz_segmask import colorize_mask
def train_domain_adaptation(model, source_loader, target_loader, cfg):
if cfg.TRAIN.DA_METHOD == 'DAVSN':
train_DAVSN(model, source_loader, target_loader, cfg)
else:
raise NotImplementedError(f"Not yet supported DA method {cfg.TRAIN.DA_METHOD}")
def train_DAVSN(model, source_loader, target_loader, cfg):
# Create the model and start the training.
input_size_source = cfg.TRAIN.INPUT_SIZE_SOURCE
input_size_target = cfg.TRAIN.INPUT_SIZE_TARGET
device = cfg.GPU_ID
num_classes = cfg.NUM_CLASSES
viz_tensorboard = os.path.exists(cfg.TRAIN.TENSORBOARD_LOGDIR)
if viz_tensorboard:
writer = SummaryWriter(log_dir=cfg.TRAIN.TENSORBOARD_LOGDIR)
    # SEGMENTATION NETWORK
model.train()
model.to(device)
cudnn.benchmark = True
cudnn.enabled = True
# DISCRIMINATOR NETWORK
d_sta_aux = get_fc_discriminator(num_classes=num_classes*2)
d_sta_aux.train()
d_sta_aux.to(device)
d_sta_main = get_fc_discriminator(num_classes=num_classes*2)
d_sta_main.train()
d_sta_main.to(device)
d_sa_aux = get_fc_discriminator(num_classes=num_classes*2)
d_sa_aux.train()
d_sa_aux.to(device)
d_sa_main = get_fc_discriminator(num_classes=num_classes*2)
d_sa_main.train()
d_sa_main.to(device)
# OPTIMIZERS
optimizer = optim.SGD(model.optim_parameters(cfg.TRAIN.LEARNING_RATE),
lr=cfg.TRAIN.LEARNING_RATE,
momentum=cfg.TRAIN.MOMENTUM,
weight_decay=cfg.TRAIN.WEIGHT_DECAY)
# discriminators' optimizers
optimizer_d_sta_aux = optim.Adam(d_sta_aux.parameters(), lr=cfg.TRAIN.LEARNING_RATE_D,
betas=(0.9, 0.99))
optimizer_d_sta_main = optim.Adam(d_sta_main.parameters(), lr=cfg.TRAIN.LEARNING_RATE_D,
betas=(0.9, 0.99))
optimizer_d_sa_aux = optim.Adam(d_sa_aux.parameters(), lr=cfg.TRAIN.LEARNING_RATE_D,
betas=(0.9, 0.99))
optimizer_d_sa_main = optim.Adam(d_sa_main.parameters(), lr=cfg.TRAIN.LEARNING_RATE_D,
betas=(0.9, 0.99))
# interpolate output segmaps
interp_source = nn.Upsample(size=(input_size_source[1], input_size_source[0]), mode='bilinear',
align_corners=True)
interp_target = nn.Upsample(size=(input_size_target[1], input_size_target[0]), mode='bilinear',
align_corners=True)
# labels for adversarial training
source_label = 0
target_label = 1
source_loader_iter = enumerate(source_loader)
target_loader_iter = enumerate(target_loader)
for i_iter in tqdm(range(cfg.TRAIN.EARLY_STOP + 1)):
# reset optimizers
optimizer.zero_grad()
optimizer_d_sta_aux.zero_grad()
optimizer_d_sta_main.zero_grad()
optimizer_d_sa_aux.zero_grad()
optimizer_d_sa_main.zero_grad()
# adapt LR if needed
adjust_learning_rate(optimizer, i_iter, cfg)
adjust_learning_rate_discriminator(optimizer_d_sta_aux, i_iter, cfg)
adjust_learning_rate_discriminator(optimizer_d_sta_main, i_iter, cfg)
adjust_learning_rate_discriminator(optimizer_d_sa_aux, i_iter, cfg)
adjust_learning_rate_discriminator(optimizer_d_sa_main, i_iter, cfg)
######### Source-domain supervised training
for param in d_sta_aux.parameters():
param.requires_grad = False
for param in d_sta_main.parameters():
param.requires_grad = False
for param in d_sa_aux.parameters():
param.requires_grad = False
for param in d_sa_main.parameters():
param.requires_grad = False
_, source_batch = source_loader_iter.__next__()
src_img_cf, src_label, src_img_kf, _, src_img_name = source_batch
if src_label.dim() == 4:
src_label = src_label.squeeze(-1)
file_name = src_img_name[0].split('/')[-1]
if cfg.SOURCE == 'Viper':
frame = int(file_name.replace('.jpg', '')[-5:])
frame1 = frame - 1
flow_int16_x10_name = file_name.replace('.jpg', str(frame1).zfill(5) + '_int16_x10')
elif cfg.SOURCE == 'SynthiaSeq':
flow_int16_x10_name = file_name.replace('.png', '_int16_x10')
flow_int16_x10 = np.load(os.path.join(cfg.TRAIN.flow_path_src, flow_int16_x10_name + '.npy'))
src_flow = torch.from_numpy(flow_int16_x10 / 10.0).permute(2, 0, 1).unsqueeze(0)
src_pred_aux, src_pred, src_pred_cf_aux, src_pred_cf, src_pred_kf_aux, src_pred_kf = model(src_img_cf.cuda(device), src_img_kf.cuda(device), src_flow, device)
src_pred = interp_source(src_pred)
loss_seg_src_main = loss_calc(src_pred, src_label, device)
if cfg.TRAIN.MULTI_LEVEL:
src_pred_aux = interp_source(src_pred_aux)
loss_seg_src_aux = loss_calc(src_pred_aux, src_label, device)
else:
loss_seg_src_aux = 0
loss = (cfg.TRAIN.LAMBDA_SEG_MAIN * loss_seg_src_main
+ cfg.TRAIN.LAMBDA_SEG_AUX * loss_seg_src_aux)
loss.backward()
        ######### Unsupervised domain adaptation
_, target_batch = target_loader_iter.__next__()
trg_img_cf, _, image_trg_kf, _, name = target_batch
file_name = name[0].split('/')[-1]
frame = int(file_name.replace('_leftImg8bit.png', '')[-6:])
frame1 = frame - 1
flow_int16_x10_name_trg = file_name.replace('leftImg8bit.png', str(frame1).zfill(6) + '_int16_x10')
flow_int16_x10_trg = np.load(os.path.join(cfg.TRAIN.flow_path, flow_int16_x10_name_trg + '.npy'))
trg_flow = torch.from_numpy(flow_int16_x10_trg / 10.0).permute(2, 0, 1).unsqueeze(0)
trg_pred_aux, trg_pred, trg_pred_cf_aux, trg_pred_cf, trg_pred_kf_aux, trg_pred_kf = model(trg_img_cf.cuda(device), image_trg_kf.cuda(device), trg_flow, device)
###### Intra-domain TCR
adversarial_factor_aux = cfg.TRAIN.LAMBDA_ADV_AUX / cfg.TRAIN.LAMBDA_ADV_MAIN # as in Advent
# for current frame (cf)
trg_prob_cf = F.softmax(trg_pred_cf)
trg_prob_cf_aux = F.softmax(trg_pred_cf_aux)
trg_ent_cf = torch.mean(prob_2_entropy(trg_prob_cf), dim=1).detach().cpu()
trg_ent_cf_aux = torch.mean(prob_2_entropy(trg_prob_cf_aux), dim=1).detach().cpu()
# for key frame (kf)
        trg_prob_kf = F.softmax(trg_pred_kf).detach().cpu().numpy()
        trg_prob_aux_kf = F.softmax(trg_pred_kf_aux).detach().cpu().numpy()
        trg_ent_kf = torch.mean(prob_2_entropy(F.softmax(trg_pred_kf)), dim=1).detach().cpu().numpy()
        trg_ent_kf_aux = torch.mean(prob_2_entropy(F.softmax(trg_pred_kf_aux)), dim=1).detach().cpu().numpy()
        # generate propagated prediction via optical flow
interp_flow2trg = nn.Upsample(size=(trg_prob_cf.shape[-2], trg_prob_cf.shape[-1]), mode='bilinear', align_corners=True)
interp_flow2trg_ratio = trg_prob_cf.shape[-2] / trg_flow.shape[-2]
trg_flow_interp = interp_flow2trg(trg_flow) * interp_flow2trg_ratio
trg_flow_interp = trg_flow_interp.cpu().numpy()
trg_prob_propagated = np.zeros(trg_prob_cf.shape)
trg_prob_propagated_aux = np.zeros(trg_prob_cf_aux.shape)
trg_ent_propagated = np.zeros(trg_ent_cf.shape)
trg_ent_propagated_aux = np.zeros(trg_ent_cf_aux.shape)
for x in range(trg_prob_kf.shape[-1]):
for y in range(trg_prob_kf.shape[-2]):
x_flow = int(round(x - trg_flow_interp[:, 0, y, x][0]))
y_flow = int(round(y - trg_flow_interp[:, 1, y, x][0]))
if x_flow >= 0 and x_flow < trg_prob_kf.shape[-1] and y_flow >= 0 and y_flow < trg_prob_kf.shape[-2]:
trg_prob_propagated[:, :, y_flow, x_flow] = trg_prob_kf[:, :, y, x]
trg_prob_propagated_aux[:, :, y_flow, x_flow] = trg_prob_aux_kf[:, :, y, x]
trg_ent_propagated[:,y_flow,x_flow] = trg_ent_kf[:,y,x]
trg_ent_propagated_aux[:,y_flow,x_flow] = trg_ent_kf_aux[:,y,x]
trg_prob_propagated = torch.from_numpy(trg_prob_propagated)
trg_prob_propagated_aux = torch.from_numpy(trg_prob_propagated_aux)
trg_propagated_positions = torch.sum(trg_prob_propagated,1)
trg_ent_propagated = torch.from_numpy(trg_ent_propagated)
trg_ent_propagated_aux = torch.from_numpy(trg_ent_propagated_aux)
# force unconfident predictions in the current frame to be consistent with confident predictions propagated from the previous frames
loss_itcr_weights = trg_propagated_positions.float()*(trg_ent_propagated.float() < trg_ent_cf).float()
loss_itcr = weighted_l1_loss(trg_prob_cf, trg_prob_propagated.float().cuda(device), loss_itcr_weights.float().cuda(device))
if cfg.TRAIN.MULTI_LEVEL:
loss_itcr_aux_weights = trg_propagated_positions.float() * (trg_ent_propagated_aux.float() < trg_ent_cf_aux).float()
loss_itcr_aux = weighted_l1_loss(trg_prob_cf_aux, trg_prob_propagated_aux.float().cuda(device), loss_itcr_aux_weights.float().cuda(device))
else:
loss_itcr_aux = 0
loss = (cfg.TRAIN.lamda_u * loss_itcr + cfg.TRAIN.lamda_u * loss_itcr_aux)
###### Cross-domain TCR
        ### adversarial training to fool the discriminator
# spatial-temporal alignment (sta)
src_sta_pred = torch.cat((src_pred_cf, src_pred_kf), dim=1)
trg_sta_pred = torch.cat((trg_pred_cf, trg_pred_kf), dim=1)
src_sta_pred = interp_source(src_sta_pred)
trg_sta_pred = interp_target(trg_sta_pred)
d_out_sta = d_sta_main(F.softmax(trg_sta_pred))
loss_sta = bce_loss(d_out_sta, source_label)
if cfg.TRAIN.MULTI_LEVEL:
src_sta_pred_aux = torch.cat((src_pred_cf_aux, src_pred_kf_aux), dim=1)
trg_sta_pred_aux = torch.cat((trg_pred_cf_aux, trg_pred_kf_aux), dim=1)
src_sta_pred_aux = interp_source(src_sta_pred_aux)
trg_sta_pred_aux = interp_target(trg_sta_pred_aux)
d_out_sta_aux = d_sta_aux(F.softmax(trg_sta_pred_aux))
loss_sta_aux = bce_loss(d_out_sta_aux, source_label)
else:
loss_sta_aux = 0
loss = loss + (cfg.TRAIN.lamda_u * loss_sta
+ cfg.TRAIN.lamda_u * adversarial_factor_aux * loss_sta_aux)
# spatial alignment (sa)
src_sa_pred = torch.cat((src_pred_cf, src_pred_cf), dim=1)
trg_sa_pred = torch.cat((trg_pred_cf, trg_pred_cf), dim=1)
src_sa_pred = interp_source(src_sa_pred)
trg_sa_pred = interp_target(trg_sa_pred)
d_out_sa = d_sa_main(F.softmax(trg_sa_pred))
loss_sa = bce_loss(d_out_sa, source_label)
if cfg.TRAIN.MULTI_LEVEL:
src_sa_pred_aux = torch.cat((src_pred_cf_aux, src_pred_cf_aux), dim=1)
trg_sa_pred_aux = torch.cat((trg_pred_cf_aux, trg_pred_cf_aux), dim=1)
src_sa_pred_aux = interp_source(src_sa_pred_aux)
trg_sa_pred_aux = interp_target(trg_sa_pred_aux)
d_out_sa_aux = d_sa_aux(F.softmax(trg_sa_pred_aux))
loss_sa_aux = bce_loss(d_out_sa_aux, source_label)
else:
loss_sa_aux = 0
loss = loss + (cfg.TRAIN.lamda_u * cfg.TRAIN.lamda_sa * loss_sa
+ cfg.TRAIN.lamda_u * cfg.TRAIN.lamda_sa * adversarial_factor_aux * loss_sa_aux)
loss.backward()
### Train discriminator networks (Enable training mode on discriminator networks)
for param in d_sta_aux.parameters():
param.requires_grad = True
for param in d_sta_main.parameters():
param.requires_grad = True
for param in d_sa_aux.parameters():
param.requires_grad = True
for param in d_sa_main.parameters():
param.requires_grad = True
## Train with source
# spatial-temporal alignment (sta)
src_sta_pred = src_sta_pred.detach()
d_out_sta = d_sta_main(F.softmax(src_sta_pred))
loss_d_sta = bce_loss(d_out_sta, source_label) / 2
loss_d_sta.backward()
if cfg.TRAIN.MULTI_LEVEL:
src_sta_pred_aux = src_sta_pred_aux.detach()
d_out_sta_aux = d_sta_aux(F.softmax(src_sta_pred_aux))
loss_d_sta_aux = bce_loss(d_out_sta_aux, source_label) / 2
loss_d_sta_aux.backward()
# spatial alignment (sa)
src_sa_pred = src_sa_pred.detach()
d_out_sa = d_sa_main(F.softmax(src_sa_pred))
loss_d_sa = bce_loss(d_out_sa, source_label) / 2
loss_d_sa.backward()
if cfg.TRAIN.MULTI_LEVEL:
src_sa_pred_aux = src_sa_pred_aux.detach()
d_out_sa_aux = d_sa_aux(F.softmax(src_sa_pred_aux))
loss_d_sa_aux = bce_loss(d_out_sa_aux, source_label) / 2
loss_d_sa_aux.backward()
## Train with target
# spatial-temporal alignment (sta)
trg_sta_pred = trg_sta_pred.detach()
d_out_sta = d_sta_main(F.softmax(trg_sta_pred))
loss_d_sta = bce_loss(d_out_sta, target_label) / 2
loss_d_sta.backward()
if cfg.TRAIN.MULTI_LEVEL:
trg_sta_pred_aux = trg_sta_pred_aux.detach()
d_out_sta_aux = d_sta_aux(F.softmax(trg_sta_pred_aux))
loss_d_sta_aux = bce_loss(d_out_sta_aux, target_label) / 2
loss_d_sta_aux.backward()
else:
loss_d_sta_aux = 0
# spatial alignment (sa)
trg_sa_pred = trg_sa_pred.detach()
d_out_sa = d_sa_main(F.softmax(trg_sa_pred))
loss_d_sa = bce_loss(d_out_sa, target_label) / 2
loss_d_sa.backward()
if cfg.TRAIN.MULTI_LEVEL:
trg_sa_pred_aux = trg_sa_pred_aux.detach()
d_out_sa_aux = d_sa_aux(F.softmax(trg_sa_pred_aux))
loss_d_sa_aux = bce_loss(d_out_sa_aux, target_label) / 2
loss_d_sa_aux.backward()
else:
loss_d_sa_aux = 0
# Discriminators' weights discrepancy (wd)
k = 0
loss_wd = 0
for (W1, W2) in zip(d_sta_main.parameters(), d_sa_main.parameters()):
W1 = W1.view(-1)
W2 = W2.view(-1)
loss_wd = loss_wd + (torch.matmul(W1, W2) / (torch.norm(W1) * torch.norm(W2)) + 1)
k += 1
loss_wd = loss_wd / k
if cfg.TRAIN.MULTI_LEVEL:
k = 0
loss_wd_aux = 0
for (W1_aux, W2_aux) in zip(d_sta_aux.parameters(), d_sa_aux.parameters()):
W1_aux = W1_aux.view(-1)
W2_aux = W2_aux.view(-1)
loss_wd_aux = loss_wd_aux + (torch.matmul(W1_aux, W2_aux) / (torch.norm(W1_aux) * torch.norm(W2_aux)) + 1)
k += 1
loss_wd_aux = loss_wd_aux / k
else:
loss_wd_aux = 0
loss = (cfg.TRAIN.lamda_u * cfg.TRAIN.lamda_wd * loss_wd + cfg.TRAIN.lamda_u * cfg.TRAIN.lamda_wd * loss_wd_aux)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
torch.nn.utils.clip_grad_norm_(d_sta_aux.parameters(), 1)
torch.nn.utils.clip_grad_norm_(d_sta_main.parameters(), 1)
torch.nn.utils.clip_grad_norm_(d_sa_aux.parameters(), 1)
torch.nn.utils.clip_grad_norm_(d_sa_main.parameters(), 1)
optimizer.step()
if cfg.TRAIN.MULTI_LEVEL:
optimizer_d_sta_aux.step()
optimizer_d_sa_aux.step()
optimizer_d_sta_main.step()
optimizer_d_sa_main.step()
current_losses = {'loss_src_aux': loss_seg_src_aux,
'loss_src': loss_seg_src_main,
'loss_itcr_aux': loss_itcr_aux,
'loss_itcr': loss_itcr,
'loss_sta_aux': loss_sta_aux,
'loss_sa_aux': loss_sa_aux,
'loss_sta': loss_sta,
'loss_sa': loss_sa,
'loss_d_sta_aux': loss_d_sta_aux,
'loss_d_sa_aux': loss_d_sa_aux,
'loss_d_sta': loss_d_sta,
'loss_d_sa': loss_d_sa,
'loss_wd_aux': loss_wd_aux,
'loss_wd': loss_wd}
print_losses(current_losses, i_iter)
if i_iter % cfg.TRAIN.SAVE_PRED_EVERY == 0 and i_iter != 0:
print('taking snapshot ...')
print('exp =', cfg.TRAIN.SNAPSHOT_DIR)
snapshot_dir = Path(cfg.TRAIN.SNAPSHOT_DIR)
torch.save(model.state_dict(), snapshot_dir / f'model_{i_iter}.pth')
if i_iter >= cfg.TRAIN.EARLY_STOP - 1:
break
sys.stdout.flush()
if viz_tensorboard:
log_losses_tensorboard(writer, current_losses, i_iter)
def weighted_l1_loss(input, target, weights):
loss = weights * torch.abs(input - target)
loss = torch.mean(loss)
return loss
def print_losses(current_losses, i_iter):
list_strings = []
for loss_name, loss_value in current_losses.items():
list_strings.append(f'{loss_name} = {to_numpy(loss_value):.3f} ')
full_string = ' '.join(list_strings)
tqdm.write(f'iter = {i_iter} {full_string}')
def log_losses_tensorboard(writer, current_losses, i_iter):
for loss_name, loss_value in current_losses.items():
writer.add_scalar(f'data/{loss_name}', to_numpy(loss_value), i_iter)
def to_numpy(tensor):
if isinstance(tensor, (int, float)):
return tensor
else:
return tensor.data.cpu().numpy()
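# Minimal sketch (not part of the original training script): how weighted_l1_loss
# combines an element-wise L1 difference with a 0/1 weight mask.  Only positions
# with weight 1 contribute, but the mean is taken over all elements.
def _weighted_l1_loss_demo():
    import torch
    pred = torch.tensor([[0.2, 0.8], [0.5, 0.5]])
    target = torch.tensor([[0.0, 1.0], [1.0, 0.0]])
    weights = torch.tensor([[1.0, 1.0], [0.0, 0.0]])  # ignore the second row
    loss = weighted_l1_loss(pred, target, weights)
    # mean over all 4 elements: (0.2 + 0.2 + 0 + 0) / 4 = 0.1
    print(loss.item())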
|
11459126
|
from __future__ import annotations
from abc import abstractmethod
import logging
from pathlib import Path
from typing import (
Any, cast, Iterator, Mapping, Optional,
TYPE_CHECKING, Union,
)
from PyQt5 import Qt
from .bases import (
AbstractYAMLObjectSingleton, QABC, QAbstractYAMLObjectSingleton,
)
from .better_abc import abstract_attribute
from .types import Frame, Output, Time
class AbstractMainWindow(Qt.QMainWindow, QAbstractYAMLObjectSingleton):
if TYPE_CHECKING:
from vspreview.models import Outputs
from vspreview.widgets import Timeline
__slots__ = ()
@abstractmethod
def load_script(self, script_path: Path, external_args: str = '', reloading = False) -> None:
raise NotImplementedError
@abstractmethod
def reload_script(self) -> None:
raise NotImplementedError
@abstractmethod
def init_outputs(self) -> None:
raise NotImplementedError
@abstractmethod
def switch_output(self, value: Union[int, Output]) -> None:
raise NotImplementedError()
@abstractmethod
def switch_frame(self, pos: Union[Frame, Time], *, render_frame: bool = True) -> None:
raise NotImplementedError()
@abstractmethod
def show_message(self, message: str, timeout: Optional[int] = None) -> None:
raise NotImplementedError
central_widget: Qt.QWidget = abstract_attribute()
clipboard : Qt.QClipboard = abstract_attribute()
current_frame : Frame = abstract_attribute()
current_time : Time = abstract_attribute()
current_output: Output = abstract_attribute()
display_scale : float = abstract_attribute()
graphics_scene: Qt.QGraphicsScene = abstract_attribute()
graphics_view : Qt.QGraphicsView = abstract_attribute()
outputs : Outputs = abstract_attribute()
timeline : Timeline = abstract_attribute()
toolbars : AbstractToolbars = abstract_attribute() # pylint: disable=used-before-assignment
save_on_exit : bool = abstract_attribute()
script_path : Path = abstract_attribute()
statusbar : Qt.QStatusBar = abstract_attribute()
class AbstractToolbar(Qt.QWidget, QABC):
if TYPE_CHECKING:
from vspreview.widgets import Notches
__slots__ = (
'main', 'toggle_button',
)
if TYPE_CHECKING:
notches_changed = Qt.pyqtSignal(AbstractToolbar) # pylint: disable=undefined-variable
else:
notches_changed = Qt.pyqtSignal(object)
def __init__(self, main: AbstractMainWindow, name: str) -> None:
super().__init__(main.central_widget)
self.main = main
self.setFocusPolicy(Qt.Qt.ClickFocus)
self.notches_changed.connect(self.main.timeline.update_notches)
self.toggle_button = Qt.QToolButton(self)
self.toggle_button.setCheckable(True)
self.toggle_button.setText(name)
self.toggle_button.clicked.connect(self.on_toggle)
self.setVisible(False)
def on_toggle(self, new_state: bool) -> None:
# invoking order matters
self.setVisible(new_state)
self.resize_main_window(new_state)
def on_current_frame_changed(self, frame: Frame, time: Time) -> None:
pass
def on_current_output_changed(self, index: int, prev_index: int) -> None:
pass
def on_script_unloaded(self) -> None:
pass
def on_script_loaded(self) -> None:
pass
def get_notches(self) -> Notches:
from vspreview.widgets import Notches
return Notches()
def is_notches_visible(self) -> bool:
return self.isVisible()
def resize_main_window(self, expanding: bool) -> None:
if self.main.windowState() in (Qt.Qt.WindowMaximized,
Qt.Qt.WindowFullScreen):
return
if expanding:
self.main.resize(
self.main.width(),
self.main.height() + self.height() + round(6 * self.main.display_scale))
if not expanding:
self.main.resize(
self.main.width(),
self.main.height() - self.height() - round(6 * self.main.display_scale))
self.main.timeline.full_repaint()
def __getstate__(self) -> Mapping[str, Any]:
return {
'toggle': self.toggle_button.isChecked()
}
def __setstate__(self, state: Mapping[str, Any]) -> None:
try:
toggle = state['toggle']
if not isinstance(toggle, bool):
raise TypeError
except (KeyError, TypeError):
logging.warning(
'Storage loading: Toolbar: failed to parse toggle')
toggle = self.main.TOGGLE_TOOLBAR
if self.toggle_button.isChecked() != toggle:
self.toggle_button.click()
class AbstractToolbars(AbstractYAMLObjectSingleton):
yaml_tag: str = abstract_attribute()
__slots__ = ()
# special toolbar ignored by len()
# and not accessible via subscription and 'in' operator
main : AbstractToolbar = abstract_attribute()
playback : AbstractToolbar = abstract_attribute()
scening : AbstractToolbar = abstract_attribute()
pipette : AbstractToolbar = abstract_attribute()
benchmark: AbstractToolbar = abstract_attribute()
misc : AbstractToolbar = abstract_attribute()
debug : AbstractToolbar = abstract_attribute()
toolbars_names = ('playback', 'scening', 'pipette', 'benchmark', 'misc', 'debug')
# 'main' should be the first
all_toolbars_names = ['main'] + list(toolbars_names)
def __getitem__(self, index: int) -> AbstractToolbar:
if index >= len(self.toolbars_names):
raise IndexError
return cast(AbstractToolbar, getattr(self, self.toolbars_names[index]))
def __len__(self) -> int:
return len(self.toolbars_names)
@abstractmethod
def __getstate__(self) -> Mapping[str, Any]:
raise NotImplementedError
@abstractmethod
def __setstate__(self, state: Mapping[str, Any]) -> None:
raise NotImplementedError
if TYPE_CHECKING:
# https://github.com/python/mypy/issues/2220
def __iter__(self) -> Iterator[AbstractToolbar]: ...
|
11459155
|
class Node:
    # Constructor to create a new node
def __init__(self, key):
self.key = key
self.left = None
self.right = None
def printLevels(root, low, high):
Q = []
marker = Node(11114) # Marker node to indicate end of level
level = 1 # Initialize level number
# Enqueue the only first level node and marker node for
# end of level
Q.append(root)
Q.append(marker)
#print Q
# Simple level order traversal loop
while(len(Q) >0):
# Remove the front item from queue
n = Q[0]
Q.pop(0)
#print Q
# Check if end of level is reached
        if n == marker:
            # print a new line and increment the level number
            print()
            level += 1
            # Check if the marker node was the last node in the queue
            # or the level number is beyond the given upper limit
            if len(Q) == 0 or level > high:
                break
            # Enqueue the marker for the end of the next level
            Q.append(marker)
            # The marker itself is never printed and has no children to enqueue
            continue
        if level >= low:
            print(n.key, end=' ')
        # Enqueue children of non-marker node
        if n.left is not None:
            Q.append(n.left)
        if n.right is not None:
            Q.append(n.right)
# Driver program to test the above function
root = Node(20)
root.left = Node(8)
root.right = Node(22)
root.left.left = Node(4)
root.left.right = Node(12)
root.left.right.left = Node(10)
root.left.right.right = Node(14)
print "Level Order Traversal between given two levels is",
printLevels(root,2,3)
|
11459187
|
import pytest
from eth_utils.hexadecimal import is_0x_prefixed
@pytest.mark.parametrize(
"value,expected",
(("", False), ("0x", True), ("0x12345", True), ("12345", False), ("0X12345", True)),
)
def test_is_0x_prefixed(value, expected):
assert is_0x_prefixed(value) is expected
@pytest.mark.parametrize("value", (b"", 123, {}, lambda: None))
def test_is_0x_prefixed_rejects_non_text_types(value):
with pytest.raises(TypeError):
is_0x_prefixed(value)
|
11459223
|
from ..typing import SpectrumType
from ..utils import clean_adduct
from ..utils import looks_like_adduct
def derive_adduct_from_name(spectrum_in: SpectrumType,
remove_adduct_from_name: bool = True) -> SpectrumType:
"""Find adduct in compound name and add to metadata (if not present yet).
Method to interpret the given compound name to find the adduct.
Parameters
----------
spectrum_in:
Input spectrum.
remove_adduct_from_name:
Remove found adducts from compound name if set to True. Default is True.
"""
if spectrum_in is None:
return None
spectrum = spectrum_in.clone()
if spectrum.get("compound_name", None) is not None:
name = spectrum.get("compound_name")
else:
assert spectrum.get("name", None) in [None, ""], ("Found 'name' but not 'compound_name' in metadata",
"Apply 'add_compound_name' filter first.")
return spectrum
# Detect adduct in compound name
adduct_from_name = None
name_split = name.split(" ")
for name_part in name_split[::-1][:2]:
if looks_like_adduct(name_part):
adduct_from_name = name_part
break
if adduct_from_name and remove_adduct_from_name:
name_adduct_removed = " ".join([x for x in name_split if x != adduct_from_name])
spectrum.set("compound_name", name_adduct_removed)
print("Removed adduct {} from compound name.".format(adduct_from_name))
# Add found adduct to metadata (if not present yet)
if adduct_from_name and not looks_like_adduct(spectrum.get("adduct")):
adduct_cleaned = clean_adduct(adduct_from_name)
spectrum.set("adduct", adduct_cleaned)
print(f"Added adduct {adduct_cleaned} to metadata.")
return spectrum
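# Minimal sketch (not part of the original module): the adduct detection above
# only inspects the last two whitespace-separated tokens of the compound name,
# using looks_like_adduct imported from ..utils.  The example name is hypothetical.
def _adduct_scan_demo(name="Peptide XYZ [M+H]+"):
    name_split = name.split(" ")
    for name_part in name_split[::-1][:2]:
        if looks_like_adduct(name_part):
            return name_part
    return None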
|
11459231
|
def top_level_reducer(owner, child):
return owner
def bot_level_reducer(owner, child):
return child
def addition_reducer(owner, child):
return owner + child
def mult_reducer(owner, child):
return owner * child
|
11459237
|
class BaseDriverUnitTest:
def setup_method(self):
pass
def get_prompt(self):
return self.standard_prompt
@staticmethod
def send_inputs():
return True
@staticmethod
def send_inputs_interact():
return True
def test__determine_current_priv_exec(self):
assert self.driver._determine_current_priv("myrouter>") == self.privs["exec"]
def test__determine_current_priv_privilege_exec(self):
assert self.driver._determine_current_priv("myrouter#") == self.privs["privilege_exec"]
def test__determine_current_priv_config(self):
assert (
self.driver._determine_current_priv("myrouter(config)#") == self.privs["configuration"]
)
def test__determine_current_priv_special_config(self):
assert (
self.driver._determine_current_priv("myrouter(config-if)#")
== self.privs["special_configuration"]
)
|
11459272
|
import pdb
import random
import torch
from transformers import BertTokenizer
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Dataset
class MTSIAdapterDataset(Dataset):
"""
    MTSIAdapterDataset implements the adapter pattern, acting as an intermediary between your program and the dataset when using Bert.
    It performs all the operations needed to adapt your dataset input to the one Bert expects: tokenization, token-to-index conversion, padding, etc.
    Another important operation is __getitem__(idx), which returns the utterances of the dialogue at that index concatenated with
    the first utterance of another, randomly chosen dialogue.
    Input:
        dataset : A class extending Dataset containing your dataset
        tokenizer : The tokenizer to use
        max_sequence_len : The max length of a sequence (for padding)
        max_dialogue_len : The max length of a dialogue (for padding)
    Output:
        tok_ids : Indices of each token produced by the Bert tokenizer
        dialogue_turns : User or Agent turns vector
        intent : the intent index for that utterance
        action : Fetch or Insert
        dialogue_id : The ID identifying that dialogue
    Note:
        Pay attention to the feature order returned by the dataset's __getitem__()
"""
def __init__(self, dataset, tokenizer, max_sequence_len, max_dialogue_len):
self._dataset = dataset
self._tokenizer = tokenizer
self._max_sequence_len = max_sequence_len
self._max_dialogue_len = max_dialogue_len
def __len__(self):
return self._dataset.__len__()
def __getitem__(self, idx):
utterances, dialogue_turns, intent, action, dialogue_id = self._dataset.__getitem__(idx)
        # copy the lists to avoid modifying the internal dataset as well
utterances = list(utterances)
dialogue_turns = list(dialogue_turns)
# the random dialogue can also be this one
random_dialogue_idx = random.randint(0, self._dataset.__len__()-1)
random_utterances , other_turns, _, _, _ = self._dataset.__getitem__(random_dialogue_idx)
utterances.append(random_utterances[0])
dialogue_turns.append(other_turns[0])
# this vector will contain list of utterances ids
utt_ids = []
for utt in utterances:
tok_utt = self._tokenizer.tokenize(utt)
tok_utt_len = len(tok_utt)
# apply padding if needed
assert tok_utt_len <= self._max_sequence_len
if tok_utt_len < self._max_sequence_len:
tok_utt = self.do_padding(tok_utt, self._max_sequence_len)
tok_idx = self._tokenizer.convert_tokens_to_ids(tok_utt)
utt_ids.append(tok_idx)
# apply dialogue padding both to utterances and turn vector
dialogue_len = len(utt_ids)
if dialogue_len < self._max_dialogue_len:
residual = self._max_dialogue_len - dialogue_len
utt_ids = utt_ids + [[0]*self._max_sequence_len]*residual
dialogue_turns = dialogue_turns + [0]*residual
assert len(utt_ids) == self._max_dialogue_len, '[ASSERT FAILED] -- wrong dialogue len of ' + str(len(utt_ids))
assert len(utt_ids[0]) == self._max_sequence_len, '[ASSERT FAILED] -- wrong sentence len of ' + str(len(utt_ids[0]))
return torch.tensor(utt_ids), torch.tensor(dialogue_turns), intent, action, dialogue_id
def do_padding(self, tok_text, max_len, pad_token = '[PAD]'):
"""
Method for applying padding to the tokenized sentence until reaching max_len
Input:
tok_text : list containing the tokenized text
max_len : the max len to pad
"""
diff = max_len - len(tok_text)
assert diff >= 0
res = tok_text
for count in range(diff):
res.append(pad_token)
return res
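# Minimal sketch (not part of the original module): do_padding only appends
# '[PAD]' tokens until the requested length and never touches instance state,
# so it can be exercised without building a full dataset (None works as `self`).
def _do_padding_demo():
    padded = MTSIAdapterDataset.do_padding(None, ['hello', 'world'], 5)
    print(padded)  # -> ['hello', 'world', '[PAD]', '[PAD]', '[PAD]']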
|
11459334
|
import os
import re
from xml.etree import ElementTree as ET
import sublime
import sublime_plugin
from . import project
class AndroidXmlComplete(sublime_plugin.EventListener):
def __init__(self):
self.dirty = False
def on_query_completions(self, view, prefix, locations):
if not self.is_responsible(view):
return
if not hasattr(self, "lookup"):
self.load_lookup()
line = view.substr(sublime.Region(view.full_line(locations[0]).begin(), locations[0])).strip()
if line == "<":
keys = [(k, k) for k in list(self.lookup.keys()) if k.lower().startswith(prefix.lower())]
return (keys, sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS)
part = line.rsplit(" ")[-1].strip() # BUG this would flunk on string values with spaces
data = view.substr(sublime.Region(0, locations[0] - len(prefix)))
idx = data.rfind("<")
        el = re.search(r"<([a-zA-Z0-9\.]*)[ \n\r]", data[idx:]).groups()[0].strip()
if part.lower() == "android:":
keys = []
# TODO cache all this searching during initial load
# match el and el_*
for e in self.match_keys(el):
keys += [(k, "%s=\"$0\"" % k) for k in list(self.lookup[e].keys())]
for parent in self.widgets[el]:
for e in self.match_keys(parent):
keys += [(k, "%s=\"$0\"" % k) for k in list(self.lookup[e].keys())]
#
self.dirty = True # trigger to provide further completions to value
keys.sort()
return (set(keys), sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS)
# set `dirty = False` here after providing initial autocomplete for dirty
self.dirty = False
srch = re.search(":(.*)=", part)
if not srch:
return
groups = srch.groups()
if not groups:
return
attr = groups[0]
# need to iter through all possible keys to find attr def
# TODO cache all this searching during initial load
for e in self.match_keys(el):
if attr in self.lookup[e] and self.lookup[e][attr]:
keys = [(k, k) for k in self.lookup[e][attr]]
return (keys, sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS)
for parent in self.widgets[el]:
for e in self.match_keys(parent):
if attr in self.lookup[e] and self.lookup[e][attr]:
keys = [(k, k) for k in self.lookup[e][attr]]
return (keys, sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS)
# TODO provide completions based on custom attrs defined within project
def match_keys(self, key):
"""Matches a given key to other versions of the same type.
The SDK data files segment items based on certain types of groups. For
example, `ViewGroup` also has an entry for `ViewGroup_MarginLayout`.
We don't want to provide tag completion for `ViewGroup_MarginLayout` as
that's not a valid tag, but we do want to be able to lookup all keys
that are associated with `ViewGroup`.
Returns:
List of strings where each value maps to self.lookup keys.
"""
keys = []
for e in list(self.lookup.keys()):
if e == key or e.startswith(key + "_"):
keys.append(e)
return keys
def on_modified(self, view):
if not self.is_responsible(view):
return
if self.dirty:
            # don't reset dirty here, as doing so prevents the final autocompletion
            # in a somewhat bizarre manner.
view.run_command("auto_complete")
return
sel = view.sel()[0]
if view.substr(sel.a - 1) in ["<", ":"]:
view.run_command("auto_complete")
def is_responsible(self, view):
# TODO better check for if this is an android project
if view.file_name() and view.file_name().endswith(".xml"):
return True
return False
def load_widgets(self, sdk_dir, platform):
self.widgets = {}
lines = open(os.path.join(sdk_dir, "platforms", platform, "data/widgets.txt"), "rt").readlines()
for line in lines:
records = [s.rsplit(".")[-1] for s in line.split(" ")]
self.widgets[records[0]] = records[1:]
def load_lookup(self):
self.lookup = {} # prevents recursive calls (i guess) due to how things currently are
sdk_dir = project.get_sdk_dir()
platform = project.get_target_platform()
self.load_widgets(sdk_dir, platform)
els = ET.parse(os.path.join(sdk_dir, "platforms", platform, "data/res/values/attrs.xml")).getroot()
self.lookup = {}
for el in els:
name = el.attrib.get("name", None)
if name is None:
continue
attrs = {}
for attr in el.getchildren():
attr_name = attr.attrib.pop("name", None)
if attr_name is None:
continue
options = []
for enum in attr.getchildren():
options.append(enum.attrib["name"])
attrs[attr_name] = options
self.lookup[name] = attrs
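# Minimal sketch (not part of the original plugin): the lookup rule used by
# match_keys above -- an entry matches when it equals the tag name or is a
# grouped variant such as "ViewGroup_MarginLayout" for "ViewGroup".
def _match_keys_rule_demo():
    entries = ["View", "ViewGroup", "ViewGroup_MarginLayout", "TextView"]
    key = "ViewGroup"
    matches = [e for e in entries if e == key or e.startswith(key + "_")]
    print(matches)  # -> ['ViewGroup', 'ViewGroup_MarginLayout']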
|
11459377
|
from math import ceil
from typing import List
from shapely.geometry import Point, LineString, Polygon
from plaza_preprocessing.optimizer import utils
from plaza_preprocessing.optimizer.graphprocessor.graphprocessor import GraphProcessor
class SpiderWebGraphProcessor(GraphProcessor):
""" Process a plaza with a spider web graph """
def __init__(self, spacing_m, visibility_delta_m):
self.spacing_m = spacing_m
self.visibility_delta_m = visibility_delta_m
def create_graph_edges(self, plaza_geometry: Polygon, entry_points: List[Point]) -> List[LineString]:
""" create a spiderwebgraph and connect edges to entry points """
if not plaza_geometry:
raise ValueError("Plaza geometry not defined for spiderwebgraph processor")
if not entry_points:
raise ValueError("No entry points defined for spiderwebgraph processor")
graph_edges = self._calc_spiderwebgraph(plaza_geometry)
if not graph_edges: # no graph edges could be constructed
return []
return self._connect_entry_points_with_graph(entry_points, graph_edges)
def optimize_lines(self, plaza_geometry: Polygon, lines: List[LineString], tolerance_m: float) -> List[LineString]:
"""
simplify lines to reduce amount of line points.
Optimizations that fall outside of the plaza will be discarded
"""
tolerance = utils.meters_to_degrees(tolerance_m)
return list(map(lambda line: self._get_simplified_visible_line(plaza_geometry, line, tolerance), lines))
def _get_simplified_visible_line(self, plaza_geometry: Polygon, line: LineString, tolerance):
""" returns the simplified line if it's inside the plaza, the original line otherwise"""
simplified_line = line.simplify(tolerance, preserve_topology=False)
return simplified_line if utils.line_visible(plaza_geometry, simplified_line, self.visibility_delta_m) else line
def _calc_spiderwebgraph(self, plaza_geometry):
""" calculate spider web graph edges"""
spacing = utils.meters_to_degrees(self.spacing_m)
x_left, y_bottom, x_right, y_top = plaza_geometry.bounds
# based on https://github.com/michaelminn/mmqgis
rows = int(ceil((y_top - y_bottom) / spacing))
columns = int(ceil((x_right - x_left) / spacing))
graph_edges = []
for column in range(0, columns + 1):
for row in range(0, rows + 1):
x_1 = x_left + (column * spacing)
x_2 = x_left + ((column + 1) * spacing)
y_1 = y_bottom + (row * spacing)
y_2 = y_bottom + ((row + 1) * spacing)
top_left = (x_1, y_1)
top_right = (x_2, y_1)
bottom_left = (x_1, y_2)
bottom_right = (x_2, y_2)
# horizontal line
if column < columns:
h_line = self._get_spiderweb_intersection_line(plaza_geometry, top_left, top_right)
if h_line:
graph_edges.append(h_line)
# vertical line
if row < rows:
v_line = self._get_spiderweb_intersection_line(plaza_geometry, top_left, bottom_left)
if v_line:
graph_edges.append(v_line)
# diagonal line
if row < rows and column < columns:
d1_line = self._get_spiderweb_intersection_line(plaza_geometry, top_left, bottom_right)
if d1_line:
graph_edges.append(d1_line)
d2_line = self._get_spiderweb_intersection_line(plaza_geometry, bottom_left, top_right)
if d2_line:
graph_edges.append(d2_line)
return graph_edges
def _get_spiderweb_intersection_line(self, plaza_geometry, start, end):
""" returns a line that is completely inside the plaza, if possible """
line = LineString([start, end])
if not utils.line_visible(plaza_geometry, line, self.visibility_delta_m):
return None
return line
def _connect_entry_points_with_graph(self, entry_points, graph_edges):
connection_lines = []
for entry_point in entry_points:
neighbor_line = utils.find_nearest_geometry(entry_point, graph_edges)
target_point = min(
neighbor_line.coords, key=lambda c: Point(c).distance(entry_point))
connection_line = (LineString([(entry_point.x, entry_point.y), target_point]))
connection_lines.append(connection_line)
graph_edges.extend(connection_lines)
return graph_edges
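# Minimal sketch (not part of the original module): the spider-web grid size
# only depends on the plaza bounding box and the spacing, exactly as computed
# at the top of _calc_spiderwebgraph.  The bounds below are hypothetical.
def _grid_size_demo():
    x_left, y_bottom, x_right, y_top = 0.0, 0.0, 10.0, 4.0
    spacing = 2.0
    rows = int(ceil((y_top - y_bottom) / spacing))     # -> 2
    columns = int(ceil((x_right - x_left) / spacing))  # -> 5
    print(rows, columns)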
|
11459385
|
import numpy as np
import matplotlib.pyplot as plt
# Generate a distribution
x = 0.5*np.random.randn(1000)+4
# Standard (mean=0, stdev=1) Scaler
y = (x-np.mean(x))/np.std(x)
# Min-Max (0-1) Scaler
z = (x-np.min(x))/(np.max(x)-np.min(x))
# Plot distributions
plt.figure(figsize=(8,4))
plt.hist(x, bins=30, label='original')
plt.hist(y, alpha=0.7, bins=30, label='standard scaler')
plt.hist(z, alpha=0.7, bins=30, label='minmax scaler')
plt.legend()
plt.show()
|
11459417
|
from itertools import groupby
from urlparse import urlparse
from tld import get_tld
from sqlalchemy import (
Column,
Integer,
String,
ForeignKey,
)
from sqlalchemy.orm import relationship
from ..app import db
class Medium(db.Model):
""" A medium from which articles are drawn, such as a newspaper
or radio station.
"""
__tablename__ = 'mediums'
id = Column(Integer, primary_key=True)
name = Column(String(100), index=True, nullable=False, unique=True)
domain = Column(String(100), index=True)
medium_type = Column(String(100), index=True, nullable=False)
medium_group = Column(String(100))
parent_org = Column(String(100))
country_id = Column(Integer, ForeignKey('countries.id'), nullable=False, index=True)
country = relationship("Country", backref='mediums')
def group_name(self):
return self.medium_group or self.name
@classmethod
def is_tld_exception(cls, url):
""" Test if the url falls within one of the exceptions,
this is intended to handle instances where get_tld()
calls fail to recognise urls (eg: .co.tz fails...)
"""
url_exceptions = [
'thecitizen.co.tz',
'dailynews.co.tz',
'nigeriatoday.ng',
            'zumi.ng',
'nta.ng',
'nan.ng',
'leadership.ng',
'independent.ng',
'guardian.ng',
'dailytimes.ng',
'theinterview.ng',
'city-press.news24.com',
'nation.africa',
'nation.co.ke'
]
for ex in url_exceptions:
            # check if it appears at the start of the url, allowing a buffer for the 'https://www.' prefix
if ex in url[:len(ex)+12]:
return ex
return None
@classmethod
def for_url(cls, url):
domain = get_tld(url, fail_silently=True)
# fail silently
if domain is None:
domain = cls.is_tld_exception(url)
if domain is None:
return None
parts = urlparse(url)
# iol.co.za/isolezwe
domain = domain + parts.path
# explicitly look for city-press, subdomain does not play nice with current Dexter code
if 'city-press.news24.com' in url:
medium = Medium.query.get(5)
return medium
elif 'nation.africa' in url:
medium = Medium.query.get(319)
return medium
else:
# find the medium with the longest matching domain
for medium in sorted(Medium.query.all(), key=lambda m: len(m.domain or ''), reverse=True):
if medium.domain and domain.startswith(medium.domain):
return medium
return None
@classmethod
def for_select_widget(cls):
from . import Country
mediums = cls.query.join(Country).all()
mediums.sort(key=lambda m: [m.country.name, m.name])
choices = []
for group, items in groupby(mediums, lambda m: m.country.name):
choices.append((group, [[str(m.id), m.name] for m in items]))
return choices
@classmethod
def create_defaults(cls):
from . import Country
text = """
Beeld|print - daily|beeld.com||za
Business Day|print - daily|bdlive.co.za||za
Cape Argus|print - daily|iol.co.za/capeargus||za
Cape Times|print - daily|iol.co.za/capetimes||za
City Press|print - weekly|citypress.co.za||za
Daily Dispatch|print - daily|dispatch.co.za||za
Daily Maverick|daily|dailymaverick.co.za||za
Daily Sun|print - daily|dailysun.mobi||za
Daily Voice|print - daily|iol.co.za/2.1894||za
Die Burger|print - daily|dieburger.com||za
Etv English News|television||etv|za
Etv Sunrise|television||etv|za
Grocott's Mail|print - daily|grocotts.co.za||za
Ilanga|ONLINE|ilanganews.co.za||za
IOL|online|iol.co.za||za
Isolezwe|print - daily|iol.co.za/isolezwe||za
Kaya FM|radio|||za
Lesedi FM|radio|||za
Ligwalagwala FM|radio|||za
Lotus FM|radio|||za
Mail and Guardian|print - weekly|mg.co.za||za
Metro FM|radio|||za
Motsweding FM|radio|||za
Munghana Lonene FM|radio|||za
News24|online|news24.com||za
Phalaphala FM|radio|||za
Post|print - daily|iol.co.za/thepost||za
Power FM|radio|||za
Public Eye|ONLINE|publiceye.co.ls||za
RSG FM|radio|||za
SA Breaking News|ONLINE|sabreakingnews.co.za||za
SABC 1 Elections programs|television||SABC 1|za
SABC 1 Isizulu/Isixhosa News|television||SABC 1|za
SABC 1 Siswati/Ndebele News|television||SABC 1|za
SABC 2 Afrikaans News|television||SABC 2|za
SABC 2 Morning Live|television||SABC 2|za
SABC 2 Sesotho/Setswana News|television||SABC 2|za
SABC 2 Special Elections Programs|television||SABC 2|za
SABC 2 Xitsonga/Tschivenda News|television||SABC 2|za
SABC 3 English News|television||SABC 3|za
SAfm|radio|||za
Saturday Star|print - weekly|iol.co.za/saturday-star||za
Sowetan|print - daily|sowetanlive.co.za||za
Sunday Independent|print - weekly|iol.co.za/sundayindependant||za
Sunday Sun|print - weekly|||za
Sunday Times|print - weekly|timeslive.co.za/sundaytimes||za
Sunday Tribune|ONLINE|iol.co.za/sunday-tribune||za
Sunday World|print - weekly|sundayworld.co.za||za
Talk Radio 702|radio|||za
Citizen|print - daily|citizen.co.za||za
The Daily News|print - daily|iol.co.za/dailynews||za
The Free State Times|PRINT|fstimes.co.za||za
The Herald|print - daily|heraldlive.co.za||za
The Independent on Saturday|print - weekly|iol.co.za/ios||za
The Mercury|print - daily|iol.co.za/mercury||za
The New Age|print - daily|thenewage.co.za||za
The Star|print - daily|iol.co.za/the-star||za
The Witness|print - daily|witness.co.za||za
Thobela FM|radio|||za
Times|print - daily|timeslive.co.za||za
Ukhozi FM|radio|||za
Umhlobo Wenene FM|radio|||za
Unknown|other|||za
Volksbad|print - daily|volksblad.com||za
Weekend Argus|print - weekly|||za
Weekend Dispatch|print - weekly|||za
Weekend Post|print - daily|weekendpost.co.za||za
Namibian|online|namibian.com.na||na
Daily Nation|online|zambiadailynation.com||zm
Lusaka Times|online|lusakatimes.com||zm
Zambian Watchdog|online|zambianwatchdog.com||zm
Zambia Daily Mail|online|daily-mail.co.zm||zm
Post Zambia|online|postzambia.com||zm
Times of Zambia|online|times.co.zm||zm
The Chronicle|online|chronicle.co.zw||zw
NewsDay Zimbabwe|online|newsday.co.zw||zw
The Citizen Tanzania|online|thecitizen.co.tz||tz
Deutsche Welle|online|dw.com||de
BBC|online|bbc.com||gb
Daily Nation (Kenya)|online|nation.co.ke||ke
Standard Digital|online|standardmedia.co.ke||ke
The Star (Kenya)|online|the-star.co.ke||ke
The East African|online|theeastafrican.co.ke||ke
Daily News (Tanzania)|online|dailynews.co.tz||tz
Daily News (Zimbabwe)|online|dailynews.co.zw||tz
"""
mediums = []
for medium in text.strip().split("\n"):
m = Medium()
components = medium.strip().split('|')
m.name, m.medium_type, m.domain, m.medium_group, country = components
if not m.domain:
m.domain = None
if not m.medium_group:
m.medium_group = None
if country:
m.country = Country.query.filter(Country.code == country).one()
mediums.append(m)
return mediums
|
11459461
|
from network import ipaddr
def valid_ip_network(network):
"""Take a v4 or v6 network, e.g. '192.168.3.5/24' or
'192.168.3.5/255.255.255.0' and return whether it is valid.
Args:
network (str): IP address and mask, e.g. '192.168.3.5/24'.
Returns:
True if valid, False otherwise.
"""
try:
ipaddr.IPNetwork(network)
except ValueError:
return False
return True
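# Minimal usage sketch (assumes the `network.ipaddr` module imported above is
# available in this environment).
if __name__ == "__main__":
    print(valid_ip_network('192.168.3.5/24'))             # True
    print(valid_ip_network('192.168.3.5/255.255.255.0'))  # True
    print(valid_ip_network('not-a-network'))               # False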
|
11459478
|
from typing import List
class Solution:
def islandPerimeter(self, grid: List[List[int]]) -> int:
perimeter, rows, columns = 0, len(grid), len(grid[0])
for row in range(rows):
for column in range(columns):
if grid[row][column] == 1:
perimeter += 4 - self.surroundedBy(grid, row, column)
return perimeter
def surroundedBy(self, grid: List[List[int]], row: int, column: int) -> int:
surrounded = 0
if row - 1 >= 0 and grid[row - 1][column] == 1:
surrounded += 1
if column + 1 < len(grid[0]) and grid[row][column + 1] == 1:
surrounded += 1
if row + 1 < len(grid) and grid[row + 1][column] == 1:
surrounded += 1
if column - 1 >= 0 and grid[row][column - 1] == 1:
surrounded += 1
return surrounded
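# Minimal worked example (not part of the original snippet): three land cells
# start with 4 edges each and lose one edge per land neighbour, so the two
# shared borders remove 2 edges per side: 4*3 - 2*2 = 8.
if __name__ == "__main__":
    demo_grid = [[1, 1],
                 [1, 0]]
    print(Solution().islandPerimeter(demo_grid))  # -> 8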
|
11459521
|
from django.views.generic import DetailView
from templated_email.models import SavedEmail
class ShowEmailView(DetailView):
model = SavedEmail
template_name = 'templated_email/saved_email.html'
slug_field = 'uuid'
slug_url_kwarg = 'uuid'
|
11459533
|
import sublime
import sublime_plugin
def plugin_loaded():
global close_sidebar_if_opened
global settings
global settings_base
settings = sublime.load_settings("FocusFileOnSidebar.sublime-settings")
settings_base = sublime.load_settings("Preferences.sublime-settings")
plugin_reload()
settings.add_on_change('reload', plugin_reload)
settings_base.add_on_change('focusfileonsidebar-reload', plugin_reload)
def plugin_reload():
global close_sidebar_if_opened
close_sidebar_if_opened = settings_base.get(
'close_sidebar_if_opened', settings.get('close_sidebar_if_opened'))
def plugin_unloaded():
settings.clear_on_change('reload')
settings_base.clear_on_change('focusfileonsidebar-reload')
def refresh_folders(self):
data = get_project_json(self)
set_project_json(self, {})
set_project_json(self, data)
def get_project_json(self):
return self.window.project_data()
def set_project_json(self, data):
return self.window.set_project_data(data)
def reveal_and_focus_in_sidebar(self):
self.window.run_command("reveal_in_side_bar")
self.window.run_command("focus_side_bar")
class FocusFileOnSidebar(sublime_plugin.WindowCommand):
def run(self):
if not self.window.is_sidebar_visible():
refresh_folders(self)
self.window.set_sidebar_visible(True)
# set_project_data is asynchronous so we need settimeout for subsequent commands
sublime.set_timeout_async(lambda: reveal_and_focus_in_sidebar(self), 250)
else:
if close_sidebar_if_opened:
self.window.set_sidebar_visible(False)
refresh_folders(self)
else:
refresh_folders(self)
sublime.set_timeout_async(lambda: reveal_and_focus_in_sidebar(self), 250)
|
11459540
|
import networkx as nx
import asyncio
import ccxt.async_support as ccxt
import json
from .settings import COLLECTIONS_DIR
__all__ = [
'ExchangeMultiGraphBuilder',
'build_arbitrage_graph_for_exchanges',
'build_multi_graph_for_exchanges',
]
class ExchangeMultiGraphBuilder:
def __init__(self, exchanges: list):
self.exchanges = exchanges
self.graph = nx.MultiGraph()
def build_multi_graph(self, write=False, ccxt_errors=True):
futures = [asyncio.ensure_future(self._add_exchange_to_graph(exchange_name, ccxt_errors)) for
exchange_name in self.exchanges]
asyncio.get_event_loop().run_until_complete(asyncio.gather(*futures))
if write:
with open(COLLECTIONS_DIR + 'graph.json', 'w') as outfile:
json.dump(self.graph, outfile)
return self.graph
async def _add_exchange_to_graph(self, exchange_name: str, ccxt_errors=True):
"""
:param ccxt_errors: if true, raises errors ccxt raises when calling load_markets. The common ones are
RequestTimeout and ExchangeNotAvailable, which are caused by problems with exchanges' APIs.
"""
exchange = getattr(ccxt, exchange_name)()
if ccxt_errors:
await exchange.load_markets()
await exchange.close()
else:
try:
await exchange.load_markets()
await exchange.close()
except ccxt.BaseError:
await exchange.close()
return
for market_name in exchange.symbols:
currencies = market_name.split('/')
try:
self.graph.add_edge(currencies[0], currencies[1], exchange_name=exchange_name, market_name=market_name)
            # certain exchanges (lykke, possibly more) list markets without a '/' separator, which raises IndexError
except IndexError as e:
pass
def build_multi_graph_for_exchanges(exchanges: list):
"""
    A wrapper function around the ExchangeMultiGraphBuilder class which builds and returns the multigraph
    for the given exchanges.
    :param exchanges: A list of exchanges (e.g. ['bittrex', 'poloniex', 'bitstamp', 'anxpro'])
"""
return ExchangeMultiGraphBuilder(exchanges).build_multi_graph()
def build_arbitrage_graph_for_exchanges(exchanges: list, k_core=2):
"""
This function is currently inefficient as it finds the entire graph for the given exchanges then finds the k-core
for that graph. todo: It would be great if someone could improve the efficiency of it but this is not a priority.
IMPORTANT: For this function to work, the @not_implemented_for('multigraph') decorator above the core_number
function in networkx.algorithms.core.py must be removed or commented out.
Todo: Improve this project so that the above does not have to be done.
    :param exchanges: A list of exchanges (e.g. ['bittrex', 'poloniex', 'bitstamp', 'anxpro'])
"""
return nx.k_core(build_multi_graph_for_exchanges(exchanges), k_core)
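# Minimal usage sketch (not part of the original module): building the graph
# queries the exchanges' public APIs through ccxt, so this needs network access
# and may take a while.  The exchange names follow the docstring examples.
if __name__ == '__main__':
    graph = build_multi_graph_for_exchanges(['bittrex', 'poloniex'])
    print(graph.number_of_nodes(), graph.number_of_edges())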
|
11459552
|
import torch
import torch.nn as nn
import numpy as np
from torch.distributions.multivariate_normal import MultivariateNormal
class GaussianDistribution(nn.Module):
"""
Standard Normal Likelihood
"""
def __init__(self, size):
super().__init__()
self.size = size
self.dim = dim = int(np.prod(size))
self.N = MultivariateNormal(torch.zeros(dim, device='cuda'),
torch.eye(dim, device='cuda'))
def forward(self, input, context=None):
return self.log_prob(input, context).sum(-1)
def log_prob(self, input, context=None, sum=True):
return self.N.log_prob(input.view(-1, self.dim))
def sample(self, n_samples, context=None):
x = self.N.sample((n_samples,)).view(n_samples, *self.size)
log_px = self.log_prob(x, context)
return x, log_px
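# Minimal usage sketch (not part of the original module): the base distribution
# is hard-coded to CUDA tensors above, so this only runs on a GPU machine.
if __name__ == '__main__':
    prior = GaussianDistribution(size=(3, 8, 8))
    samples, log_px = prior.sample(4)
    print(samples.shape, log_px.shape)  # torch.Size([4, 3, 8, 8]) torch.Size([4])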
|
11459580
|
from django.conf import settings
from django.utils.translation import gettext_lazy as _
# Example:
SETTING_1 = getattr(settings, "MAGAZINE_SETTING_1", "default value")
MEANING_OF_LIFE = getattr(settings, "MAGAZINE_MEANING_OF_LIFE", 42)
ARTICLE_THEME_CHOICES = getattr(
settings,
"MAGAZINE_ARTICLE_THEME_CHOICES",
[
('futurism', _("Futurism")),
('nostalgia', _("Nostalgia")),
('sustainability', _("Sustainability")),
('wonder', _("Wonder")),
]
)
|
11459594
|
import argparse, random, os, math, functools, errno
from operator import mul
def create_dirs(program):
dirname = "data/"+program
if not os.path.exists(dirname):
try:
os.makedirs(dirname)
except OSError as e:
if e.errno != errno.EEXIST: raise
def get_rand_list(bits, l):
return [random.getrandbits(bits) for _ in range(l)]
def gen_mult3_input(n) :
product = 1
bits = int(n/3)
xs = get_rand_list(bits,3)
ys = get_rand_list(bits,3)
fx = open("data/mult3/%d.1.dat"%n, 'w')
fy = open("data/mult3/%d.2.dat"%n, 'w')
for i in range(3):
fx.write("%d\n"%xs[i])
fy.write("%d\n"%ys[i])
fx.close()
fy.close()
result = functools.reduce(mul, [x+y for x,y in zip(xs,ys)], 1)
print("expected value: %d"%result)
print(" l binary: {0:b}".format(result))
def gen_input(program, n, l):
if (n > 32):
print ("invalid bit length---this test can only handle up to 32 bits")
print ("because we read in input using `stoi`")
return
bits = int((n - int(math.log(l, 2))) / 2)
# print bits
lists = [(i,get_rand_list(bits,l)) for i in [1,2]]
for party,data in lists:
with open("data/%s/%d.%s.dat"%(program,n,party),'w') as f:
for x in data:
f.write("%d\n"%x)
print("expected value: %d"%(sum(x*y for x,y in zip(lists[0][1], lists[1][1]))))
def gen_xtabs_input(n, l):
LEN = l
IDMAX = min(2 * LEN, 2**n)
BINS = 5
with open("data/xtabs/%d.bins.dat"%n,'w') as f:
xs = [(random.randint(0,IDMAX), random.randint(0,BINS-1)) for _ in range(LEN)]
for idx, binx in xs:
x = "%d %d\n"%(idx, binx)
f.write(x)
with open("data/xtabs/%d.vals.dat"%n,'w') as f:
ys = [(random.randint(0,IDMAX),
random.getrandbits(int(n/int(math.log(l,2))))) for _ in range(LEN)]
for idy, val in ys:
y = "%d %d\n"%(idy,val)
f.write(y)
binsums = [0] * BINS
for idx, binx in xs:
for idy, val in ys:
if idx == idy:
binsums[binx] += val
print("expected value: ", binsums)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='generates input for emp-toolkit sample programs')
parser.add_argument('-n', default=32, type=int,
help="integer bit length")
parser.add_argument('-l', default=10, type=int,
help="array length (for innerprod, xtabs)")
programs = ["mult3","innerprod","xtabs"]
parser.add_argument('-e', default="xtabs", choices = programs,
help="program selection")
args = parser.parse_args()
create_dirs(args.e)
if args.e == "mult3":
gen_mult3_input(args.n)
elif args.e == "innerprod":
gen_input(args.e, args.n, args.l)
elif args.e == "xtabs":
gen_xtabs_input(args.n, args.l)
|
11459651
|
from __future__ import unicode_literals
from os_urlpattern.parse_utils import (EMPTY_PARSED_PIECE, PieceParser,
analyze_url)
from os_urlpattern.piece_pattern_node import (PiecePatternNode,
build_from_parsed_pieces,
build_from_piece_pattern_nodes)
from os_urlpattern.utils import dump_tree, pick
def test_count():
num = 100
urls = ['http://test.com/abc/%d' % i for i in range(num)]
parser = PieceParser()
root = PiecePatternNode((EMPTY_PARSED_PIECE, None))
for url in urls:
_, pieces = analyze_url(url)
parsed_pieces = [parser.parse(piece) for piece in pieces]
build_from_parsed_pieces(root, parsed_pieces)
assert root.count == num
for url in urls:
_, pieces = analyze_url(url)
parsed_pieces = [parser.parse(piece) for piece in pieces]
build_from_parsed_pieces(root, parsed_pieces)
assert root.count == num
root01 = PiecePatternNode((EMPTY_PARSED_PIECE, None))
for nodes in dump_tree(root):
build_from_piece_pattern_nodes(root01, nodes[1:])
assert root01.count == num
nodes = pick(dump_tree(root))
assert nodes[-1].parrent.children_num == num
assert str(nodes[-1].parrent.pattern) == "abc"
|
11459656
|
from django.forms import ModelForm
from cases.models import Case
from django import forms
from individuals.models import Individual
# Create the form class.
class CaseForm(ModelForm):
class Meta:
model = Case
fields = '__all__'
|
11459668
|
from .__version__ import __version__
from .core.session import Session
from .core.wrapper import BrowserWrapper
from .core.exceptions import LogInError, NotConnectedError
from .core._parser import InfoTypes, ResultTypes
def init_session():
return Session(BrowserWrapper())
|
11459745
|
import re
from abc import ABC, abstractmethod
from typing import List, Dict, Any, Set, Optional
DOCSTRING_REGEX_TOKENIZER = re.compile(r"[^\s,'\"`.():\[\]=*;>{\}+-/\\]+|\\+|\.+|\(\)|{\}|\[\]|\(+|\)+|:+|\[+|\]+|{+|\}+|=+|\*+|;+|>+|\++|-+|/+")
def tokenize_docstring(docstring: str) -> List[str]:
return [t for t in DOCSTRING_REGEX_TOKENIZER.findall(docstring) if t is not None and len(t) > 0]
def tokenize_code(node, blob: str) -> List:
tokens = []
traverse(node, tokens)
return [match_from_span(token, blob) for token in tokens]
def traverse(node, results: List) -> None:
if node.type == 'string':
results.append(node)
return
for n in node.children:
traverse(n, results)
if not node.children:
results.append(node)
def nodes_are_equal(n1, n2):
return n1.type == n2.type and n1.start_point == n2.start_point and n1.end_point == n2.end_point
def previous_sibling(tree, node):
"""
Search for the previous sibling of the node.
TODO: C TreeSitter should support this natively, but not its Python bindings yet. Replace later.
"""
to_visit = [tree.root_node]
while len(to_visit) > 0:
next_node = to_visit.pop()
for i, node_at_i in enumerate(next_node.children):
if nodes_are_equal(node, node_at_i):
if i > 0:
return next_node.children[i-1]
return None
else:
to_visit.extend(next_node.children)
    raise ValueError("Could not find node in tree.")
def node_parent(tree, node):
to_visit = [tree.root_node]
while len(to_visit) > 0:
next_node = to_visit.pop()
for child in next_node.children:
if nodes_are_equal(child, node):
return next_node
else:
to_visit.extend(next_node.children)
raise ValueError("Could not find node in tree.")
def match_from_span(node, blob: str) -> str:
lines = blob.split('\n')
line_start = node.start_point[0]
line_end = node.end_point[0]
char_start = node.start_point[1]
char_end = node.end_point[1]
if line_start != line_end:
return '\n'.join([lines[line_start][char_start:]] + lines[line_start+1:line_end] + [lines[line_end][:char_end]])
else:
return lines[line_start][char_start:char_end]
def traverse_type(node, results: List, kind: str) -> None:
if node.type == kind:
results.append(node)
if not node.children:
return
for n in node.children:
traverse_type(n, results, kind)
class LanguageParser(ABC):
FILTER_PATHS = ()
@staticmethod
@abstractmethod
def get_definitions(tree, blob: str) -> List[Dict[str, Any]]:
pass
@staticmethod
@abstractmethod
def get_class_metadata(class_node, blob):
pass
@staticmethod
@abstractmethod
def get_function_metadata(function_node, blob) -> Dict[str, str]:
pass
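# Minimal sketch (not part of the original module): match_from_span only needs
# an object exposing start_point/end_point tuples, so a tiny stand-in node is
# enough to show the (line, column) span arithmetic.
def _match_from_span_demo():
    from collections import namedtuple
    FakeNode = namedtuple('FakeNode', ['start_point', 'end_point'])
    blob = "def add(a, b):\n    return a + b\n"
    node = FakeNode(start_point=(1, 4), end_point=(1, 16))  # second line, cols 4..16
    print(match_from_span(node, blob))  # -> 'return a + b'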
|
11459788
|
from copy import deepcopy
from typing import Dict
import pytest
import torch
from ludwig.features.category_feature import CategoryInputFeature
from ludwig.models.ecd import build_single_input
BATCH_SIZE = 2
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
@pytest.fixture(scope="module")
def category_config():
return {
"name": "category_column_name",
"type": "category",
"tied": None,
"embedding_size": 256,
"embeddings_on_cpu": False,
"pretrained_embeddings": None,
"embeddings_trainable": True,
"dropout": 0.0,
"vocab": ["a", "b", "c"],
"embedding_initializer": None,
}
@pytest.mark.parametrize("encoder", ["dense", "sparse"])
def test_category_input_feature(
category_config: Dict,
encoder: str,
) -> None:
    # setup category input feature definition
category_def = deepcopy(category_config)
category_def["encoder"] = encoder
# pickup any other missing parameters
CategoryInputFeature.populate_defaults(category_def)
# ensure no exceptions raised during build
input_feature_obj = build_single_input(category_def, None)
# check one forward pass through input feature
input_tensor = torch.randint(0, 3, size=(BATCH_SIZE,), dtype=torch.int32).to(DEVICE)
encoder_output = input_feature_obj(input_tensor)
assert encoder_output["encoder_output"].shape == (BATCH_SIZE, *input_feature_obj.output_shape)
|
11459807
|
import hashlib
import os
from typing import List, Tuple
from xml.etree import cElementTree as ETree
import yaml
from flask import current_app
from sqlalchemy import text
from yaml.loader import FullLoader
from actor_libs.database.orm import db
from actor_libs.utils import get_cwd, get_services_path
from app.models import (
DictCode, SystemInfo, User, Resource, Service,
Lwm2mObject, Lwm2mItem
)
__all__ = [
'convert_timescaledb', 'create_triggers', 'init_services',
'init_resources', 'init_admin_account', 'init_dict_code',
'init_system_info', 'init_lwm2m_info'
]
def convert_timescaledb():
""" timescaledb process """
timescaledb_init = """
CREATE EXTENSION IF NOT EXISTS timescaledb CASCADE;
"""
emqx_bills = """
SELECT create_hypertable('emqx_bills', 'msgTime');
"""
emqx_bills_hour = """
SELECT create_hypertable('emqx_bills_hour', 'countTime');
"""
device_events = """
SELECT create_hypertable('device_events', 'msgTime');
"""
device_events_hour = """
SELECT create_hypertable('device_events_hour', 'countTime');
"""
publish_logs = """
SELECT create_hypertable('publish_logs', 'msgTime');
"""
client_connect_logs = """
SELECT create_hypertable('connect_logs', 'msgTime');
"""
with db.engine.begin() as connection:
connection.execute(timescaledb_init)
connection.execute(emqx_bills)
connection.execute(emqx_bills_hour)
connection.execute(device_events)
connection.execute(device_events_hour)
connection.execute(publish_logs)
connection.execute(client_connect_logs)
def create_triggers():
create_latest_events_fn = """
CREATE OR REPLACE FUNCTION create_latest_events_fn()
RETURNS TRIGGER
LANGUAGE PLPGSQL AS
$BODY$
BEGIN
INSERT INTO device_events_latest
VALUES (NEW."msgTime", NEW."tenantID", NEW."deviceID", NEW."dataType",
NEW."streamID",NEW.topic, NEW.data, NEW."responseResult")
ON CONFLICT ("tenantID","deviceID")
DO UPDATE SET "msgTime"=NEW."msgTime",
"dataType"=NEW."dataType",
"streamID"=NEW."streamID",
topic=NEW.topic,
data=NEW.data,
"responseResult"=NEW."responseResult";
RETURN NEW;
END
$BODY$;
"""
create_latest_events_trigger = """
CREATE TRIGGER create_latest_events_trigger
BEFORE INSERT OR UPDATE
ON device_events
FOR EACH ROW
EXECUTE PROCEDURE create_latest_events_fn();
"""
with db.engine.begin() as connection:
connection.execute(create_latest_events_fn)
connection.execute(create_latest_events_trigger)
def init_services() -> None:
""" services table init """
project_backend = get_cwd()
service_path = os.path.join(project_backend, 'config/base/services.yml')
if not os.path.isfile(service_path):
raise RuntimeError(f"The file {service_path} does not exist.")
with open(service_path, 'r', encoding='utf-8') as load_file:
service_data = yaml.load(load_file, Loader=FullLoader)
query_service_dict = dict(
db.session.query(Service.code, Service).all()
)
for code, service_values in service_data.items():
if query_service_dict.get(code):
query_service = query_service_dict.get(code)
for key, value in service_values.items():
if hasattr(query_service, key):
setattr(query_service, key, value)
else:
service = Service()
for key, value in service_values.items():
if hasattr(service, key):
setattr(service, key, value)
db.session.add(service)
db.session.commit()
info = "services table init successfully!"
print(info)
def init_resources() -> None:
"""Initialize resources table """
level1, level2, level3, level4 = [], [], [], []
services_path = get_services_path().values()
for service_path in services_path:
resource_path = os.path.join(service_path, 'resources.yml')
if not os.path.isfile(resource_path):
continue
else:
with open(resource_path, 'r', encoding='utf-8') as load_file:
resource_data = yaml.load(load_file, Loader=FullLoader)
if not resource_data:
continue
for _, value in resource_data.items():
if value.get('level') == 1:
level1.append(value)
elif value.get('level') == 2:
level2.append(value)
elif value.get('level') == 3:
level3.append(value)
else:
level4.append(value)
all_levels_resources = [level1, level2, level3, level4]
for level_resources in all_levels_resources:
_insert_resources(level_resources=level_resources)
info = "resources successfully!"
print(info)
def init_admin_account() -> None:
""" Initialize admin user """
email = current_app.config.get('ADMIN_EMAIL', '<EMAIL>')
password = current_app.config.get('ADMIN_PASSWORD', '<PASSWORD>').encode('utf-8')
admin_user = User(
username='admin',
email=email, roleIntID=1,
password=<PASSWORD>()
)
db.session.add(admin_user)
db.session.commit()
info = "admin user init successfully!"
print(info)
def init_dict_code() -> None:
""" Initialize dict code table """
project_backend = get_cwd()
dict_code_path = os.path.join(project_backend, 'config/base/dict_code.yml')
if not os.path.isfile(dict_code_path):
raise RuntimeError(f"The file {dict_code_path} does not exist.")
with open(dict_code_path, 'r', encoding='utf-8') as load_file:
dict_code_yml = yaml.load(load_file, Loader=FullLoader)
# truncate dict_code table
truncate_sql = 'TRUNCATE TABLE dict_code RESTART IDENTITY;'
db.engine.execute(
text(truncate_sql).execution_options(autocommit=True)
)
for _, dict_code_values in dict_code_yml.items():
for dict_code_value in dict_code_values:
dict_code = DictCode()
for key, value in dict_code_value.items():
if hasattr(dict_code, key):
setattr(dict_code, key, value)
db.session.add(dict_code)
db.session.commit()
info = "dict_code table init successfully!"
print(info)
def init_system_info() -> None:
""" Initialize system info table """
system_info_key = (
'mqttBroker', 'mqttsBroker', 'mqttssBroker', 'coapBroker',
'coapsBroker', 'coapssBroker', 'wsBroker', 'wssBroker',
'projectVersion'
)
query = db.session.query(SystemInfo.key).all()
is_exist_keys = [key[0] for key in query]
for key in system_info_key:
if key in is_exist_keys:
continue
new_system_info = SystemInfo(key=key)
db.session.add(new_system_info)
db.session.commit()
info = "system info table init successfully!"
print(info)
def init_lwm2m_info() -> None:
""" Initialize lwm2m_object and lwm2m_item table """
project_backend = current_app.config['PROJECT_PATH']
lwm2m_xml_dir_path = os.path.join(project_backend, 'config/base/lwm2m_obj')
if not os.path.isdir(lwm2m_xml_dir_path):
raise RuntimeError(f"no such file or directory: lwm2m_xml_dir_path")
query_lwm2m_object = db.session.query(Lwm2mObject.objectID).all()
query_lwm2m_object_list = [i[0] for i in query_lwm2m_object]
query_lwm2m_items = db.session.query(Lwm2mItem.itemID, Lwm2mObject.objectID).all()
object_list, item_list = _parse_lwm2m_file(xml_path=lwm2m_xml_dir_path)
for lwm2m_object in object_list:
if int(lwm2m_object.get('ObjectID')) in query_lwm2m_object_list:
continue
insert_lwm2m_object = Lwm2mObject(
objectID=lwm2m_object.get('ObjectID'),
objectName=lwm2m_object.get('Name'),
description=lwm2m_object.get('Description1'),
objectURN=lwm2m_object.get('ObjectURN'),
mandatory=lwm2m_object.get('Mandatory'),
objectVersion=lwm2m_object.get('ObjectVersion'),
multipleInstance=lwm2m_object.get('MultipleInstances'))
db.session.add(insert_lwm2m_object)
db.session.flush()
for lwm2m_item in item_list:
# is exist jump
item_id_tuple = (
int(lwm2m_item.get('ID')), int(lwm2m_item.get('ObjectID'))
)
if item_id_tuple in query_lwm2m_items:
continue
insert_lwm2m_item = Lwm2mItem(
objectID=lwm2m_item.get('ObjectID'),
itemID=lwm2m_item.get('ID'),
objectItem=f'/{lwm2m_item.get("ObjectID")}/{lwm2m_item.get("ID")}',
itemName=lwm2m_item.get('Name'),
description=lwm2m_item.get('Description'),
itemType=lwm2m_item.get('Type'),
itemOperations=lwm2m_item.get('Operations'),
itemUnit=lwm2m_item.get('Units'),
mandatory=lwm2m_item.get('Mandatory'),
rangeEnumeration=lwm2m_item.get('RangeEnumeration'),
multipleInstance=lwm2m_item.get('MultipleInstances'))
db.session.add(insert_lwm2m_item)
db.session.commit()
info = "lwm2m_object lwm2m_item table init successfully!"
print(info)
def _insert_resources(level_resources: List = None) -> None:
""" insert resources to database """
insert_resources_code = [
level_resource.get('code') for level_resource in level_resources
]
query_resources = db.session.query(Resource.code, Resource).all()
query_resources_dict = dict(query_resources)
query_resources_code = query_resources_dict.keys()
insert_code = set(insert_resources_code) ^ set(query_resources_code)
update_code = set(insert_resources_code) & set(query_resources_code)
for level_resource in level_resources:
level_code = level_resource.get('code')
if level_code in insert_code:
resource = Resource()
for key, value in level_resource.items():
if hasattr(resource, key):
setattr(resource, key, value)
db.session.add(resource)
elif level_code in update_code and query_resources_dict.get(level_code):
query_resource = query_resources_dict.get(level_code)
for key, value in level_resource.items():
if hasattr(query_resource, key):
setattr(query_resource, key, value)
else:
raise RuntimeError(f'Please check {level_resource}')
db.session.commit()
def _parse_lwm2m_file(xml_path: str) -> Tuple[list, list]:
"""
read lwm2m object and resource item in lwm2m xml file
:param xml_path: lwm2m object xml file path
"""
file_names = [
f"{xml_path}/{file_name}" for file_name in os.listdir(xml_path)
if file_name.endswith('.xml')
]
object_list = []
item_list = []
object_append = object_list.append
item_extend = item_list.extend
for xml_file in file_names:
object_dict, items = _lw2m_xml_to_dict(xml_file)
object_append(object_dict)
item_extend(items)
return object_list, item_list
def _lw2m_xml_to_dict(xml_file: str) -> Tuple[dict, list]:
"""
Converting lwm2m xml object to dict
:param xml_file: lwm2m object xml file
:return: object dict, resource dict list
"""
root = ETree.ElementTree(file=xml_file)
lwm2m_object = root.find('Object')
object_id = lwm2m_object.findtext('ObjectID')
object_dict = {}
for child in lwm2m_object:
object_dict[child.tag] = child.text
resources = lwm2m_object.iterfind('Resources/Item')
items = []
items_append = items.append
for resource in resources:
item_id = resource.attrib.get('ID')
item_dict = {'ID': item_id, 'ObjectID': object_id}
for item in resource:
item_dict[item.tag] = item.text
items_append(item_dict)
return object_dict, items
|
11459839
|
from collections import defaultdict
import os
import random
from moviepy.editor import *
import numpy as np
from PIL import Image
from heatmappy import Heatmapper
class VideoHeatmapper:
def __init__(self, img_heatmapper):
self.img_heatmapper = img_heatmapper
def heatmap_on_video(self, base_video, points,
heat_fps=20,
keep_heat=False,
heat_decay_s=None):
width, height = base_video.size
frame_points = self._frame_points(
points,
fps=heat_fps,
keep_heat=keep_heat,
heat_decay_s=heat_decay_s
)
heatmap_frames = self._heatmap_frames(width, height, frame_points)
heatmap_clips = self._heatmap_clips(heatmap_frames, heat_fps)
return CompositeVideoClip([base_video] + list(heatmap_clips))
def heatmap_on_video_path(self, video_path, points, heat_fps=20):
base = VideoFileClip(video_path)
return self.heatmap_on_video(base, points, heat_fps)
def heatmap_on_image(self, base_img, points,
heat_fps=20,
duration_s=None,
keep_heat=False,
heat_decay_s=None):
base_img = np.array(base_img)
points = list(points)
if not duration_s:
duration_s = max(t for x, y, t in points) / 1000
base_video = ImageClip(base_img).set_duration(duration_s)
return self.heatmap_on_video(
base_video, points,
heat_fps=heat_fps,
keep_heat=keep_heat,
heat_decay_s=heat_decay_s
)
def heatmap_on_image_path(self, base_img_path, points,
heat_fps=20,
duration_s=None,
keep_heat=False,
heat_decay_s=None):
base_img = Image.open(base_img_path)
return self.heatmap_on_image(
base_img, points,
heat_fps=heat_fps,
duration_s=duration_s,
keep_heat=keep_heat,
heat_decay_s=heat_decay_s
)
@staticmethod
def _frame_points(pts, fps, keep_heat=False, heat_decay_s=None):
interval = 1000 // fps
frames = defaultdict(list)
if not keep_heat:
for x, y, t in pts:
start = (t // interval) * interval
frames[start].append((x, y))
return frames
pts = list(pts)
last_interval = max(t for x, y, t in pts)
for x, y, t in pts:
start = (t // interval) * interval
pt_last_interval = int(start + heat_decay_s*1000) if heat_decay_s else last_interval
for frame_time in range(start, pt_last_interval+1, interval):
frames[frame_time].append((x, y))
return frames
def _heatmap_frames(self, width, height, frame_points):
for frame_start, points in frame_points.items():
heatmap = self.img_heatmapper.heatmap(width, height, points)
yield frame_start, np.array(heatmap)
@staticmethod
def _heatmap_clips(heatmap_frames, fps):
interval = 1000 // fps
for frame_start, heat in heatmap_frames:
yield (ImageClip(heat)
.set_start(frame_start/1000)
.set_duration(interval/1000))
def _example_random_points():
def rand_point(max_x, max_y, max_t):
return random.randint(0, max_x), random.randint(0, max_y), random.randint(0, max_t)
return (rand_point(720, 480, 40000) for _ in range(500))
def main():
example_base_img = os.path.join('assets', 'cat.jpg')
img_heatmapper = Heatmapper(colours='default', point_strength=0.6)
video_heatmapper = VideoHeatmapper(img_heatmapper)
heatmap_video = video_heatmapper.heatmap_on_image_path(
base_img_path=example_base_img,
points=_example_random_points(),
duration_s=40,
keep_heat=True
)
heatmap_video.write_videofile('out_on_image.mp4', bitrate="5000k", fps=24)
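# A small additional sketch (not part of the original script): the same heatmapper applied to a
# real video clip via heatmap_on_video. 'assets/example.mp4' is a hypothetical local file, and
# heat_decay_s=3 makes each point fade roughly three seconds after it appears.
def _example_on_video():
    img_heatmapper = Heatmapper(colours='default', point_strength=0.6)
    video_heatmapper = VideoHeatmapper(img_heatmapper)
    heatmap_video = video_heatmapper.heatmap_on_video(
        base_video=VideoFileClip(os.path.join('assets', 'example.mp4')),
        points=_example_random_points(),
        heat_fps=20,
        keep_heat=True,
        heat_decay_s=3,
    )
    heatmap_video.write_videofile('out_on_video.mp4', bitrate="5000k", fps=24)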
if __name__ == '__main__':
main()
|
11459883
|
import subprocess
import glob
import os
import tempfile
import lxml.etree
ICON_MAPPING = {
'cal_homework.gif': 'grading',
'check.gif': 'done',
'delete2.gif': 'delete',
'discuss.gif': 'forum',
'doc2.gif': 'description',
'edit2.gif': 'edit',
'find.png': 'search',
'hot.gif': 'whatshot',
'info.gif': 'info',
'item2.gif': 'attachment',
'link_doc.gif': 'description',
'lock.gif': 'lock',
'mail.png': 'email',
'phone.gif': 'phone',
'printer.gif': 'print',
'rss.gif': 'rss_feed',
'wait.gif': 'hourglass_top',
'web.gif': 'home',
'webhd.gif': 'cloud',
'zoom.jpg': 'zoom_in',
}
OUTDIR = 'freeze-app/sys/res/icon'
os.makedirs(OUTDIR, exist_ok=True)
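# For each target asset, recolor the matching Material icon SVG, rasterize it to 16x16 with
# Inkscape, and convert it to the expected filename/format with ImageMagick.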
for asset, icon in ICON_MAPPING.items():
dst = os.path.join(OUTDIR, asset)
print(dst)
(src,) = glob.glob(f'material-design-icons/src/*/{icon}/materialicons/24px.svg')
svg = lxml.etree.parse(src)
for g in svg.xpath('//*[local-name()="path"]'):
if g.attrib.get('fill') == 'none':
continue
g.attrib['style'] = 'fill: #4a4a4a'
with tempfile.TemporaryDirectory() as d:
png = subprocess.run(
[
'inkscape',
'--pipe',
f'--export-filename={d}/out.png',
'--export-type=png',
'--export-width=16',
'--export-height=16',
],
input=lxml.etree.tostring(svg),
check=True,
)
subprocess.run(
['convert', f'{d}/out.png', dst],
check=True,
)
|
11459905
|
import FWCore.ParameterSet.Config as cms
#
# module to fill the full-hadronic ttbar event structure
#
ttFullHadEvent = cms.EDProducer("TtFullHadEvtBuilder",
## choose leptonic decay modes
decayChannel1 = cms.int32(0), # 0: none
# 1: electron
# 2: muon
# 3: tau
decayChannel2 = cms.int32(0), # 0: none
# 1: electron
# 2: muon
# 3: tau
## set verbosity level
verbosity = cms.int32(0), # 0: no additional printout
# 1: print a summary for each event
## add genEvt (if available)
genEvent = cms.InputTag("genEvt"),
## labels for event hypotheses
## (this vector of strings can be modified using the functions
## addTtFullHadHypotheses and removeTtFullHadHypGenMatch in
## TopQuarkAnalysis.TopEventProducers.sequences.ttFullHadEvtBuilder_cff)
hypotheses = cms.VInputTag("ttFullHadHypGenMatch"), # "ttFullHadHypKinFit"
## add extra information on kinFit
kinFit = cms.PSet(
chi2 = cms.InputTag("kinFitTtFullHadEventHypothesis","Chi2"),
prob = cms.InputTag("kinFitTtFullHadEventHypothesis","Prob"),
),
## add extra information on genMatch
genMatch = cms.PSet(
sumPt = cms.InputTag("ttFullHadJetPartonMatch","SumPt"),
sumDR = cms.InputTag("ttFullHadJetPartonMatch","SumDR"),
)
)
|
11459966
|
from io import BytesIO
from PIL import Image, ImageEnhance
from colors import color
def render(image_data, width=120, height_scale=0.55, colorize=True):
with BytesIO(image_data) as fp:
img = Image.open(fp)
org_width, orig_height = img.size
aspect_ratio = orig_height / org_width
new_height = aspect_ratio * width * height_scale
img = img.resize((width, int(new_height)))
img = img.convert('RGBA')
img = ImageEnhance.Sharpness(img).enhance(2.0)
pixels = img.getdata()
def mapto(r, g, b, alpha):
if alpha == 0.:
return ' '
chars = ["B", "S", "#", "&", "@", "$", "%", "*", "!", ".", " "]
pixel = (r * 19595 + g * 38470 + b * 7471 + 0x8000) >> 16
if colorize:
return color(chars[pixel // 25], (r, g, b))
else:
return chars[pixel // 25]
new_pixels = [mapto(r, g, b, alpha) for r, g, b, alpha in pixels]
new_pixels_count = len(new_pixels)
ascii_image = [''.join(new_pixels[index:index + width]) for index in range(0, new_pixels_count, width)]
ascii_image = "\n".join(ascii_image)
return ascii_image
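# A minimal usage sketch (not part of the original module). 'cat.jpg' is a hypothetical local
# image; its raw bytes are passed in and the plain (uncolored) ASCII art is printed.
if __name__ == '__main__':
    with open('cat.jpg', 'rb') as image_file:
        print(render(image_file.read(), width=100, colorize=False))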
|
11459977
|
verb_dict = {}
with open("irregular_verbs.txt", "r") as f:
    lines = f.readlines()
for line in lines:
    tenses = line.split("\t")
    verb_dict[tenses[0]] = tenses
def is_vowel(c):
return c=='a' or c=='e' or c=='i' or c=='o' or c=='u'
def is_conso(c):
return (not is_vowel(c)) and (not c=='y') and (not c=='h') and (not c=='r') and (not c == 'l') and (not c == 'w')
def regular_past(word):
if word[-1] == 'e':
return word + 'd'
elif word[-1] == 'y' and is_conso(word[-2]):
return word[:-1]+"ied"
elif len(word)>2 and is_conso(word[-1]) and is_vowel(word[-2]) and not is_vowel(word[-3]):
return word + word[-1] + 'ed'
else:
return word+'ed'
def find_past_participle(word):
if word.lower() == '\'s':
return 'had'
    if word in verb_dict:
        z = verb_dict[word][2]
if z[-1] == '\n':
return z[:-1]
else:
return z
else:
return regular_past(word)
def find_past(word):
if word=='\'m' or word=='am':
return 'was'
if word == '\'s':
return 'had'
if word == 'is':
return 'was'
elif word == 'are':
return 'were'
    elif word in verb_dict:
        return verb_dict[word][1]
else:
return regular_past(word)
def pluralverb(word):
if word == 'have':
return 'has'
if word == 'do':
return 'does'
if word == 'is' or word == 'are':
return 'is'
if word[-1] == 'y' and not is_vowel(word[-2]):
return word[:-1]+'ies'
elif word[-1] == 's' or word[-1] == 'z' or word[-1] == 'x':
return word + "es"
else:
return word + "s"
|
11459989
|
from tensorflow.keras import layers, Model
from tensorflow.keras.applications import ResNet50, ResNet101, ResNet152
from tensorflow.keras.regularizers import l2
from utils import add_regularization, get_flops
BACKBONES = {
'resnet50': ResNet50,
'resnet101': ResNet101,
'resnet152': ResNet152
}
def SimpleBaseline(cfg):
regularizer = l2(cfg.TRAIN.WD)
if cfg.MODEL.LOAD_WEIGHTS:
weights = 'imagenet'
else:
weights = None
backbone = BACKBONES[cfg.MODEL.BACKBONE](
weights=weights,
include_top=False,
input_shape=cfg.DATASET.INPUT_SHAPE)
backbone = add_regularization(backbone, regularizer)
x = backbone.output
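    # three stride-2 transposed convolutions upsample the backbone output 8x before the final 1x1 prediction layer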
for i in range(3):
x = layers.Conv2DTranspose(
cfg.MODEL.HEAD_CHANNELS,
cfg.MODEL.HEAD_KERNEL,
strides=2,
padding='same',
use_bias=False,
kernel_regularizer=regularizer,
name='head_conv{}'.format(i + 1))(x)
x = layers.BatchNormalization(name='head_bn{}'.format(i + 1))(x)
x = layers.Activation(cfg.MODEL.HEAD_ACTIVATION, name='head_act{}'.format(i + 1))(x)
x = layers.Conv2D(
cfg.DATASET.OUTPUT_SHAPE[-1],
1,
padding='same',
use_bias=True,
kernel_regularizer=regularizer,
name='final_conv')(x)
return Model(backbone.input, x, name='sb_{}'.format(cfg.MODEL.BACKBONE))
if __name__ == '__main__':
from dataset.coco import cn as cfg
cfg.merge_from_file('../configs/sb_resnet50_256x192.yaml')
cfg.DATASET.INPUT_SHAPE = [384, 288, 3]
cfg.MODEL.BACKBONE = 'resnet152'
model = SimpleBaseline(cfg)
model.summary()
print('{:.2f}M parameters | {:.2f}G multiply-adds'
.format(model.count_params() / 1e6, get_flops(model) / 1e9 / 2))
|
11460000
|
from utils import *
def do_inference(model, path):
img = open_img(path)
return get_pred(model, img)
if __name__ == '__main__':
model = prepare_model()
print('Finished loading model------------------------------')
print('-----------------------------------------------------')
do_inference(model, path='data/glass51.jpg')
|
11460026
|
import uuid
from typing import List
from azure.cosmos import CosmosClient
from pydantic import parse_obj_as
from db.repositories.resources import ResourceRepository
from models.domain.workspace_service import WorkspaceService
from models.schemas.workspace_service import WorkspaceServiceInCreate, WorkspaceServicePatchEnabled
from resources import strings
from db.errors import ResourceIsNotDeployed, EntityDoesNotExist
from models.domain.resource import Deployment, Status, ResourceType
class WorkspaceServiceRepository(ResourceRepository):
def __init__(self, client: CosmosClient):
super().__init__(client)
@staticmethod
def active_workspace_services_query(workspace_id: str):
return f'SELECT * FROM c WHERE c.resourceType = "{ResourceType.WorkspaceService}" AND c.deployment.status != "{Status.Deleted}" AND c.workspaceId = "{workspace_id}"'
def get_active_workspace_services_for_workspace(self, workspace_id: str) -> List[WorkspaceService]:
"""
returns list of "non-deleted" workspace services linked to this workspace
"""
query = WorkspaceServiceRepository.active_workspace_services_query(workspace_id)
workspace_services = self.query(query=query)
return parse_obj_as(List[WorkspaceService], workspace_services)
def get_deployed_workspace_service_by_id(self, workspace_id: str, service_id: str) -> WorkspaceService:
workspace_service = self.get_workspace_service_by_id(workspace_id, service_id)
if workspace_service.deployment.status != Status.Deployed:
raise ResourceIsNotDeployed
return workspace_service
def get_workspace_service_by_id(self, workspace_id: str, service_id: str) -> WorkspaceService:
query = self.active_workspace_services_query(workspace_id) + f' AND c.id = "{service_id}"'
workspace_services = self.query(query=query)
if not workspace_services:
raise EntityDoesNotExist
return parse_obj_as(WorkspaceService, workspace_services[0])
def get_workspace_service_spec_params(self):
return self.get_resource_base_spec_params()
def create_workspace_service_item(self, workspace_service_input: WorkspaceServiceInCreate, workspace_id: str) -> WorkspaceService:
full_workspace_service_id = str(uuid.uuid4())
template_version = self.validate_input_against_template(workspace_service_input.templateName, workspace_service_input, ResourceType.WorkspaceService)
# we don't want something in the input to overwrite the system parameters, so dict.update can't work.
resource_spec_parameters = {**workspace_service_input.properties, **self.get_workspace_service_spec_params()}
workspace_service = WorkspaceService(
id=full_workspace_service_id,
workspaceId=workspace_id,
templateName=workspace_service_input.templateName,
templateVersion=template_version,
properties=resource_spec_parameters,
deployment=Deployment(status=Status.NotDeployed, message=strings.RESOURCE_STATUS_NOT_DEPLOYED_MESSAGE)
)
return workspace_service
def patch_workspace_service(self, workspace_service: WorkspaceService, workspace_service_patch: WorkspaceServicePatchEnabled):
workspace_service.properties["enabled"] = workspace_service_patch.enabled
self.update_item(workspace_service)
|
11460036
|
from .abc import *
from .neighbor import *
from .routing import *
from .mac import *
from .ipaddress import *
from .password_type import *
from .route_target import *
from .route_distinguisher import *
from .redistribution_attr import *
|
11460066
|
import gym
import math
from copy import deepcopy
import numpy as np
import matplotlib.pyplot as plt
env = gym.make('MountainCar-v0')
Q_table = np.zeros((20,20,3))
alpha=0.3
buckets=[20, 20]
gamma=0.99
rewards=[]
episodes = 3000
def to_discrete_states(observation):
interval=[0 for i in range(len(observation))]
max_range=[1.2,0.07]
for i in range(len(observation)):
data = observation[i]
inter = int(math.floor((data + max_range[i])/(2*max_range[i]/buckets[i])))
if inter>=buckets[i]:
interval[i]=buckets[i]-1
elif inter<0:
interval[i]=0
else:
interval[i]=inter
return interval
def expect_epsilon(t):
return min(0.015, 1.0 - math.log10((t+1)/220.))
def expect_alpha(t):
return min(0.1, 1.0 - math.log10((t+1)/125.))
def get_action(observation,t):
if np.random.random()<max(0.001, expect_epsilon(t)):
return env.action_space.sample()
interval = to_discrete_states(observation)
return np.argmax(np.array(Q_table[tuple(interval)]))
def update_SARSA(observation,reward,action,ini_obs,next_action,t):
interval = to_discrete_states(observation)
Q_next = Q_table[tuple(interval)][next_action]
ini_interval = to_discrete_states(ini_obs)
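    # SARSA update: Q(s,a) <- Q(s,a) + alpha * (reward + gamma * Q(s',a') - Q(s,a))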
Q_table[tuple(ini_interval)][action]+=max(0.4, expect_alpha(t))*(reward + gamma*(Q_next) - Q_table[tuple(ini_interval)][action])
for episode in range(episodes):
observation = env.reset()
t=0
done=False
while (done==False):
#env.render()
#print(observation)
action = get_action(observation,episode)
obs_next, reward, done, info = env.step(action)
next_action = get_action(obs_next,episode)
update_SARSA(obs_next,reward,action,observation,next_action,episode)
observation=obs_next
action = next_action
t+=1
rewards.append(t+1)
plt.plot(rewards)
plt.show()
|
11460080
|
import torch.nn as nn
from collections import OrderedDict
import torch.nn.functional as F
import torch
class conv_bn(nn.Module):
def __init__(self, inp, oup, kernel, stride, padding, activate='relu6'):
super(conv_bn, self).__init__()
if activate == 'relu6':
self.convbn = nn.Sequential(OrderedDict([
('conv', nn.Conv2d(inp, oup, kernel, stride, padding, bias=False)),
('bn', nn.BatchNorm2d(oup)),
('relu', nn.ReLU6(inplace=True))
]))
elif activate == 'leaky':
self.convbn = nn.Sequential(OrderedDict([
('conv', nn.Conv2d(inp, oup, kernel, stride, padding, bias=False)),
('bn', nn.BatchNorm2d(oup)),
('relu', nn.LeakyReLU(0.1))
]))
else:
raise AttributeError("activate type not supported")
def forward(self, input):
return self.convbn(input)
class ASFF(nn.Module):
def __init__(self, level, activate, rfb=False, vis=False):
super(ASFF, self).__init__()
self.level = level
self.dim = [512, 256, 128]
self.inter_dim = self.dim[self.level]
if level == 0:
self.stride_level_1 = conv_bn(256, self.inter_dim, kernel=3, stride=2, padding=1, activate=activate)
self.stride_level_2 = conv_bn(128, self.inter_dim, kernel=3, stride=2, padding=1, activate=activate)
self.expand = conv_bn(self.inter_dim, 512, kernel=3, stride=1, padding=1, activate=activate)
elif level == 1:
self.compress_level_0 = conv_bn(512, self.inter_dim, kernel=1, stride=1, padding=0, activate=activate)
self.stride_level_2 = conv_bn(128, self.inter_dim, kernel=3, stride=2, padding=1, activate=activate)
self.expand = conv_bn(self.inter_dim, 256, kernel=3, stride=1, padding=1, activate=activate)
elif level == 2:
self.compress_level_0 = conv_bn(512, self.inter_dim, kernel=1, stride=1, padding=0, activate=activate)
self.compress_level_1= conv_bn(256,self.inter_dim,kernel=1,stride=1,padding=0,activate=activate)
self.expand = conv_bn(self.inter_dim, 128, kernel=3, stride=1, padding=1, activate=activate)
compress_c = 8 if rfb else 16 # when adding rfb, we use half number of channels to save memory
self.weight_level_0 = conv_bn(self.inter_dim, compress_c, 1, 1, 0, activate=activate)
self.weight_level_1 = conv_bn(self.inter_dim, compress_c, 1, 1, 0, activate=activate)
self.weight_level_2 = conv_bn(self.inter_dim, compress_c, 1, 1, 0, activate=activate)
self.weight_levels = conv_bias(compress_c * 3, 3, kernel=1, stride=1, padding=0)
self.vis = vis
def forward(self, x_level_0, x_level_1, x_level_2):
if self.level == 0:
level_0_resized = x_level_0
level_1_resized = self.stride_level_1(x_level_1)
level_2_downsampled_inter = F.max_pool2d(x_level_2, 3, stride=2, padding=1)
level_2_resized = self.stride_level_2(level_2_downsampled_inter)
elif self.level == 1:
level_0_compressed = self.compress_level_0(x_level_0)
level_0_resized = F.interpolate(level_0_compressed, scale_factor=2, mode='nearest')
level_1_resized = x_level_1
level_2_resized = self.stride_level_2(x_level_2)
elif self.level == 2:
level_0_compressed = self.compress_level_0(x_level_0)
level_0_resized = F.interpolate(level_0_compressed, scale_factor=4, mode='nearest')
level_1_compressed = self.compress_level_1(x_level_1)
level_1_resized = F.interpolate(level_1_compressed, scale_factor=2, mode='nearest')
level_2_resized = x_level_2
level_0_weight_v = self.weight_level_0(level_0_resized)
level_1_weight_v = self.weight_level_1(level_1_resized)
level_2_weight_v = self.weight_level_2(level_2_resized)
levels_weight_v = torch.cat((level_0_weight_v, level_1_weight_v, level_2_weight_v), 1)
levels_weight = self.weight_levels(levels_weight_v)
levels_weight = F.softmax(levels_weight, dim=1)
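        # per-pixel softmax over the three level weights; the fusion below is their convex combination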
fused_out_reduced = level_0_resized * levels_weight[:, 0:1, :, :] + \
level_1_resized * levels_weight[:, 1:2, :, :] + \
level_2_resized * levels_weight[:, 2:, :, :]
out = self.expand(fused_out_reduced)
if self.vis:
return out, levels_weight, fused_out_reduced.sum(dim=1)
else:
return out
class conv_bias(nn.Module):
def __init__(self, inp, oup, kernel, stride, padding):
super(conv_bias, self).__init__()
self.conv = nn.Conv2d(inp, oup, kernel, stride, padding, bias=True)
def forward(self, input):
return self.conv(input)
class sepconv_bn(nn.Module):
def __init__(self, inp, oup, kernel, stride, padding, seprelu):
super(sepconv_bn, self).__init__()
if seprelu:
self.sepconv_bn = nn.Sequential(OrderedDict([
('sepconv', nn.Conv2d(inp, inp, kernel, stride, padding, groups=inp, bias=False)),
('sepbn', nn.BatchNorm2d(inp)),
('seprelu', nn.ReLU6(inplace=True)),
('pointconv', nn.Conv2d(inp, oup, 1, 1, 0, bias=False)),
('pointbn', nn.BatchNorm2d(oup)),
('pointrelu', nn.ReLU6(inplace=True)),
]))
else:
self.sepconv_bn = nn.Sequential(OrderedDict([
('sepconv', nn.Conv2d(inp, inp, kernel, stride, padding, groups=inp, bias=False)),
('sepbn', nn.BatchNorm2d(inp)),
('pointconv', nn.Conv2d(inp, oup, 1, 1, 0, bias=False)),
('pointbn', nn.BatchNorm2d(oup)),
('pointrelu', nn.ReLU6(inplace=True)),
]))
def forward(self, input):
return self.sepconv_bn(input)
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = round(inp * expand_ratio)
self.use_res_connect = self.stride == 1 and inp == oup
if expand_ratio == 1:
self.conv = nn.Sequential(OrderedDict([
('dw_conv', nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False)),
('dw_bn', nn.BatchNorm2d(hidden_dim)),
('dw_relu', nn.ReLU6(inplace=True)),
('project_conv', nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False)),
('project_bn', nn.BatchNorm2d(oup))
]))
else:
self.conv = nn.Sequential(OrderedDict(
[
('expand_conv', nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False)),
('expand_bn', nn.BatchNorm2d(hidden_dim)),
('expand_relu', nn.ReLU6(inplace=True)),
('dw_conv', nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False)),
('dw_bn', nn.BatchNorm2d(hidden_dim)),
('dw_relu', nn.ReLU6(inplace=True)),
('project_conv', nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False)),
('project_bn', nn.BatchNorm2d(oup))
]
)
)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
class DarknetBlock(nn.Module):
def __init__(self, inplanes, planes):
super(DarknetBlock, self).__init__()
self.darkblock = nn.Sequential(OrderedDict([
('conv1', nn.Conv2d(inplanes, planes[0], kernel_size=1,
stride=1, padding=0, bias=False)),
('bn1', nn.BatchNorm2d(planes[0])),
('relu1', nn.LeakyReLU(0.1)),
('project_conv', nn.Conv2d(planes[0], planes[1], kernel_size=3,
stride=1, padding=1, bias=False)),
('project_bn', nn.BatchNorm2d(planes[1])),
('project_relu', nn.LeakyReLU(0.1)),
]))
def forward(self, x):
out = self.darkblock(x)
out += x
return out
if __name__ == '__main__':
model=ASFF(1,activate='leaky')
l1=torch.ones(1,512,10,10)
l2=torch.ones(1,256,20,20)
l3=torch.ones(1,128,40,40)
out=model(l1,l2,l3)
print(out.shape)
|
11460132
|
def forward_order_status(order):
if order["status"] == "NEW":
order["status"] = "IN_PROGRESS"
elif order["status"] == "IN_PROGRESS":
order["status"] = "SHIPPED"
else:
order["status"] = "DONE"
return order
print(forward_order_status({"status": "NEW"})) # {"status": "IN_PROGRESS"}
print(forward_order_status({"status": "IN_PROGRESS"})) # {"status": "SHIPPED"}
print(forward_order_status({"status": "SHIPPED"})) # {"status": "DONE"}
|
11460146
|
from unittest import mock
from pyramid.authorization import Authenticated
from pyramid.interfaces import IAuthorizationPolicy
from zope.interface import implementer
from kinto.core import testing
from kinto.core.storage.exceptions import BackendError
from kinto.core.utils import sqlalchemy
from .testapp import main as testapp
# This is the principal a connected user should have (in the tests).
USER_PRINCIPAL = "basicauth:8a931a10fc88ab2f6d1cc02a07d3a81b5d4768f6f13e85c5" "d8d4180419acb1b4"
class BaseWebTest(testing.BaseWebTest):
"""Base Web Test to test your cornice service.
It setups the database before each test and delete it after.
"""
entry_point = testapp
principal = USER_PRINCIPAL
authorization_policy = "tests.core.support.AllowAuthorizationPolicy"
plural_url = "/mushrooms"
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.headers.update(testing.get_user_headers("mat"))
@classmethod
def get_app_settings(cls, extras=None):
if extras is None:
extras = {}
extras.setdefault("settings_prefix", "myapp")
extras.setdefault("project_name", "myapp")
extras.setdefault("project_version", "0.0.1")
extras.setdefault("http_api_version", "0.1")
extras.setdefault("project_docs", "https://kinto.readthedocs.io/")
extras.setdefault("multiauth.policies", "basicauth")
extras.setdefault("multiauth.authorization_policy", cls.authorization_policy)
return super().get_app_settings(extras)
def get_item_url(self, id=None):
"""Return the URL of the item using self.item_url."""
if id is None:
id = self.obj["id"]
return "{}/{}".format(self.plural_url, id)
@implementer(IAuthorizationPolicy)
class AllowAuthorizationPolicy:
def permits(self, context, principals, permission):
return Authenticated in principals
def principals_allowed_by_permission(self, context, permission):
raise NotImplementedError() # PRAGMA NOCOVER
class PostgreSQLTest(BaseWebTest):
@classmethod
def get_app_settings(cls, extras=None):
settings = super().get_app_settings(extras)
if sqlalchemy is not None:
from .test_cache import PostgreSQLCacheTest
from .test_permission import PostgreSQLPermissionTest
from .test_storage import PostgreSQLStorageTest
settings.update(**PostgreSQLStorageTest.settings)
settings.update(**PostgreSQLCacheTest.settings)
settings.update(**PostgreSQLPermissionTest.settings)
settings.pop("storage_poolclass", None)
settings.pop("cache_poolclass", None)
settings.pop("permission_poolclass", None)
return settings
def run_failing_batch(self):
patch = mock.patch.object(self.storage, "delete_all", side_effect=BackendError("boom"))
self.addCleanup(patch.stop)
patch.start()
request_create = {
"method": "POST",
"path": "/mushrooms",
"body": {"data": {"name": "Amanite"}},
}
request_delete = {"method": "DELETE", "path": "/mushrooms"}
body = {"requests": [request_create, request_create, request_delete]}
self.app.post_json("/batch", body, headers=self.headers, status=503)
def run_failing_post(self):
patch = mock.patch.object(
self.permission, "add_principal_to_ace", side_effect=BackendError("boom")
)
self.addCleanup(patch.stop)
patch.start()
self.app.post_json(
"/psilos", {"data": {"name": "Amanite"}}, headers=self.headers, status=503
)
|
11460147
|
from torch import nn
import torch.nn.functional as F
class BadNet(nn.Module):
def __init__(self, input_channels, output_num):
super().__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels=input_channels, out_channels=16, kernel_size=5, stride=1),
nn.ReLU(),
nn.AvgPool2d(kernel_size=2, stride=2)
)
self.conv2 = nn.Sequential(
nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1),
nn.ReLU(),
nn.AvgPool2d(kernel_size=2, stride=2)
)
fc1_input_features = 800 if input_channels == 3 else 512
self.fc1 = nn.Sequential(
nn.Linear(in_features=fc1_input_features, out_features=512),
nn.ReLU()
)
self.fc2 = nn.Sequential(
nn.Linear(in_features=512, out_features=output_num),
nn.Softmax(dim=-1)
)
self.dropout = nn.Dropout(p=.5)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = x.view(x.size(0), -1)
x = self.fc1(x)
x = self.fc2(x)
return x
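# A minimal sanity-check sketch (not part of the original module): forward a dummy CIFAR-sized
# batch through BadNet and confirm the output has one probability per class.
if __name__ == '__main__':
    import torch
    model = BadNet(input_channels=3, output_num=10)
    dummy = torch.zeros(2, 3, 32, 32)
    print(model(dummy).shape)  # expected: torch.Size([2, 10])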
|
11460158
|
import functools
import re
from .theplatform import ThePlatformBaseIE
from ..utils import (
ExtractorError,
GeoRestrictedError,
int_or_none,
OnDemandPagedList,
parse_qs,
try_get,
urljoin,
update_url_query,
)
class MediasetIE(ThePlatformBaseIE):
_TP_TLD = 'eu'
_VALID_URL = r'''(?x)
(?:
mediaset:|
https?://
(?:(?:www|static3)\.)?mediasetplay\.mediaset\.it/
(?:
(?:video|on-demand|movie)/(?:[^/]+/)+[^/]+_|
player/index\.html\?.*?\bprogramGuid=
)
)(?P<id>[0-9A-Z]{16,})
'''
_TESTS = [{
# full episode
'url': 'https://www.mediasetplay.mediaset.it/video/mrwronglezionidamore/episodio-1_F310575103000102',
'md5': 'a7e75c6384871f322adb781d3bd72c26',
'info_dict': {
'id': 'F310575103000102',
'ext': 'mp4',
'title': 'Episodio 1',
'description': 'md5:e8017b7d7194e9bfb75299c2b8d81e02',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 2682.0,
'upload_date': '20210530',
'series': 'Mr Wrong - Lezioni d\'amore',
'timestamp': 1622413946,
'uploader': 'Canale 5',
'uploader_id': 'C5',
'season': 'Season 1',
'episode': 'Episode 1',
'season_number': 1,
'episode_number': 1,
'chapters': [{'start_time': 0.0, 'end_time': 439.88}, {'start_time': 439.88, 'end_time': 1685.84}, {'start_time': 1685.84, 'end_time': 2682.0}],
},
}, {
'url': 'https://www.mediasetplay.mediaset.it/video/matrix/puntata-del-25-maggio_F309013801000501',
'md5': '1276f966ac423d16ba255ce867de073e',
'info_dict': {
'id': 'F309013801000501',
'ext': 'mp4',
'title': 'Puntata del 25 maggio',
'description': 'md5:ee2e456e3eb1dba5e814596655bb5296',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 6565.008,
'upload_date': '20200903',
'series': 'Matrix',
'timestamp': 1599172492,
'uploader': 'Canale 5',
'uploader_id': 'C5',
'season': 'Season 5',
'episode': 'Episode 5',
'season_number': 5,
'episode_number': 5,
'chapters': [{'start_time': 0.0, 'end_time': 3409.08}, {'start_time': 3409.08, 'end_time': 6565.008}],
},
}, {
'url': 'https://www.mediasetplay.mediaset.it/video/cameracafe5/episodio-69-pezzo-di-luna_F303843101017801',
'md5': 'd1650ac9ff944f185556126a736df148',
'info_dict': {
'id': 'F303843101017801',
'ext': 'mp4',
'title': 'Episodio 69 - Pezzo di luna',
'description': 'md5:7c32c8ec4118b72588b9412f11353f73',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 263.008,
'upload_date': '20200902',
'series': 'Camera Café 5',
'timestamp': 1599064700,
'uploader': 'Italia 1',
'uploader_id': 'I1',
'season': 'Season 5',
'episode': 'Episode 178',
'season_number': 5,
'episode_number': 178,
'chapters': [{'start_time': 0.0, 'end_time': 261.88}, {'start_time': 261.88, 'end_time': 263.008}],
},
}, {
'url': 'https://www.mediasetplay.mediaset.it/video/cameracafe5/episodio-51-tu-chi-sei_F303843107000601',
'md5': '567e9ad375b7a27a0e370650f572a1e3',
'info_dict': {
'id': 'F303843107000601',
'ext': 'mp4',
'title': 'Episodio 51 - Tu chi sei?',
'description': 'md5:42ef006e56824cc31787a547590923f4',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 367.021,
'upload_date': '20200902',
'series': 'Camera Café 5',
'timestamp': 1599069817,
'uploader': 'Italia 1',
'uploader_id': 'I1',
'season': 'Season 5',
'episode': 'Episode 6',
'season_number': 5,
'episode_number': 6,
'chapters': [{'start_time': 0.0, 'end_time': 358.68}, {'start_time': 358.68, 'end_time': 367.021}],
},
}, {
# movie
'url': 'https://www.mediasetplay.mediaset.it/movie/selvaggi/selvaggi_F006474501000101',
'md5': '720440187a2ae26af8148eb9e6b901ed',
'info_dict': {
'id': 'F006474501000101',
'ext': 'mp4',
'title': 'Selvaggi',
'description': 'md5:cfdedbbfdd12d4d0e5dcf1fa1b75284f',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 5233.01,
'upload_date': '20210729',
'timestamp': 1627594716,
'uploader': 'Cine34',
'uploader_id': 'B6',
'chapters': [{'start_time': 0.0, 'end_time': 1938.56}, {'start_time': 1938.56, 'end_time': 5233.01}],
},
}, {
# clip
'url': 'https://www.mediasetplay.mediaset.it/video/gogglebox/un-grande-classico-della-commedia-sexy_FAFU000000661680',
'only_matching': True,
}, {
# iframe simple
'url': 'https://static3.mediasetplay.mediaset.it/player/index.html?appKey=5ad3966b1de1c4000d5cec48&programGuid=FAFU000000665924&id=665924',
'only_matching': True,
}, {
# iframe twitter (from http://www.wittytv.it/se-prima-mi-fidavo-zero/)
'url': 'https://static3.mediasetplay.mediaset.it/player/index.html?appKey=5ad3966b1de1c4000d5cec48&programGuid=FAFU000000665104&id=665104',
'only_matching': True,
}, {
'url': 'mediaset:FAFU000000665924',
'only_matching': True,
}, {
'url': 'https://www.mediasetplay.mediaset.it/video/mediasethaacuoreilfuturo/palmieri-alicudi-lisola-dei-tre-bambini-felici--un-decreto-per-alicudi-e-tutte-le-microscuole_FD00000000102295',
'only_matching': True,
}, {
'url': 'https://www.mediasetplay.mediaset.it/video/cherryseason/anticipazioni-degli-episodi-del-23-ottobre_F306837101005C02',
'only_matching': True,
}, {
'url': 'https://www.mediasetplay.mediaset.it/video/tg5/ambiente-onda-umana-per-salvare-il-pianeta_F309453601079D01',
'only_matching': True,
}, {
'url': 'https://www.mediasetplay.mediaset.it/video/grandefratellovip/benedetta-una-doccia-gelata_F309344401044C135',
'only_matching': True,
}, {
'url': 'https://www.mediasetplay.mediaset.it/movie/herculeslaleggendahainizio/hercules-la-leggenda-ha-inizio_F305927501000102',
'only_matching': True,
}]
@staticmethod
def _extract_urls(ie, webpage):
def _qs(url):
return parse_qs(url)
def _program_guid(qs):
return qs.get('programGuid', [None])[0]
entries = []
for mobj in re.finditer(
r'<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//(?:www\.)?video\.mediaset\.it/player/playerIFrame(?:Twitter)?\.shtml.*?)\1',
webpage):
embed_url = mobj.group('url')
embed_qs = _qs(embed_url)
program_guid = _program_guid(embed_qs)
if program_guid:
entries.append(embed_url)
continue
video_id = embed_qs.get('id', [None])[0]
if not video_id:
continue
urlh = ie._request_webpage(
embed_url, video_id, note='Following embed URL redirect')
embed_url = urlh.geturl()
program_guid = _program_guid(_qs(embed_url))
if program_guid:
entries.append(embed_url)
return entries
def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
for video in smil.findall(self._xpath_ns('.//video', namespace)):
video.attrib['src'] = re.sub(r'(https?://vod05)t(-mediaset-it\.akamaized\.net/.+?.mpd)\?.+', r'\1\2', video.attrib['src'])
return super(MediasetIE, self)._parse_smil_formats(smil, smil_url, video_id, namespace, f4m_params, transform_rtmp_url)
def _check_drm_formats(self, tp_formats, video_id):
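        # if only SAMPLE-AES (DRM) HLS variants were found, try deriving and fetching a DRM-free manifest URL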
has_nondrm, drm_manifest = False, ''
for f in tp_formats:
if '_sampleaes/' in (f.get('manifest_url') or ''):
drm_manifest = drm_manifest or f['manifest_url']
f['has_drm'] = True
if not f.get('has_drm') and f.get('manifest_url'):
has_nondrm = True
nodrm_manifest = re.sub(r'_sampleaes/(\w+)_fp_', r'/\1_no_', drm_manifest)
if has_nondrm or nodrm_manifest == drm_manifest:
return
tp_formats.extend(self._extract_m3u8_formats(
nodrm_manifest, video_id, m3u8_id='hls', fatal=False) or [])
def _real_extract(self, url):
guid = self._match_id(url)
tp_path = 'PR1GhC/media/guid/2702976343/' + guid
info = self._extract_theplatform_metadata(tp_path, guid)
formats = []
subtitles = {}
first_e = geo_e = None
asset_type = 'geoNo:HD,browser,geoIT|geoNo:HD,geoIT|geoNo:SD,browser,geoIT|geoNo:SD,geoIT|geoNo|HD|SD'
# TODO: fixup ISM+none manifest URLs
for f in ('MPEG4', 'M3U'):
try:
tp_formats, tp_subtitles = self._extract_theplatform_smil(
update_url_query('http://link.theplatform.%s/s/%s' % (self._TP_TLD, tp_path), {
'mbr': 'true',
'formats': f,
'assetTypes': asset_type,
}), guid, 'Downloading %s SMIL data' % (f.split('+')[0]))
except ExtractorError as e:
if not geo_e and isinstance(e, GeoRestrictedError):
geo_e = e
if not first_e:
first_e = e
continue
self._check_drm_formats(tp_formats, guid)
formats.extend(tp_formats)
subtitles = self._merge_subtitles(subtitles, tp_subtitles)
# check for errors and report them
if (first_e or geo_e) and not formats:
raise geo_e or first_e
self._sort_formats(formats)
feed_data = self._download_json(
'https://feed.entertainment.tv.theplatform.eu/f/PR1GhC/mediaset-prod-all-programs-v2/guid/-/' + guid,
guid, fatal=False)
if feed_data:
publish_info = feed_data.get('mediasetprogram$publishInfo') or {}
thumbnails = feed_data.get('thumbnails') or {}
thumbnail = None
for key, value in thumbnails.items():
if key.startswith('image_keyframe_poster-'):
thumbnail = value.get('url')
break
info.update({
'description': info.get('description') or feed_data.get('description') or feed_data.get('longDescription'),
'uploader': publish_info.get('description'),
'uploader_id': publish_info.get('channel'),
'view_count': int_or_none(feed_data.get('mediasetprogram$numberOfViews')),
'thumbnail': thumbnail,
})
if feed_data.get('programType') == 'episode':
info.update({
'episode_number': int_or_none(
feed_data.get('tvSeasonEpisodeNumber')),
'season_number': int_or_none(
feed_data.get('tvSeasonNumber')),
'series': feed_data.get('mediasetprogram$brandTitle'),
})
info.update({
'id': guid,
'formats': formats,
'subtitles': subtitles,
})
return info
class MediasetShowIE(MediasetIE):
_VALID_URL = r'''(?x)
(?:
https?://
(?:(?:www|static3)\.)?mediasetplay\.mediaset\.it/
(?:
(?:fiction|programmi-tv|serie-tv|kids)/(?:.+?/)?
(?:[a-z-]+)_SE(?P<id>\d{12})
(?:,ST(?P<st>\d{12}))?
(?:,sb(?P<sb>\d{9}))?$
)
)
'''
_TESTS = [{
# TV Show webpage (general webpage)
'url': 'https://www.mediasetplay.mediaset.it/programmi-tv/leiene/leiene_SE000000000061',
'info_dict': {
'id': '000000000061',
'title': '<NAME>',
},
'playlist_mincount': 7,
}, {
# TV Show webpage (specific season)
'url': 'https://www.mediasetplay.mediaset.it/programmi-tv/leiene/leiene_SE000000000061,ST000000002763',
'info_dict': {
'id': '000000002763',
'title': 'Le Iene',
},
'playlist_mincount': 7,
}, {
# TV Show specific playlist (with multiple pages)
'url': 'https://www.mediasetplay.mediaset.it/programmi-tv/leiene/iservizi_SE000000000061,ST000000002763,sb100013375',
'info_dict': {
'id': '100013375',
'title': 'I servizi',
},
'playlist_mincount': 50,
}]
_BY_SUBBRAND = 'https://feed.entertainment.tv.theplatform.eu/f/PR1GhC/mediaset-prod-all-programs-v2?byCustomValue={subBrandId}{%s}&sort=:publishInfo_lastPublished|desc,tvSeasonEpisodeNumber|desc&range=%d-%d'
_PAGE_SIZE = 25
def _fetch_page(self, sb, page):
lower_limit = page * self._PAGE_SIZE + 1
upper_limit = lower_limit + self._PAGE_SIZE - 1
content = self._download_json(
self._BY_SUBBRAND % (sb, lower_limit, upper_limit), sb)
for entry in content.get('entries') or []:
yield self.url_result(
'mediaset:' + entry['guid'],
playlist_title=entry['mediasetprogram$subBrandDescription'])
def _real_extract(self, url):
playlist_id, st, sb = self._match_valid_url(url).group('id', 'st', 'sb')
if not sb:
page = self._download_webpage(url, st or playlist_id)
entries = [self.url_result(urljoin('https://www.mediasetplay.mediaset.it', url))
for url in re.findall(r'href="([^<>=]+SE\d{12},ST\d{12},sb\d{9})">[^<]+<', page)]
title = (self._html_search_regex(r'(?s)<h1[^>]*>(.+?)</h1>', page, 'title', default=None)
or self._og_search_title(page))
return self.playlist_result(entries, st or playlist_id, title)
entries = OnDemandPagedList(
functools.partial(self._fetch_page, sb),
self._PAGE_SIZE)
title = try_get(entries, lambda x: x[0]['playlist_title'])
return self.playlist_result(entries, sb, title)
|
11460160
|
import copy
import os
import unittest
import rdflib
from data_model_exporter.ttl_schema_generator import TtlSchemaGenerator
from data_model_exporter.property_types import PrimitiveType, RefType
class TtlSchemaGeneratorTestCase(unittest.TestCase):
def fixture_path(self, filename):
return os.path.join(os.path.dirname(__file__), 'fixtures', filename)
def setUp(self):
self.generator = TtlSchemaGenerator('Zombocom', self.fixture_path('test.ttl'))
self.generator.build_schema()
def test_ensure_property_initialized_is_idempotent_and_initializes_property(self):
zombo_node = rdflib.term.URIRef("https://zombo.com/zombo#possibilities")
self.generator.ensure_property_initialized(zombo_node)
self.assertIn("zombo:possibilities", self.generator.schema.properties)
first_execution = copy.deepcopy(self.generator.schema.properties['zombo:possibilities'])
self.generator.ensure_property_initialized(zombo_node)
self.assertEqual(first_execution, self.generator.schema.properties['zombo:possibilities'])
def test_annotates_schema_with_various_flat_fields(self):
self.assertEqual(self.generator.schema.description, "This is Zombocom.")
self.assertEqual(self.generator.schema.title, "ZomboLabel")
self.assertEqual(self.generator.schema.skos_preflabel, "ZomboPrefLabel")
def test_annotates_from_domain_fields(self):
self.assertIn('zombo:redundancy', self.generator.schema.properties)
this_is_zombocom = self.generator.schema.properties['zombo:redundancy']
self.assertEqual(this_is_zombocom.description, "Yes, this is Zombocom.")
def test_annotates_enumerated_ranges_on_property(self):
self.assertIn('zombo:greeting', self.generator.schema.properties)
enum_values = self.generator.schema.properties['zombo:greeting'].allowed_values
self.assertEqual(enum_values, [
"Welcome to Zombocom.",
"This is Zombocom."
])
def test_does_not_annotate_properties_on_other_classes(self):
self.assertNotIn('zombo:abilityToDoAnything', self.generator.schema.properties)
def test_annotates_one_cardinality_as_required(self):
self.assertIn('zombo:limit', self.generator.schema.properties)
the_only_limit = self.generator.schema.properties['zombo:limit']
self.assertTrue(the_only_limit.required)
def test_annotates_somevaluesfrom_as_array(self):
self.assertIn('zombo:redundancy', self.generator.schema.properties)
this_is_zombocom = self.generator.schema.properties['zombo:redundancy']
self.assertEqual(this_is_zombocom.allowed_types, [PrimitiveType('string')])
redundancy_dict = this_is_zombocom.as_dict()
self.assertEqual(redundancy_dict['type'], 'array')
self.assertEqual(redundancy_dict['items'], {'type': 'string'})
def test_annotates_allvaluesfrom_as_type(self):
self.assertIn('zombo:limit', self.generator.schema.properties)
the_only_limit = self.generator.schema.properties['zombo:limit']
self.assertEqual(the_only_limit.allowed_types, [RefType('zombo:Yourself')])
self.assertEqual(the_only_limit.as_dict()['$ref'], 'zombo:Yourself')
def test_annotates_cardinality_limits_as_array_type(self):
self.assertIn('zombo:possibilities', self.generator.schema.properties)
infinite_possibilities = self.generator.schema.properties['zombo:possibilities']
self.assertEqual(infinite_possibilities.min_cardinality, 1)
self.assertTrue(infinite_possibilities.is_array_type())
|