11595294
from mako.template import Template
import os
from importlib import import_module
import pathlib
import zipfile


# TODO: Replace with Python 3.9's importlib.resources.files() when it becomes min version
def files(package):
    spec = import_module(package).__spec__
    if spec.submodule_search_locations is None:
        raise TypeError("{!r} is not a package".format(package))
    package_directory = pathlib.Path(spec.origin).parent
    try:
        archive_path = spec.loader.archive
        rel_path = package_directory.relative_to(archive_path)
        return zipfile.Path(archive_path, str(rel_path) + "/")
    except Exception:
        pass
    return package_directory


def gen_robot_code(config):
    path = files(__name__).joinpath("templates")
    with open(os.path.join(path, "Robot.java.mako"), "r") as template:
        return Template(template.read()).render(**config)


def gen_build_gradle(team):
    path = files(__name__).joinpath("templates")
    with open(os.path.join(path, "build.gradle.mako"), "r") as template:
        return Template(template.read()).render(team=team)
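
# Hedged usage sketch (not part of the original module): the files() backport can be
# exercised with any installed package, and gen_build_gradle() only needs a team
# number; the package name and team value below are assumptions, and the Mako
# templates must exist next to this module for the render calls to succeed.
if __name__ == "__main__":
    print(files("email"))  # directory (or zipfile.Path) containing the stdlib email package
    # print(gen_build_gradle(team=1234))  # would render templates/build.gradle.mako with team=1234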
11595301
from pathlib import Path
from typing import Iterator


def get_paths(path: Path) -> Iterator[Path]:
    """Recursively yield Python files."""
    if not path.exists():
        raise FileNotFoundError(str(path))
    if path.is_file():
        if path.suffix == '.py':
            yield path
        return
    for subpath in path.iterdir():
        if subpath.name[0] == '.':
            continue
        if subpath.name == '__pycache__':
            continue
        yield from get_paths(subpath)
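
# Hedged usage sketch (not part of the original module): walk the current directory
# and print every .py file, skipping hidden entries and __pycache__ exactly as
# get_paths() does; the starting path is an assumption.
if __name__ == "__main__":
    for py_file in get_paths(Path(".")):
        print(py_file)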
11595340
from briefmetrics import api
from briefmetrics import test
from briefmetrics import model
from briefmetrics.lib.service import registry as service_registry
from briefmetrics.lib.payment import registry as payment_registry
from dateutil.relativedelta import relativedelta
import mock
import json
import logging
import datetime
from unittest import skip
from unstdlib import now
Session = model.Session
log = logging.getLogger(__name__)
RESPONSES = {
'webhook': '{"event_token": "foo"}',
'subscription_create': '{"type": "subscription_create", "event": {"returnURI": "https://example/v1/saas/saas/eventResponse/67e2b7f7dcad46cfa4e2013f224fcead", "id": "b17055ca229140309e0a54df7804fb85", "user": {"username": "testuser", "email": "<EMAIL>", "first_name": "bar", "last_name": "baz"}, "subscription_id": "308", "configuration": {}, "order": {"pricing_plan_sku": "starter-yr"}}}',
'subscription_alter': '{"type": "subscription_alter", "event": {"returnURI": "https://example/v1/saas/saas/eventResponse/75b060232a414c1084bd895e263a6bd5", "id": "a2a4e6a43fc74780b585413ea21780c1", "user": {"username": "testuser", "first_name": "bar", "last_name": "baz", "email": "<EMAIL>"}, "subscription_id": 357, "configuration": {}, "order": {"product_sku": null, "pricing_plan_sku": "%(pricing_plan_sku)s", "pricing_plan_id": "3", "product_id": 257}}}',
}
class FakeNamecheapAPI(object):
id = 'fake-namecheap'
resp = {
('GET', '/v1/saas/saas/event/fakecreate'): RESPONSES['subscription_create'],
('GET', '/v1/saas/saas/event/badevent'): '["wtf?"]',
('GET', '/v1/saas/saas/event/fakealter'): RESPONSES['subscription_alter'] % dict(pricing_plan_sku='agency-10-yr'),
('GET', '/v1/saas/saas/event/fakealter2'): RESPONSES['subscription_alter'] % dict(pricing_plan_sku='starter-yr'),
('POST', '/v1/billing/invoice'): '{"result": {"status": "open", "status_id": "1", "created_at": "2015-05-07T01:30:29.923Z", "amount_due": null, "subscription_id": 1206, "id": "123"}}',
('POST', '/v1/billing/invoice/123/line_items'): '{}',
('POST', '/v1/billing/invoice/123/payments'): '{"result": {"status": "success"}}',
}
session = mock.Mock()
config = {
'client_id': 'testing',
}
def __init__(self):
self.calls = []
def query(self, method=None, url=None):
r = self.calls
if method:
r = (c for c in r if c[0] == method)
if url:
r = (c for c in r if c[1] == url)
return list(r)
def request(self, method, url, *args, **kw):
log.info('FakeNamecheapAPI.request: %s %s (%s, %s)', method, url, args, kw)
self.calls.append((method, url, args, kw))
m = mock.Mock()
m.json.return_value = json.loads(self.resp[(method, url)])
return m
@skip("disabled namecheap")
@mock.patch('briefmetrics.lib.service.namecheap.NamecheapAPI.instance', FakeNamecheapAPI())
class TestNamecheap(test.TestWeb):
def test_connect_decode(self):
api.account.get_or_create(
email=u'<EMAIL>',
plan_id=u'starter-yr',
service=u'namecheap',
remote_id=u'shazow',
)
payload = '''id_token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJodHRwOi8vd3d3LnNhbmRib3gubmFtZWNoZWFwLmNvbS9hcHBzL3NzbyIsInN1YiI6InNoYXpvdyIsImF1ZCI6IjIzMUNCRkU4LUYxNkUtNEM3OS04MkU0LUEzREVDRTFGM0FFQyIsImV4cCI6MTQzMDY4MTY2MCwiaWF0IjoxNDMwNjc4MDYwLCJub25jZSI6IkVtQVJFUyIsImh0X2hhc2giOiJIMUFRSG10TktRSkxJeFpwQXlVQ0tRIn0.wPtPkJB1Y8AVe-u348qirxxjekz4olfXgyLjQx5VlLo&access_token=4a4422c8a3e3a95126d84131c6c1161f&token_type=Bearer&expires_in=3600&sid=s%3ao7GVoAsXfx8xAqe0X2BW6vQV5OrPNuOz.lRekL22%2fj673SlRu283JS9riFEDDx5cpoCXpb9cyou0'''
r = self.call_api('account.connect', service='namecheap', payload=payload)
self.assertEqual(r['result']['decoded'], {
'aud': u'231CBFE8-F16E-4C79-82E4-A3DECE1F3AEC',
'exp': 1430681660,
'ht_hash': u'H1AQHmtNKQJLIxZpAyUCKQ',
'iat': 1430678060,
'iss': u'http://www.sandbox.namecheap.com/apps/sso',
'nonce': u'EmARES',
'sub': u'shazow',
})
def test_webhook_provision(self):
self.assertIn('namecheap', service_registry)
self.assertIn('namecheap', payment_registry)
# Disable auto-charge
restore_auto_charge, payment_registry['namecheap'].auto_charge = payment_registry['namecheap'].auto_charge, False
with mock.patch('briefmetrics.api.email.send_message') as send_message:
resp = self.app.post('/webhook/namecheap', params='{"event_token": "<PASSWORD>"}', content_type='application/json')
r = resp.json
self.assertTrue(r['type'], 'subscription_create_resp')
self.assertTrue(r['response']['state'], 'Active')
self.assertTrue(send_message.called)
self.assertEqual(len(send_message.call_args_list), 1)
call = send_message.call_args_list[0]
message = call[0][1].params
self.assertIn(u"Welcome to Briefmetrics", message['subject'])
# Check that user was provisioned
users = model.User.all()
self.assertEqual(len(users), 1)
u = users[0]
self.assertEqual(u.email, 'foo@localhost')
self.assertEqual(u.display_name, 'bar baz')
self.assertEqual(u.plan_id, 'starter-yr')
self.assertEqual(u.time_next_payment, None)
self.assertEqual(u.payment.is_charging, False)
p = u.payment
self.assertEqual(p.id, 'namecheap')
self.assertEqual(p.token, '<PASSWORD>')
a = u.get_account(service='namecheap')
self.assertEqual(a.remote_id, 'testuser')
self.app.post('/webhook/namecheap', params='{"event_token": "<PASSWORD>"}', content_type='application/json')
users = model.User.all()
self.assertEqual(len(users), 1)
u = users[0]
self.assertEqual(u.plan_id, 'agency-10-yr')
self.assertEqual(u.time_next_payment, None)
u.time_next_payment = now()
self.assertEqual(u.payment.is_charging, True)
# Restore auto-charge
payment_registry['namecheap'].auto_charge = restore_auto_charge
def test_payment_collision(self):
u = api.account.get_or_create(
email=u'foo<EMAIL>',
plan_id=u'starter-yr',
service=u'namecheap',
remote_id=u'shazow',
)
u.set_payment('stripe', 'testtesttest')
Session.commit()
with mock.patch('briefmetrics.api.email.send_message') as send_message:
self.assertFalse(send_message.called)
resp = self.app.post('/webhook/namecheap', params='{"event_token": "fake<PASSWORD>"}', content_type='application/json')
r = resp.json
self.assertTrue(r['type'], 'subscription_create_resp')
self.assertTrue(r['response']['state'], 'Failed')
def test_prorate(self):
base_time = datetime.datetime(2000, 1, 1)
u = model.User()
u.set_payment('namecheap', 'test')
u.time_next_payment = base_time - relativedelta(months=1)
u.plan_id = 'starter-yr'
starter_plan = u.plan
self.assertEqual(u.payment.prorate(since_time=base_time), starter_plan.price)
u.time_next_payment = base_time + relativedelta(days=183)
self.assertEqual(u.payment.prorate(since_time=base_time), -starter_plan.price/2)
u.plan_id = 'agency-10-yr'
agency_plan = u.plan
self.assertEqual(u.payment.prorate(since_time=base_time, old_plan=starter_plan, new_plan=agency_plan), agency_plan.price-starter_plan.price/2)
self.assertEqual(u.payment.prorate(since_time=base_time, old_plan=agency_plan, new_plan=None), -(agency_plan.price/2))
def test_prorate_plan(self):
with mock.patch('briefmetrics.api.email.send_message') as send_message:
self.assertFalse(send_message.called)
resp = self.app.post('/webhook/namecheap', params='{"event_token": "<PASSWORD>"}', content_type='application/json')
self.assertTrue(resp.json['response']['state'], 'Active')
nc = service_registry['namecheap'].instance
# Next payment in the past
u = model.User.all()[0]
u.num_remaining = None
u.time_next_payment = now() - relativedelta(months=6)
Session.commit()
old_plan = u.plan
self.app.post('/webhook/namecheap', params='{"event_token": "<PASSWORD>"}', content_type='application/json')
new_plan = model.User.get(u.id).plan
payments = nc.query(url='/v1/billing/invoice/123/payments')
self.assertTrue(payments)
line_items = nc.query(url='/v1/billing/invoice/123/line_items')
print(line_items)
line_item = line_items[0]
params = line_item[-1]['json']
self.assertEqual(params['amount'], '%0.2f' % (new_plan.price/100.0))
# Next payment in the future
nc.calls[:] = [] # Reset call log
u = model.User.all()[0]
u.num_remaining = None
u.time_next_payment = now() + relativedelta(months=6)
Session.commit()
old_plan = u.plan
self.app.post('/webhook/namecheap', params='{"event_token": "<PASSWORD>"}', content_type='application/json')
new_plan = model.User.get(u.id).plan
payments = nc.query(url='/v1/billing/invoice/123/payments')
self.assertTrue(payments)
line_items = nc.query(url='/v1/billing/invoice/123/line_items')
print(line_items)
line_item = line_items[0]
params = line_item[-1]['json']
expected_amount = (new_plan.price - (old_plan.price / 2.0))/100.0
self.assertLess(expected_amount, 0)
delta = float(params['amount']) - expected_amount
self.assertTrue(abs(delta) < 2, '%s !~= %s' % (params['amount'], expected_amount)) # This will vary by year, so we only approximate
def test_webhook_fail(self):
with self.assertRaises(AttributeError):
self.app.post('/webhook/namecheap', params='["wtf"]', content_type='application/json')
11595353
from pprint import pprint
from visdom import Visdom
import pathlib
import json
import sys
import matplotlib.pyplot as plt
def download_env(env):
vis = Visdom('http://logserver.duckdns.org', port=5010)
data = vis.get_window_data(env=env)
d = json.loads(data)
n_deleted = []
test_acc_avg = []
for key in d:
try:
#1 for MR 0 for UMICH
x = list(d[key]["content"]["data"][1]["x"])
y = list(d[key]["content"]["data"][1]["y"])
if 'n-deleted' in key:
n_deleted = (x,y)
#1 for MR 0 for UMICH
x = list(d[key]["content"]["data"][1]["x"])
y = list(d[key]["content"]["data"][1]["y"])
if 'test-acc-avg' in key:
test_acc_avg = (x,y)
except:
pass
return n_deleted, test_acc_avg
if __name__ == "__main__":
source = [ "SS_bjornhox_11-07-18_14:22_UMICH_cnn_sim_0.08_28ef",
"SS_bjornhox_11-07-18_14:34_UMICH_cnn_sim_0.12_2366",
"SS_bjornhox_11-07-18_14:34_UMICH_cnn_sim_0.14_2f39"]
legend = ["0.08", "0.12", "0.14"]
path = './results/'
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
n_deleted = []
test_acc_avg = []
# for i in sys.argv[1:]:
# legend.append(i.split("_")[7])
# legend.append(i.split("_")[6])
# legend.append(i.split("_")[8])
for i in range(0, len(source)):
env = source[i]
res1, res2 = download_env(env)
n_deleted.append(res1)
test_acc_avg.append(res2)
plt.figure(1)
plt.axis([0,250,0,1100])
plt.subplot(111)
plt.xlabel("Amount of labeled data")
plt.ylabel("Number of deleted samples")
new_plot = []
for i in range(0,len(n_deleted)):
# print(test_acc_avg[i])
# print(n_deleted[i])
# # new = (test_acc_avg[i][0][0:8], n_deleted[i][1][0:8])
new = (test_acc_avg[i][0][0:15], n_deleted[i][1][0:15])
new[0].insert(0,0)
new[1].insert(0,0)
new_plot.append(new)
# print(new)
# print("---")
# quit()
plt.plot(*new_plot[0], dashes=[4, 2], color='#9467bd')
plt.plot(*new_plot[1], color='#1f77b4')
plt.plot(*new_plot[2], dashes=[6, 2], color='#17becf')
plt.legend(legend,
loc='center right')
plt.savefig('results/CNN_UMICH_N_DEL.png' , dpi=600)
plt.show()
11595383
from pyamf.remoting.gateway.wsgi import WSGIGateway


def echo(data):
    return data


services = {
    'echo': echo,
    # Add other exposed functions here
}

gateway = WSGIGateway(services)
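
# Hedged usage sketch (not part of the original module): WSGIGateway instances are
# WSGI applications, so for local testing the gateway can be served with the standard
# library's wsgiref; the host and port below are assumptions.
if __name__ == "__main__":
    from wsgiref.simple_server import make_server
    httpd = make_server("localhost", 8080, gateway)
    print("AMF gateway listening on http://localhost:8080")
    httpd.serve_forever()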
11595414
import numpy as np
import cv2
import matplotlib.image as mpimg


class CameraCalibration(object):
    """
    Prepares camera calibration pipeline based on a set of calibration images.
    """

    def __init__(self, calibration_images, pattern_size=(9, 6), retain_calibration_images=False):
        """
        Initialises camera calibration pipeline based on a set of calibration images.

        Parameters
        ----------
        calibration_images : Calibration images.
        pattern_size : Shape of the calibration pattern.
        retain_calibration_images : Flag indicating if we need to preserve calibration images.
        """
        self.camera_matrix = None
        self.dist_coefficients = None
        self.calibration_images_success = []
        self.calibration_images_error = []
        self.calculate_calibration(calibration_images, pattern_size, retain_calibration_images)

    def __call__(self, image):
        """
        Calibrates an image based on saved settings.

        Parameters
        ----------
        image : Image to calibrate.

        Returns
        -------
        Calibrated image.
        """
        if self.camera_matrix is not None and self.dist_coefficients is not None:
            return cv2.undistort(image, self.camera_matrix, self.dist_coefficients, None, self.camera_matrix)
        else:
            return image

    def calculate_calibration(self, images, pattern_size, retain_calibration_images):
        """
        Prepares calibration settings.

        Parameters
        ----------
        images : Set of calibration images.
        pattern_size : Calibration pattern shape.
        retain_calibration_images : Flag indicating if we need to preserve calibration images.
        """
        # Prepare object points: (0,0,0), (1,0,0), (2,0,0), ...
        pattern = np.zeros((pattern_size[1] * pattern_size[0], 3), np.float32)
        pattern[:, :2] = np.mgrid[0:pattern_size[0], 0:pattern_size[1]].T.reshape(-1, 2)
        pattern_points = []  # 3d points in real world space
        image_points = []  # 2d points in image plane.
        image_size = None

        # Step through the list and search for chessboard corners
        for i, path in enumerate(images):
            image = mpimg.imread(path)
            gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
            # Find the chessboard corners
            found, corners = cv2.findChessboardCorners(gray, pattern_size, None)
            # If found, add object points and image points
            if found:
                pattern_points.append(pattern)
                image_points.append(corners)
                image_size = (image.shape[1], image.shape[0])
                if retain_calibration_images:
                    cv2.drawChessboardCorners(image, pattern_size, corners, True)
                    self.calibration_images_success.append(image)
            else:
                if retain_calibration_images:
                    self.calibration_images_error.append(image)

        if pattern_points and image_points:
            _, self.camera_matrix, self.dist_coefficients, _, _ = cv2.calibrateCamera(
                pattern_points, image_points, image_size, None, None
            )
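
# Hedged usage sketch (not part of the original module): calibrate once from a set of
# chessboard images, then undistort frames by calling the instance; the glob pattern
# and test image path are assumptions.
if __name__ == "__main__":
    import glob
    calibration = CameraCalibration(glob.glob("camera_cal/calibration*.jpg"))
    frame = mpimg.imread("test_images/test1.jpg")
    undistorted = calibration(frame)  # returns the input unchanged if calibration failed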
11595432
import sys
sys.path.append("../../")
from appJar import gui


def change(): print("changed")


def press(btn):
    if btn == "clear": app.clearListBox("list", callFunction=app.check("call"))
    elif btn == "select":
        app.selectListItemAtPos("list", -5, callFunction=app.check("call"))
        app.selectListItemAtPos("list", 0, callFunction=app.check("call"))
        app.selectListItemAtPos("list", 40, callFunction=app.check("call"))
        app.selectListItemAtPos("list", 4, callFunction=app.check("call"))
        app.selectListItemAtPos("list", 3, callFunction=app.check("call"))
        app.selectListItem("list", "a", callFunction=app.check("call"))
        app.selectListItem("list", ["a", "b"], callFunction=app.check("call"))
        app.selectListItem("list", ["a", "f"], callFunction=app.check("call"))
    else: app.updateListBox("list", ["d", "e", "f", "g"], callFunction=app.check("call"))


with gui() as app:
    app.label('hello world')
    app.listbox("list", ["a", "b", "c", "d"], change=change)
    app.check("call")
    app.buttons(["select", "clear", "update"], press)
11595454
import json
import os
import sys
import logging
import time
from azure.mgmt.rdbms.mysql import MySQLManagementClient
from azure.identity import ClientSecretCredential
from azure.mgmt.rdbms.mysql.models import ServerUpdateParameters
logging.basicConfig(level=logging.INFO)
class EnableSslEnforcement(object):
def parse(self, payload):
"""Parse payload received from Remediation Service.
:param payload: JSON string containing parameters received from the remediation service.
:type payload: str.
:returns: Dictionary of parsed parameters
:rtype: dict
:raises: KeyError, JSONDecodeError
"""
remediation_entry = json.loads(payload)
notification_info = remediation_entry.get("notificationInfo", None)
finding_info = notification_info.get("FindingInfo", None)
object_id = finding_info.get("ObjectId", None)
object_chain = remediation_entry["notificationInfo"]["FindingInfo"][
"ObjectChain"
]
object_chain_dict = json.loads(object_chain)
subscription_id = object_chain_dict["cloudAccountId"]
region = finding_info.get("Region")
properties = object_chain_dict["properties"]
logging.info(f"cloud_account_id: {subscription_id}")
logging.info(f"region: {region}")
if object_id is None:
raise Exception(
"Missing parameters for 'payload.notificationInfo.ObjectId'."
)
resource_group_name = ""
for property in properties:
if property["name"] == "ResourceGroup" and property["type"] == "string":
resource_group_name = property["stringV"]
break
logging.info("parsed params")
logging.info(f" resource_group_name: {resource_group_name}")
logging.info(f" mysql_server_name: {object_id}")
logging.info(f" subscription_id: {subscription_id}")
logging.info(f" region: {region}")
return {
"resource_group_name": resource_group_name,
"mysql_server_name": object_id,
"subscription_id": subscription_id,
"region": region,
}
def remediate(self, client, resource_group_name, mysql_server_name):
"""Enable Enforce SSL connection for MySQL Server
:param client: Instance of the Azure MySQLManagementClient.
:param resource_group_name: The name of the resource group.
:param mysql_server_name: The name of the MySQL Server.
:type resource_group_name: str.
:type mysql_server_name: str.
:returns: Integer signaling success or failure
:rtype: int
:raises: msrestazure.azure_exceptions.CloudError
"""
logging.info("Enabling Enforce SSL connection for MySQL Server")
try:
logging.info(" executing client.servers.begin_update")
logging.info(f" resource_group_name={resource_group_name}")
logging.info(f" server_name={mysql_server_name}")
poller = client.servers.begin_update(
resource_group_name=resource_group_name,
server_name=mysql_server_name,
parameters=ServerUpdateParameters(ssl_enforcement="Enabled"),
)
while not poller.done():
time.sleep(5)
status = poller.status()
logging.info(f"The remediation job status: {status}")
poller.result()
except Exception as e:
logging.error(f"{str(e)}")
raise
return 0
def run(self, args):
"""Run the remediation job.
:param args: List of arguments provided to the job.
:type args: list.
:returns: int
"""
params = self.parse(args[1])
credential = ClientSecretCredential(
client_id=os.environ.get("AZURE_CLIENT_ID"),
client_secret=os.environ.get("AZURE_CLIENT_SECRET"),
tenant_id=os.environ.get("AZURE_TENANT_ID"),
)
client = MySQLManagementClient(credential, params["subscription_id"])
return self.remediate(
client, params["resource_group_name"], params["mysql_server_name"],
)
if __name__ == "__main__":
sys.exit(EnableSslEnforcement().run(sys.argv))
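
# Hedged usage sketch (not part of the original module, kept as comments): a minimal
# payload shaped the way parse() expects; every identifier below is a made-up placeholder.
#
# sample_payload = json.dumps({
#     "notificationInfo": {
#         "FindingInfo": {
#             "ObjectId": "example-mysql-server",
#             "Region": "eastus",
#             "ObjectChain": json.dumps({
#                 "cloudAccountId": "00000000-0000-0000-0000-000000000000",
#                 "properties": [
#                     {"name": "ResourceGroup", "type": "string", "stringV": "example-rg"}
#                 ],
#             }),
#         }
#     }
# })
# params = EnableSslEnforcement().parse(sample_payload)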
11595460
import time
from contextlib import contextmanager
from datetime import datetime
from typing import Dict
from typing import Iterable
from typing import List
from typing import Tuple
from typing import Union
from urllib.parse import urljoin
import lib.core as constance
import requests.packages.urllib3
from lib.core.exceptions import AppException
from lib.core.service_types import DownloadMLModelAuthData
from lib.core.service_types import ServiceResponse
from lib.core.service_types import UploadAnnotationAuthData
from lib.core.service_types import UserLimits
from lib.core.serviceproviders import SuerannotateServiceProvider
from lib.infrastructure.helpers import timed_lru_cache
from requests.exceptions import HTTPError
requests.packages.urllib3.disable_warnings()
class BaseBackendService(SuerannotateServiceProvider):
AUTH_TYPE = "sdk"
PAGINATE_BY = 100
LIMIT = 100
"""
Base service class
"""
def __init__(
self, api_url: str, auth_token: str, logger, paginate_by=None, verify_ssl=True
):
self.api_url = api_url
self._auth_token = auth_token
self.logger = logger
self._paginate_by = paginate_by
self._verify_ssl = verify_ssl
self.team_id = auth_token.split("=")[-1]
self.get_session()
@timed_lru_cache(seconds=360)
def get_session(self):
session = requests.Session()
session.headers.update(self.default_headers)
return session
@property
def default_headers(self):
return {
"Authorization": self._auth_token,
"authtype": self.AUTH_TYPE,
"Content-Type": "application/json",
# "User-Agent": constance.__version__,
}
@property
def safe_api(self):
"""
Context manager which will handle requests calls.
"""
@contextmanager
def safe_api():
"""
Context manager which handles Requests error.
"""
try:
yield None
except (HTTPError, ConnectionError) as exc:
raise AppException(f"Unknown exception: {exc}.")
return safe_api
@property
def paginate_by(self):
if self._paginate_by:
return self._paginate_by
else:
return self.PAGINATE_BY
def _request(
self,
url,
method="get",
data=None,
headers=None,
params=None,
retried=0,
content_type=None,
) -> Union[requests.Response, ServiceResponse]:
kwargs = {"json": data} if data else {}
session = self.get_session()
session.headers.update(headers if headers else {})
with self.safe_api():
req = requests.Request(method=method, url=url, **kwargs, params=params)
prepared = session.prepare_request(req)
response = session.send(request=prepared, verify=self._verify_ssl)
if response.status_code == 404 and retried < 3:
return self._request(
url,
method="get",
data=None,
headers=None,
params=None,
retried=retried + 1,
content_type=content_type,
)
if response.status_code > 299:
import traceback
traceback.print_stack()
self.logger.error(
f"Got {response.status_code} response from backend: {response.text}"
)
if content_type:
return ServiceResponse(response, content_type)
return response
def _get_page(self, url, offset, params=None, key_field: str = None):
splitter = "&" if "?" in url else "?"
url = f"{url}{splitter}offset={offset}"
response = self._request(url, params=params)
if response.status_code != 200:
return {"data": []}, 0
# raise AppException(f"Got invalid response for url {url}: {response.text}.")
data = response.json()
if data:
if isinstance(data, dict):
if key_field:
data = data[key_field]
if data.get("count", 0) < self.LIMIT:
return data, 0
else:
return data, data.get("count", 0) - offset
if isinstance(data, list):
return {"data": data}, 0
return {"data": []}, 0
def _get_all_pages(self, url, offset=0, params=None, key_field: str = None):
total = list()
while True:
resources, remains_count = self._get_page(url, offset, params, key_field)
total.extend(resources["data"])
if remains_count <= 0:
break
offset += len(resources["data"])
return total
class SuperannotateBackendService(BaseBackendService):
"""
Manage projects, images and team in the Superannotate
"""
URL_USERS = "users"
URL_LIST_PROJECTS = "projects"
URL_FOLDERS_IMAGES = "images-folders"
URL_CREATE_PROJECT = "project"
URL_GET_PROJECT = "project/{}"
URL_GET_FOLDER_BY_NAME = "folder/getFolderByName"
URL_CREATE_FOLDER = "folder"
URL_UPDATE_FOLDER = "folder/{}"
URL_GET_IMAGE = "image/{}"
URL_GET_IMAGES = "images"
URL_BULK_GET_IMAGES = "images/getBulk"
URL_DELETE_FOLDERS = "image/delete/images"
URL_CREATE_IMAGE = "image/ext-create"
URL_PROJECT_SETTINGS = "project/{}/settings"
URL_PROJECT_WORKFLOW = "project/{}/workflow"
URL_SHARE_PROJECT = "project/{}/share"
URL_SHARE_PROJECT_BULK = "project/{}/share/bulk"
URL_ANNOTATION_CLASSES = "classes"
URL_TEAM = "team"
URL_INVITE_CONTRIBUTOR = "team/{}/invite"
URL_PREPARE_EXPORT = "export"
URL_COPY_IMAGES_FROM_FOLDER = "images/copy-image-or-folders"
URL_MOVE_IMAGES_FROM_FOLDER = "image/move"
URL_GET_COPY_PROGRESS = "images/copy-image-progress"
URL_ASSIGN_IMAGES = "images/editAssignment/"
URL_ASSIGN_FOLDER = "folder/editAssignment"
URL_S3_ACCESS_POINT = "/project/{}/get-image-s3-access-point"
URL_S3_UPLOAD_STATUS = "/project/{}/getS3UploadStatus"
URL_GET_EXPORTS = "exports"
URL_GET_CLASS = "class/{}"
URL_ANNOTATION_UPLOAD_PATH_TOKEN = "images/getAnnotationsPathsAndTokens"
URL_PRE_ANNOTATION_UPLOAD_PATH_TOKEN = "images/getPreAnnotationsPathsAndTokens"
URL_GET_TEMPLATES = "templates"
URL_PROJECT_WORKFLOW_ATTRIBUTE = "project/{}/workflow_attribute"
URL_MODELS = "ml_models"
URL_MODEL = "ml_model"
URL_GET_MODEL_METRICS = "ml_models/{}/getCurrentMetrics"
URL_BULK_GET_FOLDERS = "foldersByTeam"
URL_GET_EXPORT = "export/{}"
URL_GET_ML_MODEL_DOWNLOAD_TOKEN = "ml_model/getMyModelDownloadToken/{}"
URL_SEGMENTATION = "images/segmentation"
URL_PREDICTION = "images/prediction"
URL_SET_IMAGES_STATUSES_BULK = "image/updateAnnotationStatusBulk"
URL_DELETE_ANNOTATIONS = "annotations/remove"
URL_DELETE_ANNOTATIONS_PROGRESS = "annotations/getRemoveStatus"
URL_GET_LIMITS = "project/{}/limitationDetails"
def get_project(self, uuid: int, team_id: int):
get_project_url = urljoin(self.api_url, self.URL_GET_PROJECT.format(uuid))
res = self._request(get_project_url, "get", params={"team_id": team_id})
return res.json()
def get_s3_upload_auth_token(self, team_id: int, folder_id: int, project_id: int):
auth_token_url = urljoin(
self.api_url,
self.URL_GET_PROJECT.format(project_id) + "/sdkImageUploadToken",
)
response = self._request(
auth_token_url, "get", params={"team_id": team_id, "folder_id": folder_id}
)
return response.json()
def get_download_token(
self,
project_id: int,
team_id: int,
folder_id: int,
image_id: int,
include_original: int = 1,
):
download_token_url = urljoin(
self.api_url,
self.URL_GET_IMAGE.format(image_id)
+ "/annotation/getAnnotationDownloadToken",
)
response = self._request(
download_token_url,
"get",
params={
"project_id": project_id,
"team_id": team_id,
"folder_id": folder_id,
"include_original": include_original,
},
)
return response.json()
def get_upload_token(
self, project_id: int, team_id: int, folder_id: int, image_id: int,
):
download_token_url = urljoin(
self.api_url,
self.URL_GET_IMAGE.format(image_id)
+ "/annotation/getAnnotationUploadToken",
)
response = self._request(
download_token_url,
"get",
params={
"project_id": project_id,
"team_id": team_id,
"folder_id": folder_id,
},
)
return response.json()
def get_projects(self, query_string: str = None) -> list:
url = urljoin(self.api_url, self.URL_LIST_PROJECTS)
if query_string:
url = f"{url}?{query_string}"
data = self._get_all_pages(url)
return data
def create_project(self, project_data: dict) -> dict:
create_project_url = urljoin(self.api_url, self.URL_CREATE_PROJECT)
res = self._request(create_project_url, "post", project_data)
return res.json()
def delete_project(self, uuid: int, query_string: str = None) -> bool:
url = urljoin(self.api_url, self.URL_GET_PROJECT.format(uuid))
if query_string:
url = f"{url}?{query_string}"
res = self._request(url, "delete")
return res.ok
def update_project(self, data: dict, query_string: str = None) -> bool:
url = urljoin(self.api_url, self.URL_GET_PROJECT.format(data["id"]))
if query_string:
url = f"{url}?{query_string}"
res = self._request(url, "put", data)
return res.json()
def attach_files(
self,
project_id: int,
folder_id: int,
team_id: int,
files: List[Dict],
annotation_status_code,
upload_state_code,
meta,
):
data = {
"project_id": project_id,
"folder_id": folder_id,
"team_id": team_id,
"images": files,
"annotation_status": annotation_status_code,
"upload_state": upload_state_code,
"meta": meta,
}
create_image_url = urljoin(self.api_url, self.URL_CREATE_IMAGE)
response = self._request(create_image_url, "post", data)
return response.json()
def get_folder(self, query_string: str):
get_folder_url = urljoin(self.api_url, self.URL_GET_FOLDER_BY_NAME)
if query_string:
get_folder_url = f"{get_folder_url}?{query_string}"
response = self._request(get_folder_url, "get")
if response.ok:
return response.json()
def get_folders(self, query_string: str = None, params: dict = None):
get_folder_url = urljoin(self.api_url, self.URL_FOLDERS_IMAGES)
if query_string:
get_folder_url = f"{get_folder_url}?{query_string}"
return self._get_all_pages(get_folder_url, params=params, key_field="folders")
def delete_folders(self, project_id: int, team_id: int, folder_ids: List[int]):
delete_folders_url = urljoin(self.api_url, self.URL_DELETE_FOLDERS)
params = {"team_id": team_id, "project_id": project_id}
response = self._request(
delete_folders_url, "put", params=params, data={"folder_ids": folder_ids}
)
return response.ok
def create_folder(self, project_id: int, team_id: int, folder_name: str):
create_folder_url = urljoin(self.api_url, self.URL_CREATE_FOLDER)
data = {"name": folder_name}
params = {"project_id": project_id, "team_id": team_id}
res = self._request(create_folder_url, "post", data=data, params=params)
return res.json()
def update_folder(self, project_id: int, team_id: int, folder_data: dict):
update_folder_url = urljoin(
self.api_url, self.URL_UPDATE_FOLDER.format(folder_data["id"])
)
params = {"project_id": project_id, "team_id": team_id}
res = self._request(update_folder_url, "put", data=folder_data, params=params)
if res.ok:
return res.json()
def get_project_settings(self, project_id: int, team_id: int):
get_settings_url = urljoin(
self.api_url, self.URL_PROJECT_SETTINGS.format(project_id)
)
res = self._request(get_settings_url, "get", params={"team_id": team_id})
return res.json()
def set_project_settings(self, project_id: int, team_id: int, data: List):
set_project_settings_url = urljoin(
self.api_url, self.URL_PROJECT_SETTINGS.format(project_id)
)
res = self._request(
set_project_settings_url,
"put",
data={"settings": data},
params={"team_id": team_id},
)
return res.json()
def get_annotation_classes(
self, project_id: int, team_id: int, query_string: str = None
):
get_annotation_classes_url = urljoin(self.api_url, self.URL_ANNOTATION_CLASSES)
if query_string:
get_annotation_classes_url = f"{get_annotation_classes_url}?{query_string}"
params = {"project_id": project_id, "team_id": team_id}
return self._get_all_pages(get_annotation_classes_url, params=params)
def set_annotation_classes(self, project_id: int, team_id: int, data: List):
set_annotation_class_url = urljoin(self.api_url, self.URL_ANNOTATION_CLASSES)
params = {
"team_id": team_id,
"project_id": project_id,
}
res = self._request(
set_annotation_class_url, "post", params=params, data={"classes": data}
)
return res.json()
def get_project_workflows(self, project_id: int, team_id: int):
get_project_workflow_url = urljoin(
self.api_url, self.URL_PROJECT_WORKFLOW.format(project_id)
)
return self._get_all_pages(
get_project_workflow_url, params={"team_id": team_id}
)
def set_project_workflow(self, project_id: int, team_id: int, data: Dict):
set_project_workflow_url = urljoin(
self.api_url, self.URL_PROJECT_WORKFLOW.format(project_id)
)
res = self._request(
set_project_workflow_url,
"post",
data={"steps": [data]},
params={"team_id": team_id},
)
return res.json()
def set_project_workflow_bulk(self, project_id: int, team_id: int, steps: list):
set_project_workflow_url = urljoin(
self.api_url, self.URL_PROJECT_WORKFLOW.format(project_id)
)
res = self._request(
set_project_workflow_url,
"post",
data={"steps": steps},
params={"team_id": team_id},
)
return res.json()
def share_project_bulk(self, project_id: int, team_id: int, users: list):
share_project_url = urljoin(
self.api_url, self.URL_SHARE_PROJECT_BULK.format(project_id)
)
res = self._request(
share_project_url,
"post",
data={"users": users},
params={"team_id": team_id},
)
return res.json()
def search_team_contributors(self, team_id: int, query_string: str = None):
list_users_url = urljoin(self.api_url, self.URL_USERS)
if query_string:
list_users_url = f"{list_users_url}?{query_string}"
params = {"team_id": team_id}
return self._get_all_pages(list_users_url, params=params)
def un_share_project(self, project_id: int, team_id: int, user_id: int):
users_url = urljoin(self.api_url, self.URL_SHARE_PROJECT.format(project_id))
res = self._request(
users_url, "delete", data={"user_id": user_id}, params={"team_id": team_id}
)
return res.ok
def get_images(self, query_string: str = None):
url = urljoin(self.api_url, self.URL_FOLDERS_IMAGES)
if query_string:
url = f"{url}?{query_string}"
pages = self._get_all_pages(url, key_field="images")
return [image for image in pages]
def list_images(self, query_string):
url = urljoin(self.api_url, self.URL_GET_IMAGES)
if query_string:
url = f"{url}?{query_string}"
return self._get_all_pages(url)
def prepare_export(
self,
project_id: int,
team_id: int,
folders: List[str],
annotation_statuses: Iterable[str],
include_fuse: bool,
only_pinned: bool,
):
prepare_export_url = urljoin(self.api_url, self.URL_PREPARE_EXPORT)
annotation_statuses = ",".join(
[str(constance.AnnotationStatus.get_value(i)) for i in annotation_statuses]
)
data = {
"include": annotation_statuses,
"fuse": int(include_fuse),
"is_pinned": int(only_pinned),
"coco": 0,
"time": datetime.now().strftime("%b %d %Y %H:%M"),
}
if folders:
data["folder_names"] = folders
res = self._request(
prepare_export_url,
"post",
data=data,
params={"project_id": project_id, "team_id": team_id},
)
return res.json()
def get_team(self, team_id: int):
get_team_url = urljoin(self.api_url, f"{self.URL_TEAM}/{team_id}")
res = self._request(get_team_url, "get")
return res.json()
def invite_contributor(self, team_id: int, email: str, user_role: str) -> bool:
invite_contributor_url = urljoin(
self.api_url, self.URL_INVITE_CONTRIBUTOR.format(team_id)
)
res = self._request(
invite_contributor_url,
"post",
data={"email": email, "user_role": user_role},
)
return res.ok
def delete_team_invitation(self, team_id: int, token: str, email: str) -> bool:
invite_contributor_url = urljoin(
self.api_url, self.URL_INVITE_CONTRIBUTOR.format(team_id)
)
res = self._request(
invite_contributor_url, "delete", data={"token": token, "e_mail": email}
)
return res.ok
def update_image(self, image_id: int, team_id: int, project_id: int, data: dict):
update_image_url = urljoin(self.api_url, self.URL_GET_IMAGE.format(image_id))
res = self._request(
update_image_url,
"put",
data=data,
params={"team_id": team_id, "project_id": project_id},
)
return res.ok
def copy_images_between_folders_transaction(
self,
team_id: int,
project_id: int,
from_folder_id: int,
to_folder_id: int,
images: List[str],
include_annotations: bool = False,
include_pin: bool = False,
) -> int:
"""
Returns poll id.
"""
copy_images_url = urljoin(self.api_url, self.URL_COPY_IMAGES_FROM_FOLDER)
res = self._request(
copy_images_url,
"post",
params={"team_id": team_id, "project_id": project_id},
data={
"is_folder_copy": False,
"image_names": images,
"destination_folder_id": to_folder_id,
"source_folder_id": from_folder_id,
"include_annotations": include_annotations,
"keep_pin_status": include_pin,
},
)
if res.ok:
return res.json()["poll_id"]
def move_images_between_folders(
self,
team_id: int,
project_id: int,
from_folder_id: int,
to_folder_id: int,
images: List[str],
) -> List[str]:
move_images_url = urljoin(self.api_url, self.URL_MOVE_IMAGES_FROM_FOLDER)
res = self._request(
move_images_url,
"post",
params={"team_id": team_id, "project_id": project_id},
data={
"image_names": images,
"destination_folder_id": to_folder_id,
"source_folder_id": from_folder_id,
},
)
if res.ok:
return res.json()["done"]
return []
def get_progress(
self, project_id: int, team_id: int, poll_id: int
) -> Tuple[int, int]:
get_progress_url = urljoin(self.api_url, self.URL_GET_COPY_PROGRESS)
res = self._request(
get_progress_url,
"get",
params={"team_id": team_id, "project_id": project_id, "poll_id": poll_id},
).json()
return res["done"], res["skipped"]
def get_duplicated_images(
self, project_id: int, team_id: int, folder_id: int, images: List[str]
) -> List[str]:
get_duplications_url = urljoin(self.api_url, self.URL_BULK_GET_IMAGES)
res = self._request(
get_duplications_url,
"post",
data={
"project_id": project_id,
"team_id": team_id,
"folder_id": folder_id,
"names": images,
},
)
return res.json()
def delete_image(self, image_id, team_id: int, project_id: int):
delete_image_url = urljoin(self.api_url, self.URL_GET_IMAGE.format(image_id))
res = self._request(
delete_image_url,
"delete",
params={"team_id": team_id, "project_id": project_id},
)
return res.ok
def set_images_statuses_bulk(
self,
image_names: list,
team_id: int,
project_id: int,
folder_id: int,
annotation_status: int,
):
set_images_statuses_bulk_url = urljoin(
self.api_url, self.URL_SET_IMAGES_STATUSES_BULK
)
res = self._request(
set_images_statuses_bulk_url,
"put",
params={"team_id": team_id, "project_id": project_id},
data={
"folder_id": folder_id,
"annotation_status": annotation_status,
"image_names": image_names,
},
)
return res.ok
def get_bulk_images(
self, project_id: int, team_id: int, folder_id: int, images: List[str]
) -> List[dict]:
bulk_get_images_url = urljoin(self.api_url, self.URL_BULK_GET_IMAGES)
time.sleep(1)
res = self._request(
bulk_get_images_url,
"post",
data={
"project_id": project_id,
"team_id": team_id,
"folder_id": folder_id,
"names": images,
},
)
return res.json()
def delete_images(self, project_id: int, team_id: int, image_ids: List[int]):
delete_images_url = urljoin(self.api_url, self.URL_DELETE_FOLDERS)
res = self._request(
delete_images_url,
"put",
params={"team_id": team_id, "project_id": project_id},
data={"image_ids": image_ids},
)
return res.json()
def assign_images(
self,
team_id: int,
project_id: int,
folder_name: str,
user: str,
image_names: list,
):
assign_images_url = urljoin(self.api_url, self.URL_ASSIGN_IMAGES)
res = self._request(
assign_images_url,
"put",
params={"team_id": team_id, "project_id": project_id},
data={
"image_names": image_names,
"assign_user_id": user,
"folder_name": folder_name,
},
)
return res.ok
def un_assign_images(
self, team_id: int, project_id: int, folder_name: str, image_names: List[str],
):
un_assign_images_url = urljoin(self.api_url, self.URL_ASSIGN_IMAGES)
res = self._request(
un_assign_images_url,
"put",
params={"team_id": team_id, "project_id": project_id},
data={
"image_names": image_names,
"remove_user_ids": ["all"],
"folder_name": folder_name,
},
)
return res.ok
def un_assign_folder(
self, team_id: int, project_id: int, folder_name: str,
):
un_assign_folder_url = urljoin(self.api_url, self.URL_ASSIGN_FOLDER)
res = self._request(
un_assign_folder_url,
"post",
params={"team_id": team_id, "project_id": project_id},
data={"folder_name": folder_name, "remove_user_ids": ["all"]},
)
return res.ok
def assign_folder(
self, team_id: int, project_id: int, folder_name: str, users: list
):
assign_folder_url = urljoin(self.api_url, self.URL_ASSIGN_FOLDER)
res = self._request(
assign_folder_url,
"post",
params={"team_id": team_id, "project_id": project_id},
data={"folder_name": folder_name, "assign_user_ids": users},
)
return res.ok
def get_exports(self, team_id: int, project_id: int):
exports_url = urljoin(self.api_url, self.URL_GET_EXPORTS)
res = self._request(
exports_url, "get", params={"team_id": team_id, "project_id": project_id}
)
return res.json()
def get_export(self, team_id: int, project_id: int, export_id: int):
exports_url = urljoin(self.api_url, self.URL_GET_EXPORT.format(export_id))
res = self._request(
exports_url, "get", params={"team_id": team_id, "project_id": project_id}
)
return res.json()
def upload_form_s3(
self,
project_id: int,
team_id: int,
access_key: str,
secret_key: str,
bucket_name: str,
from_folder_name: str,
to_folder_id: int,
):
upload_from_s3_url = urljoin(
self.api_url, self.URL_S3_ACCESS_POINT.format(project_id)
)
response = self._request(
upload_from_s3_url,
"post",
params={"team_id": team_id},
data={
"accessKeyID": access_key,
"secretAccessKey": secret_key,
"bucketName": bucket_name,
"folderName": from_folder_name,
"folder_id": to_folder_id,
},
)
return response
def get_upload_status(self, project_id: int, team_id: int, folder_id: int):
get_upload_status_url = urljoin(
self.api_url, self.URL_S3_UPLOAD_STATUS.format(project_id)
)
res = self._request(
get_upload_status_url,
"get",
params={"team_id": team_id, "folder_id": folder_id},
)
return res.json().get("progress")
def get_project_images_count(self, team_id: int, project_id: int):
get_images_count_url = urljoin(self.api_url, self.URL_FOLDERS_IMAGES)
res = self._request(
get_images_count_url,
"get",
params={"team_id": team_id, "project_id": project_id},
)
return res.json()
def delete_annotation_class(
self, team_id: int, project_id: int, annotation_class_id: int
):
delete_image_url = urljoin(
self.api_url, self.URL_GET_CLASS.format(annotation_class_id)
)
res = self._request(
delete_image_url,
"delete",
params={"team_id": team_id, "project_id": project_id},
)
return res.json()
def set_project_workflow_attributes_bulk(
self, project_id: int, team_id: int, attributes: list
):
set_project_workflow_attribute_url = urljoin(
self.api_url, self.URL_PROJECT_WORKFLOW_ATTRIBUTE.format(project_id)
)
res = self._request(
set_project_workflow_attribute_url,
"post",
data={"data": attributes},
params={"team_id": team_id},
)
return res.json()
def get_annotation_upload_data(
self, project_id: int, team_id: int, image_ids: List[int], folder_id: int
):
get_annotation_upload_data_url = urljoin(
self.api_url, self.URL_ANNOTATION_UPLOAD_PATH_TOKEN
)
response = self._request(
get_annotation_upload_data_url,
"post",
data={
"project_id": project_id,
"team_id": team_id,
"ids": image_ids,
"folder_id": folder_id,
},
content_type=UploadAnnotationAuthData,
)
return response
def get_pre_annotation_upload_data(
self, project_id: int, team_id: int, image_ids: List[int], folder_id: int
):
get_annotation_upload_data_url = urljoin(
self.api_url, self.URL_PRE_ANNOTATION_UPLOAD_PATH_TOKEN
)
response = self._request(
get_annotation_upload_data_url,
"post",
data={
"project_id": project_id,
"team_id": team_id,
"ids": image_ids,
"folder_id": folder_id,
},
content_type=UploadAnnotationAuthData,
)
return response
def get_templates(self, team_id: int):
get_templates_url = urljoin(self.api_url, self.URL_GET_TEMPLATES)
response = self._request(get_templates_url, "get", params={"team_id": team_id})
return response.json()
def start_model_training(self, team_id: int, hyper_parameters: dict) -> dict:
start_training_url = urljoin(self.api_url, self.URL_MODELS)
res = self._request(
start_training_url,
"post",
params={"team_id": team_id},
data=hyper_parameters,
)
return res.json()
def get_model_metrics(self, team_id: int, model_id: int) -> dict:
get_metrics_url = urljoin(
self.api_url, self.URL_GET_MODEL_METRICS.format(model_id)
)
res = self._request(get_metrics_url, "get", params={"team_id": team_id})
return res.json()
def get_models(
self, name: str, team_id: int, project_id: int, model_type: str
) -> List:
search_model_url = urljoin(self.api_url, self.URL_MODELS)
res = self._request(
search_model_url,
"get",
params={"team_id": team_id, "project_id": project_id, "name": name},
)
return res.json()
def search_models(self, query_string: str):
search_model_url = urljoin(self.api_url, self.URL_MODELS)
if query_string:
search_model_url = f"{search_model_url}?{query_string}"
# response = self._request(search_model_url, "get",)
return self._get_all_pages(search_model_url)
def bulk_get_folders(self, team_id: int, project_ids: List[int]):
get_folders_url = urljoin(self.api_url, self.URL_BULK_GET_FOLDERS)
res = self._request(
get_folders_url,
"put",
params={"team_id": team_id, "completedImagesCount": True},
data={"project_ids": project_ids},
)
return res.json()
def update_model(self, team_id: int, model_id: int, data: dict):
update_model_url = urljoin(self.api_url, f"{self.URL_MODELS}/{model_id}")
res = self._request(
update_model_url, "put", data=data, params={"team_id": team_id}
)
return res.json()
def delete_model(self, team_id: int, model_id: int):
delete_model_url = urljoin(self.api_url, f"{self.URL_MODEL}/{model_id}")
res = self._request(delete_model_url, "delete", params={"team_id": team_id})
return res.ok
def get_ml_model_download_tokens(self, team_id: int, model_id: int):
get_token_url = urljoin(
self.api_url, self.URL_GET_ML_MODEL_DOWNLOAD_TOKEN.format(model_id)
)
return self._request(
get_token_url,
"get",
params={"team_id": team_id},
content_type=DownloadMLModelAuthData,
)
def run_prediction(
self, team_id: int, project_id: int, ml_model_id: int, image_ids: list
):
prediction_url = urljoin(self.api_url, self.URL_PREDICTION)
res = self._request(
prediction_url,
"post",
data={
"team_id": team_id,
"project_id": project_id,
"ml_model_id": ml_model_id,
"image_ids": image_ids,
},
)
return res
def delete_image_annotations(
self,
team_id: int,
project_id: int,
folder_id: int = None,
image_names: List[str] = None,
) -> dict:
delete_annotations_url = urljoin(self.api_url, self.URL_DELETE_ANNOTATIONS)
params = {"team_id": team_id, "project_id": project_id}
data = {}
if folder_id:
params["folder_id"] = folder_id
if image_names:
data["image_names"] = image_names
response = self._request(
delete_annotations_url, "post", params=params, data=data
)
if response.ok:
return response.json()
def get_annotations_delete_progress(
self, team_id: int, project_id: int, poll_id: int
):
get_progress_url = urljoin(self.api_url, self.URL_DELETE_ANNOTATIONS_PROGRESS)
response = self._request(
get_progress_url,
"get",
params={"team_id": team_id, "project_id": project_id, "poll_id": poll_id},
)
return response.json()
def get_limitations(
self, team_id: int, project_id: int, folder_id: int = None
) -> ServiceResponse:
get_limits_url = urljoin(self.api_url, self.URL_GET_LIMITS.format(project_id))
return self._request(
get_limits_url,
"get",
params={"team_id": team_id, "folder_id": folder_id},
content_type=UserLimits,
)
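
# Hedged usage sketch (not part of the original module, kept as comments): the service
# is built from an API url and an SDK auth token (the token's trailing "=<id>" segment
# is taken as the team id, per __init__ above); all values below are placeholders.
#
# import logging
# backend = SuperannotateBackendService(
#     api_url="https://api.example.com/api/v1/",
#     auth_token="<sdk_token>=<team_id>",
#     logger=logging.getLogger("superannotate"),
# )
# projects = backend.get_projects(query_string="team_id=<team_id>")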
11595465
import os
import sys
import logging
import time

from azure.eventhub import EventHubClient, Receiver, Offset

logger = logging.getLogger("azure")

# Address can be in either of these formats:
# "amqps://<URL-encoded-SAS-policy>:<URL-encoded-SAS-key>@<mynamespace>.servicebus.windows.net/myeventhub"
# "amqps://<namespace>.servicebus.windows.net/<eventhub>"
# SAS policy and key are not required if they are encoded in the URL
ADDRESS = "amqps://<namespace>.servicebus.windows.net/<eventhub>"
USER = "<AccessKeyName>"
KEY = "<primary key value>"
CONSUMER_GROUP = "$default"
OFFSET = Offset("-1")
PARTITION = "0"

total = 0
last_sn = -1
last_offset = "-1"

client = EventHubClient(ADDRESS, debug=False, username=USER, password=KEY)
try:
    receiver = client.add_receiver(
        CONSUMER_GROUP, PARTITION, prefetch=5000, offset=OFFSET)
    client.run()
    start_time = time.time()
    for event_data in receiver.receive(timeout=100):
        print("Received: {}".format(event_data.body_as_str(encoding='UTF-8')))
        total += 1
    end_time = time.time()
    client.stop()
    run_time = end_time - start_time
    print("Received {} messages in {} seconds".format(total, run_time))
except KeyboardInterrupt:
    pass
finally:
    client.stop()
11595499
from __future__ import absolute_import, print_function, unicode_literals

from wolframclient.language import wl
from wolframclient.serializers import export, wolfram_encoder


# define a new class.
class Animal(object):
    pass


# register a new encoder for instances of the Animal class.
@wolfram_encoder.dispatch(Animal)
def encode_animal(serializer, animal):
    # encode the class as a symbol called Animal
    return serializer.encode(wl.Animal)


# create a new instance
animal = Animal()
# serialize it
result = export(animal)
print(result)  # b'Animal'
11595573
import numpy.testing

from refnx._lib.util import (
    TemporaryDirectory,
    preserve_cwd,
    possibly_open_file,
    MapWrapper,
)
from refnx._lib._numdiff import approx_hess2
from refnx._lib._testutils import PytestTester

try:
    from refnx._lib._cutil import c_unique as unique
    from refnx._lib._cutil import c_flatten as flatten
except ImportError:
    from refnx._lib.util import unique, flatten

test = PytestTester(__name__)
del PytestTester

__all__ = [s for s in dir() if not s.startswith("_")]
11595615
from django.urls import path

from .views import (
    IdeaList,
    IdeaDetail,
    add_or_change_idea,
    delete_idea,
    download_idea_picture,
)

urlpatterns = [
    path("", IdeaList.as_view(), name="idea_list"),
    path("add/", add_or_change_idea, name="add_idea"),
    path("<uuid:pk>/", IdeaDetail.as_view(), name="idea_detail"),
    path(
        "<uuid:pk>/download-picture/",
        download_idea_picture,
        name="download_idea_picture",
    ),
    path("<uuid:pk>/change/", add_or_change_idea, name="change_idea"),
    path("<uuid:pk>/delete/", delete_idea, name="delete_idea"),
]
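
# Hedged usage sketch (not part of the original module, kept as comments): with these
# names registered, URLs can be resolved by name; the UUID is a placeholder and a
# namespace prefix may be required depending on how this URLconf is included.
#
# from django.urls import reverse
# reverse("idea_list")
# reverse("idea_detail", kwargs={"pk": "00000000-0000-0000-0000-000000000000"})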
11595617
import FWCore.ParameterSet.Config as cms

from RecoBTag.Skimming.btagMuonInJet_EventContent_cff import *

btagMuonInJetOutputModuleAODSIM = cms.OutputModule("PoolOutputModule",
    btagMuonInJetEventSelection,
    AODSIMbtagMuonInJetEventContent,
    dataset = cms.untracked.PSet(
        filterName = cms.untracked.string('btagMuonInJetAODSIM'),
        dataTier = cms.untracked.string('USER')
    ),
    fileName = cms.untracked.string('btagMuonInJetAODSIM.root')
)
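
# Hedged usage sketch (not part of the original config, kept as comments): in a CMSSW
# job this output module would typically be attached to the process via an EndPath;
# the process object below is an assumption.
#
# process.btagMuonInJetOutputModuleAODSIM = btagMuonInJetOutputModuleAODSIM
# process.outPath = cms.EndPath(process.btagMuonInJetOutputModuleAODSIM)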
11595618
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from past.builtins import basestring
import os
import os.path
import stat
import logging
import hashlib
from io import BytesIO as StringIO
from zipfile import ZipFile, ZIP_DEFLATED
import botocore
import formic
from troposphere.awslambda import Code
from stacker.session_cache import get_session
from stacker.util import (
get_config_directory,
ensure_s3_bucket,
)
"""Mask to retrieve only UNIX file permissions from the external attributes
field of a ZIP entry.
"""
ZIP_PERMS_MASK = (stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) << 16
logger = logging.getLogger(__name__)
def _zip_files(files, root):
"""Generates a ZIP file in-memory from a list of files.
Files will be stored in the archive with relative names, and have their
UNIX permissions forced to 755 or 644 (depending on whether they are
user-executable in the source filesystem).
Args:
files (list[str]): file names to add to the archive, relative to
``root``.
root (str): base directory to retrieve files from.
Returns:
str: content of the ZIP file as a byte string.
str: A calculated hash of all the files.
"""
zip_data = StringIO()
with ZipFile(zip_data, 'w', ZIP_DEFLATED) as zip_file:
for fname in files:
zip_file.write(os.path.join(root, fname), fname)
# Fix file permissions to avoid any issues - only care whether a file
# is executable or not, choosing between modes 755 and 644 accordingly.
for zip_entry in zip_file.filelist:
perms = (zip_entry.external_attr & ZIP_PERMS_MASK) >> 16
if perms & stat.S_IXUSR != 0:
new_perms = 0o755
else:
new_perms = 0o644
if new_perms != perms:
logger.debug("lambda: fixing perms: %s: %o => %o",
zip_entry.filename, perms, new_perms)
new_attr = ((zip_entry.external_attr & ~ZIP_PERMS_MASK) |
(new_perms << 16))
zip_entry.external_attr = new_attr
contents = zip_data.getvalue()
zip_data.close()
content_hash = _calculate_hash(files, root)
return contents, content_hash
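# Hedged usage sketch (not part of the original code, kept as comments): zip a couple
# of files relative to a source root; both paths below are placeholders.
#
# contents, digest = _zip_files(["handler.py", "lib/helpers.py"], root="/path/to/lambda/src")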
def _calculate_hash(files, root):
""" Returns a hash of all of the given files at the given root.
Args:
files (list[str]): file names to include in the hash calculation,
relative to ``root``.
root (str): base directory to analyze files in.
Returns:
str: A hash of the hashes of the given files.
"""
file_hash = hashlib.md5()
for fname in sorted(files):
f = os.path.join(root, fname)
file_hash.update((fname + "\0").encode())
with open(f, "rb") as fd:
for chunk in iter(lambda: fd.read(4096), ""):
if not chunk:
break
file_hash.update(chunk)
file_hash.update("\0".encode())
return file_hash.hexdigest()
def _calculate_prebuilt_hash(f):
file_hash = hashlib.md5()
while True:
chunk = f.read(4096)
if not chunk:
break
file_hash.update(chunk)
return file_hash.hexdigest()
def _find_files(root, includes, excludes, follow_symlinks):
"""List files inside a directory based on include and exclude rules.
This is a more advanced version of `glob.glob`, that accepts multiple
complex patterns.
Args:
root (str): base directory to list files from.
includes (list[str]): inclusion patterns. Only files matching those
patterns will be included in the result.
excludes (list[str]): exclusion patterns. Files matching those
patterns will be excluded from the result. Exclusions take
precedence over inclusions.
follow_symlinks (bool): If true, symlinks will be included in the
resulting zip file
Yields:
str: a file name relative to the root.
Note:
Documentation for the patterns can be found at
http://www.aviser.asia/formic/doc/index.html
"""
root = os.path.abspath(root)
file_set = formic.FileSet(
directory=root, include=includes,
exclude=excludes, symlinks=follow_symlinks,
)
for filename in file_set.qualified_files(absolute=False):
yield filename
def _zip_from_file_patterns(root, includes, excludes, follow_symlinks):
"""Generates a ZIP file in-memory from file search patterns.
Args:
root (str): base directory to list files from.
includes (list[str]): inclusion patterns. Only files matching those
patterns will be included in the result.
excludes (list[str]): exclusion patterns. Files matching those
patterns will be excluded from the result. Exclusions take
precedence over inclusions.
follow_symlinks (bool): If true, symlinks will be included in the
resulting zip file
See Also:
:func:`_zip_files`, :func:`_find_files`.
Raises:
RuntimeError: when the generated archive would be empty.
"""
logger.info('lambda: base directory: %s', root)
files = list(_find_files(root, includes, excludes, follow_symlinks))
if not files:
raise RuntimeError('Empty list of files for Lambda payload. Check '
'your include/exclude options for errors.')
logger.info('lambda: adding %d files:', len(files))
for fname in files:
logger.debug('lambda: + %s', fname)
return _zip_files(files, root)
def _head_object(s3_conn, bucket, key):
"""Retrieve information about an object in S3 if it exists.
Args:
s3_conn (botocore.client.S3): S3 connection to use for operations.
bucket (str): name of the bucket containing the key.
key (str): name of the key to lookup.
Returns:
dict: S3 object information, or None if the object does not exist.
See the AWS documentation for explanation of the contents.
Raises:
botocore.exceptions.ClientError: any error from boto3 other than key
not found is passed through.
"""
try:
return s3_conn.head_object(Bucket=bucket, Key=key)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == '404':
return None
else:
raise
def _upload_code(s3_conn, bucket, prefix, name, contents, content_hash,
payload_acl):
"""Upload a ZIP file to S3 for use by Lambda.
The key used for the upload will be unique based on the checksum of the
contents. No changes will be made if the contents in S3 already match the
expected contents.
Args:
s3_conn (botocore.client.S3): S3 connection to use for operations.
bucket (str): name of the bucket to create.
prefix (str): S3 prefix to prepend to the constructed key name for
the uploaded file
name (str): desired name of the Lambda function. Will be used to
construct a key name for the uploaded file.
contents (str): byte string with the content of the file upload.
content_hash (str): md5 hash of the contents to be uploaded.
payload_acl (str): The canned S3 object ACL to be applied to the
uploaded payload
Returns:
troposphere.awslambda.Code: CloudFormation Lambda Code object,
pointing to the uploaded payload in S3.
Raises:
botocore.exceptions.ClientError: any error from boto3 is passed
through.
"""
logger.debug('lambda: ZIP hash: %s', content_hash)
key = '{}lambda-{}-{}.zip'.format(prefix, name, content_hash)
if _head_object(s3_conn, bucket, key):
logger.info('lambda: object %s already exists, not uploading', key)
else:
logger.info('lambda: uploading object %s', key)
s3_conn.put_object(Bucket=bucket, Key=key, Body=contents,
ContentType='application/zip',
ACL=payload_acl)
return Code(S3Bucket=bucket, S3Key=key)
def _check_pattern_list(patterns, key, default=None):
"""Validates file search patterns from user configuration.
Acceptable input is a string (which will be converted to a singleton list),
a list of strings, or anything falsy (such as None or an empty dictionary).
Empty or unset input will be converted to a default.
Args:
patterns: input from user configuration (YAML).
key (str): name of the configuration key the input came from,
used for error display purposes.
Keyword Args:
default: value to return in case the input is empty or unset.
Returns:
list[str]: validated list of patterns
Raises:
ValueError: if the input is unacceptable.
"""
if not patterns:
return default
if isinstance(patterns, basestring):
return [patterns]
if isinstance(patterns, list):
if all(isinstance(p, basestring) for p in patterns):
return patterns
raise ValueError("Invalid file patterns in key '{}': must be a string or "
'list of strings'.format(key))
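# Hedged examples of the normalization performed above (patterns are made up):
#   _check_pattern_list('*.py', 'include')                -> ['*.py']
#   _check_pattern_list(['*.py', '*.txt'], 'include')     -> ['*.py', '*.txt']
#   _check_pattern_list(None, 'include', default=['**'])  -> ['**']
#   _check_pattern_list({'bad': 1}, 'include')             raises ValueError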
def _upload_prebuilt_zip(s3_conn, bucket, prefix, name, options, path,
payload_acl):
    logger.debug('lambda: using prebuilt ZIP %s', path)
with open(path, 'rb') as zip_file:
# Default to the MD5 of the ZIP if no explicit version is provided
version = options.get('version')
if not version:
version = _calculate_prebuilt_hash(zip_file)
zip_file.seek(0)
return _upload_code(s3_conn, bucket, prefix, name, zip_file,
version, payload_acl)
def _build_and_upload_zip(s3_conn, bucket, prefix, name, options, path,
follow_symlinks, payload_acl):
includes = _check_pattern_list(options.get('include'), 'include',
default=['**'])
excludes = _check_pattern_list(options.get('exclude'), 'exclude',
default=[])
# os.path.join will ignore other parameters if the right-most one is an
# absolute path, which is exactly what we want.
zip_contents, zip_version = _zip_from_file_patterns(
path, includes, excludes, follow_symlinks)
version = options.get('version') or zip_version
return _upload_code(s3_conn, bucket, prefix, name, zip_contents, version,
payload_acl)
def _upload_function(s3_conn, bucket, prefix, name, options, follow_symlinks,
payload_acl):
"""Builds a Lambda payload from user configuration and uploads it to S3.
Args:
s3_conn (botocore.client.S3): S3 connection to use for operations.
bucket (str): name of the bucket to upload to.
prefix (str): S3 prefix to prepend to the constructed key name for
the uploaded file
name (str): desired name of the Lambda function. Will be used to
construct a key name for the uploaded file.
options (dict): configuration for how to build the payload.
Consists of the following keys:
* path:
base path to retrieve files from (mandatory). If not
absolute, it will be interpreted as relative to the stacker
configuration file directory, then converted to an absolute
path. See :func:`stacker.util.get_config_directory`.
* include:
file patterns to include in the payload (optional).
* exclude:
file patterns to exclude from the payload (optional).
follow_symlinks (bool): If true, symlinks will be included in the
resulting zip file
payload_acl (str): The canned S3 object ACL to be applied to the
uploaded payload
Returns:
troposphere.awslambda.Code: CloudFormation AWS Lambda Code object,
pointing to the uploaded object in S3.
Raises:
ValueError: if any configuration is invalid.
botocore.exceptions.ClientError: any error from boto3 is passed
through.
"""
try:
path = os.path.expanduser(options['path'])
except KeyError as e:
raise ValueError(
"missing required property '{}' in function '{}'".format(
e.args[0], name))
if not os.path.isabs(path):
path = os.path.abspath(os.path.join(get_config_directory(), path))
if path.endswith('.zip') and os.path.isfile(path):
        logger.debug('lambda: using prebuilt zip: %s', path)
return _upload_prebuilt_zip(s3_conn, bucket, prefix, name, options,
path, payload_acl)
elif os.path.isdir(path):
        logger.debug('lambda: building from directory: %s', path)
return _build_and_upload_zip(s3_conn, bucket, prefix, name, options,
path, follow_symlinks, payload_acl)
else:
raise ValueError('Path must be an existing ZIP file or directory')
def select_bucket_region(custom_bucket, hook_region, stacker_bucket_region,
provider_region):
"""Returns the appropriate region to use when uploading functions.
Select the appropriate region for the bucket where lambdas are uploaded in.
Args:
custom_bucket (str, None): The custom bucket name provided by the
`bucket` kwarg of the aws_lambda hook, if provided.
hook_region (str): The contents of the `bucket_region` argument to
the hook.
stacker_bucket_region (str): The contents of the
`stacker_bucket_region` global setting.
provider_region (str): The region being used by the provider.
Returns:
str: The appropriate region string.
"""
region = None
if custom_bucket:
region = hook_region
else:
region = stacker_bucket_region
return region or provider_region
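# Hedged examples of the precedence implemented above (region names are made up):
#   select_bucket_region('my-bucket', 'us-east-1', 'eu-west-1', 'us-west-2') -> 'us-east-1'
#   select_bucket_region(None,        'us-east-1', 'eu-west-1', 'us-west-2') -> 'eu-west-1'
#   select_bucket_region(None,        None,        None,        'us-west-2') -> 'us-west-2'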
def upload_lambda_functions(context, provider, **kwargs):
"""Builds Lambda payloads from user configuration and uploads them to S3.
Constructs ZIP archives containing files matching specified patterns for
each function, uploads the result to Amazon S3, then stores objects (of
type :class:`troposphere.awslambda.Code`) in the context's hook data,
ready to be referenced in blueprints.
Configuration consists of some global options, and a dictionary of function
    specifications. In the specifications, each key indicates the name of the
function (used for generating names for artifacts), and the value
determines what files to include in the ZIP (see more details below).
    Payloads are uploaded to either a custom bucket or stacker's default bucket,
    with the key containing its checksum, to allow repeated uploads to be
skipped in subsequent runs.
The configuration settings are documented as keyword arguments below.
Keyword Arguments:
bucket (str, optional): Custom bucket to upload functions to.
Omitting it will cause the default stacker bucket to be used.
bucket_region (str, optional): The region in which the bucket should
exist. If not given, the region will be either be that of the
global `stacker_bucket_region` setting, or else the region in
use by the provider.
prefix (str, optional): S3 key prefix to prepend to the uploaded
zip name.
follow_symlinks (bool, optional): Will determine if symlinks should
be followed and included with the zip artifact. Default: False
payload_acl (str, optional): The canned S3 object ACL to be applied to
the uploaded payload. Default: private
functions (dict):
Configurations of desired payloads to build. Keys correspond to
function names, used to derive key names for the payload. Each
value should itself be a dictionary, with the following data:
* path (str):
Base directory or path of a ZIP file of the Lambda function
payload content.
                If it is not an absolute path, it will be considered relative
to the directory containing the stacker configuration file
in use.
When a directory, files contained will be added to the
payload ZIP, according to the include and exclude patterns.
                If no patterns are provided, all files in the directory
(respecting default exclusions) will be used.
Files are stored in the archive with path names relative to
this directory. So, for example, all the files contained
directly under this directory will be added to the root of
the ZIP file.
When a ZIP file, it will be uploaded directly to S3.
                The hash of the whole ZIP file will be used as the version key
by default, which may cause spurious rebuilds when building
the ZIP in different environments. To avoid that,
explicitly provide a `version` option.
* include(str or list[str], optional):
Pattern or list of patterns of files to include in the
payload. If provided, only files that match these
patterns will be included in the payload.
Omitting it is equivalent to accepting all files that are
not otherwise excluded.
* exclude(str or list[str], optional):
Pattern or list of patterns of files to exclude from the
payload. If provided, any files that match will be ignored,
regardless of whether they match an inclusion pattern.
Commonly ignored files are already excluded by default,
such as ``.git``, ``.svn``, ``__pycache__``, ``*.pyc``,
``.gitignore``, etc.
* version(str, optional):
Value to use as the version for the current function, which
will be used to determine if a payload already exists in
S3. The value can be any string, such as a version number
or a git commit.
Note that when setting this value, to re-build/re-upload a
payload you must change the version manually.
Examples:
.. Hook configuration.
.. code-block:: yaml
pre_build:
- path: stacker.hooks.aws_lambda.upload_lambda_functions
required: true
enabled: true
data_key: lambda
args:
bucket: custom-bucket
follow_symlinks: true
prefix: cloudformation-custom-resources/
payload_acl: authenticated-read
functions:
MyFunction:
path: ./lambda_functions
include:
- '*.py'
- '*.txt'
exclude:
- '*.pyc'
- test/
.. Blueprint usage
.. code-block:: python
from troposphere.awslambda import Function
from stacker.blueprints.base import Blueprint
class LambdaBlueprint(Blueprint):
def create_template(self):
code = self.context.hook_data['lambda']['MyFunction']
self.template.add_resource(
Function(
'MyFunction',
Code=code,
Handler='my_function.handler',
Role='...',
Runtime='python2.7'
)
)
"""
custom_bucket = kwargs.get('bucket')
if not custom_bucket:
bucket_name = context.bucket_name
logger.info("lambda: using default bucket from stacker: %s",
bucket_name)
else:
bucket_name = custom_bucket
logger.info("lambda: using custom bucket: %s", bucket_name)
custom_bucket_region = kwargs.get("bucket_region")
if not custom_bucket and custom_bucket_region:
raise ValueError("Cannot specify `bucket_region` without specifying "
"`bucket`.")
bucket_region = select_bucket_region(
custom_bucket,
custom_bucket_region,
context.config.stacker_bucket_region,
provider.region
)
# Check if we should walk / follow symlinks
follow_symlinks = kwargs.get('follow_symlinks', False)
if not isinstance(follow_symlinks, bool):
raise ValueError('follow_symlinks option must be a boolean')
# Check for S3 object acl. Valid values from:
# https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
payload_acl = kwargs.get('payload_acl', 'private')
# Always use the global client for s3
session = get_session(bucket_region)
s3_client = session.client('s3')
ensure_s3_bucket(s3_client, bucket_name, bucket_region)
prefix = kwargs.get('prefix', '')
results = {}
for name, options in kwargs['functions'].items():
results[name] = _upload_function(s3_client, bucket_name, prefix, name,
options, follow_symlinks, payload_acl)
return results
|
11595621
|
import sys,os
sys.path.append("..")
import numpy as np
import tensorflow as tf
from bunch import Bunch
from data_generator import tokenization
from data_generator import tf_data_utils
from model_io import model_io
import json
import requests
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
	"vocab", None,
	"Path to the vocabulary file used to build the BERT FullTokenizer.")
flags.DEFINE_string(
	"url", None,
	"Host name or IP of the TensorFlow Serving REST endpoint.")
flags.DEFINE_string(
	"port", None,
	"Port of the TensorFlow Serving REST endpoint.")
flags.DEFINE_string(
	"model_name", None,
	"Name of the served model to query.")
flags.DEFINE_string(
	"signature_name", None,
	"Signature name of the exported SavedModel to call.")
flags.DEFINE_string(
	"input_keys", None,
	"Request payload format to use: 'instances' or 'inputs'.")
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab,
do_lower_case=True)
def full2half(ustring):
rstring = ""
for uchar in ustring:
inside_code=ord(uchar)
if inside_code==0x3000:
inside_code=0x0020
else:
inside_code-=0xfee0
if inside_code<0x0020 or inside_code>0x7e:
rstring += uchar
else:
rstring += unichr(inside_code)
return rstring
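# Hedged example of the width conversion above: full-width characters and the
# ideographic space map to their ASCII half-width forms, e.g.
# full2half(u"ＡＢＣ　１２３") -> u"ABC 123"; anything outside that range passes through.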
def get_single_features(query, max_seq_length):
tokens_a = tokenizer.tokenize(query)
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
label_ids = 0
return {"input_ids":input_ids,
"input_mask":input_mask,
"segment_ids":segment_ids,
"label_ids":[0]}
def main():
query_lst = [
"那王宝强还不如找个鸡呢,也花不到那么多的钱,马蓉的比是镶金边的吗",
"SM粑粑威武霸气帅,JJ超级无敌粗又大",
"叶可怡这时已全身赤裸一丝不挂",
"仔细的舔弄丝袜脚上的高跟凉鞋和丝袜脚尖",
"这车真耐操",
"我是M,马奴,找美女S调教,付费。陪睡吗?",
"享受她们美丽的肉体"
]
features = []
for query in query_lst:
feature = get_single_features(query, 128)
features.append(feature)
if FLAGS.input_keys == "instances":
for key in features[0]:
print(np.array(features[0][key]).shape, key)
feed_dict = {
"instances":features[0:5],
"signature_name":FLAGS.signature_name
}
elif FLAGS.input_keys == "inputs":
feed_dict = {
"inputs":{
"input_ids":[],
"input_mask":[],
"segment_ids":[],
"label_ids":[]
},
"signature_name":FLAGS.signature_name
}
for feature in features[0:5]:
for key in feed_dict["inputs"]:
if key not in ["label_ids"]:
feed_dict["inputs"][key].append(feature[key])
else:
feed_dict["inputs"][key].extend(feature[key])
for key in feed_dict["inputs"]:
print(key, np.array(feed_dict["inputs"][key]).shape)
results = requests.post("http://%s:%s/v1/models/%s:predict" % (FLAGS.url, FLAGS.port, FLAGS.model_name), json=feed_dict)
try:
print(results.json())
	except Exception:
		print(results.content)
if __name__ == "__main__":
main()
|
11595622
|
import numpy as np
import pytest
from kiez import Kiez
from kiez.hubness_reduction import MutualProximity
from numpy.testing import assert_array_equal
rng = np.random.RandomState(2)
def test_wrong_input():
with pytest.raises(ValueError) as exc_info:
MutualProximity(method="wrong")
assert "not recognized" in str(exc_info.value)
def test_sqeuclidean(n_samples=20, n_features=5):
source = rng.rand(n_samples, n_features)
target = rng.rand(n_samples, n_features)
k_inst = Kiez(hubness=MutualProximity())
k_inst.fit(source, target)
ndist, nind = k_inst.kneighbors(k=1)
out_dist, out_nind = k_inst.hubness.transform(ndist, nind, None)
assert_array_equal(ndist, out_dist)
assert_array_equal(nind, out_nind)
|
11595680
|
from datetime import datetime, timezone
from enum import Enum
from flask import (
current_app,
url_for
)
class EnumStrAutoName(Enum):
"""
    For subclasses of this `Enum`, `auto()` will return strings, not ints.
"""
@staticmethod
def _generate_next_value_(name, start, count, last_values):
return str(count)
def absolute_url_for(endpoint: str, **kwargs) -> str:
"""
Implements Flask `url_for`, but by default
creates absolute URL (`_external` and `_scheme`) with
`PREFERRED_URL_SCHEME` scheme.
- you can specify these parameters to change behavior.
https://flask.palletsprojects.com/en/1.1.x/api/#flask.url_for
"""
if ("_external" not in kwargs):
kwargs["_external"] = True
if ("_scheme" not in kwargs):
kwargs["_scheme"] = current_app.config["PREFERRED_URL_SCHEME"]
return url_for(endpoint, **kwargs)
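# Hedged example (host and endpoint are hypothetical): with SERVER_NAME set to
# "example.com" and PREFERRED_URL_SCHEME set to "https",
# absolute_url_for("index") returns "https://example.com/" rather than the
# relative "/" that plain url_for("index") would produce.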
def get_current_datetime() -> dict:
"""
:returns: Information about current date and time.
"""
current_datetime = datetime.now(timezone.utc)
current_date = current_datetime.strftime("%d.%m.%Y")
current_time = current_datetime.strftime("%H:%M:%S")
current_timezone = current_datetime.strftime("%Z")
return {
"date": current_date,
"time": current_time,
"timezone": current_timezone
}
def get_current_iso_datetime(sep="T", timespec="seconds") -> str:
"""
See https://docs.python.org/3.8/library/datetime.html#datetime.datetime.isoformat # noqa
"""
return datetime.now(timezone.utc).isoformat(sep, timespec)
def convert_iso_datetime(date_string: str) -> dict:
"""
:returns:
Pretty-print information about ISO 8601 `date_string`.
"""
value = datetime.fromisoformat(date_string)
value_date = value.strftime("%d.%m.%Y")
value_time = value.strftime("%H:%M:%S")
value_timezone = value.strftime("%Z")
return {
"date": value_date,
"time": value_time,
"timezone": value_timezone
}
def bytes_to_human_unit(
bytes_count: int,
factor: float,
suffix: str
) -> str:
"""
Converts bytes to human readable string.
- function source: https://stackoverflow.com/a/1094933/8445442
- https://en.wikipedia.org/wiki/Binary_prefix
- https://man7.org/linux/man-pages/man7/units.7.html
:param bytes_count:
Count of bytes to convert.
:param factor:
Use `1024.0` for binary and `1000.0` for decimal.
:param suffix:
Use `iB` for binary and `B` for decimal.
"""
for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
if abs(bytes_count) < factor:
return "%3.1f %s%s" % (bytes_count, unit, suffix)
bytes_count /= factor
return "%.1f %s%s" % (bytes_count, "Y", suffix)
def bytes_to_human_binary(bytes_count: int) -> str:
"""
Bytes -> binary representation.
"""
return bytes_to_human_unit(bytes_count, 1024.0, "iB")
def bytes_to_human_decimal(bytes_count: int) -> str:
"""
Bytes -> decimal representation.
"""
return bytes_to_human_unit(bytes_count, 1000.0, "B")
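# Hedged examples of the two representations above:
#   bytes_to_human_binary(1536)  -> "1.5 KiB"   (1536 / 1024)
#   bytes_to_human_decimal(1500) -> "1.5 KB"    (1500 / 1000)
#   bytes_to_human_decimal(500)  -> "500.0 B"   (below the first factor)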
def get_str_bytes_length(value: str) -> int:
"""
- source: https://stackoverflow.com/a/30686735/8445442
"""
return len(value.encode("utf-8"))
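# Hedged example: get_str_bytes_length("héllo") -> 6, because "é" takes two bytes
# in UTF-8 while len("héllo") is 5.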
|
11595709
|
import shutil
import tempfile
from unittest import TestCase, mock
import pytest
from lineflow import download
from lineflow.datasets.penn_treebank import PennTreebank, get_penn_treebank
class PennTreebankTestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.default_cache_root = download.get_cache_root()
cls.temp_dir = tempfile.mkdtemp()
download.set_cache_root(cls.temp_dir)
@classmethod
def tearDownClass(cls):
download.set_cache_root(cls.default_cache_root)
shutil.rmtree(cls.temp_dir)
@pytest.mark.slow
def test_get_penn_treebank(self):
raw = get_penn_treebank()
params = [('train', 42_068), ('dev', 3_370), ('test', 3_761)]
for key, size in params:
with self.subTest(key=key, size=size):
self.assertIn(key, raw)
self.assertEqual(len(raw[key]), size)
self.assertEqual(len(PennTreebank(split=key)), size)
@pytest.mark.slow
def test_get_penn_treebank_twice(self):
get_penn_treebank()
        with mock.patch('lineflow.datasets.penn_treebank.pickle', autospec=True) as mock_pickle:
get_penn_treebank()
mock_pickle.dump.assert_not_called()
mock_pickle.load.assert_called_once()
def test_raises_value_error_with_invalid_split(self):
with self.assertRaises(ValueError):
PennTreebank(split='invalid_split')
|
11595742
|
from django.db.backends.postgresql.features import (
DatabaseFeatures as PostgresDatabaseFeatures,
)
from django.utils.functional import cached_property
class DatabaseFeatures(PostgresDatabaseFeatures):
# Cloning databases doesn't speed up tests.
# https://github.com/cockroachdb/django-cockroachdb/issues/206
can_clone_databases = False
# Not supported: https://github.com/cockroachdb/cockroach/issues/40476
has_select_for_update_skip_locked = False
# Not supported: https://github.com/cockroachdb/cockroach/issues/31632
can_defer_constraint_checks = False
# Not supported: https://github.com/cockroachdb/cockroach/issues/48307
supports_deferrable_unique_constraints = False
# There are limitations on having DDL statements in a transaction:
# https://www.cockroachlabs.com/docs/stable/known-limitations.html#schema-changes-within-transactions
can_rollback_ddl = False
# Not supported: https://github.com/cockroachdb/cockroach/issues/17511
create_test_procedure_without_params_sql = None
create_test_procedure_with_int_param_sql = None
# Not supported: https://github.com/cockroachdb/cockroach/issues/20956
supports_sequence_reset = False
# Forward references in fixtures won't work until cockroachdb can
# disable constraints: https://github.com/cockroachdb/cockroach/issues/19444
supports_forward_references = False
# Unlike PostgreSQL, cockroachdb doesn't support any EXPLAIN formats
# ('JSON', 'TEXT', 'XML', and 'YAML').
supported_explain_formats = set()
# Not supported: https://github.com/cockroachdb/cockroach/issues/41645
supports_regex_backreferencing = False
# CockroachDB sorts NULL values first with ASC and last with DESC.
# PostgreSQL behaves the opposite.
nulls_order_largest = False
@cached_property
def introspected_field_types(self):
return {
**super().introspected_field_types,
'AutoField': 'BigIntegerField',
'BigAutoField': 'BigIntegerField',
'IntegerField': 'BigIntegerField',
'PositiveIntegerField': 'BigIntegerField',
'SmallAutoField': 'BigIntegerField',
}
# This can be removed when CockroachDB adds support for NULL FIRST/LAST:
# https://github.com/cockroachdb/cockroach/issues/6224
supports_order_by_nulls_modifier = False
# CockroachDB doesn't create indexes on foreign keys.
indexes_foreign_keys = False
# Not supported: https://github.com/cockroachdb/cockroach/issues/59567
supports_non_deterministic_collations = False
test_collations = {
# PostgresDatabaseFeatures uses 'sv-x-icu' for 'non_default' but
# CockroachDB doesn't introspect that properly:
# https://github.com/cockroachdb/cockroach/issues/54817
'non_default': 'sv',
'swedish_ci': 'sv-x-icu',
}
# Not supported: https://github.com/cockroachdb/cockroach/issues/9682
supports_expression_indexes = False
@cached_property
def is_cockroachdb_21_1(self):
return self.connection.cockroachdb_version >= (21, 1)
@cached_property
def django_test_expected_failures(self):
expected_failures = super().django_test_expected_failures
expected_failures.update({
# sum(): unsupported binary operator: <float> + <int>:
# https://github.com/cockroachdb/django-cockroachdb/issues/73
'aggregation.tests.AggregateTestCase.test_add_implementation',
'aggregation.tests.AggregateTestCase.test_combine_different_types',
# greatest(): expected avg(price) to be of type float, found type
# decimal: https://github.com/cockroachdb/django-cockroachdb/issues/74
'aggregation.tests.AggregateTestCase.test_expression_on_aggregation',
# POWER() doesn't support negative exponents:
# https://github.com/cockroachdb/django-cockroachdb/issues/22
'db_functions.math.test_power.PowerTests.test_integer',
# Tests that assume a serial pk: https://github.com/cockroachdb/django-cockroachdb/issues/18
'multiple_database.tests.RouterTestCase.test_generic_key_cross_database_protection',
# Unsupported query: mixed type addition in SELECT:
# https://github.com/cockroachdb/django-cockroachdb/issues/19
'annotations.tests.NonAggregateAnnotationTestCase.test_mixed_type_annotation_numbers',
# Forward references in fixtures won't work until CockroachDB can
# disable constraints: https://github.com/cockroachdb/cockroach/issues/19444
'backends.base.test_creation.TestDeserializeDbFromString.test_circular_reference',
'backends.base.test_creation.TestDeserializeDbFromString.test_circular_reference_with_natural_key',
'backends.base.test_creation.TestDeserializeDbFromString.test_self_reference',
'fixtures.tests.CircularReferenceTests.test_circular_reference',
'fixtures.tests.ForwardReferenceTests.test_forward_reference_fk',
'fixtures.tests.ForwardReferenceTests.test_forward_reference_m2m',
'serializers.test_data.SerializerDataTests.test_json_serializer',
'serializers.test_data.SerializerDataTests.test_jsonl_serializer',
'serializers.test_data.SerializerDataTests.test_python_serializer',
'serializers.test_data.SerializerDataTests.test_xml_serializer',
'serializers.test_data.SerializerDataTests.test_yaml_serializer',
# No sequence for AutoField in CockroachDB.
'introspection.tests.IntrospectionTests.test_sequence_list',
# Unsupported query: unsupported binary operator: <int> / <int>:
# https://github.com/cockroachdb/django-cockroachdb/issues/21
'expressions.tests.ExpressionOperatorTests.test_lefthand_division',
'expressions.tests.ExpressionOperatorTests.test_right_hand_division',
# CockroachDB doesn't support disabling constraints:
# https://github.com/cockroachdb/cockroach/issues/19444
'auth_tests.test_views.UUIDUserTests.test_admin_password_change',
'backends.tests.FkConstraintsTests.test_check_constraints',
'backends.tests.FkConstraintsTests.test_check_constraints_sql_keywords',
'backends.tests.FkConstraintsTests.test_disable_constraint_checks_context_manager',
'backends.tests.FkConstraintsTests.test_disable_constraint_checks_manually',
# Passing a naive datetime to cursor.execute() probably can't work
# on CockroachDB. The value needs a timezone so psycopg2 will cast
# it to timestamptz rather than timestamp to avoid "value type
# timestamp doesn't match type timestamptz of column "dt"" but
# there aren't any hooks to do that.
'timezones.tests.LegacyDatabaseTests.test_cursor_execute_accepts_naive_datetime',
# SchemaEditor._model_indexes_sql() doesn't output some expected
# tablespace SQL because CockroachDB automatically indexes foreign
# keys.
'model_options.test_tablespaces.TablespacesTests.test_tablespace_for_many_to_many_field',
# ALTER COLUMN TYPE requiring rewrite of on-disk data is currently
# not supported for columns that are part of an index.
# https://go.crdb.dev/issue/47636
'migrations.test_executor.ExecutorTests.test_alter_id_type_with_fk',
'migrations.test_operations.OperationTests.test_alter_field_pk_fk',
'migrations.test_operations.OperationTests.test_alter_field_reloads_state_on_fk_target_changes',
'migrations.test_operations.OperationTests.test_alter_field_reloads_state_on_fk_with_to_field_related_name_target_type_change', # noqa
'migrations.test_operations.OperationTests.test_alter_field_reloads_state_on_fk_with_to_field_target_changes', # noqa
'migrations.test_operations.OperationTests.test_alter_field_reloads_state_on_fk_with_to_field_target_type_change', # noqa
'migrations.test_operations.OperationTests.test_rename_field_reloads_state_on_fk_target_changes',
'schema.tests.SchemaTests.test_alter_auto_field_to_char_field',
'schema.tests.SchemaTests.test_alter_autofield_pk_to_smallautofield_pk_sequence_owner',
'schema.tests.SchemaTests.test_char_field_pk_to_auto_field',
'schema.tests.SchemaTests.test_char_field_with_db_index_to_fk',
'schema.tests.SchemaTests.test_text_field_with_db_index_to_fk',
# CockroachDB doesn't support dropping the primary key.
'schema.tests.SchemaTests.test_alter_int_pk_to_int_unique',
# CockroachDB doesn't support changing the primary key of table.
'schema.tests.SchemaTests.test_alter_not_unique_field_to_primary_key',
'schema.tests.SchemaTests.test_primary_key',
# ALTER COLUMN fails if previous asynchronous ALTER COLUMN hasn't
# finished. https://github.com/cockroachdb/cockroach/issues/47137
# These tests only fail sometimes, e.g.
# https://github.com/cockroachdb/cockroach/issues/65691
'schema.tests.SchemaTests.test_alter_field_db_collation',
'schema.tests.SchemaTests.test_alter_field_type_and_db_collation',
# SmallAutoField doesn't work:
# https://github.com/cockroachdb/cockroach-django/issues/84
'bulk_create.tests.BulkCreateTests.test_bulk_insert_nullable_fields',
'many_to_one.tests.ManyToOneTests.test_fk_to_smallautofield',
'migrations.test_operations.OperationTests.test_smallfield_autofield_foreignfield_growth',
'migrations.test_operations.OperationTests.test_smallfield_bigautofield_foreignfield_growth',
# This backend raises "ValueError: CockroachDB's EXPLAIN doesn't
# support any formats." instead of an "unknown format" error.
'queries.test_explain.ExplainTests.test_unknown_format',
# unsupported comparison operator: <jsonb> > <string>:
# https://github.com/cockroachdb/cockroach/issues/49144
'model_fields.test_jsonfield.TestQuerying.test_deep_lookup_transform',
# ordering by JSON isn't supported:
# https://github.com/cockroachdb/cockroach/issues/35706
'expressions_window.tests.WindowFunctionTests.test_key_transform',
'model_fields.test_jsonfield.TestQuerying.test_deep_distinct',
'model_fields.test_jsonfield.TestQuerying.test_order_grouping_custom_decoder',
'model_fields.test_jsonfield.TestQuerying.test_ordering_by_transform',
'model_fields.test_jsonfield.TestQuerying.test_ordering_grouping_by_key_transform',
# unexpected partial unique index in pg_constraint query:
# https://github.com/cockroachdb/cockroach/issues/61098
'introspection.tests.IntrospectionTests.test_get_constraints_unique_indexes_orders',
})
if not self.connection.features.is_cockroachdb_21_1:
expected_failures.update({
# unimplemented: unable to encode JSON as a table key:
# https://github.com/cockroachdb/cockroach/issues/35706
'model_fields.test_jsonfield.TestQuerying.test_join_key_transform_annotation_expression',
# unknown function: sha224() and sha384():
# https://github.com/cockroachdb/django-cockroachdb/issues/81
'db_functions.text.test_sha224.SHA224Tests.test_basic',
'db_functions.text.test_sha224.SHA224Tests.test_transform',
'db_functions.text.test_sha384.SHA384Tests.test_basic',
'db_functions.text.test_sha384.SHA384Tests.test_transform',
# timezones after 2038 use incorrect DST settings:
# https://github.com/cockroachdb/django-cockroachdb/issues/124
'expressions.tests.FTimeDeltaTests.test_datetime_subtraction_microseconds',
# db_collation appears even if none is specified:
# https://github.com/cockroachdb/cockroach/issues/54989
'inspectdb.tests.InspectDBTestCase.test_field_types',
# Unsupported type conversion: https://github.com/cockroachdb/cockroach/issues/9851
'migrations.test_operations.OperationTests.test_alter_fk_non_fk',
'schema.tests.SchemaTests.test_alter_field_db_collation',
'schema.tests.SchemaTests.test_alter_field_type_and_db_collation',
'schema.tests.SchemaTests.test_alter_text_field_to_date_field',
'schema.tests.SchemaTests.test_alter_text_field_to_datetime_field',
'schema.tests.SchemaTests.test_alter_text_field_to_time_field',
'schema.tests.SchemaTests.test_alter_textual_field_keep_null_status',
'schema.tests.SchemaTests.test_m2m_rename_field_in_target_model',
'schema.tests.SchemaTests.test_rename',
})
return expected_failures
@cached_property
def django_test_skips(self):
skips = super().django_test_skips
skips.update({
# https://github.com/cockroachdb/django-cockroachdb/issues/20
'Unsupported query: UPDATE float column with integer column.': {
'expressions.tests.ExpressionsNumericTests',
},
# https://github.com/cockroachdb/django-cockroachdb/issues/153#issuecomment-664697963
'CockroachDB has more restrictive blocking than other databases.': {
'select_for_update.tests.SelectForUpdateTests.test_block',
},
# https://www.cockroachlabs.com/docs/stable/transaction-retry-error-reference.html#retry_write_too_old
'Fails with TransactionRetryWithProtoRefreshError: ... RETRY_WRITE_TOO_OLD ...': {
'delete_regress.tests.DeleteLockingTest.test_concurrent_delete',
},
            'Skip to prevent some error output in the logs.': {
# Since QuerySet.select_for_update() was enabled, this test is
# already skipped by the 'Database took too long to lock the row'
# logic in the test. Skipping it entirely prevents some error
# output in the logs:
# Exception in thread Thread-1:
# ...
# psycopg2.errors.SerializationFailure: restart transaction:
# TransactionRetryWithProtoRefreshError: WriteTooOldError: write
# at timestamp 1598314405.858850941,0 too old; wrote at
# 1598314405.883337663,1
'get_or_create.tests.UpdateOrCreateTransactionTests.test_creation_in_transaction',
# Sometimes fails as above or with
# AssertionError: datetime.timedelta(microseconds=28529) not
# greater than datetime.timedelta(microseconds=500000)
'get_or_create.tests.UpdateOrCreateTransactionTests.test_updates_in_transaction',
},
})
return skips
|
11595748
|
from pycdft.common.atom import Atom
from pycdft.common.sample import Sample
class Fragment(object):
"""
A part of the system to which constraints may apply.
Attributes:
        name (str): name of the fragment.
        sample (Sample): sample.
atoms (list of Atom): list of atoms belonging to the fragment.
natoms (int): number of atoms in fragment
rhopro_r (array): real space promolecule charge density
"""
def __init__(self, sample: Sample, atoms: list, name: str = ""):
self.name = name
self.sample = sample
self.atoms = atoms
self.natoms = len(self.atoms)
self.rhopro_r = None
self.sample.fragments.append(self)
|
11595761
|
description = 'CCR with LakeShore LS336 controller'
group = 'optional'
includes = ['alias_T']
tango_base = 'tango://resedahw2.reseda.frm2:10000/reseda'
devices = dict(
# T_ccr = device('nicos_mlz.devices.ccr.CCRControl',
# description = 'The main temperature control device of the CCR',
# stick = 'T_ccr_stick',
# tube = 'T_ccr_tube',
# unit = 'K',
# fmtstr = '%.3f',
# ),
T_ccr_stick = device('nicos.devices.entangle.TemperatureController',
description = 'The control device of the sample (stick)',
tangodevice = '%s/ccr/control2' % tango_base,
abslimits = (0, 350),
unit = 'K',
fmtstr = '%.2f',
pollinterval = 10,
maxage = 19,
),
T_ccr_tube = device('nicos.devices.entangle.TemperatureController',
description = 'The control device of the tube',
tangodevice = '%s/ccr/control' % tango_base,
abslimits = (0, 320),
warnlimits = (0, 320),
unit = 'K',
fmtstr = '%.2f',
pollinterval = 10,
maxage = 19,
),
T_ccr_sample_stick_a = device('nicos.devices.entangle.Sensor',
description = '(optional) Sample temperature',
tangodevice = '%s/ccr/sensa' % tango_base,
unit = 'K',
fmtstr = '%.2f',
pollinterval = 10,
maxage = 19,
),
T_ccr_sample_stick_b = device('nicos.devices.entangle.Sensor',
description = '(regulation) Temperature at the stick',
tangodevice = '%s/ccr/sensb' % tango_base,
unit = 'K',
fmtstr = '%.2f',
pollinterval = 10,
maxage = 19,
),
T_ccr_cold_head = device('nicos.devices.entangle.Sensor',
description = 'Temperature of the coldhead',
tangodevice = '%s/ccr/sensc' % tango_base,
warnlimits = (0, 350),
unit = 'K',
fmtstr = '%.2f',
pollinterval = 10,
maxage = 19,
),
T_ccr_sample_tube = device('nicos.devices.entangle.Sensor',
description = '(regulation) Temperature at thermal coupling to the tube',
tangodevice = '%s/ccr/sensd' % tango_base,
warnlimits = (0, 350),
unit = 'K',
fmtstr = '%.2f',
pollinterval = 10,
maxage = 19,
),
P_ccr = device('nicos.devices.entangle.Sensor',
description = 'Pressure in the neutron guide elements',
tangodevice = '%s/ccr/p1' % tango_base,
fmtstr = '%.1f',
unit = 'mbar'
),
)
alias_config = {
'T': {
# 'T_ccr': 200,
'T_ccr_stick': 150,
'T_ccr_tube': 100
},
'Ts': {
'T_ccr_stick': 100,
# 'T_ccr_sample_stick_a': 90,
# 'T_ccr_sample_stick_b': 80,
'T_ccr_tube': 20,
# 'T_ccr_cold_head': 10
},
}
|
11595795
|
import logging
import uuid
import eventlet
from errors import ExpectedException
import rtjp_eventlet
class HookboxConn(object):
logger = logging.getLogger('HookboxConn')
def __init__(self, server, rtjp_conn, config, remote_addr):
self._rtjp_conn = rtjp_conn
self.server = server
self.state = 'initial'
self.cookies = None
self.cookie_string = None
self.cookie_id = None
self.cookie_identifier = config['cookie_identifier']
self.id = str(uuid.uuid4()).replace('-', '')
self.user = None
self.remote_addr = remote_addr
def serialize(self):
return {
"id": self.id,
"user": self.user and self.user.get_name(),
"cookie": self.cookie_string
}
def send_frame(self, *args, **kw):
try:
self._rtjp_conn.send_frame(*args, **kw).wait()
except Exception, e:
if 'closed' in str(e).lower():
pass
else:
self.logger.warn("Unexpected error: %s", e, exc_info=True)
def send_error(self, *args, **kw):
return self._rtjp_conn.send_error(*args, **kw)
def get_cookie(self):
return self.cookie_string
def get_id(self):
return self.id
def get_cookie_id(self):
return self.cookie_id
def get_remote_addr(self):
return self.remote_addr
def _close(self):
if self.state == 'connected':
self.server.closed(self)
def run(self):
while True:
try:
# print 'read a frame...'
self.logger.debug('%s waiting for a frame', self)
fid, fname, fargs= self._rtjp_conn.recv_frame().wait()
# print 'got frame', fid, fname, fargs
except rtjp_eventlet.errors.ConnectionLost, e:
self.logger.debug('received connection lost error')
# print 'connection lost'
break
except:
# print 'some error..'
self.logger.warn("Error reading frame", exc_info=True)
continue
f = getattr(self, 'frame_' + fname, None)
if f:
try:
f(fid, fargs)
except ExpectedException, e:
self.send_error(fid, e)
except Exception, e:
self.logger.warn("Unexpected error: %s", e, exc_info=True)
self.send_error(fid, e)
else:
self._default_frame(fid, fname, fargs)
# print 'all DONE!'
# cleanup
self.logger.debug('loop done')
if self.user:
self.logger.debug('cleanup user')
# print 'go call remove connection'
self.user.remove_connection(self)
self.server.disconnect(self)
    def _default_frame(self, fid, fname, fargs):
pass
def frame_CONNECT(self, fid, fargs):
if self.state != 'initial':
return self.send_error(fid, "Already logged in")
if 'cookie_string' not in fargs:
raise ExpectedException("Missing cookie_string")
self.cookie_string = fargs['cookie_string']
self.cookies = parse_cookies(fargs['cookie_string'])
self.cookie_id = self.cookies.get(self.cookie_identifier, None)
self.server.connect(self)
self.state = 'connected'
self.send_frame('CONNECTED', { 'name': self.user.get_name() })
def frame_SUBSCRIBE(self, fid, fargs):
if self.state != 'connected':
return self.send_error(fid, "Not connected")
if 'channel_name' not in fargs:
return self.send_error(fid, "channel_name required")
channel = self.server.get_channel(self, fargs['channel_name'])
channel.subscribe(self.user, conn=self)
def frame_UNSUBSCRIBE(self, fid, fargs):
if self.state != 'connected':
return self.send_error(fid, "Not connected")
if 'channel_name' not in fargs:
return self.send_error(fid, "channel_name required")
channel = self.server.get_channel(self, fargs['channel_name'])
channel.unsubscribe(self.user, conn=self)
def frame_PUBLISH(self, fid, fargs):
if self.state != 'connected':
return self.send_error(fid, "Not connected")
if 'channel_name' not in fargs:
return self.send_error(fid, "channel_name required")
channel = self.server.get_channel(self, fargs['channel_name'])
channel.publish(self.user, fargs.get('payload', 'null'), conn=self)
def frame_MESSAGE(self, fid, fargs):
if self.state != 'connected':
return self.send_error(fid, "Not connected")
if 'name' not in fargs:
return self.send_error(fid, "name required")
self.user.send_message(fargs['name'], fargs.get('payload', 'null'), conn=self)
def parse_cookies(cookieString):
output = {}
for m in cookieString.split('; '):
try:
k,v = m.split('=', 1)
output[k] = v
except:
continue
return output
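# Hedged example of the parsing above (cookie names are made up):
#   parse_cookies("session=abc123; theme=dark") -> {'session': 'abc123', 'theme': 'dark'}
# Fragments without an '=' are silently skipped.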
|
11595797
|
from joblib import load
from pyprojroot import here
python_model = load(here("./python_model.joblib"))
test_data = load(here("./test_data.joblib"))
|
11595830
|
from netmiko import ConnectHandler
def takebackup(cmd, rname):
    uname = "cisco"
    passwd = "<PASSWORD>"
    device = ConnectHandler(device_type='cisco_ios', ip=rname, username=uname, password=passwd)
    output = device.send_command(cmd)
    fname = rname + ".txt"
    # write the command output to <router>.txt; the context manager closes the file
    with open(fname, "w") as backup_file:
        backup_file.write(output)
# assuming we have two routers in network
devices="rtr1,rtr2"
devices=devices.split(",")
for device in devices:
takebackup("show run",device)
|
11595842
|
import os
import argparse
import torch
import logging
import json
import pytorch_lightning as pl
from transformers import BartTokenizer, BartConfig
from transformers import AdamW, get_linear_schedule_with_warmup
from .network import BartGen
from .constrained_gen import BartConstrainedGen
logger = logging.getLogger(__name__)
class GenIEModel(pl.LightningModule):
def __init__(self, args):
super().__init__()
self.hparams = args
self.config=BartConfig.from_pretrained('facebook/bart-large')
self.tokenizer = BartTokenizer.from_pretrained('facebook/bart-large')
self.tokenizer.add_tokens([' <arg>',' <tgr>'])
if self.hparams.model=='gen':
self.model = BartGen(self.config, self.tokenizer)
self.model.resize_token_embeddings()
elif self.hparams.model == 'constrained-gen':
self.model = BartConstrainedGen(self.config, self.tokenizer)
self.model.resize_token_embeddings()
else:
raise NotImplementedError
def forward(self, inputs):
return self.model(**inputs)
def training_step(self, batch, batch_idx):
'''
processed_ex = {
'doc_key': ex['doc_key'],
'input_tokens_ids':input_tokens['input_ids'],
'input_attn_mask': input_tokens['attention_mask'],
'tgt_token_ids': tgt_tokens['input_ids'],
'tgt_attn_mask': tgt_tokens['attention_mask'],
}
'''
inputs = {
"input_ids": batch["input_token_ids"],
"attention_mask": batch["input_attn_mask"],
"decoder_input_ids": batch['tgt_token_ids'],
"decoder_attention_mask": batch["tgt_attn_mask"],
"task": 0
}
outputs = self.model(**inputs)
loss = outputs[0]
loss = torch.mean(loss)
log = {
'train/loss': loss,
}
return {
'loss': loss,
'log': log
}
def validation_step(self,batch, batch_idx):
inputs = {
"input_ids": batch["input_token_ids"],
"attention_mask": batch["input_attn_mask"],
"decoder_input_ids": batch['tgt_token_ids'],
"decoder_attention_mask": batch["tgt_attn_mask"],
"task" :0,
}
outputs = self.model(**inputs)
loss = outputs[0]
loss = torch.mean(loss)
return loss
def validation_epoch_end(self, outputs):
avg_loss = torch.mean(torch.stack(outputs))
log = {
'val/loss': avg_loss,
}
return {
'loss': avg_loss,
'log': log
}
def test_step(self, batch, batch_idx):
if self.hparams.sample_gen:
sample_output = self.model.generate(batch['input_token_ids'], do_sample=True,
top_k=20, top_p=0.95, max_length=30, num_return_sequences=1,num_beams=1,
)
else:
sample_output = self.model.generate(batch['input_token_ids'], do_sample=False,
max_length=30, num_return_sequences=1,num_beams=1,
)
sample_output = sample_output.reshape(batch['input_token_ids'].size(0), 1, -1)
doc_key = batch['doc_key'] # list
tgt_token_ids = batch['tgt_token_ids']
return (doc_key, sample_output, tgt_token_ids)
def test_epoch_end(self, outputs):
# evaluate F1
with open('checkpoints/{}/predictions.jsonl'.format(self.hparams.ckpt_name),'w') as writer:
for tup in outputs:
for idx in range(len(tup[0])):
pred = {
'doc_key': tup[0][idx],
'predicted': self.tokenizer.decode(tup[1][idx].squeeze(0), skip_special_tokens=True),
'gold': self.tokenizer.decode(tup[2][idx].squeeze(0), skip_special_tokens=True)
}
writer.write(json.dumps(pred)+'\n')
return {}
def configure_optimizers(self):
self.train_len = len(self.train_dataloader())
if self.hparams.max_steps > 0:
t_total = self.hparams.max_steps
self.hparams.num_train_epochs = self.hparams.max_steps // self.train_len // self.hparams.accumulate_grad_batches + 1
else:
t_total = self.train_len // self.hparams.accumulate_grad_batches * self.hparams.num_train_epochs
logger.info('{} training steps in total.. '.format(t_total))
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.hparams.weight_decay,
},
{"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)
# scheduler is called only once per epoch by default
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=t_total)
scheduler_dict = {
'scheduler': scheduler,
'interval': 'step',
'name': 'linear-schedule',
}
return [optimizer, ], [scheduler_dict,]
|
11595870
|
from monosat import *
bv1 = BitVector(4)
bv2 = BitVector(4)
assert(Solve())
Assert(bv1==15)
assert(Solve())
Assert(bv2>bv1)
assert(not Solve())
|
11595900
|
import telebot_login
from tg_bot import bot
# Other message
@bot.message_handler(
func=lambda mess: True, content_types=["text"]
)
@telebot_login.login_required_message
def other_text_handler(message):
bot.reply_to(message, "Не понимаю")
|
11595982
|
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_directories(host):
present = [
"/etc/td-agent"
]
if present:
for directory in present:
d = host.file(directory)
assert d.is_directory
assert d.exists
def test_files(host):
present = [
"/etc/td-agent/td-agent.conf",
"/etc/td-agent/plugin/out_gelf.rb",
"/opt/td-agent/embedded/bin/secure-forward-ca-generate"
]
if present:
for file in present:
f = host.file(file)
assert f.exists
assert f.is_file
def test_service(host):
present = [
"td-agent",
]
if present:
for service in present:
s = host.service(service)
assert s.is_enabled
assert s.is_running
def test_packages(host):
present = [
"td-agent"
]
if present:
for package in present:
p = host.package(package)
assert p.is_installed
|
11595983
|
import uuid
from unittest import mock
import pytest
from botocore import exceptions
from dagster import DagsterResourceFunctionError, In, Out, build_op_context, configured, job, op
from dagster_aws.s3 import S3FileHandle, S3FileManager, s3_file_manager, s3_resource
def test_s3_file_manager_write(mock_s3_resource, mock_s3_bucket):
file_manager = S3FileManager(mock_s3_resource.meta.client, mock_s3_bucket.name, "some-key")
body = b"foo"
file_handle = file_manager.write_data(body)
assert mock_s3_bucket.Object(file_handle.s3_key).get()["Body"].read() == body
file_handle = file_manager.write_data(body, ext="foo")
assert file_handle.s3_key.endswith(".foo")
assert mock_s3_bucket.Object(file_handle.s3_key).get()["Body"].read() == body
def test_s3_file_manager_read(mock_s3_resource, mock_s3_bucket):
body = b"bar"
remote_s3_object = mock_s3_bucket.Object("some-key/foo")
remote_s3_object.put(Body=body)
file_manager = S3FileManager(mock_s3_resource.meta.client, mock_s3_bucket.name, "some-key")
file_handle = S3FileHandle(mock_s3_bucket.name, "some-key/foo")
with file_manager.read(file_handle) as file_obj:
assert file_obj.read() == body
# read again. cached
remote_s3_object.delete()
with file_manager.read(file_handle) as file_obj:
assert file_obj.read() == body
def test_depends_on_s3_resource_file_manager(mock_s3_bucket):
bar_bytes = b"bar"
@op(out=Out(S3FileHandle), required_resource_keys={"file_manager"})
def emit_file(context):
return context.resources.file_manager.write_data(bar_bytes)
@op(
ins={"file_handle": In(S3FileHandle)},
required_resource_keys={"file_manager"},
)
def accept_file(context, file_handle):
local_path = context.resources.file_manager.copy_handle_to_local_temp(file_handle)
assert isinstance(local_path, str)
assert open(local_path, "rb").read() == bar_bytes
@job(resource_defs={"s3": s3_resource, "file_manager": s3_file_manager})
def s3_file_manager_test():
accept_file(emit_file())
result = s3_file_manager_test.execute_in_process(
run_config={
"resources": {
"file_manager": {
"config": {"s3_bucket": mock_s3_bucket.name, "s3_prefix": "some-prefix"}
}
},
},
)
assert result.success
keys_in_bucket = [obj.key for obj in mock_s3_bucket.objects.all()]
assert len(keys_in_bucket) == 1
file_key = list(keys_in_bucket)[0]
comps = file_key.split("/")
assert "/".join(comps[:-1]) == "some-prefix"
assert uuid.UUID(comps[-1])
@mock.patch("boto3.session.Session.resource")
@mock.patch("dagster_aws.s3.resources.S3FileManager")
def test_s3_file_manager_resource(MockS3FileManager, mock_boto3_resource):
did_it_run = dict(it_ran=False)
resource_config = {
"use_unsigned_session": True,
"region_name": "us-west-1",
"endpoint_url": "http://alternate-s3-host.io",
"s3_bucket": "some-bucket",
"s3_prefix": "some-prefix",
}
mock_s3_session = mock_boto3_resource.return_value.meta.client
@op(required_resource_keys={"file_manager"})
def test_op(context):
# test that we got back a S3FileManager
assert context.resources.file_manager == MockS3FileManager.return_value
        # make sure the file manager was initialized with the config we are supplying
MockS3FileManager.assert_called_once_with(
s3_session=mock_s3_session,
s3_bucket=resource_config["s3_bucket"],
s3_base_key=resource_config["s3_prefix"],
)
_, call_kwargs = mock_boto3_resource.call_args
mock_boto3_resource.assert_called_once_with(
"s3",
region_name=resource_config["region_name"],
endpoint_url=resource_config["endpoint_url"],
use_ssl=True,
config=call_kwargs["config"],
)
assert call_kwargs["config"].retries["max_attempts"] == 5
did_it_run["it_ran"] = True
context = build_op_context(
resources={"file_manager": configured(s3_file_manager)(resource_config)}
)
test_op(context)
assert did_it_run["it_ran"]
def test_s3_file_manager_resource_with_profile():
resource_config = {
"use_unsigned_session": True,
"region_name": "us-west-1",
"endpoint_url": "http://alternate-s3-host.io",
"s3_bucket": "some-bucket",
"s3_prefix": "some-prefix",
"profile_name": "some-profile",
}
@op(required_resource_keys={"file_manager"})
def test_op(context):
# placeholder function to test resource initialization
return context.log.info("return from test_solid")
with pytest.raises(DagsterResourceFunctionError) as e:
context = build_op_context(
resources={"file_manager": configured(s3_file_manager)(resource_config)},
)
test_op(context)
assert isinstance(e.value.user_exception, exceptions.ProfileNotFound)
assert str(e.value.user_exception) == "The config profile (some-profile) could not be found"
|
11596005
|
import torch.nn.functional as F
from torch import nn, tensor
from configuration import config
class PadUp(nn.Module):
def __init__(self, window_size):
super().__init__()
self.window_size = window_size
# TODO this is inefficient, size check should only be done once, not every forward call
def forward(self, x: tensor):
shape = x.size()
        # pad only when a minimum size is configured (-1 disables padding)
        if config.min_square_dim != -1 and shape[3] < config.min_square_dim:
return F.pad(x,
[config.min_square_dim, config.min_square_dim, config.min_square_dim, config.min_square_dim])
return x
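# Hedged usage sketch (assumes configuration.config.min_square_dim is a positive
# value such as 4): an (N, C, H, W) input whose width is below min_square_dim is
# padded by min_square_dim on every side of the last two dimensions, e.g.
# PadUp(window_size=3)(torch.zeros(1, 1, 2, 2)).shape -> torch.Size([1, 1, 10, 10]).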
|
11596008
|
import sys
from unittest.mock import call
import pytest
import ahkpy as ahk
import _ahk
def noop():
pass
@pytest.fixture
def call_spy(mocker):
return mocker.spy(_ahk, "call")
@pytest.fixture
def menu(request):
menu = ahk.Menu()
yield menu
menu.delete_menu()
def test_get_handle(menu):
menu.add("Test", noop)
handle = menu.get_handle()
assert isinstance(handle, int)
def test_add(call_spy, menu):
res = menu.add("E&xit", sys.exit, default=True, icon="c:/Windows/py.exe", icon_number=1)
call_spy.assert_has_calls([
call("Menu", menu.name, "Insert", None, "E&xit", call_spy.mock_calls[0][1][5], "P0 -Radio -Break -BarBreak"),
call("Menu", menu.name, "Default", "E&xit"),
call("Menu", menu.name, "Icon", "E&xit", "c:/Windows/py.exe", 2, None)
])
assert res is menu
call_spy.reset_mock()
menu.remove_default()
call_spy.assert_has_calls([
call("Menu", menu.name, "NoDefault"),
])
call_spy.reset_mock()
menu.add("Radio", lambda: menu.toggle_checked("Radio"), radio=True)
call_spy.assert_has_calls([
call("Menu", menu.name, "Insert", None, "Radio", call_spy.mock_calls[0][1][5], "P0 +Radio -Break -BarBreak"),
])
call_spy.reset_mock()
res = menu.add_separator()
call_spy.assert_has_calls([
call("Menu", menu.name, "Insert"),
])
assert res is menu
submenu = ahk.Menu()
call_spy.reset_mock()
submenu.add("I1", noop)
call_spy.assert_has_calls([
call("Menu", submenu.name, "Insert", None, "I1", call_spy.mock_calls[0][1][5], "P0 -Radio -Break -BarBreak"),
])
call_spy.reset_mock()
res = menu.add_submenu("Sub", submenu)
call_spy.assert_has_calls([
call("Menu", menu.name, "Insert", None, "Sub", f":{submenu.name}", "-Radio -Break -BarBreak"),
])
assert res is menu
call_spy.reset_mock()
menu.add("NewCol", noop, new_column=True)
call_spy.assert_has_calls([
call("Menu", menu.name, "Insert", None, "NewCol", call_spy.mock_calls[0][1][5], "P0 -Radio +Break -BarBreak"),
])
call_spy.reset_mock()
menu.add("NewCol", noop, new_column=True, checked=True)
call_spy.assert_has_calls([
call("Menu", menu.name, "Insert", None, "NewCol", call_spy.mock_calls[0][1][5], "P0 -Radio +Break -BarBreak"),
call("Menu", menu.name, "Check", "NewCol"),
])
call_spy.reset_mock()
menu.add("Dis", noop, enabled=False)
call_spy.assert_has_calls([
call("Menu", menu.name, "Insert", None, "Dis", call_spy.mock_calls[0][1][5], "P0 -Radio -Break -BarBreak"),
call("Menu", menu.name, "Disable", "Dis"),
])
def test_chaining(call_spy):
submenu = (
ahk.Menu()
.add("S1", noop)
.add("S2", noop)
)
call_spy.assert_has_calls([
call("Menu", submenu.name, "Insert", None, "S1", call_spy.mock_calls[0][1][5], "P0 -Radio -Break -BarBreak"),
call("Menu", submenu.name, "Insert", None, "S2", call_spy.mock_calls[1][1][5], "P0 -Radio -Break -BarBreak"),
])
call_spy.reset_mock()
menu = (
ahk.Menu()
.add("Item1", noop)
.add("Item2", noop)
.add_separator()
.add_submenu(
"My Submenu",
submenu,
)
.add_separator()
.add("Item3", noop)
)
call_spy.assert_has_calls([
call("Menu", menu.name, "Insert", None, "Item1", call_spy.mock_calls[0][1][5], "P0 -Radio -Break -BarBreak"),
call("Menu", menu.name, "Insert", None, "Item2", call_spy.mock_calls[1][1][5], "P0 -Radio -Break -BarBreak"),
call("Menu", menu.name, "Insert"),
call("Menu", menu.name, "Insert", None, "My Submenu", f":{submenu.name}", "-Radio -Break -BarBreak"),
call("Menu", menu.name, "Insert"),
call("Menu", menu.name, "Insert", None, "Item3", call_spy.mock_calls[5][1][5], "P0 -Radio -Break -BarBreak")
])
def test_insert(call_spy, menu):
menu.add("Test", noop)
with pytest.raises(TypeError, match="insert_before must not be None"):
menu.insert(None, "Test")
call_spy.reset_mock()
menu.insert("Test", "2&", noop)
call_spy.assert_has_calls([
call("Menu", menu.name, "Insert", "Test", "2&", call_spy.mock_calls[0][1][5], "P0 -Radio -Break -BarBreak"),
])
call_spy.reset_mock()
menu.insert(1, "2&", noop)
call_spy.assert_has_calls([
call("Menu", menu.name, "Insert", "2&", "2&", call_spy.mock_calls[0][1][5], "P0 -Radio -Break -BarBreak"),
])
def test_insert_submenu(call_spy, menu):
menu.add("Test", noop)
submenu = ahk.Menu().add("Nooo", noop)
with pytest.raises(TypeError, match="insert_before must not be None"):
menu.insert_submenu(None, "Sub", submenu)
call_spy.reset_mock()
menu.insert_submenu("Test", "Sub", submenu)
call_spy.assert_has_calls([
call("Menu", menu.name, "Insert", "Test", "Sub", f":{submenu.name}", '-Radio -Break -BarBreak'),
])
def test_insert_separator(call_spy, menu):
menu.add("Test", noop)
with pytest.raises(TypeError, match="insert_before must not be None"):
menu.insert_separator(None)
call_spy.reset_mock()
menu.insert_separator("Test")
call_spy.assert_has_calls([
call("Menu", menu.name, "Insert", "Test"),
])
def test_update(call_spy, menu):
menu.add("Test", noop)
with pytest.raises(TypeError, match="item_name must not be None"):
menu.update(None, new_name="Test")
call_spy.reset_mock()
menu.update("Test", callback=noop, radio=True, icon=sys.executable)
call_spy.assert_has_calls([
call("Menu", menu.name, "Add", "Test", None, "+Radio"),
call("Menu", menu.name, "Add", "Test", call_spy.mock_calls[1][1][4]),
])
call_spy.reset_mock()
menu.update(0, enabled=False)
call_spy.assert_has_calls([
call("Menu", menu.name, "Disable", "1&"),
])
call_spy.reset_mock()
menu.update(0, enabled=True)
call_spy.assert_has_calls([
call("Menu", menu.name, "Enable", "1&"),
])
call_spy.reset_mock()
menu.toggle_enabled(0)
call_spy.assert_has_calls([
call("Menu", menu.name, "ToggleEnable", "1&"),
])
call_spy.reset_mock()
menu.update(0, checked=True)
call_spy.assert_has_calls([
call("Menu", menu.name, "Check", "1&"),
])
call_spy.reset_mock()
menu.update(0, checked=False)
call_spy.assert_has_calls([
call("Menu", menu.name, "Uncheck", "1&"),
])
call_spy.reset_mock()
menu.toggle_checked(0)
call_spy.assert_has_calls([
call("Menu", menu.name, "ToggleCheck", "1&"),
])
call_spy.reset_mock()
menu.rename(0, "Renamed")
call_spy.assert_has_calls([
call("Menu", menu.name, "Rename", "1&", "Renamed"),
])
call_spy.reset_mock()
menu.update(0, new_name="<NAME>", callback=lambda: print("New name"))
call_spy.assert_has_calls([
call("Menu", menu.name, "Add", "1&", call_spy.mock_calls[0][1][4]),
call("Menu", menu.name, "Rename", "1&", "New name")
])
def test_update_separator(child_ahk):
def code():
import ahkpy as ahk
menu = ahk.Menu()
menu.add_separator()
menu.update(0, new_name="Test", callback=lambda: print("ok01"))
print("ok00")
menu.show()
child_ahk.popen_code(code)
child_ahk.wait(0)
ahk.sleep(0)
ahk.send("{Down}{Enter}")
child_ahk.wait(1)
def test_update_remove_icon(call_spy, menu):
menu.add("Test", noop, icon=sys.executable)
call_spy.assert_has_calls([
call("Menu", menu.name, "Insert", None, "Test", call_spy.mock_calls[0][1][5], "P0 -Radio -Break -BarBreak"),
call("Menu", menu.name, "Icon", "Test", sys.executable, 1, None)
])
call_spy.reset_mock()
menu.update("Test", icon=None)
call_spy.assert_has_calls([
call("Menu", menu.name, "NoIcon", "Test")
])
def test_update_nonexistent():
menu = ahk.Menu()
with pytest.raises(ahk.Error, match="Menu does not exist"):
menu.update("Nonexistent", new_name="Fails")
menu.add("Test", noop)
with pytest.raises(ahk.Error, match="Nonexistent menu item"):
menu.update("Nonexistent", new_name="Fails")
def test_delete_item(call_spy, menu):
menu.add("Test", noop)
menu.delete_item("Test")
call_spy.assert_has_calls([
call("Menu", menu.name, "Delete", "Test")
])
menu.add("Test", noop)
call_spy.reset_mock()
menu.delete_item(0)
call_spy.assert_has_calls([
call("Menu", menu.name, "Delete", "1&")
])
def test_delete_all_items(call_spy, menu):
menu.add("Test", noop)
menu.delete_all_items()
call_spy.assert_has_calls([
call("Menu", menu.name, "DeleteAll")
])
def test_set_color(call_spy, menu):
menu.add("Test", lambda: None)
call_spy.reset_mock()
menu.set_color("ffffff")
call_spy.assert_has_calls([
call("Menu", menu.name, "Color", "ffffff", None)
])
call_spy.reset_mock()
menu.set_color("ffffff", affects_submenus=False)
call_spy.assert_has_calls([
call("Menu", menu.name, "Color", "ffffff", "Single")
])
def test_tray_icon(request, call_spy):
request.addfinalizer(lambda: ahk.tray_menu.set_tray_icon(None))
request.addfinalizer(lambda: setattr(ahk.tray_menu, "tip", None))
assert ahk.tray_menu.tray_icon_file is None
assert ahk.tray_menu.tray_icon_number is None
call_spy.reset_mock()
ahk.tray_menu.set_tray_icon(sys.executable, number=2, affected_by_suspend=True)
ahk.tray_menu.set_tray_icon(affected_by_suspend=False)
call_spy.assert_has_calls([
call("Menu", "tray", "Icon", sys.executable, 3, "0"),
call("Menu", "tray", "Icon", "", None, "1")
])
assert ahk.tray_menu.tray_icon_file == sys.executable
assert ahk.tray_menu.tray_icon_number == 2
ahk.tray_menu.tray_icon_file = None
assert ahk.tray_menu.tray_icon_file is None
assert ahk.tray_menu.tray_icon_number is None
ahk.tray_menu.tray_icon_file = sys.executable
ahk.tray_menu.tray_icon_number = 2
assert ahk.tray_menu.tray_icon_file == sys.executable
assert ahk.tray_menu.tray_icon_number == 2
assert ahk.tray_menu.is_tray_icon_visible is True
ahk.tray_menu.is_tray_icon_visible = False
assert ahk.tray_menu.is_tray_icon_visible is False
ahk.tray_menu.is_tray_icon_visible = True
assert ahk.tray_menu.is_tray_icon_visible is True
ahk.tray_menu.hide_tray_icon()
assert ahk.tray_menu.is_tray_icon_visible is False
ahk.tray_menu.show_tray_icon()
assert ahk.tray_menu.is_tray_icon_visible is True
ahk.tray_menu.toggle_tray_icon()
assert ahk.tray_menu.is_tray_icon_visible is False
ahk.tray_menu.toggle_tray_icon()
assert ahk.tray_menu.is_tray_icon_visible is True
    assert ahk.tray_menu.tip is None
    ahk.tray_menu.tip = "Nooo"
    assert ahk.tray_menu.tip == "Nooo"
    ahk.tray_menu.tip = None
    assert ahk.tray_menu.tip is None
call_spy.reset_mock()
ahk.tray_menu.set_clicks(1)
call_spy.assert_has_calls([
call("Menu", "tray", "Click", 1),
])
|
11596030
|
import re
import lib.core.common
__product__ = "4D"
__description__ = (
"4D web application deployment server"
)
def search(html, **kwargs):
headers = kwargs.get("headers", None)
plugin_detection_schema = (
re.compile(r"/^4D_v[\d]{1,2}(_SQL)?\/([\d\.]+)$/", re.I),
)
for plugin in plugin_detection_schema:
if plugin.search(headers.get(lib.core.common.HTTP_HEADER.SERVER, "")) is not None:
return True
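# Illustrative only (not part of the original plugin): a response header such as
# "Server: 4D_v13_SQL/13.1.0" is the kind of value the pattern above is intended to flag.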
|
11596084
|
from .helpers import ResourceBase, IterableResource, Nested
from .errors import ok_or_error, response_or_error
from .compat import update_doc
from enum import Enum
API_NAME = 'branch-permissions'
API_VERSION = '2.0'
API_OVERRIDE_PATH = '{0}/{1}'.format(API_NAME, API_VERSION)
class Matcher(Enum):
"""Valid values for the matcher_type for Restriction create/update"""
PATTERN = 'PATTERN'
BRANCH = 'BRANCH'
MODEL_CATEGORY = 'MODEL_CATEGORY'
MODEL_BRANCH = 'MODEL_BRANCH'
class RestrictionType(Enum):
"""Valid values for the restriction_type for Restriction create/update"""
PULL_REQUEST = 'pull-request-only'
FAST_FORWARD = 'fast-forward-only'
NO_DELETES = 'no-deletes'
READ_ONLY = 'read-only'
class Restriction(ResourceBase):
def __init__(self, id, url, client, parent):
super(Restriction, self).__init__(url, client, parent, API_OVERRIDE_PATH)
self._id = id
@response_or_error
def get(self):
"""
Retrieve a restriction
"""
return self._client.get(self.url())
@ok_or_error
def delete(self):
"""
Delete a restriction
"""
return self._client.delete(self.url())
@staticmethod
def request_data(match, users, groups, keys, restriction_type,
matcher_type):
data = dict(type=restriction_type.value)
data['matcher'] = dict(type={'id': matcher_type.value}, id=match)
if users is not None:
data['users'] = users
if groups is not None:
data['groups'] = groups
if keys is not None:
data['accessKeys'] = keys
return data
@response_or_error
def update(self, match, users=None, groups=None, keys=None,
restriction_type=RestrictionType.READ_ONLY,
matcher_type=Matcher.PATTERN):
"""
Re-restrict a branch, or set of branches defined by a pattern,
to a set of users, groups, and access keys.
Warning: The REST API does not actually support a direct update of
branch permissions. The Restriction will be deleted and recreated instead.
        Note: access keys need to be specified by their numerical id; labels are
not accepted.
"""
data = self.request_data(match, users, groups, keys, restriction_type,
matcher_type)
self.delete()
return self._client.post(self._parent.url(), data=data)
class Restrictions(ResourceBase, IterableResource):
def __init__(self, url, client, parent):
ResourceBase.__init__(self, url, client, parent, API_OVERRIDE_PATH)
def __getitem__(self, item):
return Restriction(item, self.url(item), self._client, self)
@response_or_error
def create(self, match, users=None, groups=None, keys=None,
restriction_type=RestrictionType.READ_ONLY,
matcher_type=Matcher.PATTERN):
"""
Restrict a branch, or set of branches defined by a pattern,
to a set of users, groups, and access keys.
        Note: access keys need to be specified by their numerical id; labels are
not accepted.
"""
data = Restriction.request_data(match, users, groups, keys,
restriction_type, matcher_type)
return self._client.post(self.url(""), data=data)
update_doc(Restrictions.all, """Retrieve list of restrictions for a repo""")
class BranchPermissions(ResourceBase):
"""Simple parent resource for this api, to distinguish restrictions from anything else"""
restrictions = Nested(Restrictions)
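# Hedged illustration (made-up values) of the payload shape produced by Restriction.request_data:
# Restriction.request_data("release/*", users=["some.user"], groups=None, keys=None,
#                          restriction_type=RestrictionType.NO_DELETES,
#                          matcher_type=Matcher.PATTERN)
# returns:
# {'type': 'no-deletes',
#  'matcher': {'type': {'id': 'PATTERN'}, 'id': 'release/*'},
#  'users': ['some.user']}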
|
11596135
|
from ..configuration.configuration import Configuration
from ..interfaces.plugin import Plugin
from ..interfaces.state import State
from ..utilities.attributedict import AttributeDict
import aiotelegraf
from datetime import datetime
import logging
import platform
logger = logging.getLogger("cobald.runtime.tardis.plugins.telegrafmonitoring")
class TelegrafMonitoring(Plugin):
"""
The :py:class:`~tardis.plugins.telegrafmonitoring.TelegrafMonitoring`
implements an interface to monitor state changes of the Drones in a telegraf
service running a UDP input module.
"""
def __init__(self):
config = Configuration().Plugins.TelegrafMonitoring
host = config.host
port = config.port
default_tags = dict(tardis_machine_name=platform.node())
default_tags.update(getattr(config, "default_tags", {}))
self.metric = getattr(config, "metric", "tardis_data")
self.client = aiotelegraf.Client(host=host, port=port, tags=default_tags)
async def notify(self, state: State, resource_attributes: AttributeDict) -> None:
"""
Push changed state and updated meta-data of the drone into the telegraf server
:param state: New state of the Drone
:type state: State
:param resource_attributes: Contains all meta-data of the Drone (created and
updated timestamps, dns name, unique id, site_name, machine_type, etc.)
:type resource_attributes: AttributeDict
:return: None
"""
logger.debug(f"Drone: {str(resource_attributes)} has changed state to {state}")
await self.client.connect()
data = dict(
state=str(state),
created=datetime.timestamp(resource_attributes.created),
updated=datetime.timestamp(resource_attributes.updated),
)
tags = dict(
site_name=resource_attributes.site_name,
machine_type=resource_attributes.machine_type,
)
self.client.metric(self.metric, data, tags=tags)
await self.client.close()
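# Hedged configuration sketch (attribute names follow the reads in __init__ above; host/port values are
# placeholders, and default_tags/metric are optional):
# Plugins:
#   TelegrafMonitoring:
#     host: telegraf.example.local
#     port: 8092
#     default_tags:
#       experiment: tardis-demo
#     metric: tardis_data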
|
11596142
|
from __future__ import unicode_literals
import re
PROTOCOL = r'[a-zA-Z]{0,64}:?//'
WEB_PROTOCOL = r'(?:(?:(?:https?|ftp|wss?):)?//)'
HTTP_PROTOCOL = r'https?://'
PROTOCOL_RE = re.compile(r'^%s' % PROTOCOL)
WEB_PROTOCOL_RE = re.compile(r'^%s' % WEB_PROTOCOL)
HTTP_PROTOCOL_RE = re.compile(r'^%s' % HTTP_PROTOCOL)
# Adapted from:
# - https://gist.github.com/dperini/729294
# - https://gist.github.com/pchc2005/b5f13e136a9c9bb2984e5b92802fc7c9
URL = (
# protocol identifier
# "(?:(?:(?:https?|ftp):)?//)"
# user:pass authentication
r"(?:\S+(?::\S*)?@)?"
r"(?:"
# IP address exclusion
# private & local networks
# r"(?!(?:10|127)(?:\.\d{1,3}){3})"
# r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})"
# r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})"
# IP address dotted notation octets
# excludes loopback network 0.0.0.0
# excludes reserved space >= 172.16.17.32
# excludes network & broadcast addresses
# (first & last IP address of each class)
r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])"
r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}"
r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))"
r"|"
r"localhost"
r"|"
# host & domain names, may end with dot
# can be replaced by a shortest alternative
# r"(?![-_])(?:[-\w\u00a1-\uffff]{0,63}[^-_]\.)+"
# r"(?:(?:[a-z\u00a1-\uffff0-9]-?)*[a-z\u00a1-\uffff0-9]+)"
# # domain name
# r"(?:\.(?:[a-z\u00a1-\uffff0-9]-?)*[a-z\u00a1-\uffff0-9]+)*"
r"(?:"
r"(?:"
r"[a-z0-9\u00a1-\uffff]"
r"[a-z0-9\u00a1-\uffff_-]{0,62}"
r")?"
r"[a-z0-9\u00a1-\uffff]\."
r")+"
# TLD identifier name, may end with dot
r"(?:[a-z\u00a1-\uffff]{2,}\.?)"
r")"
# port number (optional)
r"(?::\d{2,5})?"
# resource path (optional)
# r"(?:[/?#]\S*)?"
)
RESOURCE_PATH = r"(?:[/?#]\S*)?"
RELAXED_RESOURCE_PATH = r"(?:[/?#][\S ]*)?"
SPECIAL_HOSTS_RE = re.compile(r'^localhost|(\d{1,3}\.){3}\d{1,3}|\[[\da-f]*:[\da-f:]*\]$', re.I)
URL_RE = re.compile(
r'^(?:%s)?%s$' % (PROTOCOL, URL + RESOURCE_PATH), re.I | re.UNICODE)
URL_WITH_PROTOCOL_RE = re.compile(
r'^%s%s$' % (PROTOCOL, URL + RESOURCE_PATH), re.I | re.UNICODE)
RELAXED_URL = re.compile(
r'^(?:%s)?%s$' % (PROTOCOL, URL + RELAXED_RESOURCE_PATH), re.I | re.UNICODE)
RELAXED_URL_WITH_PROTOCOL_RE = re.compile(
r'^%s%s$' % (PROTOCOL, URL + RELAXED_RESOURCE_PATH), re.I | re.UNICODE)
URL_IN_TEXT_RE = re.compile(
r'(%s)%s' % (PROTOCOL, URL + RESOURCE_PATH), re.I | re.UNICODE)
URL_IN_HTML_RE = re.compile(
r"<a\s.*?href=(?:\"([.#]+?)\"|\'([.#]+?)\'|([^\s]+?))(?:>|\s.*?>)(?:.*?)<[/ ]?a>",
re.DOTALL | re.IGNORECASE)
QUERY_VALUE_IN_URL_TEMPLATE = r'(?:^|[?&])%s=([^&]+)'
QUERY_VALUE_TEMPLATE = r'%s=([^&]+)'
DOMAIN_TEMPLATE = r'^(?:https?:)?(?://)?(?:\S+(?::\S*)?@)?%s(?:[:/#]|\s*$)'
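# Minimal usage sketch (illustrative values, not part of the original module):
# >>> bool(URL_RE.match('https://example.com/path?q=1'))
# True
# >>> re.search(QUERY_VALUE_IN_URL_TEMPLATE % 'q', 'https://example.com/path?q=1').group(1)
# '1'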
|
11596178
|
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for diag operator"""
# Configs for PT diag operator
diag_configs_short = op_bench.config_list(
attr_names=['dim', 'M', 'N', 'diagonal', 'out'],
attrs=[
[1, 64, 64, 0, True],
[2, 128, 128, -10, False],
[1, 256, 256, 20, True],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
},
tags=['short'],
)
class DiagBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dim, M, N, diagonal, out, device):
self.input = torch.rand(M, N, device=device) if dim == 2 else torch.rand(M, device=device)
self.diagonal = diagonal
self.out = torch.tensor((),) if out else None
self.set_module_name('diag')
def forward(self):
return torch.diag(self.input, diagonal=self.diagonal, out=self.out)
op_bench.generate_pt_test(diag_configs_short, DiagBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
|
11596180
|
from __future__ import print_function
import ttfw_idf
@ttfw_idf.idf_example_test(env_tag='Example_ExtFlash')
def test_examples_storage_ext_flash_fatfs(env, extra_data):
dut = env.get_dut('ext_flash_fatfs', 'examples/storage/ext_flash_fatfs', dut_class=ttfw_idf.ESP32DUT)
dut.start_app()
dut.expect('Initialized external Flash')
dut.expect('partition \'nvs\'')
dut.expect('partition \'storage\'')
dut.expect('File written')
dut.expect('Read from file: \'Written using ESP-IDF')
if __name__ == '__main__':
test_examples_storage_ext_flash_fatfs()
|
11596208
|
from avatar2.targets import QemuTarget
from avatar2.targets import TargetStates
from avatar2.targets import action_valid_decorator_factory
class PandaTarget(QemuTarget):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
executable = kwargs.get('executable')
self.executable = (executable if executable is not None
else self._arch.get_panda_executable())
# self.protocols.monitor = self.protocols.execution
@action_valid_decorator_factory(TargetStates.STOPPED, 'monitor')
def begin_record(self, record_name):
"""
Starts recording the execution in PANDA
:param record_name: The name of the record file
"""
filename = "%s/%s" % (self.avatar.output_directory, record_name)
return self.protocols.monitor.execute_command('begin_record',
{'file_name': filename})
# self.protocols.monitor._sync_request('monitor begin_record "%s"'
# % filename, 'done')
@action_valid_decorator_factory(TargetStates.STOPPED, 'monitor')
def end_record(self):
"""
Stops recording the execution in PANDA
"""
return self.protocols.monitor.execute_command('end_record')
# self.protocols.monitor._sync_request('monitor end_record', 'done')
@action_valid_decorator_factory(TargetStates.STOPPED, 'monitor')
def begin_replay(self, replay_name, cont=True):
"""
Starts replaying a captured replay
:param replay_name: The name of the file to be replayed
:param cont: Whether execution shall automatically be resumed (default True)
"""
self.protocols.monitor.execute_command('begin_replay',
{'file_name': replay_name})
if cont is True:
self.cont()
@action_valid_decorator_factory(TargetStates.STOPPED, 'monitor')
def end_replay(self):
"""
Stops a current ongoing replay
"""
return self.protocols.monitor.execute_command('end_replay')
@action_valid_decorator_factory(TargetStates.STOPPED, 'monitor')
def load_plugin(self, plugin_name, plugin_args=None, file_name=None):
"""
Loads a PANDA plugin
:param plugin_name: The name of the plugin to be loaded
        :param plugin_args: Arguments to be passed to the plugin, separated by commas
:param file_name: Absolute path to the plugin shared object file,
in case that the default one should not be used
"""
args_dict = {'plugin_name': plugin_name}
if plugin_args:
args_dict['plugin_args'] = plugin_args
if file_name:
args_dict['file_name'] = file_name
return self.protocols.monitor.execute_command('load_plugin', args_dict)
@action_valid_decorator_factory(TargetStates.STOPPED, 'monitor')
def unload_plugin(self, plugin_name):
"""
Unloads a PANDA plugin
:param plugin_name: The name of the plugin to be unloaded
:return: True if the requested plugin was present
"""
full_plugin_name = 'panda_%s.so' % plugin_name
for plugin_dict in self.list_plugins():
if plugin_dict['name'] == full_plugin_name:
self.protocols.monitor.execute_command('unload_plugin',
{'index': plugin_dict['index']})
return True
return False
@action_valid_decorator_factory(TargetStates.STOPPED, 'monitor')
def list_plugins(self):
"""
        Lists the loaded PANDA plugins
:return: a list with the loaded panda_plugins
"""
return self.protocols.monitor.execute_command('list_plugins')
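# Hedged usage sketch (assumes `panda` is an already initialised PandaTarget in the STOPPED state,
# obtained through the usual avatar2 workflow; only methods defined above plus cont() are used):
# panda.begin_record('trace_0')
# panda.cont(); ...                   # drive the execution that should be captured, then stop the target
# panda.end_record()
# panda.begin_replay('trace_0')       # replays the capture and resumes automatically (cont=True)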
|
11596220
|
import math
from easydict import EasyDict as edict
import torch
import torch.nn as nn
from common.utility.image_processing_cv import flip
from common_pytorch.common_loss.heatmap_label import generate_gaussian_heatmap_label
from common_pytorch.common_loss.weighted_mse import weighted_mse_loss, weighted_l1_loss, weighted_ae_loss
# config
def get_default_loss_config():
config = edict()
# gaussian
config.heatmap_type = 'gaussian'
config.loss_type = 'L2'
config.sigma = 2
config.feat_stride = 4
# ae
config.useAE = True
config.ae_weight = 1.0
config.ae_expect_dist = 12.0
config.ae_feat_dim = 1
# centerNet
config.useCenterNet = False
return config
# config
# define loss
def _assert_no_grad(tensor):
assert not tensor.requires_grad, \
"nn criterions don't compute the gradient w.r.t. targets - please " \
"mark these tensors as not requiring gradients"
def heatmap_loss(loss_type, size_average):
if loss_type == 'L1':
return weighted_l1_loss
elif loss_type == 'L2':
return weighted_mse_loss
elif loss_type == 'MSE':
return nn.MSELoss(size_average=size_average)
else:
assert 0, 'Wrong loss type, current %s' % loss_type
class GaussianAEHeatmapLoss(nn.Module):
def __init__(self, loss_config, size_average=True):
super(GaussianAEHeatmapLoss, self).__init__()
self.size_average = size_average
self.loss_type = loss_config.loss_type.upper()
self.ae_loss_weight = loss_config.ae_weight
self.ae_expect_dist = loss_config.ae_expect_dist
self.ae_feat_dim = loss_config.ae_feat_dim
self.useAE = loss_config.useAE
self.hm_loss = heatmap_loss(self.loss_type, size_average)
def forward(self, heatmaps, tagmaps, gt_heatmaps, gt_loc):
keypoints = gt_loc.type(dtype=torch.int64)
_assert_no_grad(gt_heatmaps)
_assert_no_grad(keypoints)
batchsize, num_points, _, _ = gt_heatmaps.shape
gt_heatmaps = gt_heatmaps.reshape(batchsize, num_points, -1)
heatmaps = heatmaps.reshape(batchsize, num_points, -1)
hm_loss = self.hm_loss(heatmaps, gt_heatmaps)
if self.useAE and not math.isclose(self.ae_loss_weight, 0):
tagmaps = tagmaps.reshape(batchsize, -1)
ae_loss = weighted_ae_loss(tagmaps, keypoints, self.ae_expect_dist, self.ae_feat_dim)
return hm_loss + self.ae_loss_weight * ae_loss
return hm_loss
# define label
def generate_heatmap_label(config, patch_width, patch_height, window):
type = config.heatmap_type
sigma = config.sigma
feat_stride = config.feat_stride
if 'gaussian' in type:
return generate_gaussian_heatmap_label(feat_stride, patch_width, patch_height, window, sigma)
else:
assert 0, 'Unknown heatmap type {0}'.format(type)
# define flip merge
def merge_hm_flip_func(orgin, pFliped, flip_pair):
output_flip = flip(pFliped, dims=3)
for pair in flip_pair:
tmp = torch.zeros(output_flip[:, pair[0], :, :].shape)
tmp.copy_(output_flip[:, pair[0], :, :])
output_flip[:, pair[0], :, :].copy_(output_flip[:, pair[1], :, :])
output_flip[:, pair[1], :, :].copy_(tmp)
return (orgin + output_flip) * 0.5
def merge_tag_flip_func(orgin, pFliped, flip_pair):
output_flip = flip(pFliped, dims=3)
#todo: flip-test for multi-ae-feat-dim
for pair in flip_pair:
tmp = torch.zeros(output_flip[:, pair[0]].shape)
tmp.copy_(output_flip[:, pair[0]])
output_flip[:, pair[0]].copy_(output_flip[:, pair[1]])
output_flip[:, pair[1]].copy_(tmp)
return torch.cat((orgin, pFliped), dim=1)
# API
def get_loss_func(loss_config):
return GaussianAEHeatmapLoss(loss_config)
def get_label_func(loss_config):
return generate_heatmap_label
def get_merge_func(loss_config):
return merge_hm_flip_func, merge_tag_flip_func
# API
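# Hedged usage sketch (the tensors come from the surrounding training code; shapes are whatever the
# model and generate_heatmap_label emit):
# cfg = get_default_loss_config()
# criterion = get_loss_func(cfg)                            # GaussianAEHeatmapLoss
# loss = criterion(heatmaps, tagmaps, gt_heatmaps, gt_loc)  # heatmap L2 term plus weighted AE term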
|
11596250
|
import json
import random
import numpy as np
from copy import deepcopy
class dataloader():
def __init__(self, data_file, wordvec_file, max_len=120):
self.max_len = max_len
# Load and Pre-process word vec
print('Wordvec Loading!-----')
with open(wordvec_file,'r') as r:
ori_word_vec = json.load(r)
print('Wordvec Loaded!-----')
print('Wordvec Preprocessing!-----')
self.word2id = {}
self.word_vec_tot = len(ori_word_vec)
self.UNK = self.word_vec_tot
self.BLANK = self.word_vec_tot + 1
self.word_emb_dim = len(ori_word_vec[0]['vec'])
print("Got {} words of {} dims".format(self.word_vec_tot, self.word_emb_dim))
print("Building word_vec_mat and mapping...")
self.word_vec_mat = np.zeros((self.word_vec_tot, self.word_emb_dim), dtype=np.float32)
for cur_id, word in enumerate(ori_word_vec):
w = word['word'].lower()
self.word2id[w] = cur_id
self.word_vec_mat[cur_id, :] = word['vec']
self.word2id['UNK'] = self.UNK
self.word2id['BLANK'] = self.BLANK
print("Wordvec Preprocessed!-----")
# Load and preprocess data
print('Data Loading!-----')
with open(data_file,'r') as r:
data = json.load(r)
ori_data_len = len(data)
print('Original data has {} instances'.format(ori_data_len))
print('Data Loaded!-----')
print('Data Preprocessing!-----')
print('Delete long instances and NA, Other instances...')
unk_word_count = 0
pop_list = []
for i in range(len(data)):
# delete long instances and NA\Other instances
if (len(data[i]['sentence'])>max_len or data[i]['relation']=='NA' or data[i]['relation']=='Other'):
pop_list.append(i)
for i in range(len(data)):
if len(data)-i-1 in pop_list:
data.pop(len(data)-i-1)
for i in range(len(data)):
# delete None elements
data[i]['sentence'] = list(filter(None, data[i]['sentence']))
#ori_sentence = deepcopy(data[i]['sentence'])
# sentence-as-str to sentence-as-index
for j in range(len(data[i]['sentence'])):
data[i]['sentence'][j] = self.word2id.get(data[i]['sentence'][j].lower(),self.UNK)
if data[i]['sentence'][j] == self.UNK:
unk_word_count += 1
# print('unknown found! The sentence is:')
# print(ori_sentence)
# print('The unknown word is:')
# print(ori_sentence[j])
print('Data deletion completed, {} instances deleted, {} instances left'.
format(ori_data_len-len(data),len(data)))
print('Data_to_index completed, {} unknown words found'.
format(unk_word_count))
print('Data Preprocessed!-----')
print('Processed Data List Creating!----')
self.processed_data = []
for _,item in enumerate(data):
temp_item = {}
temp_item['sentence'] = item['sentence']
temp_item['e1_begin'] = item['head']['e1_begin']
temp_item['e1_end'] = item['head']['e1_end']
temp_item['e2_begin'] = item['tail']['e2_begin']
temp_item['e2_end'] = item['tail']['e2_end']
temp_item['relid'] = item['relid']
self.processed_data.append(temp_item)
print('Processed Data List Created!----')
self.create_lists_and_dicts()
def create_lists_and_dicts(self):
# creating some lists and dicts to be used in batching functions
print('lists_for_single_rel_mention Creating!-----')
self.lists_for_single_rel_mention = {}
self.rel_list = []
for i,item in enumerate(self.processed_data):
try:
self.lists_for_single_rel_mention[item['relid']].append(i)
except:
self.lists_for_single_rel_mention[item['relid']] = [i]
self.rel_list.append(item['relid'])
useless_single_rel_mention_list = []
for relid in self.lists_for_single_rel_mention:
if len(self.lists_for_single_rel_mention[relid])<2:
useless_single_rel_mention_list.append(relid)
for relid in useless_single_rel_mention_list:
self.lists_for_single_rel_mention.pop(relid)
print('dict_for_relids Creating!-----')
self.relid_dict = {} # remap relid to 0-? as labels for softmax loss
count = 0
for item in self.processed_data:
if item['relid'] not in self.relid_dict.keys():
self.relid_dict[item['relid']] = count
count += 1
def select_relation(self,select_relid_list):
new_processed_data = []
for item in self.processed_data:
if item['relid'] in select_relid_list:
new_processed_data.append(item)
self.processed_data = new_processed_data
self.create_lists_and_dicts()
def select_sample_num(self,sample_num):
self.processed_data = random.sample(self.processed_data,sample_num)
print('---The left sample number is:',len(self.processed_data))
self.create_lists_and_dicts()
# data_processing functions
def posnum_to_posarray(self, posbegin, posend, max_len=120):
if (posend < posbegin):
posend = posbegin
array1 = np.arange(0,posbegin) - posbegin
array2 = np.zeros(posend-posbegin,dtype=np.int32)
array3 = np.arange(posend,max_len) - posend
posarray = np.append(np.append(array1, array2), array3) + max_len
return posarray
def data_to_padded_idx_data(self, data, max_len=120):
'''padded_idx_data as [pos1array, pos2array, sentence]'''
padded_idx_data = np.zeros([3, max_len], dtype = np.int32)
padded_idx_data[0] = self.posnum_to_posarray(data['e1_begin'],data['e1_end'])
padded_idx_data[1] = self.posnum_to_posarray(data['e2_begin'],data['e2_end'])
padnum = max_len - len(data['sentence'])
padded_idx_data[2] = np.append(np.array(data['sentence']),np.array([self.BLANK]*padnum))
return padded_idx_data
# batching functions
def next_batch_same(self, batch_size): # return a list
batch_data_same_left = []
batch_data_same_right = []
for i in range(batch_size):
next_rel_index = random.choice(list(self.lists_for_single_rel_mention.keys()))
temp_index = random.sample(self.lists_for_single_rel_mention[next_rel_index],2)
batch_data_same_left.append(self.data_to_padded_idx_data(self.processed_data[temp_index[0]]))
batch_data_same_right.append(self.data_to_padded_idx_data(self.processed_data[temp_index[1]]))
return batch_data_same_left, batch_data_same_right
def next_batch_rand(self, batch_size, active_selector = None, select_num = 1): # return a list
batch_data_rand_left = []
batch_data_rand_right = []
idx_list = np.arange(len(self.processed_data))
rnd_list = np.random.choice(idx_list,2*batch_size*select_num)
for i in range(batch_size*select_num):
temp_index = rnd_list[2*i:2*i+2]
while 1:
if(self.processed_data[temp_index[0]]['relid'] != self.processed_data[temp_index[1]]['relid']):
batch_data_rand_left.append(self.data_to_padded_idx_data(self.processed_data[temp_index[0]]))
batch_data_rand_right.append(self.data_to_padded_idx_data(self.processed_data[temp_index[1]]))
break
else:
temp_index = np.random.choice(idx_list,2,replace=False)
if active_selector is not None:
selected_index = active_selector(batch_data_rand_left, batch_data_rand_right).argsort()[:batch_size]
batch_data_rand_left_selected = []
batch_data_rand_right_selected = []
for temp_index in selected_index:
batch_data_rand_left_selected.append(batch_data_rand_left[int(temp_index)])
batch_data_rand_right_selected.append(batch_data_rand_right[int(temp_index)])
batch_data_rand_left = batch_data_rand_left_selected
batch_data_rand_right = batch_data_rand_right_selected
return batch_data_rand_left, batch_data_rand_right
def next_batch(self, batch_size, same_ratio = 0.5, active_selector = None, select_num = 1):
same_batch_size = int(np.round(batch_size*same_ratio))
batch_data_same_left, batch_data_same_right = self.next_batch_same(same_batch_size)
batch_data_rand_left, batch_data_rand_right = self.next_batch_rand(
batch_size - same_batch_size, active_selector = active_selector, select_num = select_num)
batch_data_left = batch_data_same_left + batch_data_rand_left
batch_data_right = batch_data_same_right + batch_data_rand_right
batch_data_label = [[0]]*len(batch_data_same_left)+[[1]]*len(batch_data_rand_left)
return np.array(batch_data_left), np.array(batch_data_right), np.array(batch_data_label)
def next_batch_same_self(self,batch_size):
batch_data_same_left = []
batch_data_same_right = []
for i in range(batch_size):
temp_index = random.randint(0,len(self.processed_data)-1)
batch_data_same_left.append(self.data_to_padded_idx_data(self.processed_data[temp_index]))
batch_data_same_right.append(self.data_to_padded_idx_data(self.processed_data[temp_index]))
return batch_data_same_left, batch_data_same_right
def next_batch_rand_self(self,batch_size):
batch_data_rand_left = []
batch_data_rand_right = []
for i in range(batch_size):
temp_index = random.sample(np.arange(len(self.processed_data)).tolist(),2)
batch_data_rand_left.append(self.data_to_padded_idx_data(self.processed_data[temp_index[0]]))
batch_data_rand_right.append(self.data_to_padded_idx_data(self.processed_data[temp_index[1]]))
return batch_data_rand_left, batch_data_rand_right
def next_batch_self(self, batch_size, same_ratio = 0.5):
same_batch_size = int(np.round(batch_size*same_ratio))
batch_data_same_left, batch_data_same_right = self.next_batch_same_self(same_batch_size)
batch_data_rand_left, batch_data_rand_right = self.next_batch_rand_self(batch_size - same_batch_size)
batch_data_left = batch_data_same_left + batch_data_rand_left
batch_data_right = batch_data_same_right + batch_data_rand_right
batch_data_label = [[0]]*len(batch_data_same_left)+[[1]]*len(batch_data_rand_left)
return np.array(batch_data_left), np.array(batch_data_right), np.array(batch_data_label)
def next_batch_ul(self, batch_size):
batch_data_left = []
batch_data_right = []
for _ in range(batch_size):
temp_index = random.sample(np.arange(len(self.processed_data)).tolist(),2)
batch_data_left.append(self.data_to_padded_idx_data(self.processed_data[temp_index[0]]))
batch_data_right.append(self.data_to_padded_idx_data(self.processed_data[temp_index[1]]))
return np.array(batch_data_left,dtype=np.int32), np.array(batch_data_right,dtype=np.int32)
def next_batch_cnn(self, batch_size):
batch_size = min(len(self.processed_data),batch_size)
batch_data = []
label = []
index_list = random.sample(np.arange(len(self.processed_data)).tolist(),batch_size)
for idx in index_list:
batch_data.append(self.data_to_padded_idx_data(self.processed_data[idx]))
label.append([self.relid_dict[self.processed_data[idx]['relid']]])
return np.array(batch_data,dtype=np.int32), np.array(label,dtype=np.int32)
def _data_and_relid_to_cluster_(self, num_of_data = 2000, num_of_type = 20, balanced = True):
data_relid = []
data_to_cluster = []
if num_of_type > len(self.rel_list):
temp_type_list = self.rel_list
else:
temp_type_list = random.sample(self.rel_list,num_of_type)
for relid in temp_type_list:
if balanced == True:
temp_sample_num = int(num_of_data/num_of_type)
else:
temp_sample_num = int((0.5+random.random())*num_of_data/num_of_type)
if temp_sample_num > len(self.lists_for_single_rel_mention[relid]):
temp_sample_num = len(self.lists_for_single_rel_mention[relid])
temp_data_index = random.sample(np.arange(len(self.lists_for_single_rel_mention[relid])).tolist()
,temp_sample_num)
for index in temp_data_index:
data_to_cluster.append(self.data_to_padded_idx_data(self.processed_data[self.lists_for_single_rel_mention[relid][index]]))
data_relid += [relid]*temp_sample_num
return data_to_cluster, data_relid
def _data_(self):
data_relid = []
data_to_cluster = []
for item in self.processed_data:
data_to_cluster.append(self.data_to_padded_idx_data(item))
data_relid.append(item['relid'])
return data_to_cluster, data_relid
# data_information functions
def _word_emb_dim_(self):
return self.word_emb_dim
def _word_vec_mat_(self):
return self.word_vec_mat
if __name__=='__main__':
''' test the dataloader'''
dataloader_test = dataloader('../../data/fewrel/testset_test.json', '../../data/wordvec/word_vec.json')
batch_data_left, batch_data_right, batch_data_label = dataloader_test.next_batch(10)
print(batch_data_left[0])
print(batch_data_right[0])
print(batch_data_label)
|
11596254
|
import os as os
import subprocess as _subprocess
from .utils import netcdf2dict, psp_name
# from .input import AbinitInput
class AbiFiles:
"""
Read an 'abinit.files' file and extract the
names of input, output filenames and put them
into dictionary called 'files'.
"""
basedir = "."
filename = "abinit.files"
files = {'in': 'abinit.in',
'out': 'abinit.out',
'tmpin': 'abinit-i',
'tmpout': 'abinit-o',
'tmp': 'abinit',
'psps': []}
def __init__(self, *args, **kwargs):
"""
Creates an abifiles object
Args:
args:
                If args[0] is an existing filename, it is used as the
                'abinit.files' file and its directory becomes the
                basedir; otherwise args[0] is treated as the basedir.
kwargs:
                Valid keywords are: basedir, filename, files, in, out,
                tmpin, tmpout, tmp and psps
"""
self.inp = None
if len(args) == 1:
if os.path.isfile(args[0]):
(self.basedir, self.filename) = os.path.split(args[0])
if self.basedir == "":
self.basedir = "."
inputfile = open(args[0], "r")
self.files['in'] = inputfile.readline()[:-1]
self.files['out'] = inputfile.readline()[:-1]
self.files['tmpin'] = inputfile.readline()[:-1]
self.files['tmpout'] = inputfile.readline()[:-1]
self.files['tmp'] = inputfile.readline()[:-1]
                self.files['psps'] = list(map(str.strip, inputfile.readlines()))
elif os.path.isdir(args[0]):
self.basedir = args[0]
elif not os.path.exists(args[0]):
self.basedir = args[0]
if len(kwargs) > 0:
if 'basedir' in kwargs:
self.basedir = kwargs['basedir']
print(self.filename)
if 'filename' in kwargs:
self.filename = kwargs['filename']
if 'files' in kwargs:
self.files['in'] = kwargs['files'] + ".in"
self.files['out'] = kwargs['files'] + ".out"
self.files['tmpin'] = kwargs['files'] + "-i"
self.files['tmpout'] = kwargs['files'] + "-o"
self.files['tmp'] = kwargs['files']
for x in ['in', 'out', 'tmpin', 'tmpout', 'tmp', 'psps']:
if x in kwargs:
self.files[x] = kwargs[x]
def check(self):
if os.path.exists(self.filename):
print("ABINIT files exists: %s" % self.filename)
else:
print("WARNING: ABINIT files does not exists: %s" % self.filename)
if os.path.exists(self.files['in']):
print("ABINIT input file exists: %s" % self.files['in'])
# abi = AbinitInput(self.files['in'])
# abi.check()
else:
print("WARNING: ABINIT input does not exists: %s" % self.files['in'])
for ifile in self.files['psps']:
if os.path.exists(ifile):
print("PSP file is present: %s" % ifile)
else:
print("WARNING: PSP is not present: %s" % ifile)
def write(self, filename):
"""
        Write the file 'filename' in the usual '.files' format
:param filename: (str) Filename to write the 'abinit.files' file
"""
wf = open(filename, 'w')
wf.write(self.__str__())
wf.close()
def create(self):
"""
Create the '.files' file and the directory
        if it does not exist
"""
if not os.path.exists(self.basedir):
os.makedirs(self.basedir)
self.write(self.basedir + "/" + self.filename)
# Write the input file
if self.inp is not None:
self.inp.write(self.get_input_filename())
def __str__(self):
"""
String version of the object, shows the same
info as the contents of the .files file
"""
ret = ''
for x in ['in', 'out', 'tmpin', 'tmpout', 'tmp']:
ret = ret + self.files[x] + "\n"
for x in self.files['psps']:
ret = ret + x + "\n"
return ret
def __repr__(self):
"""
Representation of an abifiles object
"""
ret = "basedir: " + self.basedir + '\n' + \
"filename: " + self.filename + '\n' + \
" \-> in " + self.files['in'] + '\n' + \
" \-> out " + self.files['out'] + '\n' + \
" \-> tmpin " + self.files['tmpin'] + '\n' + \
" \-> tmpout " + self.files['tmpout'] + '\n' + \
" \-> tmp " + self.files['tmp'] + '\n'
for x in self.files['psps']:
ret = ret + " \-> psps " + x + '\n'
return ret
def get_input_filename(self):
"""
Return the input filename
"""
return self.basedir + "/" + self.files['in']
def get_output_filename(self):
"""
Return the output filename
"""
return self.basedir + "/" + self.files['out']
def get_out_filename(self):
"""
Return the OUT.nc filename
"""
return self.basedir + "/" + self.files['tmpout'] + '_OUT.nc'
def clean(self):
"""
Remove all the output
"""
if os.path.isdir(self.basedir):
os.remove(self.get_out_filename())
outfile = self.files['out']
outs = [x for x in os.listdir(self.basedir) if x[:len(outfile)] == outfile]
for i in outs:
os.remove(self.basedir + '/' + i)
def cleanall(self):
if os.path.isdir(self.basedir):
outfile = self.files['out']
outs = [x for x in os.listdir(self.basedir)]
for i in outs:
os.remove(self.basedir + '/' + i)
self.create()
def set_input(self, inp):
"""
Associate an inputvars object to the abifiles object
"""
self.inp = inp
def get_output(self):
"""
Return the output as a dictionary
"""
return netcdf2dict(self.get_out_filename())
def execute(self, abinit_binary):
"""
Utility that copy a given script and execute the given
command inside the directory
"""
cwd = os.getcwd()
os.chdir(self.basedir)
abifile = open(self.filename)
logfile = open('abinit.log', 'w')
_subprocess.call([abinit_binary], stdin=abifile, stdout=logfile)
logfile.close()
abifile.close()
os.chdir(cwd)
def set_psps(self, exchange='LDA', kind='FHI'):
"""
        Set the pseudopotentials according to the
        given exchange and kind
The pair (exchange,kind) could be:
('LDA','FHI')
('LDA','TM')
('GGA','FHI')
"""
if self.inp is None:
print('ABINIT input file not declared, the pseudopotentials cannot be set')
else:
self.files['psps'] = []
pspdir = os.getenv('HOME') + '/.abinit/' + exchange + '_' + kind
if isinstance(self.inp.variables['znucl'], (int, float)):
lstznucl = [self.inp.variables['znucl']]
else:
lstznucl = self.inp.variables['znucl']
for i in lstznucl:
self.files['psps'].append(pspdir + '/' + psp_name(i, exchange, kind))
|
11596256
|
from django.core.management.base import NoArgsCommand
from time import sleep, time
from django_bitcoin.utils import bitcoind
from django_bitcoin.models import BitcoinAddress
from django_bitcoin.models import Wallet
from django.conf import settings
from decimal import Decimal
class Command(NoArgsCommand):
help = """fix balances
"""
def handle_noargs(self, **options):
print "starting..."
for w in Wallet.objects.all():
w.last_balance = w.total_balance()
w.save()
|
11596267
|
import os
import torch
import torch.nn as nn
import numpy as np
from tqdm import tqdm
class sequential_ablation(object):
def __init__(self, G : nn.Module, device, args):
self.G = G
self.device = device
G.to(self.device)
self.model = args.model
self.sample_size = args.sample_size
# random sampling latent z
self.latents = torch.randn((self.sample_size, 512))
self.batch_size = args.batch_size
self.layers = [0, 1, 3, 5] if self.model == 'stylegan2' else [1, 3, 5, 7]
        # If a saved .npy file is available, load it; otherwise the frequencies are computed below.
if args.freq_path != "":
self.neurons_freq = {f'layer{i:d}' : np.load(os.path.join(args.freq_path,f'rate_layer{i:d}_{args.dataset}_{args.model}.npy')) for i in self.layers}
print("Frequencies of the neurons are loaded !")
else:
print("Calculating frequency of the neurons ...")
self.neurons_freq = self.neurons_statistic()
print("Frequencies of the neurons are calculated !")
self.r_indices, self.r_indices_ = self.index_ratio()
# sequential ablation for given latent codes.
@torch.no_grad()
def seq_abl(self, sample_idx : list, layer_idx : list, rate = '30', under = True):
assert len(sample_idx) <= self.batch_size, "len(sample_idx) should be lower than batch_size"
if under:
r_indice = [index[rate] for index in self.r_indices_.values()]
else:
r_indice = [index[rate] for index in self.r_indices.values()]
act_idx = []
sep = []
def hook_function(index):
def fn(_, __, o):
nonlocal act_idx
o = o.view(o.size(0), -1) if self.model == 'pggan' else o[0].view(o[0].size(0), -1)
# for mask
mask = o[:,index].detach().cpu() > 0
idx_expand = torch.tensor(index).expand(len(sample_idx),-1)
act_idx.append(idx_expand[mask])
sep.append(mask.numpy().sum(axis = 1).cumsum())
# ablation
o[:,index] = torch.where(o[:, index] > 0, torch.tensor(0.).to(self.device), o[:, index])
return fn
hook = [getattr(self.G if self.model == 'pggan' else self.G.synthesis, 'layer' + str(layer)).register_forward_hook(hook_function(index)) \
for index, layer in zip(r_indice, layer_idx)]
repaired_image = self.G(self.latents[sample_idx].to(self.device))['image'].detach().cpu()
for h in hook:
h.remove()
original_image = self.G(self.latents[sample_idx].to(self.device))['image'].detach().cpu()
# for masking what neurons are turned off.
mask_idx = []
for sample in range(len(sample_idx)):
temp = []
for layer in range(len(layer_idx)):
if sample == 0:
temp.append(act_idx[layer][0 : sep[layer][sample]])
else:
temp.append(act_idx[layer][sep[layer][sample - 1] : sep[layer][sample]])
mask_idx.append(temp)
return original_image, repaired_image, mask_idx
    # Detect artifact images and normal images among N (sample_num) generated images.
@torch.no_grad()
def arti_detection(self, layer_idx : list, sample_num = 30000, topn = 30, rate = '30'):
# we use new samples differ from calculating statistics.
        assert sample_num % self.batch_size == 0, 'sample_num should be divisible by batch_size.'
z = torch.randn((sample_num, 512))
total_list = []
for i in tqdm(range(sample_num // self.batch_size)):
r_indice = [index[rate] for index in self.r_indices_.values()]
temp = torch.zeros(self.batch_size) * 1.
def hook_function(index):
def fn(_, __, o):
nonlocal temp
if self.model == 'stylegan2':
mask = o[0].detach().cpu().view(self.batch_size, -1)[:, index] > 0
elif self.model == 'pggan':
mask = o.detach().cpu().view(self.batch_size, -1)[:, index] > 0
temp += mask.sum(dim = -1)
return fn
hook = [getattr(self.G if self.model == 'pggan' else self.G.synthesis, 'layer' + str(layer)).register_forward_hook(hook_function(index)) \
for index, layer in zip(r_indice, layer_idx)]
images = self.G(z[i * self.batch_size : (i+1) * self.batch_size].to(self.device))
for h in hook:
h.remove()
total_list.append(temp)
total_list = torch.cat(total_list, dim = 0)
sorted_index = total_list.numpy().argsort()
normal = sorted_index[:topn]
artifact = sorted_index[-topn:]
# for plotting images
normal_img = self.generate_images(z[normal])
artifact_img = self.generate_images(z[artifact])
return normal_img, artifact_img
    # Calculate the activation probability of each neuron empirically, since the distribution of neuron frequencies for the generator is unknown.
@torch.no_grad()
def neurons_statistic(self,):
ratio = {f'layer{i:d}' : None for i in self.layers}
def function(layer_num):
def fn(_ ,__, o):
key = 'layer' + str(layer_num)
                if ratio[key] is None:
if self.model == 'stylegan2':
ratio[key] = (o[0].detach().cpu().view(self.batch_size, -1) > 0) * 1.0
elif self.model == 'pggan':
ratio[key] = (o.detach().cpu().view(self.batch_size, -1) > 0) * 1.0
else:
if self.model == 'stylegan2':
ratio[key] += (o[0].detach().cpu().view(self.batch_size, -1) > 0) * 1.0
elif self.model == 'pggan':
ratio[key] += (o.detach().cpu().view(self.batch_size, -1) > 0) * 1.0
return fn
hook = [getattr(self.G if self.model == 'pggan' else self.G.synthesis, 'layer' + str(i)).register_forward_hook(function(i)) \
for i in self.layers]
for j in tqdm(range(self.sample_size // self.batch_size)):
temp = self.G(self.latents[j * self.batch_size : (j + 1) * self.batch_size].to(self.device))
for h in hook:
h.remove()
ratio = {key : val.sum(axis = 0) / (1. * self.sample_size) for key, val in ratio.items()}
return ratio
# neurons of indices with activation rate R in each layer.
def index_ratio(self,):
r_index = {} # neurons with activation upper rate R
r_index_ = {} # neurons with activation under rate R
for k, v in tqdm(self.neurons_freq.items()):
v = v.flatten()
temp_r_index = {}
temp_r_index_ = {}
for i in range(10,0,-1):
temp_r_index[str(i * 10)] = np.where(v >= i * 1. / 10)[0]
temp_r_index_[str(i * 10)] = np.intersect1d(np.where(v <= i * 1. / 10)[0], np.where(v > 0)[0])
r_index[k] = (temp_r_index)
r_index_[k] = (temp_r_index_)
return r_index, r_index_
@torch.no_grad()
def generate_images(self, latents : torch.tensor):
images = []
for i in range(latents.size(0) // self.batch_size):
images.append(self.G(latents[i * self.batch_size : (i + 1) * self.batch_size].to(self.device))['image'].permute(0,2,3,1).detach().cpu())
images = torch.cat(images, dim = 0)
return images
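# Hedged usage sketch (`args` is an argparse-style namespace with the attributes read in __init__ above;
# layer indices follow self.layers, i.e. [0, 1, 3, 5] for stylegan2):
# abl = sequential_ablation(G, device, args)
# originals, repaired, masks = abl.seq_abl(sample_idx=[0, 1], layer_idx=[0, 1, 3, 5], rate='30')
# normal_imgs, artifact_imgs = abl.arti_detection(layer_idx=[0, 1, 3, 5])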
|
11596288
|
import alpenglow.Getter as rs
import alpenglow as prs
class AsymmetricFactorExperiment(prs.OnlineExperiment):
"""AsymmetricFactorExperiment(dimension=10,begin_min=-0.01,begin_max=0.01,learning_rate=0.05,regularization_rate=0.0,negative_rate=20,cumulative_item_updates=True,norm_type="exponential",gamma=0.8)
Implements the recommendation model introduced in [Koren2008]_.
    .. [Paterek2007] Arkadiusz Paterek. „Improving regularized singular value decomposition for collaborative filtering”. In: Proc. KDD Cup Workshop at SIGKDD’07, 13th ACM Int. Conf. on Knowledge Discovery and Data Mining. San Jose, CA, USA, 2007, pp. 39–42.
    .. [Koren2008] Yehuda Koren. "Factorization meets the neighborhood: a multifaceted collaborative filtering model." Proceedings of the 14th ACM SIGKDD international conference on Knowledge discovery and data mining. ACM, 2008.
Parameters
----------
dimension : int
The latent factor dimension of the factormodel.
begin_min : double
The factors are initialized randomly, sampling each element uniformly from the interval (begin_min, begin_max).
begin_max : double
See begin_min.
learning_rate : double
The learning rate used in the stochastic gradient descent updates.
regularization_rate : double
The coefficient for the L2 regularization term.
negative_rate : int
The number of negative samples generated after each update. Useful for implicit recommendation.
norm_type : str
Type of time decay; either "constant", "exponential" or "disabled".
gamma : double
Coefficient of time decay in the case of **norm_type** == "exponential".
"""
def _config(self, top_k, seed):
model = rs.AsymmetricFactorModel(**self.parameter_defaults(
begin_min=-0.01,
begin_max=0.01,
dimension=10,
use_sigmoid=False,
norm_type="exponential",
gamma=0.8,
initialize_all=False
))
gradient_updater = rs.AsymmetricFactorModelGradientUpdater(**self.parameter_defaults(
learning_rate=0.05,
cumulative_item_updates=False,
))
gradient_updater.set_model(model)
simple_updater = rs.AsymmetricFactorModelUpdater()
simple_updater.set_model(model)
point_wise = rs.ObjectiveMSE()
gradient_computer = rs.GradientComputerPointWise()
gradient_computer.set_objective(point_wise)
gradient_computer.set_model(model)
gradient_computer.add_gradient_updater(gradient_updater)
negative_sample_generator = rs.UniformNegativeSampleGenerator(**self.parameter_defaults(
negative_rate=20,
initialize_all=False,
seed=928357823,
))
negative_sample_generator.add_updater(gradient_computer)
return (model, [negative_sample_generator, simple_updater], [])
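# Hedged usage sketch (the run() call and its DataFrame argument follow the generic alpenglow
# OnlineExperiment pattern and are assumptions, not defined in this file):
# experiment = AsymmetricFactorExperiment(top_k=100, seed=254938879, dimension=10, learning_rate=0.05)
# ranks = experiment.run(training_data)   # training_data: timestamped user-item interactions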
|
11596334
|
import os
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn import metrics
from sklearn.feature_extraction.text import TfidfTransformer
import pickle
import chardet
max_features = 50000
webshell_dir = "Data/WebShell/asp/"
normal_dir = "Data/normal/asp/"
white_count = 0
black_count = 0
def check_style(filepath):
with open(filepath, mode='rb') as f:
data = f.read()
style = chardet.detect(data)['encoding']
return style
def load_str(filepath):
t = ""
try:
style = check_style(filepath)
with open(filepath, encoding=style) as f:
for line in f:
line = line.strip('\r')
line = line.strip('\n')
t += line
except UnicodeDecodeError:
with open(filepath, mode='rb') as f:
t = f.read()
return t
def load_files(dir):
files_list = []
g = os.walk(dir)
for path, d, filelist in g:
for filename in filelist:
if filename.endswith('.asp'):
fulpath = os.path.join(path, filename)
print ("Load %s" % fulpath)
t = load_str(fulpath)
files_list.append(t)
return files_list
def get_feature_by_wordbag_tfidf():
global max_features
global white_count
global black_count
print ("max_features = %d" % max_features)
webshell_files_list = load_files(webshell_dir)
y1 = [1] * len(webshell_files_list)
black_count = len(webshell_files_list)
normal_files_list = load_files(normal_dir)
y2 = [0] * len(normal_files_list)
white_count = len(normal_files_list)
x = webshell_files_list + normal_files_list
y = y1 + y2
CV = CountVectorizer(ngram_range = (2, 2), decode_error = 'ignore', max_features = max_features, token_pattern = r'\b\w+\b', min_df = 1, max_df = 1.0)
x = CV.fit_transform(x).toarray()
vocabulary = CV.vocabulary_
with open('vocabulary_asp.pickle', 'wb') as f:
pickle.dump(vocabulary, f)
transformer = TfidfTransformer(smooth_idf = False)
x_tfidf = transformer.fit_transform(x)
x = x_tfidf.toarray()
return x, y
def do_metrics(y_test, y_pred):
print ("metrics.accuracy_score:")
print (metrics.accuracy_score(y_test, y_pred))
print ("metrics.confusion_matrix:")
print (metrics.confusion_matrix(y_test, y_pred))
print ("metrics.precision_score:")
print (metrics.precision_score(y_test, y_pred))
print ("metrics.recall_score:")
print (metrics.recall_score(y_test, y_pred))
def do_GNB(x, y):
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=0)
clf = GaussianNB()
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
with open('GNB_asp.pickle', 'wb') as f:
pickle.dump(clf, f)
do_metrics(y_test, y_pred)
if __name__ == '__main__':
x, y = get_feature_by_wordbag_tfidf()
print ("Load %d white files %d black files" % (white_count, black_count))
do_GNB(x, y)
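# Hedged note (not part of the original script): the two pickles written above can be reloaded for
# inference by constructing a CountVectorizer with vocabulary=<loaded vocabulary> and the same
# ngram_range/token_pattern settings, transforming the new .asp file, and calling the loaded
# GaussianNB's predict() on the resulting vector.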
|
11596360
|
class Solution:
def isOneEditDistance(self, s: str, t: str) -> bool:
sLen, tLen = len(s), len(t)
if sLen > tLen:
return self.isOneEditDistance(t, s)
if tLen - sLen > 1:
return False
for i in range(sLen):
if s[i] != t[i]:
if sLen == tLen:
return s[i + 1:] == t[i + 1:]
else:
return s[i:] == t[i + 1:]
return sLen + 1 == tLen
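# Worked examples (illustrative):
# Solution().isOneEditDistance("abc", "abxc")  -> True   (one insertion)
# Solution().isOneEditDistance("abc", "adc")   -> True   (one substitution)
# Solution().isOneEditDistance("abc", "abc")   -> False  (zero edits is not one edit)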
|
11596398
|
import json
from django.core.management.base import BaseCommand
from uniauth_saml2_idp.utils import get_idp_config
from uniauth_saml2_idp.models import MetadataStore
class Command(BaseCommand):
help = 'Metadata Query protocol'
def add_arguments(self, parser):
parser.epilog = 'Example: ./manage.py mdquery -e https://auth.unical.it/idp/metadata/ -f json'
parser.add_argument('-e', required=True,
help="Entity to request metadata for")
parser.add_argument(
            '-f', default='json', choices=['json', 'saml2'], help='output format')
parser.add_argument('-debug', required=False, action="store_true",
help="see debug message")
def handle(self, *args, **options):
idp = get_idp_config()
for md in MetadataStore.objects.filter(is_active=1,
is_valid=1):
if md.type in ('file', 'local'):
idp.metadata.load(md.type, md.url)
else:
idp.metadata.load(md.type,
url=md.url, ca_cert=md.file,
**json.loads(md.kwargs))
res = idp.metadata[options['e']]
if options['f'] == 'json':
print(json.dumps(res, indent=2))
else:
print(idp.metadata.dumps())
|
11596485
|
import random
import numpy as np
import torch
from torch.utils.data import Dataset
class DummyData(Dataset):
def __init__(
self,
max_val: int,
sample_count: int,
sample_length: int,
sparsity_percentage: int
):
r"""
A data class that generates random data.
Args:
max_val (int): the maximum value for an element
sample_count (int): count of training samples
sample_length (int): number of elements in a sample
sparsity_percentage (int): the percentage of
embeddings used by the input data in each iteration
"""
self.max_val = max_val
self.input_samples = sample_count
self.input_dim = sample_length
self.sparsity_percentage = sparsity_percentage
def generate_input():
            percentage_of_elements = (100 - self.sparsity_percentage) / float(100)
            index_count = int(self.max_val * percentage_of_elements)
elements = list(range(self.max_val))
random.shuffle(elements)
elements = elements[:index_count]
data = [
[
elements[random.randint(0, index_count - 1)]
for _ in range(self.input_dim)
]
for _ in range(self.input_samples)
]
return torch.from_numpy(np.array(data))
self.input = generate_input()
self.target = torch.randint(0, max_val, [sample_count])
def __len__(self):
return len(self.input)
def __getitem__(self, index):
return self.input[index], self.target[index]
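# Hedged usage sketch (parameter values are illustrative):
# ds = DummyData(max_val=100, sample_count=8, sample_length=5, sparsity_percentage=50)
# sample, label = ds[0]   # sample: int64 tensor of shape (5,), label: target class index in [0, 100)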
|
11596492
|
from __future__ import division
import datetime
import os
import numpy as np
from scipy import linalg
import matplotlib
if os.environ.get('DISPLAY') is None:
matplotlib.use('Agg')
else:
matplotlib.use('Qt5Agg')
from matplotlib import rcParams
import matplotlib.pyplot as plt
# import bokeh.plotting as b_plt
# from bokeh.io import vplot, hplot, output_file, show
# from bokeh.models.tools import WheelZoomTool
from alg_tools_1d import dirac_recon_time, periodicSinc, distance
# for latex rendering
os.environ['PATH'] = os.environ['PATH'] + ':/usr/texbin' + \
':/opt/local/bin' + ':/Library/TeX/texbin/'
rcParams['text.usetex'] = True
rcParams['text.latex.unicode'] = True
if __name__ == '__main__':
# various experiment settings
save_fig = True # save figure or not
fig_format = r'png' # file type used to save the figure, e.g., pdf, png, etc.
stop_cri = 'max_iter' # stopping criteria: 1) mse; or 2) max_iter
web_fig = False # generate html file for the figures
K = 5 # number of Diracs
M = K * 8 # number of Fourier samples (at least K)
tau = 1 # period of the Dirac stream
# number of time domain samples
L = (2 * M + 1)
Tmax = tau / L # the average sampling step size (had we used a uniform sampling setup)
# generate the random sampling time instances
t_samp = np.arange(0, L, dtype=float) * Tmax
t_samp += np.sign(np.random.randn(L)) * np.random.rand(L) * Tmax / 2.
# round t_samp to [0, tau)
t_samp -= np.floor(t_samp / tau) * tau
# generate parameters for the periodic stream of Diracs
B = (2. * M + 1.) / tau # bandwidth of the sampling filter
'''
# generate random values for Dirac amplitudes and locations
# amplitudes of the Diracs
ak = np.sign(np.random.randn(K)) * (1 + (np.random.rand(K) - 0.5) / 1.)
# locations of the Diracs
if K == 1:
tk = np.random.rand()
else:
a = 4. / L
uk = np.random.exponential(scale=1. / K, size=(K - 1, 1))
tk = np.cumsum(a + (1. - K * a) * (1 - 0.1 * np.random.rand()) / uk.sum() * uk)
tk = np.sort(np.hstack((np.random.rand() * tk[0] / 2., tk)) + (1 - tk[-1]) / 2.) * tau
# save Dirac parameter
time_stamp = datetime.datetime.now().strftime("%-d-%-m_%H_%M")
file_name = './data/Dirac_Data_' + time_stamp + '.npz'
np.savez(file_name, tk=tk, ak=ak, K=K, time_stamp=time_stamp)
'''
# load saved data
time_stamp = '20-12_02_22'
stored_param = np.load('./data/Dirac_Data_' + time_stamp + '.npz')
tk = stored_param['tk']
ak = stored_param['ak']
print('time stamp: ' + time_stamp +
'\n=======================================\n')
# compute the noiseless Fourier series coefficients
tk_grid, m_grid_gt = np.meshgrid(tk, np.arange(-np.floor(B * tau / 2.), 1 + np.floor(B * tau / 2.)))
x_hat_noiseless = 1. / tau * np.dot(np.exp(-2j * np.pi / tau * m_grid_gt * tk_grid), ak)
m_grid, t_samp_grid = np.meshgrid(np.arange(-np.floor(B * tau / 2.), 1 + np.floor(B * tau / 2.)), t_samp)
# build the linear transformation matrix that links x_hat with the samples
G = 1. / B * np.exp(2j * np.pi / tau * m_grid * t_samp_grid)
y_ell_noiseless = np.real(np.dot(G, x_hat_noiseless))
# add noise
P = 5
noise = np.random.randn(L)
noise = noise / linalg.norm(noise) * linalg.norm(y_ell_noiseless) * 10 ** (-P / 20.)
y_ell = y_ell_noiseless + noise
# noise energy, in the noiseless case 1e-10 is considered as 0
noise_level = np.max([1e-10, linalg.norm(noise)])
max_ini = 100 # maximum number of random initialisations
# FRI reconstruction
xhat_recon, min_error, c_opt, ini = dirac_recon_time(G, y_ell, K, noise_level, max_ini, stop_cri)
print(r'Noise level: {0:.2e}'.format(noise_level))
print(r'Minimum approximation error |a - Gb|_2: {0:.2e}'.format(min_error))
# reconstruct Diracs' locations tk
z = np.roots(c_opt)
z = z / np.abs(z)
tk_recon = np.real(tau * 1j / (2 * np.pi) * np.log(z))
tk_recon = np.sort(tk_recon - np.floor(tk_recon / tau) * tau)
# reconstruct amplitudes ak
Phi_recon = periodicSinc(np.pi * B * (np.reshape(t_samp, (-1, 1), order='F') -
np.reshape(tk_recon, (1, -1), order='F')),
B * tau)
ak_recon = np.real(linalg.lstsq(Phi_recon, y_ell)[0])
# location estimation error
t_error = distance(tk_recon, tk)[0]
# plot reconstruction
plt.close()
fig = plt.figure(num=1, figsize=(5.5, 2.5), dpi=90)
# sub-figure 1
ax1 = plt.axes([0.125, 0.59, 0.85, 0.31])
markerline211_1, stemlines211_1, baseline211_1 = \
ax1.stem(tk, ak, label='Original Diracs')
plt.setp(stemlines211_1, linewidth=1.5, color=[0, 0.447, 0.741])
plt.setp(markerline211_1, marker='^', linewidth=1.5, markersize=8,
markerfacecolor=[0, 0.447, 0.741], mec=[0, 0.447, 0.741])
plt.setp(baseline211_1, linewidth=0)
markerline211_2, stemlines211_2, baseline211_2 = \
plt.stem(tk_recon, ak_recon, label='Estimated Diracs')
plt.setp(stemlines211_2, linewidth=1.5, color=[0.850, 0.325, 0.098])
plt.setp(markerline211_2, marker='*', linewidth=1.5, markersize=10,
markerfacecolor=[0.850, 0.325, 0.098], mec=[0.850, 0.325, 0.098])
plt.setp(baseline211_2, linewidth=0)
plt.axhline(0, color='k')
plt.xlim([0, tau])
plt.ylim([1.17 * np.min(np.concatenate((ak, ak_recon, np.array(0)[np.newaxis]))),
1.17 * np.max(np.concatenate((ak, ak_recon, np.array(0)[np.newaxis])))])
# plt.xlabel(r'$t$', fontsize=12)
plt.ylabel('amplitudes', fontsize=12)
ax1.yaxis.set_label_coords(-0.095, 0.5)
plt.legend(numpoints=1, loc=0, fontsize=9, framealpha=0.3,
handletextpad=.2, columnspacing=0.6, labelspacing=0.05, ncol=2)
t_error_pow = np.int(np.floor(np.log10(t_error)))
if np.isinf(P):
plt.title(r'$K={0}$, $L={1}$, '
r'$\mbox{{SNR}}=\mbox{{inf }}$dB, '
r'$t_{{\mbox{{\footnotesize err}}}}={2:.2f}\times10^{other}$'.format(repr(K), repr(L),
t_error / 10 ** t_error_pow,
other='{' + str(
t_error_pow) + '}'),
fontsize=12)
else:
plt.title(r'$K={0}$, $L={1}$, '
r'$\mbox{{SNR}}={2}$dB, '
r'$t_{{\mbox{{\footnotesize err}}}}={3:.2f}\times10^{other}$'.format(repr(K), repr(L), repr(P),
t_error / 10 ** t_error_pow,
other='{' + str(
t_error_pow) + '}'),
fontsize=12)
# sub-figure 2
t_plt = np.linspace(0, tau, num=np.max([10 * L, 1000]))
m_plt_grid, t_plt_grid = np.meshgrid(np.arange(-np.floor(B * tau / 2.),
1 + np.floor(B * tau / 2.)),
t_plt)
G_plt = 1. / B * np.exp(2j * np.pi / tau * m_plt_grid * t_plt_grid)
y_plt = np.real(np.dot(G_plt, x_hat_noiseless)) # for plotting purposes only
ax2 = plt.axes([0.125, 0.18, 0.85, 0.31])
line212_1 = ax2.plot(t_plt, y_plt, label='Ground Truth')
plt.setp(line212_1, linestyle='-', color=[0, 0.447, 0.741], linewidth=1)
line212_2 = ax2.plot(t_samp, y_ell, label='Samples')
plt.setp(line212_2, marker='.', linestyle='None', markersize=5, color=[0.850, 0.325, 0.098])
plt.ylim([1.05 * np.min(np.concatenate((y_plt, y_ell))),
1.05 * np.max(np.concatenate((y_plt, y_ell)))])
plt.ylabel(r'$x(t) * \mathrm{{sinc}}(B t)$', fontsize=12)
plt.xlabel(r'$t$', fontsize=12)
ax2.xaxis.set_label_coords(0.5, -0.21)
ax2.yaxis.set_label_coords(-0.095, 0.5)
plt.legend(numpoints=1, loc=0, fontsize=9, framealpha=0.3,
handletextpad=.2, columnspacing=0.6, labelspacing=0.05, ncol=2)
if save_fig:
file_name = (r'./result/TSP_eg1_K_{0}_L_{1}_noise_{2}dB' +
time_stamp + r'.' + fig_format).format(repr(K), repr(L), repr(P))
plt.savefig(file_name, format=fig_format, dpi=300, transparent=True)
plt.show()
# for web rendering
# if web_fig:
# output_file('./html/eg1.html')
# TOOLS = 'pan,box_zoom,box_select,reset'
# p_hdl1 = b_plt.figure(title='K={0}, L={1}, SNR={2:.1f}dB, error={3:.2e}'.format(repr(K), repr(L), P, t_error),
# tools=TOOLS,
# x_axis_label='time', y_axis_label='amplitudes',
# plot_width=550, plot_height=220,
# x_range=(0, tau),
# y_range=(1.17 * np.min(np.concatenate((ak, ak_recon,
# np.array(0)[np.newaxis]))),
# 1.17 * np.max(np.concatenate((ak, ak_recon,
# np.array(0)[np.newaxis]))))
# )
# p_hdl1.title.text_font_size = '12pt'
# p_hdl1.add_tools(WheelZoomTool(dimensions=["width"]))
# p_hdl1.triangle(x=tk, y=ak,
# color='#0072BD',
# fill_color='#0072BD',
# line_width=1.5, size=8,
# legend='Original Diracs')
# p_hdl1.multi_line(xs=np.vstack((tk, tk)).T.tolist(),
# ys=np.vstack((np.zeros(ak.shape), ak)).T.tolist(),
# color='#0072BD',
# line_width=1.5,
# line_color='#0072BD')
# p_hdl1.diamond(x=tk_recon, y=ak_recon,
# color='#D95319',
# line_width=1.5, size=10,
# legend='Estimated Diracs')
# p_hdl1.multi_line(xs=np.vstack((tk_recon, tk_recon)).T.tolist(),
# ys=np.vstack((np.zeros(ak_recon.shape), ak_recon)).T.tolist(),
# color='#D95319',
# line_width=1.5,
# line_color='#D95319')
# p_hdl1.legend.location = 'bottom_right'
# p_hdl1.legend.border_line_alpha = 0.6
# p_hdl1.xaxis.axis_label_text_font_size = "11pt"
# p_hdl1.yaxis.axis_label_text_font_size = "11pt"
# p_hdl1.legend.legend_spacing = 1
# p_hdl1.legend.legend_padding = 5
# p_hdl1.legend.label_text_font_size = "9pt"
#
# # subplot 2
# TOOLS2 = 'pan,box_zoom,wheel_zoom,box_select,reset'
# p_hdl2 = b_plt.figure(tools=TOOLS2, x_axis_label='time', y_axis_label='lowpassed signal',
# plot_width=550, plot_height=220,
# x_range=p_hdl1.x_range,
# y_range=(1.05 * np.min(np.concatenate((y_plt, y_ell))),
# 1.05 * np.max(np.concatenate((y_plt, y_ell))))
# )
#
# p_hdl2.line(x=t_plt, y=y_plt,
# color='#0072BD',
# line_color='#0072BD',
# line_width=1.5,
# legend='Ground Truth')
# p_hdl2.circle(x=t_samp, y=y_ell,
# color='#D95319',
# fill_color='#D95319',
# line_width=1.5, size=2,
# legend='Samples')
#
# p_hdl2.xaxis.axis_label_text_font_size = "11pt"
# p_hdl2.yaxis.axis_label_text_font_size = "11pt"
# p_hdl2.legend.location = 'bottom_right'
# p_hdl2.legend.border_line_alpha = 0.6
# p_hdl2.legend.legend_spacing = 1
# p_hdl2.legend.legend_padding = 5
# p_hdl2.legend.label_text_font_size = "9pt"
#
# p_hdl = b_plt.gridplot([[p_hdl1], [p_hdl2]], toolbar_location='above')
# show(p_hdl)
|
11596494
|
import keras
import numpy as np
from keras.datasets import mnist
from keras.layers import Dropout, BatchNormalization, LeakyReLU, Dense, Input, Activation
from keras.models import Model
from keras.utils.np_utils import to_categorical
def build_model():
x = Input((28 * 28,), name="x")
hidden_dim = 512
h = x
h = Dense(hidden_dim)(h)
h = BatchNormalization()(h)
h = LeakyReLU(0.2)(h)
h = Dropout(0.5)(h)
h = Dense(hidden_dim // 2)(h)
h = BatchNormalization()(h)
h = LeakyReLU(0.2)(h)
h = Dropout(0.5)(h)
h = Dense(10)(h)
h = Activation('softmax')(h)
m = Model(x, h)
m.compile('adam', 'categorical_crossentropy', metrics=['accuracy'])
return m
def mnist_process(x, y):
return x.astype(np.float32).reshape((x.shape[0], -1)) / 255.0, to_categorical(y, 10)
def mnist_data():
data = mnist.load_data()
return [mnist_process(*d) for d in data]
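# Note: the helpers below branch on the installed Keras major version because the
# fit/fit_generator keyword arguments were renamed in Keras 2 (nb_epoch -> epochs,
# samples_per_epoch -> steps_per_epoch); both branches train the same model.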
def mnist_model(verbose=1, callbacks=[]):
m = build_model()
(xtrain, ytrain), (xtest, ytest) = mnist_data()
if int(keras.__version__.split(".")[0]) == 2:
m.fit(xtrain, ytrain, validation_data=(xtest, ytest), epochs=10, batch_size=32, verbose=verbose,
callbacks=callbacks)
else:
m.fit(xtrain, ytrain, validation_data=(xtest, ytest), nb_epoch=10, batch_size=32, verbose=verbose,
callbacks=callbacks)
def mnist_generator(n):
(xtrain, ytrain), (xtest, ytest) = mnist_data()
while True:
idx = np.random.randint(0, xtrain.shape[0], (n,))
yield xtrain[idx, ...], ytrain[idx, ...]
def mnist_model_generator(verbose=1, callbacks=[]):
m = build_model()
(xtrain, ytrain), (xtest, ytest) = mnist_data()
if int(keras.__version__.split(".")[0]) == 2:
m.fit_generator(mnist_generator(32), validation_data=(xtest, ytest), epochs=10, steps_per_epoch=32 * 20,
verbose=verbose,
callbacks=callbacks)
else:
m.fit_generator(mnist_generator(32), validation_data=(xtest, ytest), nb_epoch=10, samples_per_epoch=32 * 20,
verbose=verbose,
callbacks=callbacks)
if __name__ == "__main__":
mnist_model()
|
11596500
|
from Display import Display
from StringDisplayImpl import StringDisplayImpl
from CountDisplay import CountDisplay
if __name__ == '__main__':
d1 = Display(StringDisplayImpl("Hello, China."))
d2 = CountDisplay(StringDisplayImpl("Hello, World."))
d3 = CountDisplay(StringDisplayImpl("Hello, Universe."))
d1.display()
d2.display()
d3.display()
d3.multi_display(5)
|
11596519
|
from django.urls import reverse
from rest_framework.test import APITestCase
from rest_framework import status
from faker import Faker
from hsv_dot_beer.users.test.factories import UserFactory
from venues.test.factories import VenueFactory
from .factories import TapFactory
fake = Faker()
class TestTapDetailTestCase(APITestCase):
def setUp(self):
self.venue = VenueFactory()
self.tap = TapFactory(venue=self.venue)
self.url = reverse("tap-detail", kwargs={"pk": self.tap.pk})
self.user = UserFactory(is_staff=True)
self.client.credentials(HTTP_AUTHORIZATION=f"Token {self.user.auth_token}")
def test_get_request_returns_a_tap(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_patch_rejects_default(self):
other = TapFactory(venue=self.venue)
payload = {"tap_number": other.tap_number}
response = self.client.patch(self.url, payload)
self.assertEqual(
response.status_code, status.HTTP_400_BAD_REQUEST, response.data
)
|
11596526
|
import hyperopt
# from . import register_hpo
from .advisorbase import AdvisorBaseHPOptimizer
from .suggestion.algorithm.chocolate_grid_search import ChocolateGridSearchAlgorithm
# @register_hpo("randadvisor")
class GridAdvisorChoco(AdvisorBaseHPOptimizer):
def __init__(self, args):
super().__init__(args)
self.method = ChocolateGridSearchAlgorithm()
@classmethod
def build_hpo_from_args(cls, args):
"""Build a new hpo instance."""
return cls(args)
|
11596540
|
import pytest
from asynctest import mock
from asyncpraw.exceptions import ClientException
from asyncpraw.models import RemovalReason
from ... import IntegrationTest
class TestRemovalReason(IntegrationTest):
@mock.patch("asyncio.sleep", return_value=None)
async def test__fetch(self, _):
self.reddit.read_only = False
subreddit = await self.reddit.subreddit(pytest.placeholders.test_subreddit)
with self.use_cassette():
reason = await subreddit.mod.removal_reasons.get_reason("159bqhvme3rxe")
assert reason.title.startswith("Be Kind")
@mock.patch("asyncio.sleep", return_value=None)
async def test__fetch_int(self, _):
self.reddit.read_only = False
subreddit = await self.reddit.subreddit(pytest.placeholders.test_subreddit)
with self.use_cassette("TestRemovalReason.test__fetch"):
reason = await subreddit.mod.removal_reasons.get_reason(0)
assert isinstance(reason, RemovalReason)
@mock.patch("asyncio.sleep", return_value=None)
async def test__fetch_slice(self, _):
self.reddit.read_only = False
subreddit = await self.reddit.subreddit(pytest.placeholders.test_subreddit)
with self.use_cassette("TestRemovalReason.test__fetch"):
reasons = await subreddit.mod.removal_reasons.get_reason(slice(-3, None))
assert len(reasons) == 3
for reason in reasons:
assert isinstance(reason, RemovalReason)
@mock.patch("asyncio.sleep", return_value=None)
async def test__fetch__invalid_reason(self, _):
self.reddit.read_only = False
subreddit = await self.reddit.subreddit(pytest.placeholders.test_subreddit)
with self.use_cassette():
with pytest.raises(ClientException) as excinfo:
await subreddit.mod.removal_reasons.get_reason("invalid")
assert str(excinfo.value) == (
f"Subreddit {subreddit} does not have the removal reason invalid"
)
@mock.patch("asyncio.sleep", return_value=None)
async def test_update(self, _):
self.reddit.read_only = False
with self.use_cassette():
subreddit = await self.reddit.subreddit(pytest.placeholders.test_subreddit)
reason = await subreddit.mod.removal_reasons.get_reason("159bqhvme3rxe")
await reason.update(message="New Message", title="New Title")
@mock.patch("asyncio.sleep", return_value=None)
async def test_update_empty(self, _):
self.reddit.read_only = False
with self.use_cassette():
subreddit = await self.reddit.subreddit(pytest.placeholders.test_subreddit)
reasons = [reason async for reason in subreddit.mod.removal_reasons]
reason = reasons[0]
await reason.update()
@mock.patch("asyncio.sleep", return_value=None)
async def test_delete(self, _):
self.reddit.read_only = False
with self.use_cassette():
subreddit = await self.reddit.subreddit(pytest.placeholders.test_subreddit)
reason = await subreddit.mod.removal_reasons.get_reason("157l8fono55wf")
await reason.delete()
class TestSubredditRemovalReasons(IntegrationTest):
@mock.patch("asyncio.sleep", return_value=None)
async def test__aiter(self, _):
self.reddit.read_only = False
with self.use_cassette():
count = 0
subreddit = await self.reddit.subreddit(pytest.placeholders.test_subreddit)
async for reason in subreddit.mod.removal_reasons:
assert isinstance(reason, RemovalReason)
count += 1
assert count > 0
@mock.patch("asyncio.sleep", return_value=None)
async def test_add(self, _):
self.reddit.read_only = False
with self.use_cassette():
subreddit = await self.reddit.subreddit(pytest.placeholders.test_subreddit)
reason = await subreddit.mod.removal_reasons.add("test", "Test")
assert isinstance(reason, RemovalReason)
|
11596541
|
import tensorflow as tf
from model.common import shape_list, dense_bn_relu, dense, dense_relu, dense_tanh, split_heads, combine_last_two_dimensions, prelu
import sys
VAR2STD_EPSILON = 1e-12
def statistics_pooling(features, aux_features, endpoints, params, is_training):
"""Statistics pooling
Note that we need to take care of the zeros in the variance since the sqrt on 0 will lead to NaN.
Args:
features: A tensor with shape [batch, length, dim].
aux_features: Auxiliary input features with shape [batch, length, dim].
endpoints: Outputs of different parts of the network.
params:
is_training:
:return:
Statistics pooling result [mean, stddev] with shape [batch, dim].
"""
with tf.variable_scope("stat_pooling"):
mean = tf.reduce_mean(features, axis=1, keep_dims=True, name="mean")
variance = tf.reduce_mean(tf.squared_difference(features, mean), axis=1, keep_dims=True, name="variance")
mean = tf.squeeze(mean, 1)
variance = tf.squeeze(variance, 1)
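# Floor the variance at VAR2STD_EPSILON before the sqrt so exact zeros
# (e.g. constant frames) do not produce NaNs in the gradient.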
mask = tf.to_float(tf.less_equal(variance, VAR2STD_EPSILON))
variance = (1.0 - mask) * variance + mask * VAR2STD_EPSILON
stddev = tf.sqrt(variance)
stat_pooling = tf.concat([mean, stddev], 1, name="concat")
return stat_pooling
def self_attention(features, aux_features, endpoints, params, is_training=None):
"""Self-attention.
In this implementation, `self` is not accurate because the key and value may come from different nodes.
Note that the key should be the same length as the value, i.e. no convnet is applied after the key layer, or
some trimming strategy should be applied before the weighted sum.
Note: We do not use features in this function. The key and value are specified using params
and are extracted from endpoints.
Args:
features: A tensor with shape [batch, length, dim].
aux_features: Auxiliary input features with shape [batch, length, dim].
endpoints: Outputs of different parts of the network. Useful when doing attention.
params: Parameters for self-attention.
params.att_key_input: endpoints[params.att_key_input] is used to compute the key.
params.att_key_num_nodes: #nodes of the network to compute the key.
params.att_key_network_type: The last layer to compute the key.
In the intermediate layers, affine+bn+relu is usually applied
0: affine
1: affine + relu
2: affine + bn + relu
3: affine + tanh
params.att_value_input: endpoints[params.att_value_input] is used as the value of the component.
params.att_value_num_nodes: #nodes of the network to compute the value.
params.att_value_network_type: The last layer to compute the value (if it exists).
params.att_apply_nonlinear: The nonlinearity is applied after the attention weighted sum (default: false).
params.att_use_scale: Whether to apply a scaling factor when doing the key*query operation.
params.att_num_heads: The number of heads in multi-head attention.
params.att_split_key: Whether to split the key when multi-head attention is used.
params.att_penalty_term: The coefficient of the penalty term.
is_training: Used in BN.
:return:
Attention result. Also in the statistic format [weighted_mean, weighted_stddev]
"""
relu = tf.nn.relu
if "network_relu_type" in params.dict:
if params.network_relu_type == "prelu":
relu = prelu
if params.network_relu_type == "lrelu":
relu = tf.nn.leaky_relu
with tf.variable_scope("attention"):
value_features = endpoints[params.att_value_input]
key_features = endpoints[params.att_key_input]
# Key forward
if len(params.att_key_num_nodes) > 1:
for index, num_nodes in enumerate(params.att_key_num_nodes[:-1]):
# The intermediate layers use affine+bn+relu
key_features = dense_bn_relu(key_features, num_nodes, endpoints, params, is_training, name=("att_key%d" % index))
# The last layer has different choices
if params.att_key_network_type == 0:
key_features = dense(key_features, params.att_key_num_nodes[-1], endpoints, params, is_training,
name=("att_key%d" % (len(params.att_key_num_nodes) - 1)))
elif params.att_key_network_type == 1:
key_features = dense_relu(key_features, params.att_key_num_nodes[-1], endpoints, params, is_training,
name=("att_key%d" % (len(params.att_key_num_nodes) - 1)))
elif params.att_key_network_type == 2:
key_features = dense_bn_relu(key_features, params.att_key_num_nodes[-1], endpoints, params, is_training,
name=("att_key%d" % (len(params.att_key_num_nodes) - 1)))
elif params.att_key_network_type == 3:
key_features = dense_tanh(key_features, params.att_key_num_nodes[-1], endpoints, params, is_training,
name=("att_key%d" % (len(params.att_key_num_nodes) - 1)))
# Value forward
if len(params.att_value_num_nodes) > 0:
if len(params.att_value_num_nodes) > 1:
for index, num_nodes in enumerate(params.att_value_num_nodes[:-1]):
value_features = dense_bn_relu(value_features, num_nodes, endpoints, params, is_training,
name=("att_value%d" % index))
if params.att_value_network_type == 0:
value_features = dense(value_features, params.att_value_num_nodes[-1], endpoints, params, is_training,
name=("att_value%d" % (len(params.att_value_num_nodes) - 1)))
elif params.att_value_network_type == 1:
value_features = dense_relu(value_features, params.att_value_num_nodes[-1], endpoints, params, is_training,
name=("att_value%d" % (len(params.att_value_num_nodes) - 1)))
elif params.att_value_network_type == 2:
value_features = dense_bn_relu(value_features, params.att_value_num_nodes[-1], endpoints, params, is_training,
name=("att_value%d" % (len(params.att_value_num_nodes) - 1)))
elif params.att_value_network_type == 3:
value_features = dense_tanh(value_features, params.att_value_num_nodes[-1], endpoints, params, is_training,
name=("att_value%d" % (len(params.att_value_num_nodes) - 1)))
# The last element in att_key_num_nodes and att_value_num_nodes
# is the dimension of the key and the value. In multi-head attention, they are extended n times.
n_heads = params.att_num_heads
assert shape_list(value_features)[2] % n_heads == 0, "The dim of the value must be divisible by the number of heads."
if params.att_split_key:
assert shape_list(key_features)[2] % n_heads == 0
# Split the value and key.
value_features = split_heads(value_features, n_heads)
if params.att_split_key:
key_features = split_heads(key_features, n_heads)
else:
key_features = tf.expand_dims(key_features, axis=1)
val_shape = shape_list(value_features)
key_shape = shape_list(key_features)
tf.logging.info(
"Attention:\n"
" The dim of the value: %d, the dim of the key: %d\n"
" The layer has %d heads, resulting in the dim of value/key each head %d/%d.\n"
" With weighted mean and stddev, the attention layer results in output with dim %d."
% (val_shape[1] * val_shape[-1], key_shape[1] * key_shape[-1],
n_heads, val_shape[-1], key_shape[-1], val_shape[1] * val_shape[-1] * 2))
# Initialize the query so that the attention weights over the time steps start out roughly uniform.
# TODO: How to decide the initial number of query?
query = tf.get_variable("query", [n_heads, key_shape[-1]], dtype=tf.float32,
initializer=tf.initializers.truncated_normal(stddev=0.1))
if not params.att_split_key:
query_time_key = tf.einsum('bmld, hd->blh', key_features, query, name="query_time_key")
else:
query_time_key = tf.einsum('bhld, hd->blh', key_features, query, name="query_time_key")
if params.att_use_scale:
query_time_key = query_time_key * tf.rsqrt(tf.to_float(key_shape[-1]))
# weights is [b, h, l]
weights = tf.nn.softmax(tf.transpose(query_time_key, [0, 2, 1]), name="weights")
endpoints["attention_weights"] = weights
att_mean = tf.einsum('bhld,bhl->bhd', value_features, weights, name="att_mean")
att_stddev = tf.einsum('bhld,bhl->bhd',
tf.squared_difference(value_features, tf.expand_dims(att_mean, axis=2)), weights,
name="att_stddev")
att_mean = combine_last_two_dimensions(att_mean)
att_stddev = combine_last_two_dimensions(att_stddev)
mask = tf.to_float(tf.less_equal(att_stddev, VAR2STD_EPSILON))
att_stddev = (1.0 - mask) * att_stddev + mask * VAR2STD_EPSILON
att_stddev = tf.sqrt(att_stddev)
att = tf.concat([att_mean, att_stddev], axis=1, name="concat")
endpoints["att_output_before_nonlinear"] = att
if params.att_apply_nonlinear:
att = tf.layers.batch_normalization(att,
momentum=params.batchnorm_momentum,
training=is_training,
name="att_post_bn")
endpoints["att_post_bn"] = att
att = relu(att, name='att_post_relu')
endpoints["att_post_relu"] = att
# Penalty term when multi-head attention is used.
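# The penalty is the batch-averaged squared Frobenius norm ||W W^T - I||_F^2 of the
# per-head weight matrix W (shape [heads, length]), which pushes different heads to
# attend to different time steps.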
penalty = tf.einsum('ijk,ikl->ijl', weights, tf.transpose(weights, [0, 2, 1])) - tf.eye(n_heads, batch_shape=[val_shape[0]])
# Normalize using the batch size
penalty = tf.reduce_sum(tf.square(penalty)) / tf.to_float(val_shape[0])
penalty = params.att_penalty_term * penalty
tf.add_to_collection("PENALTY", penalty)
tf.summary.scalar("attention_penalty", penalty)
return att
def ghost_vlad(features, aux_features, endpoints, params, is_training):
"""NetVLAD and GhostVLAD
See:
NetVLAD: https://arxiv.org/abs/1511.07247
GhostVLAD: https://arxiv.org/abs/1810.09951
Args:
features: A tensor with shape [batch, length, dim].
aux_features:
endpoints: Outputs of different parts of the network.
params:
params.vlad_num_centers: #centers of the NetVLAD.
params.vlad_num_ghosts: #centers for the ghost clusters
params.vlad_key_input: The key used to compute the weights
params.vlad_key_num_nodes: #nodes of the network to compute the key.
An additional layer is applied to obtain the weights.
params.vlad_value_input: The value to be aggregated
params.vlad_value_num_nodes: #nodes of the network to compute the value.
params.vlad_final_l2_norm: Do the final L2 normalization after concatenation.
is_training: Used in BN.
:return:
"""
relu = tf.nn.relu
if "network_relu_type" in params.dict:
if params.network_relu_type == "prelu":
relu = prelu
if params.network_relu_type == "lrelu":
relu = tf.nn.leaky_relu
with tf.variable_scope("vlad"):
value_features = endpoints[params.vlad_value_input]
key_features = endpoints[params.vlad_key_input]
# Value forward -> [b, l, d]
if len(params.vlad_value_num_nodes) > 0:
for index, num_nodes in enumerate(params.vlad_value_num_nodes):
value_features = dense_bn_relu(value_features, num_nodes, endpoints, params, is_training,
name=("vlad_value%d" % index))
# Key forward
if len(params.vlad_key_num_nodes) > 0:
for index, num_nodes in enumerate(params.vlad_key_num_nodes):
key_features = dense_bn_relu(key_features, num_nodes, endpoints, params, is_training,
name=("vlad_key%d" % index))
# Affine: wx+b -> [b, l, nclusters]
key_features = tf.layers.dense(key_features,
params.vlad_num_centers + params.vlad_num_ghosts,
activation=None,
kernel_regularizer=tf.contrib.layers.l2_regularizer(params.weight_l2_regularizer),
name="vlad_weight_affine")
# The weights
A = tf.nn.softmax(key_features, axis=-1, name="vlad_weights")
endpoints["vlad_weights"] = A
# Compute the residual
cluster = tf.get_variable("vlad_centers", [params.vlad_num_centers + params.vlad_num_ghosts, shape_list(value_features)[-1]],
dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer(),
regularizer=tf.contrib.layers.l2_regularizer(params.weight_l2_regularizer))
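# Shape bookkeeping: value_features is [b, l, d] and cluster is
# [num_centers + num_ghosts, d]; broadcasting the expanded value against the
# cluster centers gives residuals of shape [b, l, num_centers + num_ghosts, d],
# and the softmax weights A are expanded to [..., 1] before the sum over time.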
res = tf.expand_dims(value_features, axis=2) - cluster
A = tf.expand_dims(A, axis=-1)
weighted_res = A * res
cluster_res = tf.reduce_sum(weighted_res, axis=1)
tf.logging.info("VLAD is used: %d clusters" % params.vlad_num_centers)
if params.vlad_num_ghosts > 0:
tf.logging.info(" %d ghost clusters is added" % params.vlad_num_ghosts)
cluster_res = cluster_res[:, :params.vlad_num_centers, :]
cluster_res = tf.nn.l2_normalize(cluster_res, axis=-1)
output = tf.reshape(cluster_res, [-1, params.vlad_num_centers * shape_list(cluster_res)[-1]])
if params.vlad_final_l2_norm:
output = tf.nn.l2_normalize(output, axis=-1)
endpoints["vlad_value"] = value_features
endpoints["vlad_key"] = key_features
endpoints["vlad_centers"] = cluster
return output
# def aux_attention(features, aux_features, endpoints, params, is_training=None):
# """Attention using auxiliary features.
#
# The attention layer has a minor problem that the length of the key may be different with the length of the value,
# due to the convnet. The key usually has the original feature length while the length of the value is shorter.
# We always using the fully-connected layer in the key network, so the length remains the same.
# A workaround is to use the center of the key to make length of the key and the value the same.
#
# Note: When auxiliary key is used, the hypothesis is that the length of this auxiliary feature is the same with the value.
#
# Args:
# features: A tensor with shape [batch, length, dim].
# aux_features: A dict.
# aux_featuers["aux_feat_name"]: The length is LONGER than features!!!
# The features is processed by convnet thus the length becomes shorter.
# TODO: How to trim the auxiliary features? Align left or center?
# endpoints: Outputs of different parts of the network.
# params: Parameters for self-attention.
# params.att_aux_name: The name of the auxiliary features.
# params.att_aux_key_input: Additional key input except for the auxiliary features.
# If None then only the auxiliary features are used.
# params.att_key_num_nodes: The network to compute the key.
# params.att_value_num_nodes: The network to compute the value.
# params.att_num_heads: The number of heads in multi-head attention.
# params.att_penalty_term: The coefficient of the penalty term.
# The final dimension of the key and the value is decided by self_att_key_num_nodes and self_att_value_num_nodes.
# If multi-head attention is used, the value will be split first (the key remains the original dim).
# is_training: Used in BN.
# :return:
# """
# assert "att_aux_name" in params.dict
# assert "att_key_input" in params.dict
# assert "att_key_num_nodes" in params.dict
# assert "att_value_num_nodes" in params.dict
# assert "att_num_heads" in params.dict
# assert "att_penalty_term" in params.dict
#
# with tf.variable_scope("attention"):
# value_features = features
# for aux_name in params.att_aux_name:
# if aux_name not in aux_features:
# sys.exit("The aux features %s is not in aux_features." % aux_name)
#
# key_features = []
# for aux_name in params.att_aux_name:
# # Center trimming. Use the center of the key to match the length of the value.
# trim_length = (shape_list(aux_features[aux_name])[1] - shape_list(value_features)[1]) / 2
# # This requires the total kernel size is a odd number.
# key_features.append(aux_features[aux_name][:, trim_length:-trim_length, :])
#
# # # TODO: If the length of the key and the value is the same, the next line is useful.
# # # But the above line looks more neat (What...).
# # key_features = tf.cond(tf.equal(trim_length, 0),
# # lambda: aux_features[aux_name],
# # lambda: aux_features[aux_name][:, trim_length:-trim_length, :])
#
# tf.logging.info("Attention using auxiliary features:")
# if params.att_key_input is not None:
# if params.att_key_input not in endpoints:
# sys.exit(
# "You specify the appended key %s, but I cannot find it in the endpoints." % params.att_key_input)
# tf.logging.info("Append %s to the auxiliary features" % params.att_key_input)
# key_features.append(endpoints[params.att_key_input])
#
# # Concatenate all the features to the key.
# key_features = tf.concat(key_features, axis=-1, name="key_features")
#
# if len(params.att_key_num_nodes) != 0:
# # According to "A STRUCTURED SELF-ATTENTIVE SENTENCE EMBEDDING",
# # the last layer of the key network is `affine + tanh`.
# if len(params.att_key_num_nodes) > 1:
# for index, node in enumerate(params.att_key_num_nodes[:-1]):
# key_features = dense_relu(key_features, node, endpoints, params, is_training, name=("att_key%d" % index))
# key_features = dense_tanh(key_features, params.att_key_num_nodes[-1], endpoints, params, is_training,
# name=("att_key%d" % (len(params.att_key_num_nodes) - 1)))
#
# if len(params.att_value_num_nodes) != 0:
# tf.logging.info("Note: Add network to process the value input %s" % value_features.name)
# for index, node in enumerate(params.att_value_num_nodes):
# value_features = dense_relu(value_features, node, endpoints, params, is_training, name=("att_value%d" % index))
#
# # The last element in self_att_key_num_nodes and self_att_value_num_nodes
# # is the dimension of the key and the value. In multi-head attention, they are extended n times.
# n_heads = params.att_num_heads
# assert shape_list(value_features)[2] % n_heads == 0, "The dim of the value must be divided by the num of heads."
#
# # Split the value. The key can use the entire vector.
# value_features = split_heads(value_features, n_heads)
# val_shape = shape_list(value_features)
# key_shape = shape_list(key_features)
#
# tf.logging.info(
# " The dim of the value: %d, the dim of the key: %d\n"
# " The layer has %d heads, resulting in the dim of value of each head %d.\n"
# " With weighted mean and stddev, the attention layer results in output with dim %d."
# % (val_shape[1] * val_shape[-1], key_shape[-1], n_heads, val_shape[-1], val_shape[1] * val_shape[-1] * 2))
#
# # Initialize query thus the weight for each time step is equal at the beginning.
# query = tf.get_variable("query", [n_heads, key_shape[-1]], dtype=tf.float32,
# initializer=tf.initializers.truncated_normal(stddev=0.1))
#
# query_time_key = tf.einsum('ijl,kl->ijk', key_features, query, name="query_time_key")
# weights = tf.nn.softmax(tf.transpose(query_time_key, [0, 2, 1]), name="weights")
#
# att_mean = tf.einsum('bnld,bnl->bnd', value_features, weights, name="att_mean")
# att_stddev = tf.einsum('bnld,bnl->bnd',
# tf.squared_difference(value_features, tf.expand_dims(att_mean, axis=2)), weights,
# name="att_stddev")
#
# att_mean = combine_last_two_dimensions(att_mean)
# att_stddev = combine_last_two_dimensions(att_stddev)
#
# mask = tf.to_float(tf.less_equal(att_stddev, VAR2STD_EPSILON))
# att_stddev = (1.0 - mask) * att_stddev + mask * VAR2STD_EPSILON
# att_stddev = tf.sqrt(att_stddev)
#
# att = tf.concat([att_mean, att_stddev], 1, name="concat")
# endpoints["attention_weights"] = weights
#
# # Penalty term
# penalty = tf.einsum('ijk,ikl->ijl', weights, tf.transpose(weights, [0, 2, 1])) - tf.eye(n_heads, batch_shape=[val_shape[0]])
# penalty = tf.reduce_sum(tf.square(penalty)) / tf.to_float(val_shape[0])
# tf.add_to_collection("PENALTY", params.att_penalty_term * penalty)
# tf.summary.scalar("attention_penalty", params.att_penalty_term * penalty)
#
# # # Debug
# # # Comment lines when running the code
# # endpoints["att_query"] = query
# # endpoints["att_key"] = key_features
# # endpoints["att_value"] = value_features
# return att
if __name__ == "__main__":
num_labels = 10
num_data = 10
num_length = 100
num_dim = 512
features = tf.placeholder(tf.float32, shape=[None, None, num_dim], name="features")
# aux_features = tf.placeholder(tf.float32, shape=[None, None, 100], name="aux_features")
# linguistic_features = tf.placeholder(tf.float32, shape=[None, None, 500], name="linguistic_features")
# linguistic_features_all = {"linguistic": linguistic_features}
from collections import OrderedDict
endpoints = OrderedDict()
endpoints["value"] = features
endpoints["key"] = features
from misc.utils import ParamsPlain
# # Self-attention (key transform)
# params = ParamsPlain()
# params.dict["att_key_input"] = "key"
# params.dict["att_key_num_nodes"] = [1500, 500]
# params.dict["att_key_network_type"] = 1
# params.dict["att_value_input"] = "value"
# params.dict["att_value_num_nodes"] = [1500]
# params.dict["att_value_network_type"] = 2
# params.dict["att_apply_nonlinear"] = False # True
# params.dict["att_use_scale"] = False # True
# params.dict["att_num_heads"] = 10
# params.dict["att_split_key"] = True # False
# params.dict["att_penalty_term"] = 1
#
# params.dict["weight_l2_regularizer"] = 1e-2
# params.dict["batchnorm_momentum"] = 0.99
#
# self_att = self_attention(None, None, endpoints, params, is_training=True)
# penalty_loss = tf.reduce_sum(tf.get_collection("PENALTY"))
# grads = tf.gradients(self_att, features)
# grads_penalty = tf.gradients(penalty_loss, features)
#
# with tf.Session() as sess:
# sess.run(tf.global_variables_initializer())
# import numpy as np
# features_val = np.random.rand(num_data, num_length, num_dim).astype(np.float32)
# features_val[0, :, :] = 1e-8 * features_val[0, :, :]
# features_val[1, :, :] = 0
# features_val[2, :, :] = 100 * features_val[2, :, :]
# features_val[3, :, :] = 100
# self_att_val, penalty_loss_val, grads_val, grads_penalty_val, endpoints_val = sess.run([self_att, penalty_loss, grads, grads_penalty, endpoints], feed_dict={features: features_val})
# key = endpoints_val["att_key"]
# value = endpoints_val["att_value"]
# query = endpoints_val["att_query"]
# att_output = endpoints_val["att_output_before_nonlinear"]
#
# from model.test_utils import compute_self_attention
# self_att_np, penalty_loss_np = compute_self_attention(value, key, query, params)
# assert not np.any(np.isnan(grads_val)), "Gradient should not be nan"
# assert not np.any(np.isnan(grads_penalty_val)), "Gradient should not be nan"
# assert np.allclose(penalty_loss_val, penalty_loss_np)
#
# for i in range(att_output.shape[0]):
# for j in range(att_output.shape[1]):
# if np.abs((att_output[i, j] - self_att_np[i ,j]) / (att_output[i, j]+1e-16)) > 1e-4:
# print("%d %d %.10f %.10f" % (i, j, att_output[i, j], self_att_np[i, j]))
# assert np.allclose(att_output, self_att_np, rtol=1e-3)
# Self-attention (key transform)
params = ParamsPlain()
params.dict["vlad_num_centers"] = 10
params.dict["vlad_num_ghosts"] = 2
params.dict["vlad_key_input"] = "key"
params.dict["vlad_key_num_nodes"] = [512]
params.dict["vlad_value_input"] = "value"
params.dict["vlad_value_num_nodes"]= []
params.dict["vlad_final_l2_norm"] = True
params.dict["weight_l2_regularizer"] = 1e-2
params.dict["batchnorm_momentum"] = 0.99
vlad = ghost_vlad(None, None, endpoints, params, is_training=True)
grads = tf.gradients(vlad, endpoints["value"])
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
import numpy as np
features_val = np.random.rand(num_data, num_length, num_dim).astype(np.float32)
features_val[0, :, :] = 1e-8 * features_val[0, :, :]
features_val[1, :, :] = 0
features_val[2, :, :] = 100 * features_val[2, :, :]
features_val[3, :, :] = 100
vlad_tf, grads_tf, endpoints_val = sess.run([vlad, grads, endpoints], feed_dict={features: features_val})
value = endpoints_val["vlad_value"]
key = endpoints_val["vlad_key"]
centers = endpoints_val["vlad_centers"]
from model.test_utils import compute_ghost_vlad
vlad_np = compute_ghost_vlad(value, key, centers, params)
assert not np.any(np.isnan(grads_tf)), "Gradient should not be nan"
assert np.allclose(vlad_tf, vlad_np)
# # Linguistic attention
# params = ParamsPlain()
# params.dict["att_aux_key_input"] = "key"
# params.dict["att_key_num_nodes"] = []
# params.dict["att_value_num_nodes"] = []
# params.dict["att_num_heads"] = 1
# params.dict["att_penalty_term"] = 1
# params.dict["weight_l2_regularizer"] = 1e-2
# params.dict["batchnorm_momentum"] = 0.99
#
# endpoints["key"] = aux_features
# att = linguistic_attention(features, linguistic_features_all, endpoints, params, is_training=True)
# penalty_loss = tf.reduce_sum(tf.get_collection("PENALTY"))
# grads = tf.gradients(att, features)
# grads_penalty = tf.gradients(penalty_loss, linguistic_features_all["linguistic"])
#
# with tf.Session() as sess:
# sess.run(tf.global_variables_initializer())
# import numpy as np
#
# features_val = np.random.rand(num_data, num_length-4, num_dim).astype(np.float32)
# features_val[0, :, :] = 1e-8 * features_val[0, :, :]
# features_val[1, :, :] = 0
# features_val[2, :, :] = 100 * features_val[2, :, :]
# features_val[3, :, :] = 100
#
# aux_features_val = np.random.rand(num_data, num_length-4, 100).astype(np.float32)
# linguistic_features_val = np.random.rand(num_data, num_length, 500).astype(np.float32)
#
# att_val, penalty_loss_val, grads_val, grads_penalty_val, endpoints_val = sess.run(
# [att, penalty_loss, grads, grads_penalty, endpoints], feed_dict={features: features_val,
# aux_features: aux_features_val,
# linguistic_features: linguistic_features_val})
# query = endpoints_val["att_query"]
# value = np.reshape(features_val, [features_val.shape[0], features_val.shape[1], params.att_num_heads, features_val.shape[2]/params.att_num_heads])
# value = np.transpose(value, [0,2,1,3])
# key = np.concatenate([linguistic_features_val[:,2:-2,:], aux_features_val], axis=-1)
#
# from model.test_utils import compute_attention
# att_np, penalty_loss_np = compute_attention(value, key, query, params)
#
# assert np.allclose(np.sum(att_val), np.sum(att_np))
# assert np.allclose(penalty_loss_val, penalty_loss_np)
#
# assert not np.any(np.isnan(grads_val)), "Gradient should not be nan"
# assert not np.any(np.isnan(grads_penalty_val)), "Gradient should not be nan"
|
11596543
|
import logging
from ...splunktalib.common import log as stclog
import six
def set_log_level(log_level):
"""
Set log level.
"""
if isinstance(log_level, six.string_types):
if log_level.upper() == "DEBUG":
stclog.Logs().set_level(logging.DEBUG)
elif log_level.upper() == "INFO":
stclog.Logs().set_level(logging.INFO)
elif log_level.upper() == "WARN":
stclog.Logs().set_level(logging.WARN)
elif log_level.upper() == "ERROR":
stclog.Logs().set_level(logging.ERROR)
elif log_level.upper() == "WARNING":
stclog.Logs().set_level(logging.WARNING)
elif log_level.upper() == "CRITICAL":
stclog.Logs().set_level(logging.CRITICAL)
else:
stclog.Logs().set_level(logging.INFO)
elif isinstance(log_level, int):
if log_level in [logging.DEBUG, logging.INFO, logging.ERROR,
logging.WARN, logging.WARNING, logging.CRITICAL]:
stclog.Logs().set_level(log_level)
else:
stclog.Logs().set_level(logging.INFO)
else:
stclog.Logs().set_level(logging.INFO)
# Global logger
logger = stclog.Logs().get_logger("cloud_connect_engine")
def reset_logger(name):
"""
Reset logger.
"""
stclog.reset_logger(name)
global logger
logger = stclog.Logs().get_logger(name)
|
11596551
|
import pycxsimulator
from pylab import *
width = 100
height = 100
initProb = 0.1
maxState = 6
def initialize():
global time, config, nextConfig
time = 0
config = zeros([height, width])
for x in range(width):
for y in range(height):
if random() < initProb:
state = maxState
else:
state = 0
config[y, x] = state
nextConfig = zeros([height, width])
def observe():
cla()
imshow(config, vmin = 0, vmax = maxState, cmap = cm.binary)
axis('image')
title('t = ' + str(time))
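# Update rule (excitable-media style): a resting cell (state 0) counts how many cells in
# its Moore neighbourhood are fully excited (state == maxState) and becomes excited with
# probability num/3 (capped at 1); an excited cell decays by one state per step.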
def update():
global time, config, nextConfig
time += 1
for x in range(width):
for y in range(height):
state = config[y, x]
if state == 0:
num = 0
for dx in range(-1, 2):
for dy in range(-1, 2):
if config[(y+dy)%height, (x+dx)%width] == maxState:
num += 1
if random() * 3 < num:
state = maxState
else:
state = 0
else:
state -= 1
nextConfig[y, x] = state
config, nextConfig = nextConfig, config
pycxsimulator.GUI().start(func=[initialize, observe, update])
|
11596560
|
import torch
from nff.utils.scatter import compute_grad
import numpy as np
import math
from ase import units
from torchmd.sovlers import odeint_adjoint, odeint
from ase.geometry import wrap_positions
'''
Here contains object for simulation and computing the equation of state
'''
class Simulations():
"""Simulation object for handing runnindg MD and logging
Attributes:
device (str or int): int for GPU, "cpu" for cpu
integrator (nn.module): function that updates force and velocity n
keys (list): name of the state variables e.g. "velocities" and "positions "
log (dict): save state vaiables in numpy arrays
solvemethod (str): integration method, current options are 4th order Runge-Kutta (rk4) and Verlet
system (torch.System): System object to contain state of molecular systems
wrap (bool): if True, wrap the coordinates based on system.cell
"""
def __init__(self,
system,
integrator,
wrap=True,
method="NH_verlet"):
self.system = system
self.device = system.device
self.integrator = integrator
self.solvemethod = method
self.wrap = wrap
self.keys = self.integrator.state_keys
self.initialize_log()
def initialize_log(self):
self.log = {}
for key in self.keys:
self.log[key] = []
def update_log(self, trajs):
for i, key in enumerate( self.keys ):
if trajs[i][0].device != 'cpu':
self.log[key].append(trajs[i][-1].detach().cpu().numpy())
else:
self.log[key].append(trajs[i][-1].detach().numpy())
def update_states(self):
if "positions" in self.log.keys():
self.system.set_positions(self.log['positions'][-1])
if "velocities" in self.log.keys():
self.system.set_velocities(self.log['velocities'][-1])
def get_check_point(self):
if hasattr(self, 'log'):
states = [torch.Tensor(self.log[key][-1]).to(self.device) for key in self.log]
if self.wrap:
wrapped_xyz = wrap_positions(self.log['positions'][-1], self.system.get_cell())
states[1] = torch.Tensor(wrapped_xyz).to(self.device)
return states
else:
raise ValueError("No log available")
def simulate(self, steps=1, dt=1.0 * units.fs, frequency=1):
if self.log['positions'] == []:
states = self.integrator.get_inital_states(self.wrap)
else:
states = self.get_check_point()
sim_epochs = int(steps//frequency)
t = torch.Tensor([dt * i for i in range(frequency)]).to(self.device)
for epoch in range(sim_epochs):
if self.integrator.adjoint:
trajs = odeint_adjoint(self.integrator, states, t, method=self.solvemethod)
else:
for var in states:
var.requires_grad = True
trajs = odeint(self.integrator, tuple(states), t, method=self.solvemethod)
self.update_log(trajs)
self.update_states()
states = self.get_check_point()
return trajs
class NVE(torch.nn.Module):
"""Equation of state for constant energy integrator (NVE ensemble)
Attributes:
adjoint (str): if True using adjoint sensitivity
dim (int): system dimensions
mass (torch.Tensor): masses of each particle
model (nn.module): energy functions that takes in coordinates
N_dof (int): total number of degree of freedoms
state_keys (list): keys of state variables "positions", "velocity" etc.
system (torchmd.System): system object
"""
def __init__(self, potentials, system, adjoint=True):
super().__init__()
self.model = potentials
self.system = system
self.mass = torch.Tensor(system.get_masses()).to(self.system.device)
self.N_dof = self.mass.shape[0] * system.dim
self.dim = system.dim
self.adjoint = adjoint
self.state_keys = ['velocities', 'positions']
def forward(self, t, state):
# pq are the canonical momentum and position variables
with torch.set_grad_enabled(True):
v = state[0]
q = state[1]
if self.adjoint:
q.requires_grad = True
p = v * self.mass[:, None]
u = self.model(q)
f = -compute_grad(inputs=q, output=u.sum(-1))
dvdt = f
return (dvdt, v)
def get_inital_states(self, wrap=True):
states = [
self.system.get_velocities(),
self.system.get_positions(wrap=wrap)]
states = [torch.Tensor(var).to(self.system.device) for var in states]
return states
class NoseHooverChain(torch.nn.Module):
"""Equation of state for NVT integrator using Nose Hoover Chain
Nosé, S. A unified formulation of the constant temperature molecular dynamics methods. The Journal of Chemical Physics 81, 511–519 (1984).
Attributes:
adjoint (str): if True using adjoint sensitivity
dim (int): system dimensions
mass (torch.Tensor): masses of each particle
model (nn.module): energy functions that takes in coordinates
N_dof (int): total number of degree of freedoms
state_keys (list): keys of state variables "positions", "velocity" etc.
system (torchmd.System): system object
num_chains (int): number of thermostat chains
Q (float): heat bath mass
T (float): temperature in energy units (eV)
target_ke (float): target kinetic energy, 0.5 * N_dof * T
"""
def __init__(self, potentials, system, T, num_chains=2, Q=1.0, adjoint=True):
super().__init__()
self.model = potentials
self.system = system
self.device = system.device # should just use system.device throughout
self.mass = torch.Tensor(system.get_masses()).to(self.device)
self.T = T # in energy unit(eV)
self.N_dof = self.mass.shape[0] * system.dim
self.target_ke = (0.5 * self.N_dof * T )
self.num_chains = num_chains
self.Q = np.array([Q,
*[Q/self.system.get_number_of_atoms()]*(num_chains-1)])
self.Q = torch.Tensor(self.Q).to(self.device)
self.dim = system.dim
self.adjoint = adjoint
self.state_keys = ['velocities', 'positions', 'baths']
def update_T(self, T):
self.T = T
def forward(self, t, state):
with torch.set_grad_enabled(True):
v = state[0]
q = state[1]
p_v = state[2]
if self.adjoint:
q.requires_grad = True
N = self.N_dof
p = v * self.mass[:, None]
sys_ke = 0.5 * (p.pow(2) / self.mass[:, None]).sum()
u = self.model(q)
f = -compute_grad(inputs=q, output=u.sum(-1))
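# Nose-Hoover chain coupling: the first bath momentum p_v[0] drags on the particle
# momenta, its own rate is driven by the excess kinetic energy 2*(KE - N_dof*T/2),
# and each subsequent chain element thermostats the bath variable before it.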
coupled_forces = (p_v[0] * p.reshape(-1) / self.Q[0]).reshape(-1, 3)
dvdt = f - coupled_forces
dpvdt_0 = 2 * (sys_ke - self.T * self.N_dof * 0.5) - p_v[0] * p_v[1]/ self.Q[1]
dpvdt_mid = (p_v[:-2].pow(2) / self.Q[:-2] - self.T) - p_v[2:]*p_v[1:-1]/ self.Q[2:]
dpvdt_last = p_v[-2].pow(2) / self.Q[-2] - self.T
return (dvdt, v, torch.cat((dpvdt_0[None], dpvdt_mid, dpvdt_last[None])))
def get_inital_states(self, wrap=True):
states = [
self.system.get_velocities(),
self.system.get_positions(wrap=wrap),
[0.0] * self.num_chains]
states = [torch.Tensor(var).to(self.system.device) for var in states]
return states
class Isomerization(torch.nn.Module):
"""Quantum isomerization equation of state.
The hamiltonian is precomputed in the new basis obtained by orthogonalizing
the original tensor product space of vibrational and rotational coordinates
Attributes:
device (int or str): device
dim (int): the size of wave function
dipole (torch.nn.Parameter): dipole operator
e_field (torch.nn.Parameter): electric field
ham (torch.nn.Parameter): hamiltonian
max_e_t (int): max time the electric field can be on
"""
def __init__(self, dipole, e_field, ham, max_e_t, device=0):
super().__init__()
self.device = device
self.dipole = dipole.to(self.device)
self.ham = ham.to(self.device)
self.dim = len(ham)
self.e_field = torch.nn.Parameter(e_field)
self.max_e_t = max_e_t
def forward(self, t, psi):
with torch.set_grad_enabled(True):
psi.requires_grad = True
# real and imaginary parts of psi
psi_R = psi[:self.dim]
psi_I = psi[self.dim:]
if t < self.max_e_t:
# find the value of E at the time closest
# to now
t_index = torch.argmin(abs(self.e_field[:, 0] - t))
e_now = self.e_field[t_index][-1]
else:
e_now = 0
# total Hamiltonian = H - mu \dot E
H_eff = self.ham - self.dipole * e_now
# d/dt of real and imaginary parts of psi
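# Splitting i * dpsi/dt = H_eff * psi (hbar = 1) into real and imaginary parts
# gives dpsi_R/dt = H_eff * psi_I and dpsi_I/dt = -H_eff * psi_R.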
dpsi_R = torch.matmul(H_eff, psi_I)
dpsi_I = -torch.matmul(H_eff, psi_R)
d_psi_dt = torch.cat((dpsi_R, dpsi_I))
return d_psi_dt
|
11596641
|
from Crypto import Random
from Crypto.Cipher import AES
import base64
from hashlib import md5
def pad(data):
length = 16 - (len(data) % 16)
return data + (chr(length) * length).encode()
def unpad(data):
return data[:-(data[-1] if isinstance(data[-1], int) else ord(data[-1]))]
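# bytes_to_key mirrors OpenSSL's legacy EVP_BytesToKey derivation with MD5 and a
# single iteration: the digest is repeatedly re-hashed with the passphrase+salt
# until enough bytes exist for the requested key+IV length.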
def bytes_to_key(data, salt, output=48):
assert len(salt) == 8, len(salt)
data += salt
key = md5(data).digest()
final_key = key
while len(final_key) < output:
key = md5(key + data).digest()
final_key += key
return final_key[:output]
def encrypt(message, passphrase):
salt = Random.new().read(8)
key_iv = bytes_to_key(passphrase, salt, 32 + 16)
key = key_iv[:32]
iv = key_iv[32:]
aes = AES.new(key, AES.MODE_CBC, iv)
return base64.b64encode(b"Salted__" + salt + aes.encrypt(pad(message)))
def decrypt(encrypted, passphrase):
encrypted = base64.b64decode(encrypted)
assert encrypted[0:8] == b"Salted__"
salt = encrypted[8:16]
key_iv = bytes_to_key(passphrase, salt, 32 + 16)
key = key_iv[:32]
iv = key_iv[32:]
aes = AES.new(key, AES.MODE_CBC, iv)
return unpad(aes.decrypt(encrypted[16:]))
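# Minimal round-trip sketch (illustrative only; both arguments are assumed to be
# bytes, since pad() and bytes_to_key() operate on byte strings):
#
#   token = encrypt(b"attack at dawn", b"hunter2")
#   assert decrypt(token, b"hunter2") == b"attack at dawn"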
|
11596644
|
udon_types_rel = {
'AINavMeshAgentRef': 'UnityEngineAINavMeshAgentRef',
'AINavMeshDataRef': 'UnityEngineAINavMeshDataRef',
'AINavMeshHitRef': 'UnityEngineAINavMeshHitRef',
'AINavMeshObstacleRef': 'UnityEngineAINavMeshObstacleRef',
'AINavMeshPathRef': 'UnityEngineAINavMeshPathRef',
'AIOffMeshLinkRef': 'UnityEngineAIOffMeshLinkRef',
'AccelerationEvent': 'UnityEngineAccelerationEvent',
'AccelerationEventArray': 'UnityEngineAccelerationEventArray',
'AdditionalCanvasShaderChannels': 'UnityEngineAdditionalCanvasShaderChannels',
'AdjustmentRuleArray': 'SystemTimeZoneInfoAdjustmentRuleArray',
'AimConstraint': 'UnityEngineAnimationsAimConstraint',
'AimConstraintArray': 'UnityEngineAnimationsAimConstraintArray',
'AmbientMode': 'UnityEngineRenderingAmbientMode',
'Animation': 'UnityEngineAnimation',
'AnimationArray': 'UnityEngineAnimationArray',
'AnimationBlendMode': 'UnityEngineAnimationBlendMode',
'AnimationClip': 'UnityEngineAnimationClip',
'AnimationClipArray': 'UnityEngineAnimationClipArray',
'AnimationClipRef': 'UnityEngineAnimationClipRef',
'AnimationCullingType': 'UnityEngineAnimationCullingType',
'AnimationCurve': 'UnityEngineAnimationCurve',
'AnimationEvent': 'UnityEngineAnimationEvent',
'AnimationEventArray': 'UnityEngineAnimationEventArray',
'AnimationPlayMode': 'UnityEngineAnimationPlayMode',
'AnimationRef': 'UnityEngineAnimationRef',
'AnimationState': 'UnityEngineAnimationState',
'AnimationStateArray': 'UnityEngineAnimationStateArray',
'AnimationStateRef': 'UnityEngineAnimationStateRef',
'AnimationTriggers': 'UnityEngineUIAnimationTriggers',
'AnimationTriggersArray': 'UnityEngineUIAnimationTriggersArray',
'AnimationsAimConstraintRef': 'UnityEngineAnimationsAimConstraintRef',
'AnimationsConstraintSourceRef': 'UnityEngineAnimationsConstraintSourceRef',
'AnimationsLookAtConstraintRef': 'UnityEngineAnimationsLookAtConstraintRef',
'AnimationsParentConstraintRef': 'UnityEngineAnimationsParentConstraintRef',
'AnimationsPositionConstraintRef': 'UnityEngineAnimationsPositionConstraintRef',
'AnimationsRotationConstraintRef': 'UnityEngineAnimationsRotationConstraintRef',
'AnimationsScaleConstraintRef': 'UnityEngineAnimationsScaleConstraintRef',
'Animator': 'UnityEngineAnimator',
'AnimatorArray': 'UnityEngineAnimatorArray',
'AnimatorClipInfo': 'UnityEngineAnimatorClipInfo',
'AnimatorClipInfoArray': 'UnityEngineAnimatorClipInfoArray',
'AnimatorClipInfoRef': 'UnityEngineAnimatorClipInfoRef',
'AnimatorControllerParameter': 'UnityEngineAnimatorControllerParameter',
'AnimatorControllerParameterArray': 'UnityEngineAnimatorControllerParameterArray',
'AnimatorControllerParameterRef': 'UnityEngineAnimatorControllerParameterRef',
'AnimatorControllerParameterType': 'UnityEngineAnimatorControllerParameterType',
'AnimatorCullingMode': 'UnityEngineAnimatorCullingMode',
'AnimatorOverrideController': 'UnityEngineAnimatorOverrideController',
'AnimatorOverrideControllerArray': 'UnityEngineAnimatorOverrideControllerArray',
'AnimatorOverrideControllerRef': 'UnityEngineAnimatorOverrideControllerRef',
'AnimatorRecorderMode': 'UnityEngineAnimatorRecorderMode',
'AnimatorRef': 'UnityEngineAnimatorRef',
'AnimatorStateInfo': 'UnityEngineAnimatorStateInfo',
'AnimatorStateInfoArray': 'UnityEngineAnimatorStateInfoArray',
'AnimatorStateInfoRef': 'UnityEngineAnimatorStateInfoRef',
'AnimatorTransitionInfo': 'UnityEngineAnimatorTransitionInfo',
'AnimatorTransitionInfoArray': 'UnityEngineAnimatorTransitionInfoArray',
'AnimatorTransitionInfoRef': 'UnityEngineAnimatorTransitionInfoRef',
'AnimatorUpdateMode': 'UnityEngineAnimatorUpdateMode',
'AnimatorUtility': 'UnityEngineAnimatorUtility',
'AnimatorUtilityArray': 'UnityEngineAnimatorUtilityArray',
'AnimatorUtilityRef': 'UnityEngineAnimatorUtilityRef',
'AnisotropicFiltering': 'UnityEngineAnisotropicFiltering',
'AreaEffector2D': 'UnityEngineAreaEffector2D',
'AreaEffector2DArray': 'UnityEngineAreaEffector2DArray',
'AreaEffector2DRef': 'UnityEngineAreaEffector2DRef',
'Array': 'SystemArray',
'AspectMode': 'UnityEngineUIAspectRatioFitterAspectMode',
'AspectRatioFitter': 'UnityEngineUIAspectRatioFitter',
'AspectRatioFitterArray': 'UnityEngineUIAspectRatioFitterArray',
'AudioChorusFilter': 'UnityEngineAudioChorusFilter',
'AudioChorusFilterArray': 'UnityEngineAudioChorusFilterArray',
'AudioChorusFilterRef': 'UnityEngineAudioChorusFilterRef',
'AudioClip': 'UnityEngineAudioClip',
'AudioClipArray': 'UnityEngineAudioClipArray',
'AudioClipLoadType': 'UnityEngineAudioClipLoadType',
'AudioClipRef': 'UnityEngineAudioClipRef',
'AudioDataLoadState': 'UnityEngineAudioDataLoadState',
'AudioDistortionFilter': 'UnityEngineAudioDistortionFilter',
'AudioDistortionFilterArray': 'UnityEngineAudioDistortionFilterArray',
'AudioDistortionFilterRef': 'UnityEngineAudioDistortionFilterRef',
'AudioEchoFilter': 'UnityEngineAudioEchoFilter',
'AudioEchoFilterArray': 'UnityEngineAudioEchoFilterArray',
'AudioEchoFilterRef': 'UnityEngineAudioEchoFilterRef',
'AudioHighPassFilter': 'UnityEngineAudioHighPassFilter',
'AudioHighPassFilterArray': 'UnityEngineAudioHighPassFilterArray',
'AudioHighPassFilterRef': 'UnityEngineAudioHighPassFilterRef',
'AudioLowPassFilter': 'UnityEngineAudioLowPassFilter',
'AudioLowPassFilterArray': 'UnityEngineAudioLowPassFilterArray',
'AudioLowPassFilterRef': 'UnityEngineAudioLowPassFilterRef',
'AudioMixerGroup': 'UnityEngineAudioAudioMixerGroup',
'AudioReverbFilter': 'UnityEngineAudioReverbFilter',
'AudioReverbFilterArray': 'UnityEngineAudioReverbFilterArray',
'AudioReverbFilterRef': 'UnityEngineAudioReverbFilterRef',
'AudioReverbPreset': 'UnityEngineAudioReverbPreset',
'AudioReverbZone': 'UnityEngineAudioReverbZone',
'AudioReverbZoneArray': 'UnityEngineAudioReverbZoneArray',
'AudioReverbZoneRef': 'UnityEngineAudioReverbZoneRef',
'AudioRolloffMode': 'UnityEngineAudioRolloffMode',
'AudioSource': 'UnityEngineAudioSource',
'AudioSourceArray': 'UnityEngineAudioSourceArray',
'AudioSourceCurveType': 'UnityEngineAudioSourceCurveType',
'AudioSourceRef': 'UnityEngineAudioSourceRef',
'AudioVelocityUpdateMode': 'UnityEngineAudioVelocityUpdateMode',
'Avatar': 'UnityEngineAvatar',
'AvatarArray': 'UnityEngineAvatarArray',
'AvatarBuilder': 'UnityEngineAvatarBuilder',
'AvatarBuilderArray': 'UnityEngineAvatarBuilderArray',
'AvatarBuilderRef': 'UnityEngineAvatarBuilderRef',
'AvatarIKGoal': 'UnityEngineAvatarIKGoal',
'AvatarIKHint': 'UnityEngineAvatarIKHint',
'AvatarMask': 'UnityEngineAvatarMask',
'AvatarMaskArray': 'UnityEngineAvatarMaskArray',
'AvatarMaskBodyPart': 'UnityEngineAvatarMaskBodyPart',
'AvatarMaskRef': 'UnityEngineAvatarMaskRef',
'AvatarRef': 'UnityEngineAvatarRef',
'AvatarTarget': 'UnityEngineAvatarTarget',
'Axis': 'UnityEngineUIGridLayoutGroupAxis',
'AxisEventData': 'UnityEngineEventSystemsAxisEventData',
'Base64FormattingOptions': 'SystemBase64FormattingOptions',
'BaseEventData': 'UnityEngineEventSystemsBaseEventData',
'BaseMeshEffect': 'UnityEngineUIBaseMeshEffect',
'BaseMeshEffectArray': 'UnityEngineUIBaseMeshEffectArray',
'BillboardAsset': 'UnityEngineBillboardAsset',
'BillboardRenderer': 'UnityEngineBillboardRenderer',
'BillboardRendererArray': 'UnityEngineBillboardRendererArray',
'BillboardRendererRef': 'UnityEngineBillboardRendererRef',
'BlendWeights': 'UnityEngineBlendWeights',
'BlockingObjects': 'UnityEngineUIGraphicRaycasterBlockingObjects',
'BoneWeightArray': 'UnityEngineBoneWeightArray',
'Boolean': 'SystemBoolean',
'BooleanArray': 'SystemBooleanArray',
'BooleanRef': 'SystemBooleanRef',
'Bounds': 'UnityEngineBounds',
'BoundsArray': 'UnityEngineBoundsArray',
'BoundsRef': 'UnityEngineBoundsRef',
'BoxCollider': 'UnityEngineBoxCollider',
'BoxCollider2D': 'UnityEngineBoxCollider2D',
'BoxCollider2DArray': 'UnityEngineBoxCollider2DArray',
'BoxCollider2DRef': 'UnityEngineBoxCollider2DRef',
'BoxColliderArray': 'UnityEngineBoxColliderArray',
'BoxColliderRef': 'UnityEngineBoxColliderRef',
'Burst': 'UnityEngineParticleSystemBurst',
'BurstArray': 'UnityEngineParticleSystemBurstArray',
'Button': 'UnityEngineUIButton',
'ButtonArray': 'UnityEngineUIButtonArray',
'ButtonClickedEvent': 'UnityEngineUIButtonButtonClickedEvent',
'ButtonClickedEventArray': 'UnityEngineUIButtonButtonClickedEventArray',
'Byte': 'SystemByte',
'ByteArray': 'SystemByteArray',
'ByteRef': 'SystemByteRef',
'Camera': 'UnityEngineCamera',
'CameraArray': 'UnityEngineCameraArray',
'CameraClearFlags': 'UnityEngineCameraClearFlags',
'CameraEvent': 'UnityEngineRenderingCameraEvent',
'CameraRef': 'UnityEngineCameraRef',
'CameraType': 'UnityEngineCameraType',
'Canvas': 'UnityEngineCanvas',
'CanvasArray': 'UnityEngineCanvasArray',
'CanvasRef': 'UnityEngineCanvasRef',
'CanvasRenderer': 'UnityEngineCanvasRenderer',
'CanvasRendererArray': 'UnityEngineCanvasRendererArray',
'CanvasRendererRef': 'UnityEngineCanvasRendererRef',
'CanvasScaler': 'UnityEngineUICanvasScaler',
'CanvasScalerArray': 'UnityEngineUICanvasScalerArray',
'CanvasUpdate': 'UnityEngineUICanvasUpdate',
'CapsuleCollider': 'UnityEngineCapsuleCollider',
'CapsuleCollider2D': 'UnityEngineCapsuleCollider2D',
'CapsuleCollider2DArray': 'UnityEngineCapsuleCollider2DArray',
'CapsuleCollider2DRef': 'UnityEngineCapsuleCollider2DRef',
'CapsuleColliderArray': 'UnityEngineCapsuleColliderArray',
'CapsuleColliderRef': 'UnityEngineCapsuleColliderRef',
'CapsuleDirection2D': 'UnityEngineCapsuleDirection2D',
'Char': 'SystemChar',
'CharArray': 'SystemCharArray',
'CharEnumerator': 'SystemCharEnumerator',
'CharRef': 'SystemCharRef',
'CharacterValidation': 'UnityEngineUIInputFieldCharacterValidation',
'CircleCollider2D': 'UnityEngineCircleCollider2D',
'CircleCollider2DArray': 'UnityEngineCircleCollider2DArray',
'CircleCollider2DRef': 'UnityEngineCircleCollider2DRef',
'Clipping': 'UnityEngineUIClipping',
'Collider': 'UnityEngineCollider',
'Collider2D': 'UnityEngineCollider2D',
'Collider2DArray': 'UnityEngineCollider2DArray',
'Collider2DRef': 'UnityEngineCollider2DRef',
'ColliderArray': 'UnityEngineColliderArray',
'ColliderDistance2D': 'UnityEngineColliderDistance2D',
'ColliderRef': 'UnityEngineColliderRef',
'Collision': 'UnityEngineCollision',
'Collision2D': 'UnityEngineCollision2D',
'Collision2DArray': 'UnityEngineCollision2DArray',
'Collision2DRef': 'UnityEngineCollision2DRef',
'CollisionArray': 'UnityEngineCollisionArray',
'CollisionDetectionMode': 'UnityEngineCollisionDetectionMode',
'CollisionDetectionMode2D': 'UnityEngineCollisionDetectionMode2D',
'CollisionModule': 'UnityEngineParticleSystemCollisionModule',
'CollisionModuleArray': 'UnityEngineParticleSystemCollisionModuleArray',
'CollisionRef': 'UnityEngineCollisionRef',
'Color': 'UnityEngineColor',
'Color32': 'UnityEngineColor32',
'Color32Array': 'UnityEngineColor32Array',
'ColorArray': 'UnityEngineColorArray',
'ColorBlock': 'UnityEngineUIColorBlock',
'ColorBlockArray': 'UnityEngineUIColorBlockArray',
'ColorBySpeedModule': 'UnityEngineParticleSystemColorBySpeedModule',
'ColorBySpeedModuleArray': 'UnityEngineParticleSystemColorBySpeedModuleArray',
'ColorOverLifetimeModule': 'UnityEngineParticleSystemColorOverLifetimeModule',
'ColorRef': 'UnityEngineColorRef',
'ColorSpace': 'UnityEngineColorSpace',
'CombineInstanceArray': 'UnityEngineCombineInstanceArray',
'CommandBuffer': 'UnityEngineRenderingCommandBuffer',
'CommandBufferArray': 'UnityEngineRenderingCommandBufferArray',
'CompareOptions': 'SystemGlobalizationCompareOptions',
'Compass': 'UnityEngineCompass',
'Component': 'UnityEngineComponent',
'ComponentArray': 'UnityEngineComponentArray',
'ComponentRef': 'UnityEngineComponentRef',
'CompositeCollider2D': 'UnityEngineCompositeCollider2D',
'CompositeCollider2DArray': 'UnityEngineCompositeCollider2DArray',
'CompositeCollider2DRef': 'UnityEngineCompositeCollider2DRef',
'ComputeBuffer': 'UnityEngineComputeBuffer',
'ComputeQueueType': 'UnityEngineRenderingComputeQueueType',
'ConfigurableJoint': 'UnityEngineConfigurableJoint',
'ConfigurableJointArray': 'UnityEngineConfigurableJointArray',
'ConfigurableJointMotion': 'UnityEngineConfigurableJointMotion',
'ConfigurableJointRef': 'UnityEngineConfigurableJointRef',
'ConstantForce': 'UnityEngineConstantForce',
'ConstantForce2D': 'UnityEngineConstantForce2D',
'ConstantForce2DArray': 'UnityEngineConstantForce2DArray',
'ConstantForce2DRef': 'UnityEngineConstantForce2DRef',
'ConstantForceArray': 'UnityEngineConstantForceArray',
'ConstantForceRef': 'UnityEngineConstantForceRef',
'Constraint': 'UnityEngineUIGridLayoutGroupConstraint',
'ConstraintSource': 'UnityEngineAnimationsConstraintSource',
'ConstraintSourceArray': 'UnityEngineAnimationsConstraintSourceArray',
'ContactFilter2D': 'UnityEngineContactFilter2D',
'ContactPoint': 'UnityEngineContactPoint',
'ContactPoint2D': 'UnityEngineContactPoint2D',
'ContactPoint2DArray': 'UnityEngineContactPoint2DArray',
'ContactPoint2DRef': 'UnityEngineContactPoint2DRef',
'ContactPointArray': 'UnityEngineContactPointArray',
'ContactPointRef': 'UnityEngineContactPointRef',
'ContentSizeFitter': 'UnityEngineUIContentSizeFitter',
'ContentSizeFitterArray': 'UnityEngineUIContentSizeFitterArray',
'ContentType': 'UnityEngineUIInputFieldContentType',
'Convert': 'SystemConvert',
'Corner': 'UnityEngineUIGridLayoutGroupCorner',
'Coroutine': 'UnityEngineCoroutine',
'Cubemap': 'UnityEngineCubemap',
'CullStateChangedEvent': 'UnityEngineUIMaskableGraphicCullStateChangedEvent',
'CultureInfo': 'SystemGlobalizationCultureInfo',
'CustomDataModule': 'UnityEngineParticleSystemCustomDataModule',
'CustomDataModuleArray': 'UnityEngineParticleSystemCustomDataModuleArray',
'DateTime': 'SystemDateTime',
'DateTimeArray': 'SystemDateTimeArray',
'DateTimeKind': 'SystemDateTimeKind',
'DateTimeOffset': 'SystemDateTimeOffset',
'DateTimeRef': 'SystemDateTimeRef',
'DateTimeStyles': 'SystemGlobalizationDateTimeStyles',
'DayOfWeek': 'SystemDayOfWeek',
'Debug': 'UnityEngineDebug',
'DebugArray': 'UnityEngineDebugArray',
'DebugRef': 'UnityEngineDebugRef',
'Decimal': 'SystemDecimal',
'DefaultControls': 'UnityEngineUIDefaultControls',
'DepthTextureMode': 'UnityEngineDepthTextureMode',
'DeviceOrientation': 'UnityEngineDeviceOrientation',
'Direction': 'UnityEngineUISliderDirection',
'DistanceJoint2D': 'UnityEngineDistanceJoint2D',
'DistanceJoint2DArray': 'UnityEngineDistanceJoint2DArray',
'DistanceJoint2DRef': 'UnityEngineDistanceJoint2DRef',
'Double': 'SystemDouble',
'DoubleArray': 'SystemDoubleArray',
'DoubleRef': 'SystemDoubleRef',
'Dropdown': 'UnityEngineUIDropdown',
'DropdownArray': 'UnityEngineUIDropdownArray',
'DropdownEvent': 'UnityEngineUIDropdownDropdownEvent',
'DropdownEventArray': 'UnityEngineUIDropdownDropdownEventArray',
'DurationUnit': 'UnityEngineDurationUnit',
'Edge': 'UnityEngineRectTransformEdge',
'EdgeCollider2D': 'UnityEngineEdgeCollider2D',
'EdgeCollider2DArray': 'UnityEngineEdgeCollider2DArray',
'EdgeCollider2DRef': 'UnityEngineEdgeCollider2DRef',
'Effector2D': 'UnityEngineEffector2D',
'Effector2DArray': 'UnityEngineEffector2DArray',
'Effector2DRef': 'UnityEngineEffector2DRef',
'EffectorForceMode2D': 'UnityEngineEffectorForceMode2D',
'EffectorSelection2D': 'UnityEngineEffectorSelection2D',
'EmissionModule': 'UnityEngineParticleSystemEmissionModule',
'EmissionModuleArray': 'UnityEngineParticleSystemEmissionModuleArray',
'EmitParams': 'UnityEngineParticleSystemEmitParams',
'EmitParamsArray': 'UnityEngineParticleSystemEmitParamsArray',
'Event': 'UnityEngineEvent',
'Exception': 'SystemException',
'ExperimentalAnimationsMuscleHandleRef': 'UnityEngineExperimentalAnimationsMuscleHandleRef',
'ExternalForcesModule': 'UnityEngineParticleSystemExternalForcesModule',
'ExternalForcesModuleArray': 'UnityEngineParticleSystemExternalForcesModuleArray',
'FFTWindow': 'UnityEngineFFTWindow',
'FillMethod': 'UnityEngineUIImageFillMethod',
'FitMode': 'UnityEngineUIContentSizeFitterFitMode',
'FixedJoint': 'UnityEngineFixedJoint',
'FixedJoint2D': 'UnityEngineFixedJoint2D',
'FixedJoint2DArray': 'UnityEngineFixedJoint2DArray',
'FixedJoint2DRef': 'UnityEngineFixedJoint2DRef',
'FixedJointArray': 'UnityEngineFixedJointArray',
'FixedJointRef': 'UnityEngineFixedJointRef',
'Flare': 'UnityEngineFlare',
'FogMode': 'UnityEngineFogMode',
'Font': 'UnityEngineFont',
'FontData': 'UnityEngineUIFontData',
'FontDataArray': 'UnityEngineUIFontDataArray',
'FontStyle': 'UnityEngineFontStyle',
'ForceMode': 'UnityEngineForceMode',
'ForceMode2D': 'UnityEngineForceMode2D',
'ForceOverLifetimeModule': 'UnityEngineParticleSystemForceOverLifetimeModule',
'ForceOverLifetimeModuleArray': 'UnityEngineParticleSystemForceOverLifetimeModuleArray',
'FrictionJoint2D': 'UnityEngineFrictionJoint2D',
'FrictionJoint2DArray': 'UnityEngineFrictionJoint2DArray',
'FrictionJoint2DRef': 'UnityEngineFrictionJoint2DRef',
'FrustumPlanes': 'UnityEngineFrustumPlanes',
'Func': 'SystemFuncUnityEngineUIILayoutElementSystemSingle',
'GameObject': 'UnityEngineGameObject',
'GameObjectArray': 'UnityEngineGameObjectArray',
'GameObjectRef': 'UnityEngineGameObjectRef',
'GateFitMode': 'UnityEngineCameraGateFitMode',
'GateFitParameters': 'UnityEngineCameraGateFitParameters',
'GenerationType': 'UnityEngineCompositeCollider2DGenerationType',
'GeometryType': 'UnityEngineCompositeCollider2DGeometryType',
'Gradient': 'UnityEngineGradient',
'Graphic': 'UnityEngineUIGraphic',
'GraphicArray': 'UnityEngineUIGraphicArray',
'GraphicRaycaster': 'UnityEngineUIGraphicRaycaster',
'GraphicRaycasterArray': 'UnityEngineUIGraphicRaycasterArray',
'GridLayoutGroup': 'UnityEngineUIGridLayoutGroup',
'GridLayoutGroupArray': 'UnityEngineUIGridLayoutGroupArray',
'Guid': 'SystemGuid',
'Gyroscope': 'UnityEngineGyroscope',
'HideFlags': 'UnityEngineHideFlags',
'HingeJoint': 'UnityEngineHingeJoint',
'HingeJoint2D': 'UnityEngineHingeJoint2D',
'HingeJoint2DArray': 'UnityEngineHingeJoint2DArray',
'HingeJoint2DRef': 'UnityEngineHingeJoint2DRef',
'HingeJointArray': 'UnityEngineHingeJointArray',
'HingeJointRef': 'UnityEngineHingeJointRef',
'HorizontalLayoutGroup': 'UnityEngineUIHorizontalLayoutGroup',
'HorizontalLayoutGroupArray': 'UnityEngineUIHorizontalLayoutGroupArray',
'HorizontalOrVerticalLayoutGroup': 'UnityEngineUIHorizontalOrVerticalLayoutGroup',
'HorizontalOrVerticalLayoutGroupArray': 'UnityEngineUIHorizontalOrVerticalLayoutGroupArray',
'HorizontalWrapMode': 'UnityEngineHorizontalWrapMode',
'HumanBodyBones': 'UnityEngineHumanBodyBones',
'HumanBone': 'UnityEngineHumanBone',
'HumanBoneArray': 'UnityEngineHumanBoneArray',
'HumanBoneRef': 'UnityEngineHumanBoneRef',
'HumanDescription': 'UnityEngineHumanDescription',
'HumanDescriptionArray': 'UnityEngineHumanDescriptionArray',
'HumanDescriptionRef': 'UnityEngineHumanDescriptionRef',
'HumanLimit': 'UnityEngineHumanLimit',
'HumanLimitArray': 'UnityEngineHumanLimitArray',
'HumanLimitRef': 'UnityEngineHumanLimitRef',
'HumanPartDof': 'UnityEngineHumanPartDof',
'HumanPose': 'UnityEngineHumanPose',
'HumanPoseArray': 'UnityEngineHumanPoseArray',
'HumanPoseHandler': 'UnityEngineHumanPoseHandler',
'HumanPoseHandlerArray': 'UnityEngineHumanPoseHandlerArray',
'HumanPoseHandlerRef': 'UnityEngineHumanPoseHandlerRef',
'HumanPoseRef': 'UnityEngineHumanPoseRef',
'HumanTrait': 'UnityEngineHumanTrait',
'HumanTraitArray': 'UnityEngineHumanTraitArray',
'HumanTraitRef': 'UnityEngineHumanTraitRef',
'IClippable': 'UnityEngineUIIClippable',
'IEnumerable': 'SystemCollectionsGenericIEnumerableVRCSDKBaseVRC_EventHandlerVrcEvent',
'IEnumerator': 'SystemCollectionsIEnumerator',
'IFormatProvider': 'SystemIFormatProvider',
'IList': 'SystemCollectionsGenericIListSystemCollectionsGenericKeyValuePairUnityEngineAnimationClipUnityEngineAnimationClip',
'ILogger': 'UnityEngineILogger',
'IMECompositionMode': 'UnityEngineIMECompositionMode',
'IUdonEventReceiver': 'VRCUdonCommonInterfacesIUdonEventReceiver',
'Image': 'UnityEngineUIImage',
'ImageArray': 'UnityEngineUIImageArray',
'IndexFormat': 'UnityEngineRenderingIndexFormat',
'InheritVelocityModule': 'UnityEngineParticleSystemInheritVelocityModule',
'InheritVelocityModuleArray': 'UnityEngineParticleSystemInheritVelocityModuleArray',
'Input': 'UnityEngineInput',
'InputArray': 'UnityEngineInputArray',
'InputField': 'UnityEngineUIInputField',
'InputFieldArray': 'UnityEngineUIInputFieldArray',
'InputManager': 'VRCSDKBaseInputManager',
'InputManagerArray': 'VRCSDKBaseInputManagerArray',
'InputRef': 'UnityEngineInputRef',
'InputType': 'UnityEngineUIInputFieldInputType',
'Int16': 'SystemInt16',
'Int16Array': 'SystemInt16Array',
'Int16Ref': 'SystemInt16Ref',
'Int32': 'SystemInt32',
'Int32Array': 'SystemInt32Array',
'Int32ArrayRef': 'SystemInt32ArrayRef',
'Int32Ref': 'SystemInt32Ref',
'Int64': 'SystemInt64',
'Int64Array': 'SystemInt64Array',
'Int64Ref': 'SystemInt64Ref',
'Joint': 'UnityEngineJoint',
'Joint2D': 'UnityEngineJoint2D',
'Joint2DArray': 'UnityEngineJoint2DArray',
'Joint2DRef': 'UnityEngineJoint2DRef',
'JointAngleLimits2D': 'UnityEngineJointAngleLimits2D',
'JointArray': 'UnityEngineJointArray',
'JointDrive': 'UnityEngineJointDrive',
'JointLimitState2D': 'UnityEngineJointLimitState2D',
'JointLimits': 'UnityEngineJointLimits',
'JointMotor': 'UnityEngineJointMotor',
'JointMotor2D': 'UnityEngineJointMotor2D',
'JointProjectionMode': 'UnityEngineJointProjectionMode',
'JointRef': 'UnityEngineJointRef',
'JointSpring': 'UnityEngineJointSpring',
'JointSuspension2D': 'UnityEngineJointSuspension2D',
'JointTranslationLimits2D': 'UnityEngineJointTranslationLimits2D',
'KeyCode': 'UnityEngineKeyCode',
'LayerMask': 'UnityEngineLayerMask',
'LayerMaskArray': 'UnityEngineLayerMaskArray',
'LayerMaskRef': 'UnityEngineLayerMaskRef',
'LayoutElement': 'UnityEngineUILayoutElement',
'LayoutElementArray': 'UnityEngineUILayoutElementArray',
'LayoutGroup': 'UnityEngineUILayoutGroup',
'LayoutGroupArray': 'UnityEngineUILayoutGroupArray',
'LayoutRebuilder': 'UnityEngineUILayoutRebuilder',
'LayoutRebuilderArray': 'UnityEngineUILayoutRebuilderArray',
'LayoutUtility': 'UnityEngineUILayoutUtility',
'Light': 'UnityEngineLight',
'LightArray': 'UnityEngineLightArray',
'LightBakingOutput': 'UnityEngineLightBakingOutput',
'LightEvent': 'UnityEngineRenderingLightEvent',
'LightProbeUsage': 'UnityEngineRenderingLightProbeUsage',
'LightRef': 'UnityEngineLightRef',
'LightRenderMode': 'UnityEngineLightRenderMode',
'LightShadowCasterMode': 'UnityEngineLightShadowCasterMode',
'LightShadowResolution': 'UnityEngineRenderingLightShadowResolution',
'LightShadows': 'UnityEngineLightShadows',
'LightType': 'UnityEngineLightType',
'LightsModule': 'UnityEngineParticleSystemLightsModule',
'LightsModuleArray': 'UnityEngineParticleSystemLightsModuleArray',
'LimitVelocityOverLifetimeModule': 'UnityEngineParticleSystemLimitVelocityOverLifetimeModule',
'LimitVelocityOverLifetimeModuleArray': 'UnityEngineParticleSystemLimitVelocityOverLifetimeModuleArray',
'LineAlignment': 'UnityEngineLineAlignment',
'LineRenderer': 'UnityEngineLineRenderer',
'LineRendererArray': 'UnityEngineLineRendererArray',
'LineRendererRef': 'UnityEngineLineRendererRef',
'LineTextureMode': 'UnityEngineLineTextureMode',
'LineType': 'UnityEngineUIInputFieldLineType',
'List': 'SystemCollectionsGenericListVRCSDKBaseVRCPlayerApi',
'LocationService': 'UnityEngineLocationService',
'LookAtConstraint': 'UnityEngineAnimationsLookAtConstraint',
'LookAtConstraintArray': 'UnityEngineAnimationsLookAtConstraintArray',
'MainModule': 'UnityEngineParticleSystemMainModule',
'MainModuleArray': 'UnityEngineParticleSystemMainModuleArray',
'Mask': 'UnityEngineUIMask',
'MaskArray': 'UnityEngineUIMaskArray',
'MaskUtilities': 'UnityEngineUIMaskUtilities',
'MaskUtilitiesArray': 'UnityEngineUIMaskUtilitiesArray',
'MaskableGraphic': 'UnityEngineUIMaskableGraphic',
'MaskableGraphicArray': 'UnityEngineUIMaskableGraphicArray',
'MatchTargetWeightMask': 'UnityEngineMatchTargetWeightMask',
'MatchTargetWeightMaskArray': 'UnityEngineMatchTargetWeightMaskArray',
'MatchTargetWeightMaskRef': 'UnityEngineMatchTargetWeightMaskRef',
'Material': 'UnityEngineMaterial',
'MaterialArray': 'UnityEngineMaterialArray',
'MaterialGlobalIlluminationFlags': 'UnityEngineMaterialGlobalIlluminationFlags',
'MaterialPropertyBlock': 'UnityEngineMaterialPropertyBlock',
'MaterialPropertyBlockArray': 'UnityEngineMaterialPropertyBlockArray',
'MaterialPropertyBlockRef': 'UnityEngineMaterialPropertyBlockRef',
'MaterialRef': 'UnityEngineMaterialRef',
'Mathf': 'UnityEngineMathf',
'MathfArray': 'UnityEngineMathfArray',
'MathfRef': 'UnityEngineMathfRef',
'Matrix4x4': 'UnityEngineMatrix4x4',
'Matrix4x4Array': 'UnityEngineMatrix4x4Array',
'Matrix4x4Ref': 'UnityEngineMatrix4x4Ref',
'Mesh': 'UnityEngineMesh',
'MeshArray': 'UnityEngineMeshArray',
'MeshCollider': 'UnityEngineMeshCollider',
'MeshColliderArray': 'UnityEngineMeshColliderArray',
'MeshColliderCookingOptions': 'UnityEngineMeshColliderCookingOptions',
'MeshColliderRef': 'UnityEngineMeshColliderRef',
'MeshFilter': 'UnityEngineMeshFilter',
'MeshFilterArray': 'UnityEngineMeshFilterArray',
'MeshFilterRef': 'UnityEngineMeshFilterRef',
'MeshRef': 'UnityEngineMeshRef',
'MeshRenderer': 'UnityEngineMeshRenderer',
'MeshRendererArray': 'UnityEngineMeshRendererArray',
'MeshRendererRef': 'UnityEngineMeshRendererRef',
'MeshTopology': 'UnityEngineMeshTopology',
'MinMaxCurve': 'UnityEngineParticleSystemMinMaxCurve',
'MinMaxCurveArray': 'UnityEngineParticleSystemMinMaxCurveArray',
'MinMaxGradient': 'UnityEngineParticleSystemMinMaxGradient',
'MinMaxGradientArray': 'UnityEngineParticleSystemMinMaxGradientArray',
'Mode': 'UnityEngineUINavigationMode',
'MonoOrStereoscopicEye': 'UnityEngineCameraMonoOrStereoscopicEye',
'Motion': 'UnityEngineMotion',
'MotionArray': 'UnityEngineMotionArray',
'MotionRef': 'UnityEngineMotionRef',
'MotionVectorGenerationMode': 'UnityEngineMotionVectorGenerationMode',
'MovementType': 'UnityEngineUIScrollRectMovementType',
'MuscleHandle': 'UnityEngineExperimentalAnimationsMuscleHandle',
'MuscleHandleArray': 'UnityEngineExperimentalAnimationsMuscleHandleArray',
'NavMesh': 'UnityEngineAINavMesh',
'NavMeshAgent': 'UnityEngineAINavMeshAgent',
'NavMeshAgentArray': 'UnityEngineAINavMeshAgentArray',
'NavMeshBuildSettings': 'UnityEngineAINavMeshBuildSettings',
'NavMeshData': 'UnityEngineAINavMeshData',
'NavMeshDataArray': 'UnityEngineAINavMeshDataArray',
'NavMeshDataInstance': 'UnityEngineAINavMeshDataInstance',
'NavMeshLinkData': 'UnityEngineAINavMeshLinkData',
'NavMeshLinkInstance': 'UnityEngineAINavMeshLinkInstance',
'NavMeshObstacle': 'UnityEngineAINavMeshObstacle',
'NavMeshObstacleArray': 'UnityEngineAINavMeshObstacleArray',
'NavMeshObstacleShape': 'UnityEngineAINavMeshObstacleShape',
'NavMeshPath': 'UnityEngineAINavMeshPath',
'NavMeshPathArray': 'UnityEngineAINavMeshPathArray',
'NavMeshPathStatus': 'UnityEngineAINavMeshPathStatus',
'NavMeshQueryFilter': 'UnityEngineAINavMeshQueryFilter',
'NavMeshTriangulation': 'UnityEngineAINavMeshTriangulation',
'Navigation': 'UnityEngineUINavigation',
'NavigationArray': 'UnityEngineUINavigationArray',
'NetworkEventTarget': 'VRCUdonCommonInterfacesNetworkEventTarget',
'Networking': 'VRCSDKBaseNetworking',
'NoiseModule': 'UnityEngineParticleSystemNoiseModule',
'NoiseModuleArray': 'UnityEngineParticleSystemNoiseModuleArray',
'NormalizationForm': 'SystemTextNormalizationForm',
'NumberStyles': 'SystemGlobalizationNumberStyles',
'Object': 'SystemObject',
'ObjectArray': 'SystemObjectArray',
'ObjectRef': 'UnityEngineObjectRef',
'ObstacleAvoidanceType': 'UnityEngineAIObstacleAvoidanceType',
'OffMeshLink': 'UnityEngineAIOffMeshLink',
'OffMeshLinkArray': 'UnityEngineAIOffMeshLinkArray',
'OffMeshLinkData': 'UnityEngineAIOffMeshLinkData',
'OnChangeEvent': 'UnityEngineUIInputFieldOnChangeEvent',
'OnChangeEventArray': 'UnityEngineUIInputFieldOnChangeEventArray',
'OnValidateInput': 'UnityEngineUIInputFieldOnValidateInput',
'OpaqueSortMode': 'UnityEngineRenderingOpaqueSortMode',
'OptionData': 'UnityEngineUIDropdownOptionData',
'OptionDataArray': 'UnityEngineUIDropdownOptionDataArray',
'OptionDataList': 'UnityEngineUIDropdownOptionDataList',
'OptionDataListArray': 'UnityEngineUIDropdownOptionDataListArray',
'Outline': 'UnityEngineUIOutline',
'OutlineArray': 'UnityEngineUIOutlineArray',
'PCMReaderCallback': 'UnityEngineAudioClipPCMReaderCallback',
'PCMSetPositionCallback': 'UnityEngineAudioClipPCMSetPositionCallback',
'ParentConstraint': 'UnityEngineAnimationsParentConstraint',
'ParentConstraintArray': 'UnityEngineAnimationsParentConstraintArray',
'Particle': 'UnityEngineParticleSystemParticle',
'ParticleArray': 'UnityEngineParticleSystemParticleArray',
'ParticleSystem': 'UnityEngineParticleSystem',
'ParticleSystemAnimationMode': 'UnityEngineParticleSystemAnimationMode',
'ParticleSystemAnimationTimeMode': 'UnityEngineParticleSystemAnimationTimeMode',
'ParticleSystemAnimationType': 'UnityEngineParticleSystemAnimationType',
'ParticleSystemArray': 'UnityEngineParticleSystemArray',
'ParticleSystemBurstRef': 'UnityEngineParticleSystemBurstRef',
'ParticleSystemCollisionMode': 'UnityEngineParticleSystemCollisionMode',
'ParticleSystemCollisionModuleRef': 'UnityEngineParticleSystemCollisionModuleRef',
'ParticleSystemCollisionQuality': 'UnityEngineParticleSystemCollisionQuality',
'ParticleSystemCollisionType': 'UnityEngineParticleSystemCollisionType',
'ParticleSystemColorBySpeedModuleRef': 'UnityEngineParticleSystemColorBySpeedModuleRef',
'ParticleSystemCullingMode': 'UnityEngineParticleSystemCullingMode',
'ParticleSystemCurveMode': 'UnityEngineParticleSystemCurveMode',
'ParticleSystemCustomData': 'UnityEngineParticleSystemCustomData',
'ParticleSystemCustomDataMode': 'UnityEngineParticleSystemCustomDataMode',
'ParticleSystemCustomDataModuleRef': 'UnityEngineParticleSystemCustomDataModuleRef',
'ParticleSystemEmissionModuleRef': 'UnityEngineParticleSystemEmissionModuleRef',
'ParticleSystemEmitParamsRef': 'UnityEngineParticleSystemEmitParamsRef',
'ParticleSystemEmitterVelocityMode': 'UnityEngineParticleSystemEmitterVelocityMode',
'ParticleSystemExternalForcesModuleRef': 'UnityEngineParticleSystemExternalForcesModuleRef',
'ParticleSystemForceField': 'UnityEngineParticleSystemForceField',
'ParticleSystemForceOverLifetimeModuleRef': 'UnityEngineParticleSystemForceOverLifetimeModuleRef',
'ParticleSystemGameObjectFilter': 'UnityEngineParticleSystemGameObjectFilter',
'ParticleSystemGradientMode': 'UnityEngineParticleSystemGradientMode',
'ParticleSystemInheritVelocityMode': 'UnityEngineParticleSystemInheritVelocityMode',
'ParticleSystemInheritVelocityModuleRef': 'UnityEngineParticleSystemInheritVelocityModuleRef',
'ParticleSystemLightsModuleRef': 'UnityEngineParticleSystemLightsModuleRef',
'ParticleSystemLimitVelocityOverLifetimeModuleRef': 'UnityEngineParticleSystemLimitVelocityOverLifetimeModuleRef',
'ParticleSystemMainModuleRef': 'UnityEngineParticleSystemMainModuleRef',
'ParticleSystemMeshShapeType': 'UnityEngineParticleSystemMeshShapeType',
'ParticleSystemMinMaxCurveRef': 'UnityEngineParticleSystemMinMaxCurveRef',
'ParticleSystemMinMaxGradientRef': 'UnityEngineParticleSystemMinMaxGradientRef',
'ParticleSystemNoiseModuleRef': 'UnityEngineParticleSystemNoiseModuleRef',
'ParticleSystemNoiseQuality': 'UnityEngineParticleSystemNoiseQuality',
'ParticleSystemOverlapAction': 'UnityEngineParticleSystemOverlapAction',
'ParticleSystemParticleRef': 'UnityEngineParticleSystemParticleRef',
'ParticleSystemRef': 'UnityEngineParticleSystemRef',
'ParticleSystemRingBufferMode': 'UnityEngineParticleSystemRingBufferMode',
'ParticleSystemRotationBySpeedModuleRef': 'UnityEngineParticleSystemRotationBySpeedModuleRef',
'ParticleSystemRotationOverLifetimeModuleRef': 'UnityEngineParticleSystemRotationOverLifetimeModuleRef',
'ParticleSystemScalingMode': 'UnityEngineParticleSystemScalingMode',
'ParticleSystemShapeModuleRef': 'UnityEngineParticleSystemShapeModuleRef',
'ParticleSystemShapeMultiModeValue': 'UnityEngineParticleSystemShapeMultiModeValue',
'ParticleSystemShapeTextureChannel': 'UnityEngineParticleSystemShapeTextureChannel',
'ParticleSystemShapeType': 'UnityEngineParticleSystemShapeType',
'ParticleSystemSimulationSpace': 'UnityEngineParticleSystemSimulationSpace',
'ParticleSystemSizeBySpeedModuleRef': 'UnityEngineParticleSystemSizeBySpeedModuleRef',
'ParticleSystemSizeOverLifetimeModuleRef': 'UnityEngineParticleSystemSizeOverLifetimeModuleRef',
'ParticleSystemStopAction': 'UnityEngineParticleSystemStopAction',
'ParticleSystemStopBehavior': 'UnityEngineParticleSystemStopBehavior',
'ParticleSystemSubEmitterProperties': 'UnityEngineParticleSystemSubEmitterProperties',
'ParticleSystemSubEmitterType': 'UnityEngineParticleSystemSubEmitterType',
'ParticleSystemSubEmittersModuleRef': 'UnityEngineParticleSystemSubEmittersModuleRef',
'ParticleSystemTextureSheetAnimationModuleRef': 'UnityEngineParticleSystemTextureSheetAnimationModuleRef',
'ParticleSystemTrailMode': 'UnityEngineParticleSystemTrailMode',
'ParticleSystemTrailModuleRef': 'UnityEngineParticleSystemTrailModuleRef',
'ParticleSystemTrailTextureMode': 'UnityEngineParticleSystemTrailTextureMode',
'ParticleSystemTriggerModuleRef': 'UnityEngineParticleSystemTriggerModuleRef',
'ParticleSystemVelocityOverLifetimeModuleRef': 'UnityEngineParticleSystemVelocityOverLifetimeModuleRef',
'PhysicMaterial': 'UnityEnginePhysicMaterial',
'PhysicMaterialArray': 'UnityEnginePhysicMaterialArray',
'PhysicMaterialCombine': 'UnityEnginePhysicMaterialCombine',
'PhysicMaterialRef': 'UnityEnginePhysicMaterialRef',
'Physics': 'UnityEnginePhysics',
'Physics2D': 'UnityEnginePhysics2D',
'Physics2DArray': 'UnityEnginePhysics2DArray',
'Physics2DRef': 'UnityEnginePhysics2DRef',
'PhysicsArray': 'UnityEnginePhysicsArray',
'PhysicsJobOptions2D': 'UnityEnginePhysicsJobOptions2D',
'PhysicsMaterial2D': 'UnityEnginePhysicsMaterial2D',
'PhysicsMaterial2DArray': 'UnityEnginePhysicsMaterial2DArray',
'PhysicsMaterial2DRef': 'UnityEnginePhysicsMaterial2DRef',
'PhysicsRef': 'UnityEnginePhysicsRef',
'PhysicsScene': 'UnityEnginePhysicsScene',
'PhysicsScene2D': 'UnityEnginePhysicsScene2D',
'PickupHand': 'VRCSDKBaseVRC_PickupPickupHand',
'Plane': 'UnityEnginePlane',
'PlatformEffector2D': 'UnityEnginePlatformEffector2D',
'PlatformEffector2DArray': 'UnityEnginePlatformEffector2DArray',
'PlatformEffector2DRef': 'UnityEnginePlatformEffector2DRef',
'PlayMode': 'UnityEnginePlayMode',
'PlayableGraph': 'UnityEnginePlayablesPlayableGraph',
'PointEffector2D': 'UnityEnginePointEffector2D',
'PointEffector2DArray': 'UnityEnginePointEffector2DArray',
'PointEffector2DRef': 'UnityEnginePointEffector2DRef',
'PointerEventData': 'UnityEngineEventSystemsPointerEventData',
'PolygonCollider2D': 'UnityEnginePolygonCollider2D',
'PolygonCollider2DArray': 'UnityEnginePolygonCollider2DArray',
'PolygonCollider2DRef': 'UnityEnginePolygonCollider2DRef',
'PositionAsUV1': 'UnityEngineUIPositionAsUV1',
'PositionAsUV1Array': 'UnityEngineUIPositionAsUV1Array',
'PositionConstraint': 'UnityEngineAnimationsPositionConstraint',
'PositionConstraintArray': 'UnityEngineAnimationsPositionConstraintArray',
'PrimitiveType': 'UnityEnginePrimitiveType',
'QualitySettings': 'UnityEngineQualitySettings',
'QualitySettingsArray': 'UnityEngineQualitySettingsArray',
'QualitySettingsRef': 'UnityEngineQualitySettingsRef',
'Quaternion': 'UnityEngineQuaternion',
'QuaternionArray': 'UnityEngineQuaternionArray',
'QuaternionRef': 'UnityEngineQuaternionRef',
'QueryTriggerInteraction': 'UnityEngineQueryTriggerInteraction',
'QueueMode': 'UnityEngineQueueMode',
'Random': 'UnityEngineRandom',
'RandomArray': 'UnityEngineRandomArray',
'RandomRef': 'UnityEngineRandomRef',
'RawImage': 'UnityEngineUIRawImage',
'RawImageArray': 'UnityEngineUIRawImageArray',
'Ray': 'UnityEngineRay',
'RayArray': 'UnityEngineRayArray',
'RayRef': 'UnityEngineRayRef',
'RaycastHit': 'UnityEngineRaycastHit',
'RaycastHit2D': 'UnityEngineRaycastHit2D',
'RaycastHit2DArray': 'UnityEngineRaycastHit2DArray',
'RaycastHit2DRef': 'UnityEngineRaycastHit2DRef',
'RaycastHitArray': 'UnityEngineRaycastHitArray',
'RaycastHitRef': 'UnityEngineRaycastHitRef',
'ReadOnlyCollection': 'SystemCollectionsObjectModelReadOnlyCollectionSystemTimeZoneInfo',
'ReapplyDrivenProperties': 'UnityEngineRectTransformReapplyDrivenProperties',
'Rect': 'UnityEngineRect',
'RectMask2D': 'UnityEngineUIRectMask2D',
'RectMask2DArray': 'UnityEngineUIRectMask2DArray',
'RectOffset': 'UnityEngineRectOffset',
'RectTransform': 'UnityEngineRectTransform',
'RectTransformArray': 'UnityEngineRectTransformArray',
'RectTransformRef': 'UnityEngineRectTransformRef',
'RelativeJoint2D': 'UnityEngineRelativeJoint2D',
'RelativeJoint2DArray': 'UnityEngineRelativeJoint2DArray',
'RelativeJoint2DRef': 'UnityEngineRelativeJoint2DRef',
'RenderBuffer': 'UnityEngineRenderBuffer',
'RenderBufferArray': 'UnityEngineRenderBufferArray',
'RenderMode': 'UnityEngineRenderMode',
'RenderSettings': 'UnityEngineRenderSettings',
'RenderSettingsArray': 'UnityEngineRenderSettingsArray',
'RenderSettingsRef': 'UnityEngineRenderSettingsRef',
'RenderTexture': 'UnityEngineRenderTexture',
'Renderer': 'UnityEngineRenderer',
'RendererArray': 'UnityEngineRendererArray',
'RendererExtensions': 'UnityEngineRendererExtensions',
'RendererRef': 'UnityEngineRendererRef',
'RenderingPath': 'UnityEngineRenderingPath',
'Resources': 'UnityEngineUIDefaultControlsResources',
'ResourcesArray': 'UnityEngineUIDefaultControlsResourcesArray',
'Rigidbody': 'UnityEngineRigidbody',
'Rigidbody2D': 'UnityEngineRigidbody2D',
'Rigidbody2DArray': 'UnityEngineRigidbody2DArray',
'Rigidbody2DRef': 'UnityEngineRigidbody2DRef',
'RigidbodyArray': 'UnityEngineRigidbodyArray',
'RigidbodyConstraints': 'UnityEngineRigidbodyConstraints',
'RigidbodyConstraints2D': 'UnityEngineRigidbodyConstraints2D',
'RigidbodyInterpolation': 'UnityEngineRigidbodyInterpolation',
'RigidbodyInterpolation2D': 'UnityEngineRigidbodyInterpolation2D',
'RigidbodyRef': 'UnityEngineRigidbodyRef',
'RigidbodySleepMode2D': 'UnityEngineRigidbodySleepMode2D',
'RigidbodyType2D': 'UnityEngineRigidbodyType2D',
'RotationBySpeedModule': 'UnityEngineParticleSystemRotationBySpeedModule',
'RotationBySpeedModuleArray': 'UnityEngineParticleSystemRotationBySpeedModuleArray',
'RotationConstraint': 'UnityEngineAnimationsRotationConstraint',
'RotationConstraintArray': 'UnityEngineAnimationsRotationConstraintArray',
'RotationDriveMode': 'UnityEngineRotationDriveMode',
'RotationOverLifetimeModule': 'UnityEngineParticleSystemRotationOverLifetimeModule',
'RotationOverLifetimeModuleArray': 'UnityEngineParticleSystemRotationOverLifetimeModuleArray',
'RuntimeAnimatorController': 'UnityEngineRuntimeAnimatorController',
'RuntimeAnimatorControllerArray': 'UnityEngineRuntimeAnimatorControllerArray',
'RuntimeAnimatorControllerRef': 'UnityEngineRuntimeAnimatorControllerRef',
'RuntimeTypeHandle': 'SystemRuntimeTypeHandle',
'SByte': 'SystemSByte',
'SByteArray': 'SystemSByteArray',
'SByteRef': 'SystemSByteRef',
'ScaleConstraint': 'UnityEngineAnimationsScaleConstraint',
'ScaleConstraintArray': 'UnityEngineAnimationsScaleConstraintArray',
'ScaleMode': 'UnityEngineUICanvasScalerScaleMode',
'Scene': 'UnityEngineSceneManagementScene',
'ScreenMatchMode': 'UnityEngineUICanvasScalerScreenMatchMode',
'ScriptableObject': 'UnityEngineScriptableObject',
'ScrollEvent': 'UnityEngineUIScrollbarScrollEvent',
'ScrollEventArray': 'UnityEngineUIScrollbarScrollEventArray',
'ScrollRect': 'UnityEngineUIScrollRect',
'ScrollRectArray': 'UnityEngineUIScrollRectArray',
'ScrollRectEvent': 'UnityEngineUIScrollRectScrollRectEvent',
'ScrollRectEventArray': 'UnityEngineUIScrollRectScrollRectEventArray',
'Scrollbar': 'UnityEngineUIScrollbar',
'ScrollbarArray': 'UnityEngineUIScrollbarArray',
'ScrollbarVisibility': 'UnityEngineUIScrollRectScrollbarVisibility',
'Selectable': 'UnityEngineUISelectable',
'SelectableArray': 'UnityEngineUISelectableArray',
'SendMessageOptions': 'UnityEngineSendMessageOptions',
'Shader': 'UnityEngineShader',
'Shadow': 'UnityEngineUIShadow',
'ShadowArray': 'UnityEngineUIShadowArray',
'ShadowCastingMode': 'UnityEngineRenderingShadowCastingMode',
'ShadowMapPass': 'UnityEngineRenderingShadowMapPass',
'ShadowProjection': 'UnityEngineShadowProjection',
'ShadowQuality': 'UnityEngineShadowQuality',
'ShadowResolution': 'UnityEngineShadowResolution',
'ShadowmaskMode': 'UnityEngineShadowmaskMode',
'ShapeModule': 'UnityEngineParticleSystemShapeModule',
'ShapeModuleArray': 'UnityEngineParticleSystemShapeModuleArray',
'Single': 'SystemSingle',
    'SingleArray': 'SystemSingleArray',
'SingleRef': 'SystemSingleRef',
'SizeBySpeedModule': 'UnityEngineParticleSystemSizeBySpeedModule',
'SizeBySpeedModuleArray': 'UnityEngineParticleSystemSizeBySpeedModuleArray',
'SizeOverLifetimeModule': 'UnityEngineParticleSystemSizeOverLifetimeModule',
'SizeOverLifetimeModuleArray': 'UnityEngineParticleSystemSizeOverLifetimeModuleArray',
'SkeletonBone': 'UnityEngineSkeletonBone',
'SkeletonBoneArray': 'UnityEngineSkeletonBoneArray',
'SkeletonBoneRef': 'UnityEngineSkeletonBoneRef',
'SkinQuality': 'UnityEngineSkinQuality',
'SkinnedMeshRenderer': 'UnityEngineSkinnedMeshRenderer',
'SkinnedMeshRendererArray': 'UnityEngineSkinnedMeshRendererArray',
'SkinnedMeshRendererRef': 'UnityEngineSkinnedMeshRendererRef',
'Slider': 'UnityEngineUISlider',
'SliderArray': 'UnityEngineUISliderArray',
'SliderEvent': 'UnityEngineUISliderSliderEvent',
'SliderEventArray': 'UnityEngineUISliderSliderEventArray',
'SliderJoint2D': 'UnityEngineSliderJoint2D',
'SliderJoint2DArray': 'UnityEngineSliderJoint2DArray',
'SliderJoint2DRef': 'UnityEngineSliderJoint2DRef',
'SoftJointLimit': 'UnityEngineSoftJointLimit',
'SoftJointLimitSpring': 'UnityEngineSoftJointLimitSpring',
'Space': 'UnityEngineSpace',
'SpawnOrientation': 'VRCSDKBaseVRC_SceneDescriptorSpawnOrientation',
'SphereCollider': 'UnityEngineSphereCollider',
'SphereColliderArray': 'UnityEngineSphereColliderArray',
'SphereColliderRef': 'UnityEngineSphereColliderRef',
'SphericalHarmonicsL2': 'UnityEngineRenderingSphericalHarmonicsL2',
'SphericalHarmonicsL2Array': 'UnityEngineRenderingSphericalHarmonicsL2Array',
'SpringJoint': 'UnityEngineSpringJoint',
'SpringJointArray': 'UnityEngineSpringJointArray',
'SpringJointRef': 'UnityEngineSpringJointRef',
'Sprite': 'UnityEngineSprite',
'SpriteDrawMode': 'UnityEngineSpriteDrawMode',
'SpriteMaskInteraction': 'UnityEngineSpriteMaskInteraction',
'SpriteRenderer': 'UnityEngineSpriteRenderer',
'SpriteRendererArray': 'UnityEngineSpriteRendererArray',
'SpriteRendererRef': 'UnityEngineSpriteRendererRef',
'SpriteSortPoint': 'UnityEngineSpriteSortPoint',
'SpriteState': 'UnityEngineUISpriteState',
'SpriteStateArray': 'UnityEngineUISpriteStateArray',
'SpriteTileMode': 'UnityEngineSpriteTileMode',
'State': 'UnityEngineRandomState',
'StateMachineBehaviourArray': 'UnityEngineStateMachineBehaviourArray',
'StereoTargetEyeMask': 'UnityEngineStereoTargetEyeMask',
'StereoscopicEye': 'UnityEngineCameraStereoscopicEye',
'String': 'SystemString',
'StringArray': 'SystemStringArray',
'StringComparison': 'SystemStringComparison',
'StringRef': 'SystemStringRef',
'StringSplitOptions': 'SystemStringSplitOptions',
'StructLayoutAttribute': 'SystemRuntimeInteropServicesStructLayoutAttribute',
'SubEmittersModule': 'UnityEngineParticleSystemSubEmittersModule',
'SubEmittersModuleArray': 'UnityEngineParticleSystemSubEmittersModuleArray',
'SubmitEvent': 'UnityEngineUIInputFieldSubmitEvent',
'SubmitEventArray': 'UnityEngineUIInputFieldSubmitEventArray',
'SurfaceEffector2D': 'UnityEngineSurfaceEffector2D',
'SurfaceEffector2DArray': 'UnityEngineSurfaceEffector2DArray',
'SurfaceEffector2DRef': 'UnityEngineSurfaceEffector2DRef',
'SystemSingleArray': 'SystemSingleArray',
'TargetJoint2D': 'UnityEngineTargetJoint2D',
'TargetJoint2DArray': 'UnityEngineTargetJoint2DArray',
'TargetJoint2DRef': 'UnityEngineTargetJoint2DRef',
'Text': 'UnityEngineUIText',
'TextAnchor': 'UnityEngineTextAnchor',
'TextArray': 'UnityEngineUITextArray',
'TextAsset': 'UnityEngineTextAsset',
'TextAssetArray': 'UnityEngineTextAssetArray',
'TextAssetRef': 'UnityEngineTextAssetRef',
'TextGenerationSettings': 'UnityEngineTextGenerationSettings',
'TextGenerator': 'UnityEngineTextGenerator',
'Texture': 'UnityEngineTexture',
'Texture2D': 'UnityEngineTexture2D',
'TextureSheetAnimationModule': 'UnityEngineParticleSystemTextureSheetAnimationModule',
'TextureSheetAnimationModuleArray': 'UnityEngineParticleSystemTextureSheetAnimationModuleArray',
'Time': 'UnityEngineTime',
'TimeArray': 'UnityEngineTimeArray',
'TimeRef': 'UnityEngineTimeRef',
'TimeSpan': 'SystemTimeSpan',
'TimeSpanArray': 'SystemTimeSpanArray',
'TimeZoneInfo': 'SystemTimeZoneInfo',
'TimeZoneInfoArray': 'SystemTimeZoneInfoArray',
'TimeZoneInfoRef': 'SystemTimeZoneInfoRef',
'Toggle': 'UnityEngineUIToggle',
'ToggleArray': 'UnityEngineUIToggleArray',
'ToggleEvent': 'UnityEngineUIToggleToggleEvent',
'ToggleEventArray': 'UnityEngineUIToggleToggleEventArray',
'ToggleGroup': 'UnityEngineUIToggleGroup',
'ToggleGroupArray': 'UnityEngineUIToggleGroupArray',
'Touch': 'UnityEngineTouch',
'TouchArray': 'UnityEngineTouchArray',
'TouchScreenKeyboard': 'UnityEngineTouchScreenKeyboard',
'TouchScreenKeyboardType': 'UnityEngineTouchScreenKeyboardType',
'TrackingData': 'VRCSDKBaseVRCPlayerApiTrackingData',
'TrackingDataArray': 'VRCSDKBaseVRCPlayerApiTrackingDataArray',
'TrackingDataType': 'VRCSDKBaseVRCPlayerApiTrackingDataType',
'TrailModule': 'UnityEngineParticleSystemTrailModule',
'TrailModuleArray': 'UnityEngineParticleSystemTrailModuleArray',
'TrailRenderer': 'UnityEngineTrailRenderer',
'TrailRendererArray': 'UnityEngineTrailRendererArray',
'TrailRendererRef': 'UnityEngineTrailRendererRef',
'Transform': 'UnityEngineTransform',
'TransformArray': 'UnityEngineTransformArray',
'TransformRef': 'UnityEngineTransformRef',
'Transition': 'UnityEngineUISelectableTransition',
'TransparencySortMode': 'UnityEngineTransparencySortMode',
'TriggerModule': 'UnityEngineParticleSystemTriggerModule',
'TriggerModuleArray': 'UnityEngineParticleSystemTriggerModuleArray',
'Type': 'UnityEngineUIImageType',
'TypeArray': 'SystemTypeArray',
'TypeCode': 'SystemTypeCode',
'TypeRef': 'SystemTypeRef',
'UIAnimationTriggersRef': 'UnityEngineUIAnimationTriggersRef',
'UIAspectRatioFitterRef': 'UnityEngineUIAspectRatioFitterRef',
'UIBaseMeshEffectRef': 'UnityEngineUIBaseMeshEffectRef',
'UIButtonButtonClickedEventRef': 'UnityEngineUIButtonButtonClickedEventRef',
'UIButtonRef': 'UnityEngineUIButtonRef',
'UICanvasScalerRef': 'UnityEngineUICanvasScalerRef',
'UIColorBlockRef': 'UnityEngineUIColorBlockRef',
'UIContentSizeFitterRef': 'UnityEngineUIContentSizeFitterRef',
'UIDefaultControlsResourcesRef': 'UnityEngineUIDefaultControlsResourcesRef',
'UIDropdownDropdownEventRef': 'UnityEngineUIDropdownDropdownEventRef',
'UIDropdownOptionDataListRef': 'UnityEngineUIDropdownOptionDataListRef',
'UIDropdownOptionDataRef': 'UnityEngineUIDropdownOptionDataRef',
'UIDropdownRef': 'UnityEngineUIDropdownRef',
'UIFontDataRef': 'UnityEngineUIFontDataRef',
'UIGraphicRaycasterRef': 'UnityEngineUIGraphicRaycasterRef',
'UIGraphicRef': 'UnityEngineUIGraphicRef',
'UIGridLayoutGroupRef': 'UnityEngineUIGridLayoutGroupRef',
'UIHorizontalLayoutGroupRef': 'UnityEngineUIHorizontalLayoutGroupRef',
'UIHorizontalOrVerticalLayoutGroupRef': 'UnityEngineUIHorizontalOrVerticalLayoutGroupRef',
'UIILayoutElementRef': 'UnityEngineUIILayoutElementRef',
'UIImageRef': 'UnityEngineUIImageRef',
'UIInputFieldOnChangeEventRef': 'UnityEngineUIInputFieldOnChangeEventRef',
'UIInputFieldRef': 'UnityEngineUIInputFieldRef',
'UIInputFieldSubmitEventRef': 'UnityEngineUIInputFieldSubmitEventRef',
'UILayoutElementRef': 'UnityEngineUILayoutElementRef',
'UILayoutGroupRef': 'UnityEngineUILayoutGroupRef',
'UILayoutRebuilderRef': 'UnityEngineUILayoutRebuilderRef',
'UIMaskRef': 'UnityEngineUIMaskRef',
'UIMaskUtilitiesRef': 'UnityEngineUIMaskUtilitiesRef',
'UIMaskableGraphicRef': 'UnityEngineUIMaskableGraphicRef',
'UINavigationRef': 'UnityEngineUINavigationRef',
'UIOutlineRef': 'UnityEngineUIOutlineRef',
'UIPositionAsUV1Ref': 'UnityEngineUIPositionAsUV1Ref',
'UIRawImageRef': 'UnityEngineUIRawImageRef',
'UIRectMask2DRef': 'UnityEngineUIRectMask2DRef',
'UIScrollRectRef': 'UnityEngineUIScrollRectRef',
'UIScrollRectScrollRectEventRef': 'UnityEngineUIScrollRectScrollRectEventRef',
'UIScrollbarRef': 'UnityEngineUIScrollbarRef',
'UIScrollbarScrollEventRef': 'UnityEngineUIScrollbarScrollEventRef',
'UISelectableRef': 'UnityEngineUISelectableRef',
'UIShadowRef': 'UnityEngineUIShadowRef',
'UISliderRef': 'UnityEngineUISliderRef',
'UISliderSliderEventRef': 'UnityEngineUISliderSliderEventRef',
'UISpriteStateRef': 'UnityEngineUISpriteStateRef',
'UITextRef': 'UnityEngineUITextRef',
'UIToggleGroupRef': 'UnityEngineUIToggleGroupRef',
'UIToggleRef': 'UnityEngineUIToggleRef',
'UIToggleToggleEventRef': 'UnityEngineUIToggleToggleEventRef',
'UIVertex': 'UnityEngineUIVertex',
'UIVertexArray': 'UnityEngineUIVertexArray',
'UIVertexHelperRef': 'UnityEngineUIVertexHelperRef',
'UIVertexRef': 'UnityEngineUIVertexRef',
'UIVerticalLayoutGroupRef': 'UnityEngineUIVerticalLayoutGroupRef',
'UInt16': 'SystemUInt16',
'UInt16Array': 'SystemUInt16Array',
'UInt16Ref': 'SystemUInt16Ref',
'UInt32': 'SystemUInt32',
'UInt32Array': 'SystemUInt32Array',
'UInt32Ref': 'SystemUInt32Ref',
'UInt64': 'SystemUInt64',
'UInt64Array': 'SystemUInt64Array',
'UInt64Ref': 'SystemUInt64Ref',
'UVChannelFlags': 'UnityEngineRenderingUVChannelFlags',
'UnicodeCategory': 'SystemGlobalizationUnicodeCategory',
'Unit': 'UnityEngineUICanvasScalerUnit',
'UnityAction': 'UnityEngineEventsUnityActionUnityEngineVector2',
'UnityEngineObject': 'UnityEngineObject',
'UnityEngineObjectArray': 'UnityEngineObjectArray',
'UnityEventCallState': 'UnityEngineEventsUnityEventCallState',
'VRCAudioBank': 'VRCSDK3ComponentsVRCAudioBank',
'VRCAudioBankArray': 'VRCSDK3ComponentsVRCAudioBankArray',
'VRCAvatarPedestal': 'VRCSDK3ComponentsVRCAvatarPedestal',
'VRCAvatarPedestalArray': 'VRCSDK3ComponentsVRCAvatarPedestalArray',
'VRCCombatSystem': 'VRCSDK3ComponentsVRCCombatSystem',
'VRCCombatSystemArray': 'VRCSDK3ComponentsVRCCombatSystemArray',
'VRCInputMethod': 'VRCSDKBaseVRCInputMethod',
'VRCInputSetting': 'VRCSDKBaseVRCInputSetting',
'VRCObjectSync': 'VRCSDK3ComponentsVRCObjectSync',
'VRCObjectSyncArray': 'VRCSDK3ComponentsVRCObjectSyncArray',
'VRCPickup': 'VRCSDK3ComponentsVRCPickup',
'VRCPickupArray': 'VRCSDK3ComponentsVRCPickupArray',
'VRCPlayerApi': 'VRCSDKBaseVRCPlayerApi',
'VRCPlayerApiArray': 'VRCSDKBaseVRCPlayerApiArray',
'VRCPortalMarker': 'VRCSDK3ComponentsVRCPortalMarker',
'VRCPortalMarkerArray': 'VRCSDK3ComponentsVRCPortalMarkerArray',
'VRCSDK3ComponentsVRCAudioBankRef': 'VRCSDK3ComponentsVRCAudioBankRef',
'VRCSDK3ComponentsVRCAvatarPedestalRef': 'VRCSDK3ComponentsVRCAvatarPedestalRef',
'VRCSDK3ComponentsVRCCombatSystemRef': 'VRCSDK3ComponentsVRCCombatSystemRef',
'VRCSDK3ComponentsVRCObjectSyncRef': 'VRCSDK3ComponentsVRCObjectSyncRef',
'VRCSDK3ComponentsVRCPickupRef': 'VRCSDK3ComponentsVRCPickupRef',
'VRCSDK3ComponentsVRCPortalMarkerRef': 'VRCSDK3ComponentsVRCPortalMarkerRef',
'VRCSDK3ComponentsVRCStationApiRef': 'VRCSDK3ComponentsVRCStationApiRef',
'VRCSDK3ComponentsVRCStationInputRef': 'VRCSDK3ComponentsVRCStationInputRef',
'VRCSDK3ComponentsVRCStationRef': 'VRCSDK3ComponentsVRCStationRef',
'VRCSDKBaseInputManagerRef': 'VRCSDKBaseInputManagerRef',
'VRCSDKBaseVRCPlayerApiRef': 'VRCSDKBaseVRCPlayerApiRef',
'VRCSDKBaseVRCPlayerApiTrackingDataRef': 'VRCSDKBaseVRCPlayerApiTrackingDataRef',
'VRCStation': 'VRCSDK3ComponentsVRCStation',
'VRCStationApi': 'VRCSDK3ComponentsVRCStationApi',
'VRCStationApiArray': 'VRCSDK3ComponentsVRCStationApiArray',
'VRCStationArray': 'VRCSDK3ComponentsVRCStationArray',
'VRCStationInput': 'VRCSDK3ComponentsVRCStationInput',
'VRCStationInputArray': 'VRCSDK3ComponentsVRCStationInputArray',
'VRC_EventDispatcher': 'VRCSDKBaseVRC_EventDispatcher',
'VRC_EventHandler': 'VRCSDKBaseVRC_EventHandler',
'VRC_Pickup': 'VRCSDKBaseVRC_Pickup',
'Vector2': 'UnityEngineVector2',
'Vector2Array': 'UnityEngineVector2Array',
'Vector2Ref': 'UnityEngineVector2Ref',
'Vector3': 'UnityEngineVector3',
'Vector3Array': 'UnityEngineVector3Array',
'Vector3ArrayRef': 'UnityEngineVector3ArrayRef',
'Vector3Ref': 'UnityEngineVector3Ref',
'Vector4': 'UnityEngineVector4',
'Vector4Array': 'UnityEngineVector4Array',
'Vector4Ref': 'UnityEngineVector4Ref',
'VelocityOverLifetimeModule': 'UnityEngineParticleSystemVelocityOverLifetimeModule',
'VelocityOverLifetimeModuleArray': 'UnityEngineParticleSystemVelocityOverLifetimeModuleArray',
'VertexHelper': 'UnityEngineUIVertexHelper',
'VertexHelperArray': 'UnityEngineUIVertexHelperArray',
'VerticalLayoutGroup': 'UnityEngineUIVerticalLayoutGroup',
'VerticalLayoutGroupArray': 'UnityEngineUIVerticalLayoutGroupArray',
'VerticalWrapMode': 'UnityEngineVerticalWrapMode',
'VrcBroadcastType': 'VRCSDKBaseVRC_EventHandlerVrcBroadcastType',
'VrcTargetType': 'VRCSDKBaseVRC_EventHandlerVrcTargetType',
'WheelCollider': 'UnityEngineWheelCollider',
'WheelColliderArray': 'UnityEngineWheelColliderArray',
'WheelColliderRef': 'UnityEngineWheelColliderRef',
'WheelFrictionCurve': 'UnityEngineWheelFrictionCurve',
'WheelHitRef': 'UnityEngineWheelHitRef',
'WheelJoint2D': 'UnityEngineWheelJoint2D',
'WheelJoint2DArray': 'UnityEngineWheelJoint2DArray',
'WheelJoint2DRef': 'UnityEngineWheelJoint2DRef',
'WillRenderCanvases': 'UnityEngineCanvasWillRenderCanvases',
'WorldUpType': 'UnityEngineAnimationsAimConstraintWorldUpType',
'WrapMode': 'UnityEngineWrapMode',
}
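# Illustrative lookup sketch (an assumption for clarity: the mapping above is bound to a
# module-level name, written here as UDON_TYPE_NAMES, which is hypothetical -- substitute the
# actual variable name defined earlier in this file):
#
#   UDON_TYPE_NAMES.get('Vector3')     # -> 'UnityEngineVector3'
#   UDON_TYPE_NAMES.get('Vector3Ref')  # -> 'UnityEngineVector3Ref'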
|
11596701
|
import glob
import logging
import os
import queue
import tempfile
import threading
import typing
import zipfile
from enum import Enum
from multiprocessing.pool import ThreadPool
import pandas as pd
import quandl
def get_time_series(filters: typing.List[dict], threads=1, async_=False, processor: typing.Callable = None):
    """
    Get data for a list of filters via the Quandl time series (historical) API.
    :param filters: a list of filters
    :param threads: number of threads for data retrieval
    :param async_: if True, return a queue immediately; otherwise, wait for the results
    :param processor: callable applied to each retrieved result
    :return: Queue, or dict of results keyed by dataset code (with identifier, date set as multi index)
    """
    return __get_data(filters=filters, api_type=__APIType.TIME_SERIES, threads=threads, async_=async_, processor=processor)
def get_table(filters: typing.List[dict], threads=1, async_=False, processor: typing.Callable = None):
    """
    Get data for a list of filters via the Quandl tables API.
    :param filters: a list of filters
    :param threads: number of threads for data retrieval
    :param async_: if True, return a queue immediately; otherwise, wait for the results
    :param processor: callable applied to each retrieved result
    :return: Queue, or dict of results keyed by datatable code
    """
    return __get_data(filters=filters, api_type=__APIType.TABLES, threads=threads, async_=async_, processor=processor)
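# Usage sketch (illustrative; assumes the QUANDL_API_KEY environment variable is set and that
# the dataset codes below are accessible to your account):
#
#   frames = get_time_series([{'dataset': 'WIKI/AAPL'}, {'dataset': 'WIKI/MSFT'}], threads=2)
#   # frames is a dict keyed by dataset code; pass async_=True to get a queue.Queue instead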
class __APIType(Enum):
TIME_SERIES = 1
TABLES = 2
def __get_data(filters: typing.List[dict], api_type: __APIType, threads=1, async_=False, processor: typing.Callable = None):
    """
    Get data for a list of filters using the tables or time series API.
    :param filters: a list of filters
    :param api_type: whether to use the time series or the tables API
    :param threads: number of threads for data retrieval
    :param async_: if True, return a queue immediately; otherwise, wait for the results
    :param processor: callable applied to each retrieved result
    :return: Queue, or dict of results keyed by dataset/datatable code
    """
api_k = os.environ['QUANDL_API_KEY'] if 'QUANDL_API_KEY' in os.environ else None
q = queue.Queue(100)
global_counter = {'c': 0}
lock = threading.Lock()
no_data = set()
def mp_worker(f):
try:
data = None
if api_type == __APIType.TIME_SERIES:
data = quandl.get(**f, paginate=True, api_key=api_k)
if data is not None:
data = data.tz_localize('UTC', copy=False)
q.put((f['dataset'], processor(data, **f) if processor is not None else data))
elif api_type == __APIType.TABLES:
data = quandl.get_table(**f, paginate=True, api_key=api_k)
if data is not None:
q.put((f['datatable_code'], processor(data, **f) if processor is not None else data))
except Exception as err:
data = None
logging.getLogger(__name__).exception(err)
if data is None:
            no_data.add(str(f))
with lock:
global_counter['c'] += 1
cnt = global_counter['c']
if cnt == len(filters):
q.put(None)
if cnt % 20 == 0 or cnt == len(filters):
logging.getLogger(__name__).info("Loaded " + str(cnt) + " queries")
if len(no_data) > 0:
no_data_list = list(no_data)
no_data_list.sort()
logging.getLogger(__name__).info("No data found for " + str(len(no_data_list)) + " datasets: " + str(no_data_list))
no_data.clear()
if threads > 1 and len(filters) > 1:
pool = ThreadPool(threads)
pool.map(mp_worker, (f for f in filters))
pool.close()
else:
for f in filters:
mp_worker(f)
    if not async_:
result = dict()
while True:
job = q.get()
if job is None:
break
if job[0] in result:
current = result[job[0]]
if isinstance(current, list):
current.append(job[1])
else:
result[job[0]] = [result[job[0]], job[1]]
else:
result[job[0]] = job[1]
return result
else:
return q
def bulkdownload(dataset: str, chunksize=None):
with tempfile.TemporaryDirectory() as td:
filename = os.path.join(td, dataset + '.zip')
logging.getLogger(__name__).info("Downloading dataset " + dataset + " to " + filename)
quandl.bulkdownload(dataset, filename=filename, api_key=os.environ['QUANDL_API_KEY'] if 'QUANDL_API_KEY' in os.environ else None)
zipfile.ZipFile(filename).extractall(td)
logging.getLogger(__name__).info("Done... Start yielding dataframes")
return pd.read_csv(glob.glob(os.path.join(td, '*.csv'))[0], header=None, chunksize=chunksize, parse_dates=[1])
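# Sketch (illustrative; assumes QUANDL_API_KEY is set and that your account can access the
# 'SF0' bulk download):
#
#   df = bulkdownload('SF0')                         # full DataFrame
#   reader = bulkdownload('SF0', chunksize=100000)   # iterator of DataFrame chunks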
def get_sf1(filters: typing.List[dict], threads=1, async_=False):
    """
    Return core US fundamentals (Quandl SF1) data.
    :param filters: list of filters
    :param threads: number of request threads
    :param async_: if True, return a queue immediately; otherwise, wait for the result
    :return: pd.DataFrame indexed by (date, symbol, indicator, dimension), or a Queue when async_=True
    """
def _sf1_processor(df, dataset):
df.rename(columns={'Value': 'value'}, inplace=True)
df.index.rename('date', inplace=True)
df = df.tz_localize('UTC', copy=False)
df['symbol'], df['indicator'], df['dimension'] = dataset.split('/')[1].split('_')
df.set_index(['symbol', 'indicator', 'dimension'], drop=True, inplace=True, append=True)
return df
    result = get_time_series(filters,
                             threads=threads,
                             async_=async_,
                             processor=_sf1_processor)
    if not async_ and isinstance(result, dict):
        result = pd.concat(result.values())
result.sort_index(inplace=True, ascending=True)
return result
def bulkdownload_sf0():
df = bulkdownload(dataset='SF0', chunksize=None)
sid = df[0]
df.drop(0, axis=1, inplace=True)
df = pd.concat([df, sid.str.split('_', expand=True)], axis=1, copy=False)
df.columns = ['date', 'value', 'symbol', 'indicator', 'dimension']
df.set_index(['date', 'symbol', 'indicator', 'dimension'], drop=True, inplace=True, append=False)
return df
class QuandlEvents(object):
"""
Quandl requests via events
"""
def __init__(self, listeners):
self.listeners = listeners
self.listeners += self.listener
def listener(self, event):
if event['type'] == 'quandl_timeseries_request':
            result = get_time_series(event['data'] if isinstance(event['data'], list) else [event['data']],
                                     threads=event['threads'] if 'threads' in event else 1,
                                     async_=event['async'] if 'async' in event else False)
self.listeners({'type': 'quandl_timeseries_result', 'data': result})
elif event['type'] == 'quandl_table_request':
            result = get_table(event['data'] if isinstance(event['data'], list) else [event['data']],
                               threads=event['threads'] if 'threads' in event else 1,
                               async_=event['async'] if 'async' in event else False)
self.listeners({'type': 'quandl_table_result', 'data': result})
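# Event-driven usage sketch (illustrative; assumes an event bus that is callable and supports
# `+=` subscription, as QuandlEvents expects -- the bus itself is not defined in this module):
#
#   bus = ...  # your listeners object
#   QuandlEvents(bus)
#   bus({'type': 'quandl_timeseries_request', 'data': {'dataset': 'WIKI/AAPL'}, 'threads': 4})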
|
11596705
|
import os
import sys
import sphinx_rtd_theme
sys.path.insert(0, os.path.abspath(os.path.join('..', 'snowshu')))
sys.path.insert(0, os.path.abspath('..'))
master_doc = 'index'
extensions = [
"sphinx_rtd_theme",
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
# "sphinx_autodoc_typehints",
]
html_theme = "sphinx_rtd_theme"
project = "SnowShu"
|
11596727
|
from __future__ import division, print_function
import sys
import numpy as np
class PeriodicOptimizer(object):
def find_best_periods(self, model, n_periods=5, return_scores=False):
raise NotImplementedError()
def best_period(self, model):
periods = self.find_best_periods(model, n_periods=1,
return_scores=False)
return periods[0]
class LinearScanOptimizer(PeriodicOptimizer):
"""Optimizer based on a linear scan of candidate frequencies.
Parameters / Attributes
-----------------------
period_range : tuple
(min_period, max_period) for the linear scan
quiet : bool (default = False)
If true, then suppress printed output during optimization.
By default, information is printed to stdout.
first_pass_coverage : float (default = 5.0)
estimated number of points across the width of a typical peak for the
initial scan.
final_pass_coverage : float (default = 500.0)
estimated number of points across the width of a typical peak within
the final scan.
"""
def __init__(self, period_range=None, quiet=False,
first_pass_coverage=5, final_pass_coverage=500):
self._period_range = period_range
self.quiet = quiet
self.first_pass_coverage = first_pass_coverage
self.final_pass_coverage = final_pass_coverage
def set(self, **kwargs):
for key, val in kwargs.items():
setattr(self, key, val)
@property
def period_range(self):
if self._period_range is None:
raise ValueError("period_range must be set in optimizer in order "
"to find the best periods. For example:\n"
" >>> model = LombScargle(fit_period=True)\n"
" >>> model.optimizer.period_range = (0.2, 1.0)")
return self._period_range
@period_range.setter
def period_range(self, value):
value = tuple(value)
assert len(value) == 2
self._period_range = value
def compute_grid_size(self, model):
# compute the estimated peak width from the data range
tmin, tmax = np.min(model.t), np.max(model.t)
width = 2 * np.pi / (tmax - tmin)
# our candidate steps in omega is controlled by period_range & coverage
omega_step = width / self.first_pass_coverage
omega_min = 2 * np.pi / np.max(self.period_range)
omega_max = 2 * np.pi / np.min(self.period_range)
N = (omega_max - omega_min) // omega_step
return N
def find_best_periods(self, model, n_periods=5, return_scores=False):
"""Find the `n_periods` best periods in the model"""
# compute the estimated peak width from the data range
tmin, tmax = np.min(model.t), np.max(model.t)
width = 2 * np.pi / (tmax - tmin)
# raise a ValueError if period limits are out of range
if tmax - tmin < np.max(self.period_range):
            raise ValueError("The optimizer is not designed to search "
                             "for periods larger than the data baseline. ")
# our candidate steps in omega is controlled by period_range & coverage
omega_step = width / self.first_pass_coverage
omega_min = 2 * np.pi / np.max(self.period_range)
omega_max = 2 * np.pi / np.min(self.period_range)
omegas = np.arange(omega_min, omega_max + omega_step, omega_step)
periods = 2 * np.pi / omegas
# print some updates if desired
if not self.quiet:
print("Finding optimal frequency:")
print(" - Estimated peak width = {0:.3g}".format(width))
print(" - Using {0} steps per peak; "
"omega_step = {1:.3g}".format(self.first_pass_coverage,
omega_step))
print(" - User-specified period range: "
" {0:.2g} to {1:.2g}".format(periods.min(), periods.max()))
print(" - Computing periods at {0:.0f} steps".format(len(periods)))
sys.stdout.flush()
# Compute the score on the initial grid
N = int(1 + width // omega_step)
score = model.score_frequency_grid(omega_min / (2 * np.pi),
omega_step / (2 * np.pi),
len(omegas))
# find initial candidates of unique peaks
minscore = score.min()
n_candidates = max(5, 2 * n_periods)
candidate_freqs = np.zeros(n_candidates)
candidate_scores = np.zeros(n_candidates)
for i in range(n_candidates):
j = np.argmax(score)
candidate_freqs[i] = omegas[j]
candidate_scores[i] = score[j]
score[max(0, j - N):(j + N)] = minscore
# If required, do a final pass on these unique at higher resolution
if self.final_pass_coverage <= self.first_pass_coverage:
best_periods = 2 * np.pi / candidate_freqs[:n_periods]
best_scores = candidate_scores[:n_periods]
else:
f0 = -omega_step / (2 * np.pi)
df = width / self.final_pass_coverage / (2 * np.pi)
Nf = abs(2 * f0) // df
steps = f0 + df * np.arange(Nf)
candidate_freqs /= (2 * np.pi)
freqs = steps + candidate_freqs[:, np.newaxis]
periods = 1. / freqs
if not self.quiet:
print("Zooming-in on {0} candidate peaks:"
"".format(n_candidates))
print(" - Computing periods at {0:.0f} "
"steps".format(periods.size))
sys.stdout.flush()
#scores = model.score(periods)
scores = np.array([model.score_frequency_grid(c + f0, df, Nf)
for c in candidate_freqs])
best_scores = scores.max(1)
j = np.argmax(scores, 1)
i = np.argsort(best_scores)[::-1]
best_periods = periods[i, j[i]]
best_scores = best_scores[i]
if return_scores:
return best_periods[:n_periods], best_scores[:n_periods]
else:
return best_periods[:n_periods]
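# Usage sketch (illustrative; assumes a periodic model exposing `t` and `score_frequency_grid`,
# e.g. the LombScargle model referenced in the period_range docstring above):
#
#   model = LombScargle(fit_period=True)
#   model.optimizer = LinearScanOptimizer(period_range=(0.2, 1.0), quiet=True)
#   model.fit(t, y, dy)
#   best_period = model.optimizer.best_period(model)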
|
11596733
|
import torch
import torch.nn as nn
from model.base_model import BaseModel
import network.hand_depth_net as depth_net
from network.projection import create_projection_net
import util.image as image
from util.joint import JointConverter
class Pix2DepthModel(BaseModel):
def setup(self, make_optimizer = True):
opt = self.opt
self.make_optimizer = make_optimizer
self.loss_names = []
self.network = depth_net.create_hdg_net(opt, self.gpu_ids)
self.joint_converter = JointConverter(opt.num_joints)
self.projection_net = create_projection_net(opt, self.gpu_ids)
if self.mode.is_train():
self.criterionL2 = nn.MSELoss()
self.heatmap_loss_weight = opt.heatmap_loss_weight
self.heatmap_interm_loss_weight = opt.heatmap_interm_loss_weight
self.joint_loss_weight = opt.joint_loss_weight
self.joint_interm_loss_weight = opt.joint_interm_loss_weight
self.depth_loss_weight = opt.depth_loss_weight
if make_optimizer:
# initialize optimizers
self.optimizer = torch.optim.Adam(self.network.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer)
else:
self.network.eval()
print("Pix2Depth network set to eval mode.")
self.check_and_load_pretrained()
self.run_interm = not self.mode.is_eval()
self.is_setup = True
def set_input(self, data):
pix = data['pix']
self.real_pix = pix.to(self.device)
self.real_pix = image.normalize_img(self.real_pix)
if 'depth' in data and (data['depth'] is not None):
self.real_depth = data['depth'].to(self.device)
self.real_depth = image.normalize_img(self.real_depth)
else:
self.real_depth = None
self.joint_true = None
self.heatmap_true = None
if self.run_interm:
joint_true = self.send_tensor_to_device(data['joint'])
normalized_joint = self.joint_converter.normalize(joint_true)
self.heatmap_true = self.projection_net(normalized_joint)
self.heatmap_true.requires_grad = False
self.joint_true = self.joint_converter.convert_for_training(joint_true)
def forward(self):
assert self.is_setup
result = self.network(self.real_pix)
self.fake_depth = result['fake']
if self.run_interm:
self.fake_interms = result['interms']
self.joint_interms = result['joint_interms']
self.heatmap_interms = result['heatmap_interms']
def optimize_parameters(self):
self.optimizer.zero_grad()
self.update_loss()
self.loss_total.backward()
self.optimizer.step()
def update_loss(self):
self.update_depth_loss()
self.update_depth_interm_loss()
self.update_joint_losses()
self.update_heatmap_losses()
self.loss_total = self.get_total_loss()
def get_total_loss(self):
total_G_loss = self.loss_depth + self.loss_depth_interm + self.loss_joint + self.loss_heatmap
return total_G_loss
def update_depth_loss(self):
self.loss_depth = 0
if self.real_depth is not None:
self.loss_depth = self.criterionL2(self.fake_depth, self.real_depth) * self.depth_loss_weight
self.add_loss_name('depth')
def update_depth_interm_loss(self):
self.loss_depth_interm = 0
if self.real_depth is None:
return
for i, interm in enumerate(self.fake_interms):
loss = self.criterionL2(interm, self.real_depth) * self.depth_loss_weight * 0.5
loss_name = "depth_interm_{}".format(i)
setattr(self, "loss_"+loss_name, loss)
self.add_loss_name(loss_name)
self.loss_depth_interm += loss
def update_joint_losses(self):
self.loss_joint = 0
for i, interm in enumerate(self.joint_interms):
interm_loss = self.criterionL2(interm, self.joint_true) * self.joint_loss_weight
loss_name = "joint_interm_{}".format(i)
setattr(self, 'loss_'+loss_name, interm_loss)
self.add_loss_name(loss_name)
self.loss_joint += interm_loss
def update_heatmap_losses(self):
self.loss_heatmap = 0
for i, interm in enumerate(self.heatmap_interms):
interm_loss = self.criterionL2(interm, self.heatmap_true) * self.heatmap_loss_weight
            loss_name = "heatmap_interm_{}".format(i)
setattr(self, 'loss_'+loss_name, interm_loss)
self.add_loss_name(loss_name)
self.loss_heatmap += interm_loss
def add_loss_name(self, loss_name):
if loss_name not in self.loss_names:
self.loss_names.append(loss_name)
def pack_as_checkpoint(self):
checkpoint = {}
checkpoint['network'] = self.extract_weights(self.network)
if self.make_optimizer:
checkpoint['optim'] = self.optimizer.state_dict()
return checkpoint
def load_from_checkpoint(self, checkpoint, model_only):
self.apply_weights(self.network, checkpoint['network'])
if not model_only:
self.optimizer.load_state_dict(checkpoint['optim'])
def get_fake(self):
return self.fake_depth
def get_current_results(self):
pix = image.unnormalize_as_img(self.real_pix)
fake_depth = image.unnormalize_as_img(self.fake_depth)
results = {'pix': pix, 'fake_depth': fake_depth}
if not self.mode.is_eval():
depth = image.unnormalize_as_img(self.real_depth)
results['depth'] = depth
results['heatmap_interms'] = self.heatmap_interms
return results
def get_grads(self):
grads = {}
for tag, param in self.network.named_parameters():
tag = "{}".format(tag)
grads[tag] = param.grad.data.detach().cpu()
return grads
def get_net_parameters(self):
return self.network.parameters()
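# Training-loop sketch (illustrative; the constructor/loader wiring below is an assumption --
# it relies on the BaseModel plumbing and an options object `opt` defined elsewhere in this repo,
# and on batches shaped like the dict consumed by set_input above):
#
#   model = Pix2DepthModel(opt)
#   model.setup(make_optimizer=True)
#   for data in loader:
#       model.set_input(data)      # data: {'pix': ..., 'depth': ..., 'joint': ...}
#       model.forward()
#       model.optimize_parameters()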
|
11596759
|
from sofi.ui import FormGroup
def test_basic():
assert(str(FormGroup()) == "<div class=\"form-group\"></div>")
def test_text():
assert(str(FormGroup("text")) == "<div class=\"form-group\">text</div>")
def test_custom_class_ident_style_and_attrs():
assert(str(FormGroup("text", cl='abclass', ident='123', style="font-size:0.9em;", attrs={"data-test": 'abc'}))
== "<div id=\"123\" class=\"form-group abclass\" style=\"font-size:0.9em;\" data-test=\"abc\">text</div>")
|
11596760
|
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer, SpacegroupOperations, PointGroupAnalyzer
from pymatgen.util.coord import coord_list_mapping_pbc, coord_list_mapping
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Molecule, Structure
from bsym import SpaceGroup, SymmetryOperation, ConfigurationSpace, PointGroup
from copy import copy
from functools import partial
import numpy as np
def structure_cartesian_coordinates_mapping( structure, symmop ):
"""
Maps the coordinates of pymatgen ``Structure`` according to a ``SymmOp`` symmetry operation.
Args:
structure (``Structure``): The pymatgen ``Structure``.
symmop (``SymmOp``): The pymatgen symmetry operation object.
    Returns:
(np.array): The mapped Cartesian coordinates.
"""
return structure.lattice.get_cartesian_coords( symmop.operate_multi( structure.frac_coords ) )
def molecule_cartesian_coordinates_mapping( molecule, symmop ):
"""
Maps the coordinates of pymatgen ``Molecule`` according to a ``SymmOp`` symmetry operation.
Args:
        molecule (``Molecule``): The pymatgen ``Molecule``.
symmop (``SymmOp``): The pymatgen symmetry operation object.
    Returns:
(np.array): The mapped Cartesian coordinates.
"""
return symmop.operate_multi( molecule.cart_coords )
def structure_mapping_list( new_structure, mapping_structure, atol ):
"""
Gives the index mapping between two pymatgen ``Structure`` objects.
Args:
        new_structure (``Structure``): The structure to be mapped.
        mapping_structure (``Structure``): The reference structure to map onto.
        atol (float): Absolute tolerance for the coordinate mapping.
Returns:
list of indices such that mapping_structure.sites[indices] == new_structure.sites
"""
return coord_list_mapping_pbc( new_structure.frac_coords, mapping_structure.frac_coords, atol=atol )
def molecule_mapping_list( new_molecule, mapping_molecule, atol ):
"""
Gives the index mapping between two pymatgen ``Molecule`` objects.
Args:
        new_molecule (``Molecule``): The molecule to be mapped.
        mapping_molecule (``Molecule``): The reference molecule to map onto.
        atol (float): Absolute tolerance for the coordinate mapping.
Returns:
list of indices such that mapping_molecule.sites[indices] == new_molecule.sites
"""
return coord_list_mapping( new_molecule.cart_coords, mapping_molecule.cart_coords, atol=atol )
def unique_symmetry_operations_as_vectors_from_structure( structure, verbose=False, subset=None, atol=1e-5 ):
"""
Uses `pymatgen`_ symmetry analysis to find the minimum complete set of symmetry operations for the space group of a structure.
Args:
        structure (pymatgen ``Structure`` or ``Molecule``): structure to be analysed.
        verbose (Optional [bool]): print the identified space group / point group symbol. Default is False.
        subset (Optional [list]): list of atom indices to be used for generating the symmetry operations.
atol (Optional [float]): tolerance factor for the ``pymatgen`` `coordinate mapping`_ under each symmetry operation.
Returns:
(list[list]): a list of lists, containing the symmetry operations as vector mappings.
.. _pymatgen:
http://pymatgen.org
.. _coordinate mapping:
http://pymatgen.org/pymatgen.util.coord_utils.html#pymatgen.util.coord_utils.coord_list_mapping_pbc
"""
if isinstance( structure, Structure ):
instantiate_structure = partial( Structure, lattice=structure.lattice, coords_are_cartesian=True )
coord_mapping = structure_cartesian_coordinates_mapping
mapping_list = structure_mapping_list
symmetry_analyzer = SpacegroupAnalyzer( structure )
if verbose:
print( "The space group for this structure is {}".format( symmetry_analyzer.get_space_group_symbol()) )
elif isinstance( structure, Molecule ):
instantiate_structure = Molecule
coord_mapping = molecule_cartesian_coordinates_mapping
mapping_list = molecule_mapping_list
symmetry_analyzer = PointGroupAnalyzer( structure, tolerance=atol )
if verbose:
print( "The point group for this structure is {}".format( symmetry_analyzer.get_pointgroup()) )
else:
raise ValueError( 'structure argument should be a Structure or Molecule object' )
symmetry_operations = symmetry_analyzer.get_symmetry_operations()
mappings = []
if subset:
species_subset = [ spec for i,spec in enumerate( structure.species ) if i in subset ]
cart_coords_subset = [ coord for i, coord in enumerate( structure.cart_coords ) if i in subset ]
mapping_structure = instantiate_structure( species=species_subset, coords=cart_coords_subset )
else:
mapping_structure = structure
for symmop in symmetry_operations:
cart_coords = coord_mapping( mapping_structure, symmop )
new_structure = instantiate_structure( species=mapping_structure.species, coords=cart_coords )
new_mapping = [ x+1 for x in list( mapping_list( new_structure, mapping_structure, atol ) ) ]
if new_mapping not in mappings:
mappings.append( new_mapping )
return mappings
def space_group_symbol_from_structure( structure ):
"""
Returns the symbol for the space group defined by this structure.
Args:
structure (pymatgen ``Structure``): The input structure.
Returns:
(str): The space group symbol.
"""
symmetry_analyzer = SpacegroupAnalyzer( structure )
symbol = symmetry_analyzer.get_space_group_symbol()
return symbol
def space_group_from_structure( structure, subset=None, atol=1e-5 ):
"""
Generates a ``SpaceGroup`` object from a `pymatgen` ``Structure``.
Args:
structure (pymatgen ``Structure``): structure to be used to define the :any:`SpaceGroup`.
subset (Optional [list]): list of atom indices to be used for generating the symmetry operations.
atol (Optional [float]): tolerance factor for the ``pymatgen`` `coordinate mapping`_ under each symmetry operation.
Returns:
a new :any:`SpaceGroup` instance
.. _coordinate mapping:
http://pymatgen.org/pymatgen.util.coord_utils.html#pymatgen.util.coord_utils.coord_list_mapping_pbc
"""
mappings = unique_symmetry_operations_as_vectors_from_structure( structure, subset=subset, atol=atol )
symmetry_operations = [ SymmetryOperation.from_vector( m ) for m in mappings ]
return SpaceGroup( symmetry_operations=symmetry_operations )
def point_group_from_molecule( molecule, subset=None, atol=1e-5 ):
"""
Generates a ``PointGroup`` object from a `pymatgen` ``Molecule``.
Args:
molecule (pymatgen ``Molecule``): molecule to be used to define the :any:`PointGroup`.
subset (Optional [list]): list of atom indices to be used for generating the symmetry operations.
atol (Optional [float]): tolerance factor for the ``pymatgen`` `coordinate mapping`_ under each symmetry operation.
Returns:
a new :any:`PointGroup` instance
.. _coordinate mapping:
http://pymatgen.org/pymatgen.util.coord_utils.html#pymatgen.util.coord_utils.coord_list_mapping
"""
molecule = Molecule( molecule.species, molecule.cart_coords - molecule.center_of_mass )
mappings = unique_symmetry_operations_as_vectors_from_structure( molecule, subset=subset, atol=atol )
symmetry_operations = [ SymmetryOperation.from_vector( m ) for m in mappings ]
return PointGroup( symmetry_operations=symmetry_operations )
def configuration_space_from_structure( structure, subset=None, atol=1e-5 ):
"""
Generate a ``ConfigurationSpace`` object from a `pymatgen` ``Structure``.
Args:
structure (pymatgen ``Structure``): structure to be used to define the :any:`ConfigurationSpace`.
subset (Optional [list]): list of atom indices to be used for generating the configuration space.
atol (Optional [float]): tolerance factor for the ``pymatgen`` `coordinate mapping`_ under each symmetry operation.
Returns:
a new :any:`ConfigurationSpace` instance.
.. _coordinate mapping:
http://pymatgen.org/pymatgen.util.coord_utils.html#pymatgen.util.coord_utils.coord_list_mapping_pbc
"""
space_group = space_group_from_structure( structure, subset=subset, atol=atol )
if subset is None:
subset = list( range( 1, len( structure )+1 ) )
config_space = ConfigurationSpace( objects=subset, symmetry_group=space_group )
return config_space
def configuration_space_from_molecule( molecule, subset=None, atol=1e-5 ):
"""
Generate a ``ConfigurationSpace`` object from a `pymatgen` ``Molecule``.
Args:
molecule (pymatgen ``Molecule``): molecule to be used to define the :any:`ConfigurationSpace`.
subset (Optional [list]): list of atom indices to be used for generating the configuration space.
atol (Optional [float]): tolerance factor for the ``pymatgen`` `coordinate mapping`_ under each symmetry operation.
Returns:
a new :any:`ConfigurationSpace` instance.
.. _coordinate mapping:
http://pymatgen.org/pymatgen.util.coord_utils.html#pymatgen.util.coord_utils.coord_list_mapping
"""
molecule = Molecule( molecule.species, molecule.cart_coords - molecule.center_of_mass )
point_group = point_group_from_molecule( molecule, subset=subset, atol=atol )
if subset is None:
subset = list( range( 1, len( molecule )+1 ) )
config_space = ConfigurationSpace( objects=subset, symmetry_group=point_group )
return config_space
def unique_structure_substitutions( structure, to_substitute, site_distribution, verbose=False, atol=1e-5, show_progress=False ):
"""
Generate all symmetry-unique structures formed by substituting a set of sites in a `pymatgen` structure.
Args:
structure (pymatgen.Structure): The parent structure.
to_substitute (str): atom label for the sites to be substituted.
site_distribution (dict): A dictionary that defines the number of each substituting element.
verbose (bool): verbose output.
atol (Optional [float]): tolerance factor for the ``pymatgen`` `coordinate mapping`_ under each symmetry operation. Default=1e-5.
show_progress (opt:default=False): Show a progress bar.
Setting to `True` gives a simple progress bar.
Setting to `"notebook"` gives a Jupyter notebook compatible progress bar.
Returns:
(list[Structure]): A list of Structure objects for each unique substitution.
Notes:
The number of symmetry-equivalent configurations for each structure
is stored in the `number_of_equivalent_configurations` attribute.
If the parent structure was previously generated using this function
(as part of a sequence of substitutions) the full configuration
degeneracy of each symmetry inequivalent configuration is stored in
the `full_configuration_degeneracy` attribute. If the parent structure
is a standard Pymatgen Structure object, `number_of_equivalent_configurations`
and `full_configuration_degeneracy` will be equal.
.. _coordinate mapping:
http://pymatgen.org/pymatgen.util.coord_utils.html#pymatgen.util.coord_utils.coord_list_mapping_pbc
"""
site_substitution_index = list( structure.indices_from_symbol( to_substitute ) )
if len( site_substitution_index ) != sum( site_distribution.values() ):
raise ValueError( "Number of sites from index does not match number from site distribution" )
if isinstance( structure, Structure ):
config_space = configuration_space_from_structure( structure, subset=site_substitution_index, atol=atol )
elif isinstance( structure, Molecule ):
structure = Molecule( structure.species, structure.cart_coords - structure.center_of_mass )
config_space = configuration_space_from_molecule( structure, subset=site_substitution_index, atol=atol )
else:
raise ValueError( "pymatgen Structure or Molecule object expected" )
numeric_site_distribution, numeric_site_mapping = parse_site_distribution( site_distribution )
unique_configurations = config_space.unique_configurations( numeric_site_distribution, verbose=verbose, show_progress=show_progress )
new_structures = [ new_structure_from_substitution( structure, site_substitution_index, [ numeric_site_mapping[k] for k in c.tolist() ] ) for c in unique_configurations ]
if hasattr( structure, 'number_of_equivalent_configurations' ):
for s, c in zip( new_structures, unique_configurations ):
s.number_of_equivalent_configurations = c.count
s.full_configuration_degeneracy = c.count * structure.full_configuration_degeneracy
else:
for s, c in zip( new_structures, unique_configurations ):
s.number_of_equivalent_configurations = c.count
s.full_configuration_degeneracy = c.count
return new_structures
def parse_site_distribution( site_distribution ):
"""
Converts a site distribution using species labels into one using integer labels.
Args:
site_distribution (dict): e.g. `{ 'Mg': 1, 'Li': 3 }`
Returns:
numeric_site_distribution ( dict): e.g. `{ 1:1, 0:3 }`
numeric_site_mapping (dict): e.g. `{ 0:'Mg', 1:'Li' }`
"""
numeric_site_distribution = {}
numeric_site_mapping = {}
for i,k in enumerate( site_distribution.keys() ):
numeric_site_distribution[i] = site_distribution[k]
numeric_site_mapping[i] = k
return numeric_site_distribution, numeric_site_mapping
def new_structure_from_substitution( parent_structure, site_substitution_index, new_species_list ):
"""
Generate a new pymatgen ``Structure`` from site substitution parameters.
Args:
        parent_structure (Structure): The parent pymatgen ``Structure`` object.
site_substitution_index (list[int]): The list of site indices to be substituted.
new_species_list (list[str]): A list of the replacement atomic species.
Returns:
(``Structure``): The new pymatgen ``Structure``.
Notes:
pymatgen ``Structure`` and ``Molecule`` classes both subclass ``SiteCollection``.
This function will also accept a parent ``Molecule`` object, and return a new
``Molecule``.
"""
    if len( site_substitution_index ) != len( new_species_list ):
        raise ValueError( "Number of sites to substitute does not match the number of new species" )
    if any( i >= len( parent_structure ) for i in site_substitution_index ):
        raise ValueError( "site_substitution_index contains site indices outside the parent structure" )
s = parent_structure.copy()
for i, spec in zip( site_substitution_index, new_species_list ):
s[i] = spec
return s
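# A minimal usage sketch (commented out; the lattice, species and coordinates
# are illustrative assumptions, not part of this module): generate the
# symmetry-inequivalent Li/Mg orderings over the Na sites of a toy cubic cell.
#
# parent = Structure( Lattice.cubic( 4.0 ), [ 'Na', 'Na', 'Cl', 'Cl' ],
#                     [ [ 0.0, 0.0, 0.0 ], [ 0.5, 0.5, 0.5 ],
#                       [ 0.5, 0.0, 0.0 ], [ 0.0, 0.5, 0.5 ] ] )
# structures = unique_structure_substitutions( parent, 'Na', { 'Li': 1, 'Mg': 1 } )
# for s in structures:
#     print( s.number_of_equivalent_configurations )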
|
11596806
|
import argparse
import math
import os
import sys
sys.path.append(os.path.abspath(os.path.join(__file__, '..', '..')))
import cvbase as cvb
import numpy as np
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from dataset.FlowInfer import FlowInfer
from models import LiteFlowNet
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--pretrained_model_liteflownet', type=str,
default='./pretrained_models/default.pth')
parser.add_argument('--img_size', type=list, default=(512, 1024, 3))
parser.add_argument('--rgb_max', type=float, default=255.)
parser.add_argument('--fp16', action='store_true')
parser.add_argument('--data_list', type=str, default=None, help='Give the data list to extract flow')
parser.add_argument('--frame_dir', type=str, default=None,
help='Give the dir of the video frames and generate the data list to extract flow')
args = parser.parse_args()
return args
def estimate(model, tensorFirst, tensorSecond):
# print(tensorFirst.shape)
#assert(tensorFirst.size(1) == tensorSecond.size(1))
#assert(tensorFirst.size(2) == tensorSecond.size(2))
intWidth = tensorFirst.size(3)
intHeight = tensorFirst.size(2)
# assert(intWidth == 1024) # remember that there is no guarantee for correctness, comment this line out if you acknowledge this and want to continue
# assert(intHeight == 436) # remember that there is no guarantee for correctness, comment this line out if you acknowledge this and want to continue
#tensorPreprocessedFirst = tensorFirst.to(device).view(1, 3, intHeight, intWidth)
#tensorPreprocessedSecond = tensorSecond.to(device).view(1, 3, intHeight, intWidth)
    intPreprocessedWidth = int(math.floor(math.ceil(intWidth / 32.0) * 32.0))
    intPreprocessedHeight = int(math.floor(math.ceil(intHeight / 32.0) * 32.0))
tensorPreprocessedFirst = torch.nn.functional.interpolate(input=tensorFirst, size=(intPreprocessedHeight, intPreprocessedWidth), mode='bilinear', align_corners=False)
tensorPreprocessedSecond = torch.nn.functional.interpolate(input=tensorSecond, size=(intPreprocessedHeight, intPreprocessedWidth), mode='bilinear', align_corners=False)
tensorFlow = torch.nn.functional.interpolate(input=model(tensorPreprocessedFirst, tensorPreprocessedSecond), size=(intHeight, intWidth), mode='bilinear', align_corners=False)
tensorFlow[:, 0, :, :] *= float(intWidth) / float(intPreprocessedWidth)
tensorFlow[:, 1, :, :] *= float(intHeight) / float(intPreprocessedHeight)
return tensorFlow
def infer(args):
assert args.data_list is not None or args.frame_dir is not None
if args.frame_dir is not None:
data_list = generate_flow_list(args.frame_dir)
args.data_list = data_list
device = torch.device('cuda:0')
print('====> Loading', args.pretrained_model_liteflownet)
Flownet = LiteFlowNet(args.pretrained_model_liteflownet)
Flownet.to(device)
Flownet.eval()
dataset_ = FlowInfer(args.data_list, size=args.img_size)
dataloader_ = DataLoader(dataset_, batch_size=1, shuffle=False, num_workers=0)
#task_bar = ProgressBar(dataset_.__len__())
with torch.no_grad():
for i, (f1, f2, output_path_) in tqdm(enumerate(dataloader_), total=len(dataset_)):
f1 = f1.to(device)
f2 = f2.to(device)
flow = estimate(Flownet, f1, f2)
output_path = output_path_[0]
output_file = os.path.dirname(output_path)
if not os.path.exists(output_file):
os.makedirs(output_file)
flow_numpy = flow[0].permute(1, 2, 0).data.cpu().numpy()
cvb.write_flow(flow_numpy, output_path)
sys.stdout.write('\n')
print('LiteFlowNet Inference has been finished~!')
    print('Extracted Flow has been saved in', output_file)
return output_file
def generate_flow_list(frame_dir):
dataset_root = os.path.dirname(frame_dir)
video_root = frame_dir
train_list = open(os.path.join(dataset_root, 'video.txt'), 'w')
flow_list = open(os.path.join(dataset_root, 'video_flow.txt'), 'w')
output_root = os.path.join(dataset_root, 'Flow')
img_total = 0
video_id = os.path.basename(frame_dir)
img_id_list = [x for x in os.listdir(video_root) if '.png' in x or '.jpg' in x]
img_id_list.sort()
img_num = len(img_id_list)
train_list.write(video_id)
train_list.write(' ')
train_list.write(str(img_num))
train_list.write('\n')
img_total += img_num
for i in range(img_num):
if i + 1 < img_num:
flow_list.write(os.path.join(video_root, img_id_list[i]))
flow_list.write(' ')
flow_list.write(os.path.join(video_root, img_id_list[i + 1]))
flow_list.write(' ')
flow_list.write(os.path.join(output_root, img_id_list[i][:-4] + '.flo'))
flow_list.write('\n')
if i - 1 >= 0:
flow_list.write(os.path.join(video_root, img_id_list[i]))
flow_list.write(' ')
flow_list.write(os.path.join(video_root, img_id_list[i - 1]))
flow_list.write(' ')
flow_list.write(os.path.join(output_root, img_id_list[i][:-4] + '.rflo'))
flow_list.write('\n')
print('This Video has', img_total, 'Images')
train_list.close()
flow_list.close()
print('The optical flow list has been generated:',
os.path.join(dataset_root, 'video_flow.txt'))
return os.path.join(dataset_root, 'video_flow.txt')
def main():
args = parse_args()
infer(args)
if __name__ == '__main__':
main()
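# Example invocation (the script name and paths below are illustrative):
#   python extract_flow.py --frame_dir /path/to/video/frames
# Forward (.flo) and backward (.rflo) flow files are written to a 'Flow'
# directory next to the frame directory, and the generated pair list is saved
# as 'video_flow.txt' alongside it.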
|
11596817
|
from subprocess import check_call
import os
import os.path as op
import shutil as sh
import yaml
from nbclean import NotebookCleaner
import nbformat as nbf
from tqdm import tqdm
import numpy as np
from glob import glob
import argparse
DESCRIPTION = ("Convert a collection of Jupyter Notebooks into Jekyll "
"markdown suitable for a course textbook.")
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument("--site_root", default=None, help="Path to the root of the textbook repository.")
parser.add_argument("--overwrite", action='store_true', help="Overwrite md files if they already exist.")
parser.add_argument("--execute", action='store_true', help="Execute notebooks before converting to MD.")
parser.set_defaults(overwrite=False, execute=False)
def _markdown_to_files(path_markdown, indent=2):
"""Takes a markdown file containing chapters/sub-headings and
converts it to a file structure we can use to build a side bar."""
with open(path_markdown, 'r') as ff:
lines = ff.readlines()
files = []
for line in lines:
if line.strip().startswith('* '):
title = _between_symbols(line, '[', ']')
link = _between_symbols(line, '(', ')')
spaces = len(line) - len(line.lstrip(' '))
level = spaces / indent
files.append((title, link, level))
return files
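# For reference, entries in the markdown summary are expected to look like the
# following (file names are illustrative; children are indented two spaces per
# level to match the default ``indent=2``):
#
# * [Introduction](notebooks/intro.md)
#   * [Getting started](notebooks/01/getting-started.ipynb)
#
# which this function turns into
# [('Introduction', 'notebooks/intro.md', 0.0),
#  ('Getting started', 'notebooks/01/getting-started.ipynb', 1.0)].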
def _prepare_link(link):
"""Prep the formatting for a link."""
link = _strip_suffixes(link)
link = link.lstrip('._')
link = link.replace(NOTEBOOKS_FOLDER_NAME + os.sep,
TEXTBOOK_FOLDER_NAME.lstrip('_') + os.sep)
return link
def _strip_suffixes(string, suffixes=None):
"""Remove suffixes so we can create links."""
suffixes = ['.ipynb', '.md'] if suffixes is None else suffixes
for suff in suffixes:
string = string.replace(suff, '')
return string
def _clean_notebook_cells(path_ntbk):
"""Clean up cell text of an nbformat NotebookNode."""
ntbk = nbf.read(path_ntbk, nbf.NO_CONVERT)
# Remove '#' from the end of markdown headers
for cell in ntbk.cells:
if cell.cell_type == "markdown":
cell_lines = cell.source.split('\n')
for ii, line in enumerate(cell_lines):
if line.startswith('#'):
cell_lines[ii] = line.rstrip('#').rstrip()
cell.source = '\n'.join(cell_lines)
nbf.write(ntbk, path_ntbk)
def _clean_lines(lines, filepath):
"""Replace images with jekyll image root and add escape chars as needed."""
inline_replace_chars = ['#']
for ii, line in enumerate(lines):
# Images: replace absolute nbconvert image paths to baseurl paths
path_rel_root = op.relpath(SITE_ROOT, op.dirname(filepath))
line = line.replace(IMAGES_FOLDER, op.join(path_rel_root, 'images'))
# Adding escape slashes since Jekyll removes them
# Make sure we have at least two dollar signs and they
# Aren't right next to each other
dollars = np.where(['$' == char for char in line])[0]
        if len(dollars) > 2 and all(ii > 1 for ii in (dollars[1:] - dollars[:-1])):
            for char in inline_replace_chars:
                line = line.replace('\\' + char, '\\\\' + char)
line = line.replace(' \\$', ' \\\\$')
lines[ii] = line
return lines
def _generate_sidebar(files):
"""Generate the sidebar text for the textbook, and add it to the textbook yaml."""
sidebar_text = []
sidebar_text.append({'title': 'Home', 'class': 'level_0', 'url': '/'})
chapter_ix = 1
for ix_file, (title, link, level) in list(enumerate(files)):
if level > 0 and len(link) == 0:
continue
if level == 0:
if site_yaml.get('number_chapters', False) is True:
title = '{}. {}'.format(chapter_ix, title)
chapter_ix += 1
new_link = _prepare_link(link)
new_item = {'title': title, "class": "level_{}".format(int(level)), 'url': new_link}
if level == 0:
if ix_file != (len(files) - 1) and level < files[ix_file + 1][-1]:
new_item['children'] = []
sidebar_text.append(new_item)
else:
sidebar_text[-1]['children'].append(new_item)
# Keep track of the URL for the first file in the textbook
if ix_file == 0:
textbook_yaml['first_chapter_url'] = new_link
textbook_yaml['chapters'] = sidebar_text
def _copy_non_content_files():
"""Copy non-markdown/notebook files in the notebooks/ folder into Chapters so relative links work."""
all_files = glob(op.join(NOTEBOOKS_FOLDER, '**', '*'), recursive=True)
non_content_files = [ii for ii in all_files if not any(ii.endswith(ext) for ext in ['.ipynb', '.md'])]
for ifile in non_content_files:
if op.isdir(ifile):
continue
# Convert the old link to the new path, note this may change folder name structure
old_link = ifile.split(NOTEBOOKS_FOLDER)[-1]
new_link = _prepare_link(old_link)
# The folder name may change if the permalink sanitizing changes it.
# this ensures that a new folder exists if needed
new_path = ifile.replace(NOTEBOOKS_FOLDER, TEXTBOOK_FOLDER).replace(old_link, new_link)
if not op.isdir(op.dirname(new_path)):
os.makedirs(op.dirname(new_path))
sh.copy2(ifile, new_path)
def _between_symbols(string, c1, c2):
"""Grab characters between symbols in a string.
Will return empty string if nothing is between c1 and c2."""
for char in [c1, c2]:
if char not in string:
raise ValueError("Couldn't find character {} in string {}".format(
char, string))
return string[string.index(c1)+1:string.index(c2)]
if __name__ == '__main__':
args = parser.parse_args()
overwrite = bool(args.overwrite)
execute = bool(args.execute)
if args.site_root is None:
args.site_root = op.join(op.dirname(op.abspath(__file__)), '..')
# Paths for our notebooks
SITE_ROOT = op.abspath(args.site_root)
SITE_TEXTBOOK = op.join(SITE_ROOT, '_data', 'textbook.yml')
CONFIG_FILE = op.join(SITE_ROOT, '_config.yml')
TEMPLATE_PATH = op.join(SITE_ROOT, 'assets', 'templates', 'jekyllmd.tpl')
TEXTBOOK_FOLDER_NAME = '_chapters'
NOTEBOOKS_FOLDER_NAME = 'notebooks'
TEXTBOOK_FOLDER = op.join(SITE_ROOT, TEXTBOOK_FOLDER_NAME)
NOTEBOOKS_FOLDER = op.join(SITE_ROOT, NOTEBOOKS_FOLDER_NAME)
IMAGES_FOLDER = op.join(SITE_ROOT, 'images')
MARKDOWN_FILE = op.join(SITE_ROOT, 'SUMMARY.md')
# Load the yaml for this site
with open(CONFIG_FILE, 'r') as ff:
        site_yaml = yaml.safe_load(ff.read())
    # Load the textbook yaml for this site
if not op.exists(SITE_TEXTBOOK):
with open(SITE_TEXTBOOK, 'w') as ff:
pass
with open(SITE_TEXTBOOK, 'r') as ff:
        textbook_yaml = yaml.safe_load(ff.read())
textbook_yaml = {} if textbook_yaml is None else textbook_yaml
# --- Collect the files we'll convert over ---
files = _markdown_to_files(MARKDOWN_FILE)
# --- Loop through all ipynb/md files, convert to md as necessary and copy. ---
n_skipped_files = 0
n_built_files = 0
for ix_file, (title, link, level) in tqdm(list(enumerate(files))):
if len(link) == 0:
continue
if not op.exists(link):
raise ValueError("Could not find file {}.".format(link))
# Check new folder / file path
filename = op.basename(link)
new_folder = op.dirname(link).replace(NOTEBOOKS_FOLDER_NAME, TEXTBOOK_FOLDER_NAME)
new_file_path = op.join(new_folder, filename.replace('.ipynb', '.md'))
if overwrite is False and op.exists(new_file_path):
n_skipped_files += 1
continue
if not op.isdir(new_folder):
os.makedirs(new_folder)
# Collect previous/next md file for pagination
if ix_file == 0:
prev_page_link = ''
prev_file_title = ''
else:
prev_file_title, prev_page_link, _ = files[ix_file-1]
prev_page_link = _prepare_link(prev_page_link)
if ix_file == len(files) - 1:
next_page_link = ''
next_file_title = ''
else:
next_file_title, next_page_link, _ = files[ix_file+1]
next_page_link = _prepare_link(next_page_link)
# Convert notebooks or just copy md if no notebook.
if link.endswith('.ipynb'):
# Create a temporary version of the notebook we can modify
tmp_notebook = link + '_TMP'
sh.copy2(link, tmp_notebook)
# Clean up the file before converting
cleaner = NotebookCleaner(tmp_notebook)
cleaner.remove_cells(empty=True)
cleaner.remove_cells(search_text="# HIDDEN")
cleaner.clear('stderr')
cleaner.save(tmp_notebook)
_clean_notebook_cells(tmp_notebook)
# Run nbconvert moving it to the output folder
# This is the output directory for `.md` files
build_call = '--FilesWriter.build_directory={}'.format(new_folder)
# This is where images go - remove the _ so Jekyll will copy them over
images_call = '--NbConvertApp.output_files_dir={}'.format(
op.join(IMAGES_FOLDER, new_folder.lstrip('_')))
call = ['jupyter', 'nbconvert', '--log-level="CRITICAL"',
'--to', 'markdown', '--template', TEMPLATE_PATH,
images_call, build_call, tmp_notebook]
if execute is True:
call.insert(-1, '--execute')
check_call(call)
os.remove(tmp_notebook)
elif link.endswith('.md'):
# If a non-notebook file, just copy it over.
# If markdown we'll add frontmatter later
sh.copy2(link, new_file_path)
else:
raise ValueError("Files must end in ipynb or md")
# Extra slash to the inline math before `#` since Jekyll strips it
with open(new_file_path, 'r') as ff:
lines = ff.readlines()
lines = _clean_lines(lines, new_file_path)
# Front-matter YAML
yaml_fm = []
yaml_fm += ['---']
if link.endswith('.ipynb'):
yaml_fm += ['interact_link: {}'.format(link.lstrip('./'))]
yaml_fm += ["title: '{}'".format(title)]
yaml_fm += ["permalink: '{}'".format(_prepare_link(link))]
yaml_fm += ['previouschapter:']
yaml_fm += [' url: {}'.format(_prepare_link(prev_page_link).replace('"', "'"))]
yaml_fm += [" title: '{}'".format(prev_file_title)]
yaml_fm += ['nextchapter:']
yaml_fm += [' url: {}'.format(_prepare_link(next_page_link).replace('"', "'"))]
yaml_fm += [" title: '{}'".format(next_file_title)]
yaml_fm += ["redirect_from:"]
yaml_fm += [" - '{}'".format(_prepare_link(link).lower().replace('_', '-'))] # In case pre-existing links are sanitized
if ix_file == 0 and site_yaml.get('textbook_only') is True:
yaml_fm += [" - '/'"]
yaml_fm += ['---']
yaml_fm = [ii + '\n' for ii in yaml_fm]
lines = yaml_fm + lines
# Write the result
with open(new_file_path, 'w') as ff:
ff.writelines(lines)
n_built_files += 1
print("\n***\nGenerated {} new files\nSkipped {} already-built files".format(n_built_files, n_skipped_files))
if n_built_files == 0:
print("\nDelete the markdown files in '{}' for any pages that you wish to re-build.".format(TEXTBOOK_FOLDER_NAME))
print('***\n')
# Generate sidebar, replacing the old one if it exists
_generate_sidebar(files)
# Copy non-markdown files in notebooks/ in case they're referenced in the notebooks
print('Copying non-content files inside `notebooks/`...')
_copy_non_content_files()
# Update textbook yaml
print('Generating sidebar data...')
with open(SITE_TEXTBOOK, 'w') as ff:
yaml.dump(textbook_yaml, ff, default_flow_style=False)
with open(SITE_TEXTBOOK, 'r') as ff:
        lines = '### PROGRAMMATICALLY GENERATED, DO NOT MODIFY\n' + ff.read()
with open(SITE_TEXTBOOK, 'w') as ff:
ff.write(lines)
print('Done!')
|
11596838
|
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern
import math
from sklearn.cluster import KMeans
from .base_predictor import BasePredictor
"""
Bayesian Learning optimizer
- We will use a Gaussian process surrogate by default, with the Matern kernel
"""
class BayesianLearning(BasePredictor):
def __init__(self, surrogate=None, alpha=None, domain_size=1000):
        # initializing some of the default values
# The default surrogate function is gaussian_process with matern kernel
if surrogate is None:
self.surrogate = GaussianProcessRegressor(kernel=Matern(nu=2.5),
n_restarts_optimizer=10,
# FIXME:check if we should be passing this
# random state
random_state=1,
normalize_y=True)
else:
self.surrogate = surrogate
# keep track of the iteration counts
self.iteration_count = 0
# The size of the exploration domain, default to 1000
self.domain_size = domain_size
self.alpha = alpha
def Upper_Confidence_Bound_Remove_Duplicates(self, X, X_Sample, batch_idx):
"""
        Return the best UCB candidate from X, skipping candidates already present (or close to a point) in X_Sample.
"""
mu, sigma = self.surrogate.predict(X, return_std=True)
mu = mu.reshape(mu.shape[0], 1)
sigma = sigma.reshape(sigma.shape[0], 1)
# use fixed alpha if given
if self.alpha is not None:
alpha = self.alpha
else:
alpha_inter = self.domain_size * (self.iteration_count) * (self.iteration_count) * math.pi * math.pi / (
6 * 0.1)
if alpha_inter == 0:
raise ValueError('alpha_inter is zero in Upper_Confidence_Bound')
alpha = 2 * math.log(alpha_inter) # We have set delta = 0.1
alpha = math.sqrt(alpha)
if batch_idx == 0:
exploration_factor = alpha
else:
tolerance = 1e-6
sigma_inv_sq = 1.0 / (tolerance + (sigma * sigma)) # tolerance is used to avoid the divide by zero error
C = 8 / (np.log(1 + sigma_inv_sq))
beta = np.exp(2 * C) * alpha
beta = np.sqrt(beta)
exploration_factor = beta
Value = mu + exploration_factor * sigma
return self.remove_duplicates(X, X_Sample, mu, Value)
def Get_Upper_Confidence_Bound(self, X):
"""
        Returns the acquisition function (UCB) values for the candidate points.
"""
mu, sigma = self.surrogate.predict(X, return_std=True)
mu = mu.reshape(mu.shape[0], 1)
sigma = sigma.reshape(sigma.shape[0], 1)
if self.alpha is not None:
exploration_factor = self.alpha
else:
alpha_inter = self.domain_size * (self.iteration_count) * (self.iteration_count) * math.pi * math.pi / (
6 * 0.1)
if alpha_inter == 0:
raise ValueError('alpha_inter is zero in Upper_Confidence_Bound')
alpha = 2 * math.log(alpha_inter) # We have set delta = 0.1
alpha = math.sqrt(alpha)
exploration_factor = alpha
Value = mu + exploration_factor * sigma
return Value
"""
    Returns the most optimal x (along with its mean value) from the domain X, making sure it is not a duplicate (depending on closeness).
    Used in the batch setting, as the mean is also returned.
"""
def remove_duplicates(self, X, X_Sample, mu, Value):
# print('*'*200)
v_sorting_index = np.argsort(-Value, axis=0)
index = 0
        # go through all the values in X_Sample and check if any value is close
        # to the optimal x value; if yes, don't consider this optimal x value
while index < v_sorting_index.shape[0]:
x_optimal = X[v_sorting_index[index]]
# check if x_optimal is in X_Sample
check_closeness = self.closeness(x_optimal, X_Sample)
if check_closeness == False: # No close element to x_optimal in X_Sample
break
# we will look for next optimal value to try
else:
index = index + 1
# If entire domain is same to the already selected samples, we will just pick the best by value then
if (index == v_sorting_index.shape[0]):
index = 0
return X[v_sorting_index[index]], mu[v_sorting_index[index]]
"""
    Returns only the most optimal x from the domain X, making sure it is not a duplicate (depending on closeness).
    Intended for use in the serial and clustering settings: no mean is returned and no hallucination is considered.
"""
def remove_duplicates_serial(self, X, X_Sample, Value):
# print('*'*200)
v_sorting_index = np.argsort(-Value, axis=0)
index = 0
        # go through all the values in X_Sample and check if any value is close
        # to the optimal x value; if yes, don't consider this optimal x value
while index < v_sorting_index.shape[0]:
x_optimal = X[v_sorting_index[index]]
# check if x_optimal is in X_Sample
check_closeness = self.closeness(x_optimal, X_Sample)
if check_closeness == False: # No close element to x_optimal in X_Sample
break
# we will look for next optimal value to try
else:
index = index + 1
# If entire domain is same to the already selected samples, we will just pick the best by value then
if (index == v_sorting_index.shape[0]):
index = 0
return X[v_sorting_index[index]]
def closeness(self, x_optimal, X_Sample):
# check if x_optimal is close to X_Sample
tolerance = 1e-3
for i in range(X_Sample.shape[0]):
diff = np.sum(np.absolute(X_Sample[i] - x_optimal))
if (diff < tolerance):
# print('Removed Duplicate')
return True
return False
"""
This is the main function which returns the next batch to try along with the mean values for this batch
"""
def get_next_batch(self, X, Y, X_tries, batch_size):
# print('In get_next_batch')
X_temp = X
Y_temp = Y
batch = []
for idx in range(batch_size):
self.iteration_count = self.iteration_count + 1
self.surrogate.fit(X_temp, Y_temp)
X_next, u_value = self.Upper_Confidence_Bound_Remove_Duplicates(X_tries, X_temp, idx)
u_value = u_value.reshape(-1, 1)
Y_temp = np.vstack((Y_temp, u_value))
X_temp = np.vstack((X_temp, X_next))
batch.append([X_next])
batch = np.array(batch)
batch = batch.reshape(-1, X.shape[1])
return batch
"""
Using clustering to select next batch
"""
def get_next_batch_clustering(self, X, Y, X_tries, batch_size):
# print('In get_next_batch')
X_temp = X
Y_temp = Y
self.surrogate.fit(X_temp, Y_temp)
self.iteration_count = self.iteration_count + 1
Acquition = self.Get_Upper_Confidence_Bound(X_tries)
if batch_size > 1:
gen = sorted(zip(Acquition, X_tries), key=lambda x: -x[0])
x_best_acq_value, x_best_acq_domain = (np.array(t)[:len(Acquition) // 4]
for t in zip(*gen))
# Do the domain space based clustering on the best points
kmeans = KMeans(n_clusters=batch_size, random_state=0).fit(x_best_acq_domain)
cluster_pred_domain = kmeans.labels_.reshape(kmeans.labels_.shape[0])
# partition the space into the cluster in X and select the best X from each space
partitioned_space = dict()
partitioned_acq = dict()
for i in range(batch_size):
partitioned_space[i] = []
partitioned_acq[i] = []
for i in range(x_best_acq_domain.shape[0]):
partitioned_space[cluster_pred_domain[i]].append(x_best_acq_domain[i])
partitioned_acq[cluster_pred_domain[i]].append(x_best_acq_value[i])
batch = []
for i in partitioned_space:
x_local = partitioned_space[i]
acq_local = partitioned_acq[i]
acq_local = np.array(acq_local)
x_index = np.argmax(acq_local)
x_final_selected = x_local[x_index]
batch.append([x_final_selected])
else: # batch_size ==1
batch = []
x_index = np.argmax(Acquition)
x_final_selected = self.remove_duplicates_serial(X_tries, X_temp, Acquition)
# x_final_selected = X_tries[x_index]
batch.append([x_final_selected])
batch = np.array(batch)
batch = batch.reshape(-1, X.shape[1])
return batch
#used by MetaTuner which also returns the surrogate function values
def remove_duplicates_MetaTuner(self, X, X_Sample, mu, Value, Value_ext):
# print('*'*200)
v_sorting_index = np.argsort(-Value, axis=0)
index = 0
        # go through all the values in X_Sample and check if any value is close
        # to the optimal x value; if yes, don't consider this optimal x value
while index < v_sorting_index.shape[0]:
x_optimal = X[v_sorting_index[index]]
# check if x_optimal is in X_Sample
check_closeness = self.closeness(x_optimal, X_Sample)
if check_closeness == False: # No close element to x_optimal in X_Sample
break
# we will look for next optimal value to try
else:
index = index + 1
# If entire domain is same to the already selected samples, we will just pick the best by value then
if (index == v_sorting_index.shape[0]):
index = 0
return X[v_sorting_index[index]], Value[v_sorting_index[index]], Value_ext[v_sorting_index[index]], mu[v_sorting_index[index]]
"""
Used by MetaTuner by Exponentially scaling the exploration factor
"""
def Upper_Confidence_Bound_Remove_Duplicates_MetaTuner(self, X, X_Sample, batch_size, exploration_factor_tuner, Optimizer_iteration):
mu, sigma = self.surrogate.predict(X, return_std=True)
mu = mu.reshape(mu.shape[0], 1)
sigma = sigma.reshape(sigma.shape[0], 1)
#Optimizer_iteration: Number of times this function has been selected
alpha_inter = self.domain_size * (Optimizer_iteration) * (Optimizer_iteration) * math.pi * math.pi / (6 * 0.1)
if alpha_inter == 0:
            raise ValueError('alpha_inter is zero in Upper_Confidence_Bound')
alpha = 2 * math.log(alpha_inter) # We have set delta = 0.1
alpha = math.sqrt(alpha)
if batch_size == 1:
exploration_factor = 2.0
        else: # for both cases we don't modify the exploration factor; TODO: check if this is optimal
exploration_factor = 2.0
#used for internal exploration
Value = mu + (exploration_factor * sigma)
#used for exploration among functions
Value_ext = mu + (exploration_factor_tuner * sigma)
return self.remove_duplicates_MetaTuner(X, X_Sample, mu, Value, Value_ext)
"""
Function used to select the next batch by MetaTuner
"""
def get_next_batch_MetaTuner(self, X, Y, X_tries, batch_size=3, exploration_factor =1.0, Optimizer_iteration=1.0, classifier_index=0, last_used_index=0):
# print('In get_next_batch')
X_temp = X
Y_temp = Y
batch = []
#value of the surrogate function
s_values = []
        #value of the external surrogate function
s_values_ext = []
#value used to do external exploration among functions
u_values = []
for i in range(batch_size):
# fit only if some modified
# if classifier_index==last_used_index:
# self.surrogate.fit(X_temp, Y_temp)
# print('Doing fit of surrogate:',classifier_index)
try:
self.surrogate.fit(X_temp, Y_temp)
            except Exception as e:
                print('*'*100)
                print('surrogate fit failed: {}'.format(e))
                print(X_temp)
                print('*'*10)
                print(Y_temp)
#print('Doing fit of surrogate:',classifier_index)
X_next, s_value, s_value_ext, u_value = self.Upper_Confidence_Bound_Remove_Duplicates_MetaTuner(X_tries, X_temp, batch_size, exploration_factor, Optimizer_iteration)
Optimizer_iteration = Optimizer_iteration + 1
s_values.append(s_value[0][0])
s_values_ext.append(s_value_ext[0][0])
u_values.append(u_value[0][0])
u_value = u_value.reshape(-1, 1)
Y_temp = np.vstack((Y_temp, u_value))
X_temp = np.vstack((X_temp, X_next))
batch.append(X_next)
#batch = np.array(batch)
#s_value = np.array(s_value)
#batch = batch.reshape(-1, X.shape[1])
#s_value = s_value.reshape(-1, s_value.shape[0])
return batch, s_values, s_values_ext, u_values
"""
Get the predictions from the surrogate function
along with the variance
"""
def predict(self, X):
pred_y, sigma = self.surrogate.predict(X, return_std=True)
return pred_y, sigma
"""
fit the optimizer on the X and Y values
"""
def fit(self, X, Y):
self.surrogate.fit(X, Y)
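# A minimal usage sketch (commented out; the observed data below are made up):
# fit the default Gaussian-process surrogate on a few observed points and ask
# for the next batch of two candidates from a randomly sampled domain.
#
# rng = np.random.RandomState(0)
# X_observed = rng.rand(5, 2)
# Y_observed = np.sin(X_observed.sum(axis=1, keepdims=True))
# X_domain = rng.rand(200, 2)
# optimizer = BayesianLearning(domain_size=200)
# next_batch = optimizer.get_next_batch(X_observed, Y_observed, X_domain, batch_size=2)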
|
11596839
|
from hippy.builtin import wrap, Optional
from rpython.rlib import rzlib
import sys
ZLIB_ENCODING_RAW = -15
ZLIB_ENCODING_GZIP = 31
ZLIB_ENCODING_DEFLATE = 15
ZLIB_ENCODING_ANY = 99
def _encode(data, level, encoding):
    stream = rzlib.deflateInit(level=level, wbits=encoding)
    result = rzlib.compress(stream, data)
    result += rzlib.compress(stream, "", rzlib.Z_FINISH)
    rzlib.deflateEnd(stream)
    return result
def _decode(data, encoding):
    stream = rzlib.inflateInit(wbits=encoding)
    result, finished, unused = rzlib.decompress(stream, data, rzlib.Z_FINISH)
    rzlib.inflateEnd(stream)
    return result
@wrap(['interp', str, Optional(int), Optional(int)])
def gzdeflate(interp, source, level=-1, encoding=ZLIB_ENCODING_RAW):
res = _encode(source, level, encoding)
return interp.space.wrap(res)
@wrap(['interp', str, Optional(int), Optional(int)])
def gzencode(interp, source, level=-1, encoding=ZLIB_ENCODING_GZIP):
res = _encode(source, level, encoding)
return interp.space.wrap(res)
@wrap(['interp', str, Optional(int), Optional(int)])
def gzcompress(interp, source, level=-1, encoding=ZLIB_ENCODING_DEFLATE):
res = _encode(source, level, encoding)
return interp.space.wrap(res)
@wrap(['interp', str, Optional(int)])
def gzinflate(interp, source, length=0):
res = _decode(source, ZLIB_ENCODING_RAW)
return interp.space.wrap(res)
@wrap(['interp', str, Optional(int)])
def gzdecode(interp, source, length=0):
res = _decode(source, ZLIB_ENCODING_GZIP)
return interp.space.wrap(res)
@wrap(['interp', str, Optional(int)])
def gzuncompress(interp, source, length=0):
res = _decode(source, ZLIB_ENCODING_DEFLATE)
return interp.space.wrap(res)
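# Illustrative round trip through the helpers above (left as a comment, since
# running it requires an RPython environment with rzlib available):
#
# payload = "hello world"
# assert _decode(_encode(payload, 6, ZLIB_ENCODING_DEFLATE),
#                ZLIB_ENCODING_DEFLATE) == payload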
|
11596860
|
import io
class StringBuffer(object):
def __init__(self):
        self.empty = True
self._stringio = io.StringIO()
def __str__(self):
val = self._stringio.getvalue()
self._stringio.close()
return val
def append(self, obj):
data = unicode(obj)
if self.empty and len(data) > 0:
self.empty = False
        self._stringio.write(data)
return self
def isempty(self):
return self.empty
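# Illustrative usage (Python 2, since append() relies on ``unicode``). Note
# that __str__ closes the underlying StringIO, so it should only be called
# once per buffer:
#
# buf = StringBuffer()
# buf.append(u'hello ').append(u'world')
# print(str(buf))  # 'hello world'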
|
11596870
|
from copy import deepcopy
class MatchData:
"""Contains and collects metadata about a matching document.
A single instance of lunr.MatchData is returned as part of every
lunr.Index.Result.
"""
def __init__(self, term=None, field=None, metadata=None):
self.metadata = {}
if term is not None:
self.metadata[term] = {}
if field is not None:
self.metadata[term][field] = (
deepcopy(metadata) if metadata is not None else {}
)
def __repr__(self):
return '<MatchData "{}">'.format(",".join(sorted(self.metadata.keys())))
def combine(self, other):
"""An instance of lunr.MatchData will be created for every term that
matches a document.
However only one instance is required in a lunr.Index~Result. This
method combines metadata from another instance of MatchData with this
object's metadata.
"""
for term in other.metadata.keys():
if term not in self.metadata:
self.metadata[term] = {}
fields = other.metadata[term].keys()
for field in fields:
if field not in self.metadata[term]:
self.metadata[term][field] = {}
keys = other.metadata[term][field].keys()
for key in keys:
if key not in self.metadata[term][field]:
self.metadata[term][field][key] = other.metadata[term][field][
key
]
else:
self.metadata[term][field][key].extend(
other.metadata[term][field][key]
)
def add(self, term, field, metadata):
"""Add metadata for a term/field pair to this instance of match data"""
if term not in self.metadata:
self.metadata[term] = {field: metadata}
return
if field not in self.metadata[term]:
self.metadata[term][field] = metadata
return
for key in metadata.keys():
if key in self.metadata[term][field]:
self.metadata[term][field][key].extend(metadata[key])
else:
self.metadata[term][field][key] = metadata[key]
def __eq__(self, other):
return self.metadata == other.metadata
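# A short illustrative example (terms, fields and positions are made up):
# metadata from several matching term/field pairs can be merged into one
# MatchData instance.
#
# md = MatchData("plant", "title", {"position": [[0, 5]]})
# md.combine(MatchData("plant", "body", {"position": [[10, 5]]}))
# md.add("plant", "body", {"position": [[20, 5]]})
# print(md)  # <MatchData "plant">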
|
11596872
|
import wrapt
import sqlite3
from aws_xray_sdk.ext.dbapi2 import XRayTracedConn
def patch():
wrapt.wrap_function_wrapper(
'sqlite3',
'connect',
_xray_traced_connect
)
def _xray_traced_connect(wrapped, instance, args, kwargs):
conn = wrapped(*args, **kwargs)
meta = {}
meta['name'] = args[0]
meta['database_version'] = sqlite3.sqlite_version
traced_conn = XRayTracedSQLite(conn, meta)
return traced_conn
class XRayTracedSQLite(XRayTracedConn):
def execute(self, *args, **kwargs):
return self.cursor().execute(*args, **kwargs)
def executemany(self, *args, **kwargs):
return self.cursor().executemany(*args, **kwargs)
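# Typical usage (illustrative): call patch() once at application start-up,
# before any sqlite3.connect() call, so that new connections come back as
# XRayTracedSQLite wrappers; queries are then recorded as X-Ray subsegments
# whenever a segment is active.
#
# patch()
# conn = sqlite3.connect(':memory:')  # now an XRayTracedSQLite instance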
|
11596900
|
expected_output = {
"TenGigabitEthernet0/0/0.101": {
"service_policy": {
"input": {
"policy_name": {
"L3VPNin": {
"class_map": {
"IPP11111": {
"match_evaluation": "match-all",
"bandwidth_percent": 4,
"bandwidth_kbps": 536,
"packets": 253,
"bytes": 5656,
"rate": {
"interval": 300,
"offered_rate_bps": 0,
"drop_rate_bps": 0,
},
"match": ["ip precedence 6 7"],
"queueing": True,
"queue_limit_packets": "32",
"queue_depth": 98,
"total_drops": 666,
"no_buffer_drops": 0,
"pkts_output": 125,
"bytes_output": 253654,
}
}
}
}
}
}
}
}
|
11596968
|
from DeepSparseCoding.tf1x.params.base_params import BaseParams
TRAIN_ON_RECON = False
class params(BaseParams):
def __init__(self):
"""
Additional modifiable parameters:
rectify_a [bool] If set, rectify layer 1 activity
norm_weights [bool] If set, l2 normalize weights after updates
batch_size [int] Number of images in a training batch
num_neurons [int] Number of LCA neurons
num_steps [int] Number of inference steps
dt [float] Discrete global time constant
tau [float] LCA time constant
thresh_type [str] "hard" or "soft" - LCA threshold function specification
"""
super(params, self).__init__()
self.model_type = "mlp_lca_subspace"
self.train_on_recon = TRAIN_ON_RECON # if False, train on activations
if(self.train_on_recon):
self.model_name = "mlp_lca_subspace_recon"
else:
self.model_name = "mlp_lca_subspace_latent_1024"
self.version = "0.0"
self.vectorize_data = True
self.norm_data = False
self.rescale_data = False
self.center_data = False
self.standardize_data = False
self.tf_standardize_data = False
self.contrast_normalize = False
self.whiten_data = True
self.whiten_method = "FT"
self.lpf_data = False # FT whitening already does LPF
self.lpf_cutoff = 0.7
self.extract_patches = True
self.num_patches = 1e6
self.patch_edge_size = 16
self.overlapping_patches = True
self.randomize_patches = True
self.patch_variance_threshold = 0.0
self.batch_size = 100
# LCA Params
self.lca_conv = False
self.num_neurons = 768
self.num_groups = 192
self.num_steps = 50
self.dt = 0.001
self.tau = 0.03
self.rectify_a = True
self.norm_weights = True
self.thresh_type = "soft"
self.optimizer = "sgd"
# MLP Params
self.train_on_recon = TRAIN_ON_RECON # if False, train on LCA latent activations
self.num_val = 10000
self.num_labeled = 50000
self.num_classes = 10
self.mlp_layer_types = ["fc", "fc", "fc"]
self.mlp_output_channels = [300, 500, self.num_classes]
self.mlp_patch_size = []
self.mlp_conv_strides = []
self.batch_norm = [None, None, None]
self.mlp_dropout = [1.0, 1.0, 1.0]
self.max_pool = [False, False, False]
self.max_pool_ksize = [None, None, None]
self.max_pool_strides = [None, None, None]
self.lrn = [None]*len(self.mlp_output_channels)
self.mlp_decay_mult = 0
self.mlp_norm_mult = 1e-4
#Adversarial params
self.adversarial_num_steps = 40
self.adversarial_attack_method = "kurakin_untargeted"
self.adversarial_step_size = 0.01
self.adversarial_max_change = 0.3
# DEPRECATE self.adversarial_target_method = "random" #Not used if attack_method is untargeted
self.adversarial_clip = True
#TODO get these params from other params
self.adversarial_clip_range = [0.0, 1.0]
#Tradeoff in carlini attack between input pert and target
self.carlini_recon_mult = 1
# Others
self.cp_int = 10000
self.val_on_cp = True
self.eval_batch_size = 100
self.max_cp_to_keep = 1
self.cp_load = True
self.cp_load_name = "lca_subspace_1024_mnist"
self.cp_load_step = None # latest checkpoint
self.cp_load_ver = "0.0"
self.cp_load_var = ["lca_subspace/weights/w:0"]
self.log_int = 100
self.log_to_file = True
self.gen_plot_int = 5e3
self.save_plots = True
self.schedule = [
{"weights": None,
"train_lca": False,
"train_on_adversarial": False,
"num_batches": int(1e4),
"sparse_mult": 0.01,
"weight_lr": 0.01,
"decay_steps": int(1e4*0.8),
"decay_rate": 0.8,
"staircase": True},
]
def set_data_params(self, data_type):
self.data_type = data_type
if data_type.lower() == "mnist":
self.model_name += "_mnist"
self.vectorize_data = True
self.rescale_data = True
self.center_data = False
self.whiten_data = False
self.extract_patches = False
self.cp_int = 1e4
self.gen_plot_int = 1e4
# LCA params
self.lca_conv = False
self.num_neurons = 512
self.num_groups = 128
if self.train_on_recon:
self.full_data_shape = [28, 28, 1]
self.num_classes = 10
self.optimizer = "adam"
self.mlp_layer_types = ["conv", "conv", "fc", "fc"]
self.mlp_activation_functions = ["relu", "relu", "relu", "identity"]
self.mlp_output_channels = [32, 64, 1024, self.num_classes]
self.mlp_patch_size = [(5, 5), (5, 5)]
self.mlp_conv_strides = [(1,1,1,1), (1,1,1,1)]
self.batch_norm = [None, None, None, None]
self.mlp_dropout = [1.0, 1.0, 0.4, 1.0]
self.max_pool = [True, True, False, False]
self.max_pool_ksize = [(1,2,2,1), (1,2,2,1), None, None]
self.max_pool_strides = [(1,2,2,1), (1,2,2,1), None, None]
self.lrn = [None]*len(self.mlp_output_channels)
# NOTE schedule index will change if lca training is happening
self.schedule[-1]["num_batches"] = int(1e5)
for sched_idx in range(len(self.schedule)):
self.schedule[sched_idx]["weights"] = [
"mlp/layer0/conv_w_0:0",
"mlp/layer0/conv_b_0:0",
"mlp/layer1/conv_w_1:0",
"mlp/layer1/conv_b_1:0",
"mlp/layer2/fc_w_2:0",
"mlp/layer2/fc_b_2:0",
"mlp/layer3/fc_w_3:0",
"mlp/layer3/fc_b_3:0"]
self.schedule[sched_idx]["train_on_adversarial"] = False
self.schedule[sched_idx]["group_orth_mult"] = 0.04
self.schedule[sched_idx]["sparse_mult"] = 0.45
self.schedule[sched_idx]["weight_lr"] = 1e-4
self.schedule[sched_idx]["decay_steps"] = int(0.5*self.schedule[sched_idx]["num_batches"])
self.schedule[sched_idx]["decay_rate"] = 0.9
else: #96.34 val 98.00 train
self.mlp_output_channels = [128, self.num_classes]
self.mlp_layer_types = ["fc", "fc"]
self.mlp_activation_functions = ["lrelu", "identity"]
self.optimizer = "adam"
self.mlp_patch_size = []
self.mlp_conv_strides = []
# TODO: tune decay & norm mults
self.mlp_decay_mult = 0
self.mlp_norm_mult = 1e-4
self.batch_norm = [None]*len(self.mlp_output_channels)
self.mlp_dropout = [0.8, 1.0]
self.max_pool = [False, False]
self.max_pool_ksize = [None, None]
self.max_pool_strides = [None, None]
self.lrn = [None]*len(self.mlp_output_channels)
for sched_idx in range(len(self.schedule)):
self.schedule[sched_idx]["weights"] = [
"mlp/layer0/fc_w_0:0",
"mlp/layer0/fc_b_0:0",
"mlp/layer1/fc_w_1:0",
"mlp/layer1/fc_b_1:0"]
self.schedule[sched_idx]["train_on_adversarial"] = False
self.schedule[sched_idx]["num_batches"] = int(2e5)
self.schedule[sched_idx]["group_orth_mult"] = 0.04
self.schedule[sched_idx]["sparse_mult"] = 0.45
self.schedule[sched_idx]["weight_lr"] = 1e-5
self.schedule[sched_idx]["decay_steps"] = int(0.4*self.schedule[sched_idx]["num_batches"])
self.schedule[sched_idx]["decay_rate"] = 0.9
elif data_type.lower() == "synthetic":
self.model_name += "_synthetic"
self.epoch_size = 1000
self.dist_type = "gaussian"
self.num_edge_pixels = 16
self.vectorize_data = True
self.rescale_data = True
self.whiten_data = False
self.extract_patches = False
self.num_neurons = 768
self.num_groups = 192
self.train_on_recon = True # if False, train on activations
self.num_classes = 2
self.mlp_output_channels = [128, 64, self.num_classes]
self.mlp_activation_functions = ["lrelu"]*len(self.mlp_output_channels)
self.lrn = [None]*len(self.mlp_output_channels)
for sched_idx in range(len(self.schedule)):
self.schedule[sched_idx]["group_orth_mult"] = 0.04
self.schedule[sched_idx]["sparse_mult"] = 0.21
self.schedule[sched_idx]["weight_lr"] = 0.1
self.schedule[sched_idx]["num_batches"] = int(1e5)
self.schedule[sched_idx]["decay_steps"] = int(0.8*self.schedule[sched_idx]["num_batches"])
else:
assert False, ("Data type "+data_type+" is not supported.")
def set_test_params(self, data_type):
self.set_data_params(data_type)
self.epoch_size = 50
self.batch_size = 10
self.num_edge_pixels = 8
self.cp_load = False
for sched_idx in range(len(self.schedule)):
self.schedule[sched_idx]["weights"] = None
self.schedule[sched_idx]["num_batches"] = 2
self.schedule[sched_idx]["weight_lr"] = 1e-4
self.num_neurons = 100
self.num_groups = 25
self.num_steps = 5
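# Illustrative usage (assumes the usual DeepSparseCoding training scripts
# consume the returned parameter object):
#
# model_params = params()
# model_params.set_data_params("mnist")
# print(model_params.model_name)  # "mlp_lca_subspace_latent_1024_mnist"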
|
11597017
|
import symjax
import symjax.tensor as T
# scope/graph naming and accessing
value1 = T.Variable(T.ones((1,)))
value2 = T.Variable(T.zeros((1,)))
g = symjax.Graph("special")
with g:
value3 = T.Variable(T.zeros((1,)))
value4 = T.Variable(T.zeros((1,)))
result = value3 + value4
h = symjax.Graph("inversion")
with h:
value5 = T.Variable(T.zeros((1,)))
value6 = T.Variable(T.zeros((1,)))
value7 = T.Variable(T.zeros((1,)), name="w")
print(g.variables)
# {'unnamed_variable': Variable(name=unnamed_variable, shape=(1,), dtype=float32, trainable=True, scope=/special/),
# 'unnamed_variable_1': Variable(name=unnamed_variable_1, shape=(1,), dtype=float32, trainable=True, scope=/special/)}
print(h.variables)
# {'unnamed_variable': Variable(name=unnamed_variable, shape=(1,), dtype=float32, trainable=True, scope=/special/inversion/),
# 'unnamed_variable_1': Variable(name=unnamed_variable_1, shape=(1,), dtype=float32, trainable=True, scope=/special/inversion/),
# 'w': Variable(name=w, shape=(1,), dtype=float32, trainable=True, scope=/special/inversion/)}
print(h.variable("w"))
# Variable(name=w, shape=(1,), dtype=float32, trainable=True, scope=/special/inversion/)
# now suppose that we did not keep a reference to the graph g/h; we can still
# recover a variable based on its name AND its scope
print(symjax.get_variables("/special/inversion/w"))
# Variable(name=w, shape=(1,), dtype=float32, trainable=True, scope=/special/inversion/)
# now if the exact scope name is not known, it is possible to use smart indexing;
# for example, if we do not remember the scope, we can still get all variables
# named 'w' across scopes
print(symjax.get_variables("*/w"))
# Variable(name=w, shape=(1,), dtype=float32, trainable=True, scope=/special/inversion/)
# if only part of the scope is known, all the variables of a given scope can
# be retrieved
print(symjax.get_variables("/special/*"))
# [Variable(name=unnamed_variable, shape=(1,), dtype=float32, trainable=True, scope=/special/),
# Variable(name=unnamed_variable_1, shape=(1,), dtype=float32, trainable=True, scope=/special/),
# Variable(name=unnamed_variable, shape=(1,), dtype=float32, trainable=True, scope=/special/inversion/),
# Variable(name=unnamed_variable_1, shape=(1,), dtype=float32, trainable=True, scope=/special/inversion/),
# Variable(name=w, shape=(1,), dtype=float32, trainable=True, scope=/special/inversion/)]
print(symjax.get_ops("*add"))
# Op(name=add, shape=(1,), dtype=float32, scope=/special/)
|
11597043
|
import json
from flask import g, request, render_template
import requests
from agaveflask.logs import get_logger
logger = get_logger(__name__)
from models import dict_to_camel, display_time
def dashboard():
# default to using the local instance
try:
jwt = g.jwt
except AttributeError:
        error = "JWT missing. context: {}".format(dir(g))
return render_template('dashboard.html',
actors=[],
jwt="",
jwt_header="",
base_url="",
url="",
error=error)
jwt_header = g.jwt_header_name
base_url = 'http://172.17.0.1:8000'
url = "{}/admin/actors".format(base_url)
error = None
actors = None
logger.info("jwt_header from context: {}".format(jwt_header))
logger.debug("jwt from context: {}".format(jwt))
logger.info("url: {}".format(url))
if request.method == 'POST':
logger.info("validating post params.")
# validate POST parameters
form_base_url = request.form.get('base_url')
form_jwt_header = request.form.get('jwt_header')
form_jwt = request.form.get('jwt')
if not form_base_url:
logger.info("Empty base url.")
error = 'The Base URL is required.'
elif not form_jwt_header:
logger.info("Empty JWT header.")
error = "The JWT Header is required."
elif not form_jwt:
logger.info("Empty JWT.")
error = 'The JWT is required.'
else:
logger.info("Using form data.")
base_url = form_base_url
jwt_header = form_jwt_header
jwt = form_jwt
if not error:
# try and make a request to get the actors
headers = {jwt_header: jwt}
url = "{}/admin/actors".format(base_url)
logger.info("Submitting GET to: {}".format(url))
try:
rsp = requests.get(url, headers=headers)
except Exception as e:
logger.error("Got an exception from /admin/actors. Exception: {}".format(e))
error = "Unable to retrieve actors: {}".format(e)
return render_template('dashboard.html',
actors=None,
jwt=jwt,
jwt_header=jwt_header,
base_url=base_url,
error=error)
if rsp.status_code not in [200, 201]:
logger.error("Did not get 200 from /admin/actors. Status: {}. content: {}".format(
rsp.status_code, rsp.content))
            try:
                msg = rsp.json().get("message", rsp.content)
            except (ValueError, AttributeError):
                msg = rsp.content
error = "Unable to retrieve actors. Error was: {}".format(msg)
else:
logger.info("Request to /admin/actors successful.")
data = json.loads(rsp.content.decode('utf-8'))
actors_data = data.get("result")
if not actors_data and request.method == 'POST':
error = "No actors found."
else:
actors = []
for actor in actors_data:
a = dict_to_camel(actor)
worker = a.get('worker')
if worker:
try:
a['worker'] = dict_to_camel(worker)
a['worker']['lastHealthCheckTime'] = display_time(a['worker'].get('lastHealthCheckTime'))
a['worker']['lastExecutionTime'] = display_time(a['worker'].get('lastExecutionTime'))
except KeyError as e:
logger.error("Error pulling worker data from admin api. Exception: {}".format(e))
else:
a['worker'] = {'lastHealthCheckTime': '',
'lastExecutionTime': '',
'id': '',
'status': ''}
logger.info("Adding actor data after converting to camel: {}".format(a))
a['createTime'] = display_time(a.get('createTime'))
a['lastUpdateTime'] = display_time(a.get('lastUpdateTime'))
actors.append(a)
return render_template('dashboard.html',
actors=actors,
jwt=jwt,
jwt_header=jwt_header,
base_url=base_url,
url=url,
error=error)
|
11597109
|
import os
import autopep8
class Dumper:
DEFAULT_IMPORTS = 'from auror_core.v2.job import Command\n\n\n'
def __init__(self, path):
if not os.path.exists(path):
raise ValueError('Directory, {}, does not exist'.format(path))
self.path = path
def dump_jobs(self, *jobs):
_path = '{}/flow.py'.format(self.path)
with open(_path, 'w') as _file:
_file.write(self.DEFAULT_IMPORTS)
for job_number, job in enumerate(jobs):
_file.write('job_{} = {}\n\n'.format(job_number, repr(job)))
self.__format_file(_path)
def __format_file(self, path):
autopep8.fix_file(
path,
options=autopep8.parse_args(
['--in-place', '--aggressive', path]
)
)
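# Illustrative usage sketch (not part of the original module; the job objects and the
# target directory are assumptions - Command comes from auror_core.v2.job):
#   dumper = Dumper('/tmp/generated_flows')   # the directory must already exist
#   dumper.dump_jobs(Command(), Command())    # writes job_0/job_1 into flow.py and autopep8-formats it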
|
11597115
|
from rest_framework import serializers
from ..models import Form
class FormSerializer(serializers.HyperlinkedModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name='form-detail', lookup_field='slug')
class Meta:
fields = (
'url',
'slug',
'title',
'fields',
'layout',
'login_required',
'active',
'standalone',
)
model = Form
|
11597117
|
import unittest
from cert_core import Chain
from pycoin.serialize import b2h
from cert_issuer.merkle_tree_generator import MerkleTreeGenerator
from cert_issuer import helpers
from lds_merkle_proof_2019.merkle_proof_2019 import MerkleProof2019
def get_test_data_generator():
"""
Returns a generator (1-time iterator) of test data
:return:
"""
for num in range(1, 4):
yield str(num).encode('utf-8')
class TestMerkleTreeGenerator(unittest.TestCase):
def test_generate(self):
merkle_tree_generator = MerkleTreeGenerator()
merkle_tree_generator.populate(get_test_data_generator())
byte_array = merkle_tree_generator.get_blockchain_data()
self.assertEqual(b2h(byte_array), '0932f1d2e98219f7d7452801e2b64ebd9e5c005539db12d9b1ddabe7834d9044')
def test_proofs_bitcoin_mainnet(self):
self.do_test_signature(Chain.bitcoin_mainnet, 'bitcoinMainnet', 'BTCOpReturn')
def test_proofs_bitcoin_testnet(self):
self.do_test_signature(Chain.bitcoin_testnet, 'bitcoinTestnet', 'BTCOpReturn')
def test_proofs_bitcoin_regtest(self):
self.do_test_signature(Chain.bitcoin_regtest, 'bitcoinRegtest', 'BTCOpReturn')
def test_proofs_mock(self):
self.do_test_signature(Chain.mockchain, 'mockchain', 'Mock')
def do_test_signature(self, chain, display_chain, type):
merkle_tree_generator = MerkleTreeGenerator()
merkle_tree_generator.populate(get_test_data_generator())
_ = merkle_tree_generator.get_blockchain_data()
gen = merkle_tree_generator.get_proof_generator(
'8087c03e7b7bc9ca7b355de9d9d8165cc5c76307f337f0deb8a204d002c8e582', 'http://example.com', chain)
p1 = next(gen)
_ = next(gen)
p3 = next(gen)
p1_json_proof = {
'path': [
{'right': 'd4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35'},
{'right': '4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce'}
],
'merkleRoot': '0932f1d2e98219f7d7452801e2b64ebd9e5c005539db12d9b1ddabe7834d9044',
'targetHash': '6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b',
'anchors': [
helpers.tx_to_blink(chain, '8087c03e7b7bc9ca7b355de9d9d8165cc5c76307f337f0deb8a204d002c8e582')
]
}
mp2019 = MerkleProof2019()
proof_value = mp2019.encode(p1_json_proof)
p1_expected = {
"type": "MerkleProof2019",
"created": p1['created'],
"proofValue": proof_value.decode('utf8'),
"proofPurpose": "assertionMethod",
"verificationMethod": "http://example.com"
}
p3_json_proof = {
'path': [
{'left': '4295f72eeb1e3507b8461e240e3b8d18c1e7bd2f1122b11fc9ec40a65894031a'}
],
'merkleRoot': '0932f1d2e98219f7d7452801e2b64ebd9e5c005539db12d9b1ddabe7834d9044',
'targetHash': '4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce',
'anchors': [
helpers.tx_to_blink(chain, '8087c03e7b7bc9ca7b355de9d9d8165cc5c76307f337f0deb8a204d002c8e582')
]
}
mp2019 = MerkleProof2019()
proof_value = mp2019.encode(p3_json_proof)
p3_expected = {
"type": "MerkleProof2019",
"created": p3['created'],
"proofValue": proof_value.decode('utf8'),
"proofPurpose": "assertionMethod",
"verificationMethod": "http://example.com"
}
self.assertEqual(p1, p1_expected)
self.assertEqual(p3, p3_expected)
if __name__ == '__main__':
unittest.main()
|
11597123
|
from typing import Callable
import flax
import jax.numpy as jnp
def mse_loss(params: flax.core.frozen_dict.FrozenDict, apply_fn: Callable,
X: jnp.ndarray, y: jnp.ndarray):
y_hat = apply_fn(params, X)
predictor_loss = jnp.sum((y - y_hat)**2, 1).mean()
return predictor_loss, (y - y_hat)**2
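if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original module): a tiny
    # flax.linen.Dense layer stands in for whatever network supplies `apply_fn`.
    import jax
    import flax.linen as nn
    model = nn.Dense(features=1)
    X = jnp.ones((4, 3))
    y = jnp.zeros((4, 1))
    params = model.init(jax.random.PRNGKey(0), X)
    loss, squared_errors = mse_loss(params, model.apply, X, y)
    print(loss, squared_errors.shape)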
|
11597209
|
import argparse
import os
from pystdlib.uishim import get_selection_rofi
from pystdlib.passutils import collect_entries, read_entry_raw, annotate_entry
from pystdlib import shell_cmd
parser = argparse.ArgumentParser(description="Some pass automation")
parser.add_argument('--store', dest="store_path",
default=f"{os.environ['HOME']}/.password-store",
type=str, help="Passwords store path")
parser.add_argument("--add", dest="add_entry", action="store_true",
default=False, help="Add new pass entry")
parser.add_argument('--selector-font', dest="selector_font", type=str, help="Selector font")
args = parser.parse_args()
# provide copying to clipboard
# provide option to use (py)fzf
if args.add_entry:
    # how to:
    # iterate over nested levels, collecting nodes under previously selected nodes
    # the full entry path should be accumulated during this loop
    # on every level we check whether the current input already exists as a path part
    # if it exists we go deeper
    # otherwise we create the nonexistent node and start the loop over
    # there should be a show-stopper keybinding to end this loop
    # afterwards we take the last component of the accumulated path and assume it is the
    # leaf (gpg) node that will actually contain the secret data
    # then ask for the password type - manual or autogenerated
    # think of how to provide the password length in the autogenerated case, though
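    # An illustrative sketch of that selection loop (helpers like `list_children` are
    # assumptions for the sketch, not part of pystdlib):
    #
    #   parts = []
    #   while True:
    #       candidates = list_children(args.store_path, parts) + ["<new node>", "<done>"]
    #       choice = get_selection_rofi(candidates, "node")
    #       if not choice or choice == "<done>":
    #           break
    #       if choice == "<new node>":
    #           choice = get_selection_rofi([], "new node name")  # free-form input
    #       parts.append(choice)
    #   entry_path = "/".join(parts)  # the last component is the leaf (gpg) entry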
print("add entry")
else:
pass_files = collect_entries(args.store_path)
path = get_selection_rofi(pass_files, "entry")
if path:
annotated = annotate_entry(read_entry_raw(path))
field = get_selection_rofi(annotated.keys(), "field")
if field:
shell_cmd(f"xdotool type {annotated[field]}")
|
11597283
|
import sys
sys.path.append('../')
import numpy as np
#################################### args
import argparse
parser = argparse.ArgumentParser()
# model
parser.add_argument("--configuration", default='L1', nargs='?', type=str)
parser.add_argument("--mode", default='IWAE', nargs='?', type=str)
parser.add_argument("--M", default=128, nargs='?', type=int)
parser.add_argument("--likelihood_variance", default=1e-2, nargs='?', type=float)
parser.add_argument("--num_IW_samples", default=5, nargs='?', type=int)
# training
parser.add_argument("--minibatch_size", default=512, nargs='?', type=int)
parser.add_argument("--iterations", default=5000, nargs='?', type=int)
parser.add_argument("--gamma", default=1e-2, nargs='?', type=float)
parser.add_argument("--gamma_decay", default=0.98, nargs='?', type=float)
parser.add_argument("--lr", default=5e-3, nargs='?', type=float)
parser.add_argument("--lr_decay", default=0.98, nargs='?', type=float)
parser.add_argument("--fix_linear", default=True, nargs='?', type=bool)
parser.add_argument("--num_predict_samples", default=2000, nargs='?', type=int)
parser.add_argument("--predict_batch_size", default=1000, nargs='?', type=int) ## was 10 for experiments
# data
parser.add_argument("--dataset", default='kin8nm', nargs='?', type=str)
parser.add_argument("--split", default=0, nargs='?', type=int)
parser.add_argument("--seed", default=0, nargs='?', type=int)
parser.add_argument("--results_path", default='havasi_results', nargs='?', type=str)
ARGS = parser.parse_args()
#################################### paths
if ARGS.split == 0:
file_name = '{}_{}_{}'.format(ARGS.dataset, ARGS.configuration, ARGS.mode)
else:
file_name = '{}_{}_{}_{}'.format(ARGS.dataset, ARGS.configuration, ARGS.mode, ARGS.split)
print(file_name)
import os
tensorboard_path_base = os.path.join(ARGS.results_path, 'tensorboard')
checkpoints_path_base = os.path.join(ARGS.results_path, 'checkpoints')
figs_path_base = os.path.join(ARGS.results_path, 'figs')
tensorboard_path = os.path.join(tensorboard_path_base, file_name)
checkpoint_path = os.path.join(checkpoints_path_base, file_name)
figs_path = os.path.join(figs_path_base, file_name+'.png')
results_path = os.path.join(ARGS.results_path, 'results.db')
for p in [ARGS.results_path, tensorboard_path_base, checkpoints_path_base, figs_path_base]:
if not os.path.isdir(p):
os.mkdir(p)
#################################### data
from bayesian_benchmarks.data import get_regression_data
data = get_regression_data(ARGS.dataset)
data.X_test = data.X_test[:10000]
data.Y_test = data.Y_test[:10000]
#################################### model
from build_models import build_model
model = build_model(ARGS, data.X_train, data.Y_train)
#################################### init
sess = model.enquire_session()
model.init_op(sess)
#################################### monitoring
import tensorflow as tf  # needed for tf.train.Saver below
import gpflow.training.monitor as mon
print_freq = 1000
saving_freq = 500
tensorboard_freq = 500
print_task = mon.PrintTimingsTask() \
.with_name('print') \
.with_condition(mon.PeriodicIterationCondition(print_freq))
saver = tf.train.Saver(max_to_keep=1, save_relative_paths=True)
checkpoint_task = mon.CheckpointTask(checkpoint_dir=checkpoint_path, saver=saver) \
.with_name('checkpoint') \
.with_condition(mon.PeriodicIterationCondition(saving_freq)) \
.with_exit_condition(True)
writer = mon.LogdirWriter(tensorboard_path)
tensorboard_task = mon.ModelToTensorBoardTask(writer, model) \
.with_name('tensorboard') \
.with_condition(mon.PeriodicIterationCondition(tensorboard_freq))
monitor_tasks = [print_task, tensorboard_task, checkpoint_task]
#################################### training
with mon.Monitor(monitor_tasks, sess, model.global_step, print_summary=True) as monitor:
try:
mon.restore_session(sess, checkpoint_path)
except ValueError:
pass
iterations_to_go = max([ARGS.iterations - sess.run(model.global_step), 0])
print('Already run {} iterations. Running {} iterations'.format(sess.run(model.global_step), iterations_to_go))
for it in range(iterations_to_go):
monitor()
model.train_op(sess)
model.anchor(sess)
#################################### evaluation
from sklearn.neighbors import KernelDensity
from scipy.stats import norm, shapiro
res = {}
if 'SGHMC' == ARGS.mode:
spacing = 5
posterior_samples = model.sghmc_optimizer.collect_samples(sess, ARGS.num_predict_samples, spacing)
logp = np.empty(len(data.X_test))
rmse = np.empty(len(data.X_test))
shapiro_W = np.empty(len(data.X_test))
Xs_batch = np.array_split(data.X_test, max(1, int(len(data.X_test)/ARGS.predict_batch_size)))
for i, (x, y) in enumerate(zip(data.X_test, data.Y_test)):
if 'SGHMC' == ARGS.mode:
samples = np.empty((ARGS.num_predict_samples, 1, 1))
for j, s in enumerate(posterior_samples):
samples[j] = model.predict_y_samples(x.reshape(1, -1), 1, feed_dict=s)[0]
else:
samples = model.predict_y_samples(x.reshape(1, -1), ARGS.num_predict_samples)
Ss = samples[:, :, 0]
bandwidth = 1.06 * np.std(Ss) * len(Ss) ** (-1. / 5) # Silverman's (1986) rule of thumb.
kde = KernelDensity(bandwidth=float(bandwidth))
l = kde.fit(Ss).score(y.reshape(-1, 1))
logp[i] = float(l)
shapiro_W[i] = float(shapiro((Ss - np.average(Ss)) / np.std(Ss))[0])
rmse[i] = (np.average(Ss) - float(y)) ** 2
res['test_loglik'] = np.average(logp)
res['test_shapiro_W_median'] = np.median(shapiro_W)
res['test_rmse'] = np.average(rmse) ** 0.5
res.update(ARGS.__dict__)
print(res)
#################################### save
from bayesian_benchmarks.database_utils import Database
with Database(results_path) as db:
db.write('conditional_density_estimation', res)
|
11597286
|
from django.conf.urls import patterns, url
from kitsune.customercare import api
# API urls
urlpatterns = patterns(
'',
url(r'^banned$', api.BannedList.as_view(), name='customercare.api.banned'),
url(r'^ban$', api.ban, name='customercare.api.ban'),
url(r'^unban$', api.unban, name='customercare.api.unban'),
url(r'^ignored$', api.IgnoredList.as_view(),
name='customercare.api.ignored'),
url(r'^ignore$', api.ignore, name='customercare.api.ignore'),
url(r'^unignore$', api.unignore, name='customercare.api.unignore'),
)
|
11597291
|
from ceph_medic.checks import common
from ceph_medic import metadata
class TestGetFsid(object):
def setup(self):
metadata['cluster_name'] = 'ceph'
def make_metadata(self, contents=None):
contents = contents or ''
data = {'paths': {'/etc/ceph':{'files':{'/etc/ceph/ceph.conf':{'contents': contents}}}}}
data['cluster_name'] = 'ceph'
return data
def test_fails_to_find_an_fsid(self):
data = self.make_metadata("[global]\nkey=value\n\n[mdss]\ndisabled=true\n")
fsid = common.get_fsid(data)
assert fsid == ''
def test_empty_conf_returns_empty_string(self):
data = self.make_metadata()
fsid = common.get_fsid(data)
assert fsid == ''
def test_find_an_actual_fsid(self):
data = self.make_metadata("[global]\nfsid = 1234-lkjh\n\n[mdss]\ndisabled=true\n")
fsid = common.get_fsid(data)
assert fsid == '1234-lkjh'
def test_spaces_on_fsid_are_trimmed(self):
data = self.make_metadata("[global]\nfsid = 1234-lkjh \n\n[mdss]\ndisabled=true\n")
fsid = common.get_fsid(data)
assert fsid == '1234-lkjh'
def test_fsids_have_parity(self, make_nodes, make_data):
metadata['nodes'] = make_nodes(mons=['node1', 'node2'])
node1_data = make_data()
data = self.make_metadata("[global]\nfsid = 1234-lkjh \n\n[mdss]\ndisabled=true\n")
node1_data["paths"] = data["paths"]
metadata['mons']['node1'] = node1_data
metadata['mons']['node2'] = node1_data
result = common.check_cluster_fsid('node1', node1_data)
assert result is None
def test_fsid_does_not_exist(self, make_nodes, make_data):
metadata['nodes'] = make_nodes(mons=['node1'])
node1_data = make_data()
data = self.make_metadata("[global]\nfoo = 1234-lkjh \n\n[mdss]\ndisabled=true\n")
node1_data["paths"] = data["paths"]
metadata['mons']['node1'] = node1_data
result = common.check_fsid_exists('node1', node1_data)
assert "'fsid' is missing" in str(result)
def test_fsid_does_exist(self, make_nodes, make_data):
metadata['nodes'] = make_nodes(mons=['node1'])
node1_data = make_data()
data = self.make_metadata("[global]\nfsid = 1234-lkjh \n\n[mdss]\ndisabled=true\n")
node1_data["paths"] = data["paths"]
metadata['mons']['node1'] = node1_data
result = common.check_fsid_exists('node1', node1_data)
assert result is None
def test_ignores_empty_fsid_during_cluster_fsid_check(self, make_nodes, make_data):
metadata['nodes'] = make_nodes(mons=['node1', 'node2'])
node1_data = make_data()
node2_data = make_data()
data = self.make_metadata("[global]\nfsid = 1234-lkjh \n\n[mdss]\ndisabled=true\n")
node1_data["paths"] = data["paths"]
data = self.make_metadata("[global]\nfoo = 1234-lkjh \n\n[mdss]\ndisabled=true\n")
node2_data["paths"] = data["paths"]
metadata['mons']['node1'] = node1_data
metadata['mons']['node2'] = node2_data
result = common.check_cluster_fsid('node1', node1_data)
assert result is None
class TestGetCommonFSID(object):
def setup(self):
metadata['cluster_name'] = 'ceph'
metadata['mons'] = {}
def teardown(self):
metadata['mons'] = {}
def test_get_common_fsid_fails(self, make_nodes, make_data):
metadata['nodes'] = make_nodes(mons=['node1', 'node2'])
node1_data = make_data({'ceph': {'sockets': {'/var/run/ceph/osd.socket': {'config': {}}}}})
metadata['mons']['node1'] = node1_data
assert common.get_common_fsid() == ''
def test_multiple_fsids_get_one_result(self, make_nodes, make_data):
metadata['nodes'] = make_nodes(mons=['node1', 'node2', 'node3'])
node1_data = make_data(
{'ceph': {'sockets': {'/var/run/ceph/osd.socket': {'config': {'fsid': 'aaaa'}}}}}
)
node2_data = make_data(
{'ceph': {'sockets': {'/var/run/ceph/osd.socket': {'config': {'fsid': 'bbbb'}}}}}
)
metadata['mons']['node1'] = node1_data
metadata['mons']['node2'] = node2_data
metadata['mons']['node3'] = node1_data
assert common.get_common_fsid() == 'aaaa'
def test_common_fsid_is_found(self, make_nodes, make_data):
metadata['nodes'] = make_nodes(mons=['node1', 'node2'])
node1_data = make_data(
{'ceph': {'sockets': {'/var/run/ceph/osd.socket': {'config': {'fsid': 'bbbb'}}}}}
)
node2_data = make_data(
{'ceph': {'sockets': {'/var/run/ceph/osd.socket': {'config': {'fsid': 'bbbb'}}}}}
)
metadata['mons']['node1'] = node1_data
metadata['mons']['node2'] = node2_data
assert common.get_common_fsid() == 'bbbb'
class TestCheckFSIDPerDaemon(object):
def setup(self):
metadata['cluster_name'] = 'ceph'
metadata['mons'] = {}
def test_no_different_fsids_found(self, make_nodes, make_data):
metadata['nodes'] = make_nodes(mons=['node1', 'node2'])
node1_data = make_data(
{'ceph': {'sockets': {'/var/run/ceph/osd.socket': {'config': {'fsid': 'bbbb'}}}}}
)
node2_data = make_data(
{'ceph': {'sockets': {'/var/run/ceph/osd.socket': {'config': {'fsid': 'bbbb'}}}}}
)
metadata['mons']['node1'] = node1_data
metadata['mons']['node2'] = node2_data
assert common.check_fsid_per_daemon('node1', node1_data) is None
def test_single_different_fsid_found(self, make_nodes, make_data):
metadata['nodes'] = make_nodes(mons=['node1', 'node2', 'node3'])
node1_data = make_data(
{'ceph': {'sockets': {'/var/run/ceph/osd.socket': {'config': {'fsid': 'aaaa'}}}}}
)
node2_data = make_data(
{'ceph': {'sockets': {'/var/run/ceph/osd.socket': {'config': {'fsid': 'bbbb'}}}}}
)
metadata['mons']['node1'] = node1_data
metadata['mons']['node2'] = node2_data
metadata['mons']['node3'] = node1_data
code, msg = common.check_fsid_per_daemon('node2', node2_data)
assert 'Found cluster FSIDs from running sockets different than: aaaa' in msg
assert 'osd.socket : bbbb' in msg
def test_multiple_different_fsid_found(self, make_nodes, make_data):
metadata['nodes'] = make_nodes(mons=['node1', 'node2'])
node1_data = make_data(
{'ceph': {'sockets': {'/var/run/ceph/osd.socket': {'config': {'fsid': 'bbbb'}}}}}
)
node2_data = make_data(
{'ceph': {'sockets': {
'/var/run/ceph/osd1.socket': {'config': {'fsid': 'dddd'}},
'/var/run/ceph/osd3.socket': {'config': {'fsid': 'bbbb'}},
'/var/run/ceph/osd2.socket': {'config': {'fsid': 'cccc'}},
}
}}
)
metadata['mons']['node1'] = node1_data
metadata['mons']['node2'] = node2_data
code, msg = common.check_fsid_per_daemon('node2', node2_data)
assert 'Found cluster FSIDs from running sockets different than: bbbb' in msg
assert 'osd1.socket : dddd' in msg
assert 'osd2.socket : cccc' in msg
class TestCephVersionParity(object):
def setup(self):
metadata['cluster_name'] = 'ceph'
def test_finds_a_mismatch_of_versions(self, make_nodes, make_data):
metadata['nodes'] = make_nodes(mons=['node1', 'node2'])
node1_data = make_data()
metadata['mons']['node1'] = node1_data
metadata['mons']['node2'] = make_data({'ceph': {'version': '13'}})
result = common.check_ceph_version_parity('node1', node1_data)
assert 'Ceph version "12.2.1" is different' in str(result)
def test_versions_have_parity(self, make_nodes, make_data):
metadata['nodes'] = make_nodes(mons=['node1', 'node2'])
node1_data = make_data()
metadata['mons']['node1'] = node1_data
metadata['mons']['node2'] = make_data()
result = common.check_ceph_version_parity('node1', node1_data)
assert result is None
class TestCephSocketAndInstalledVersionParity(object):
def setup(self):
metadata['cluster_name'] = 'ceph'
def test_finds_a_mismatch_of_versions(self, make_nodes, make_data):
metadata['nodes'] = make_nodes(mons=['node1'])
node1_data = make_data(
{'ceph': {
"sockets": {
"/var/run/ceph/osd.asok": {"version": {"version": "13.2.0"}},
},
"installed": True,
"version": "12.2.1",
}}
)
metadata['mons']['node1'] = node1_data
result = common.check_ceph_socket_and_installed_version_parity('node1', node1_data)
assert 'Ceph version "12.2.1" is different' in str(result)
def test_versions_have_parity(self, make_nodes, make_data):
metadata['nodes'] = make_nodes(mons=['node1'])
node1_data = make_data(
{'ceph': {
"sockets": {
"/var/run/ceph/osd.asok": {"version": {"version": "12.2.0"}},
},
"installed": True,
"version": "ceph version 12.2.0 (32ce2a3ae5239ee33d6150705cdb24d43bab910c) luminous (rc)",
}}
)
metadata['mons']['node1'] = node1_data
result = common.check_ceph_socket_and_installed_version_parity('node1', node1_data)
assert result is None
def test_socket_version_is_none(self, make_nodes, make_data):
metadata['nodes'] = make_nodes(mons=['node1'])
node1_data = make_data(
{'ceph': {
"sockets": {
"/var/run/ceph/osd.asok": {"version": {}, "config": {}},
},
"installed": True,
"version": "12.2.1",
}}
)
metadata['mons']['node1'] = node1_data
result = common.check_ceph_socket_and_installed_version_parity('node1', node1_data)
assert result is None
class TestRgwNumRadosHandles(object):
def test_value_is_larger_than_accepted(self, make_nodes, make_data):
metadata['nodes'] = make_nodes(mons=['node1'])
node1_data = make_data(
{'ceph': {
"sockets": {
"/var/run/ceph/osd.asok": {'version': {}, 'config': {'rgw_num_rados_handles': 3}},
},
"installed": True,
"version": "12.2.1",
}}
)
metadata['mons']['node1'] = node1_data
result = common.check_rgw_num_rados_handles('node1', node1_data)
assert result == (
'WCOM7',
"rgw_num_rados_handles shouldn't be larger than 1, can lead to memory leaks: osd.asok"
)
def test_value_within_range(self, make_nodes, make_data):
metadata['nodes'] = make_nodes(mons=['node1'])
node1_data = make_data(
{'ceph': {
"sockets": {
"/var/run/ceph/osd.asok": {'version': {}, 'config': {'rgw_num_rados_handles': 1}},
},
"installed": True,
"version": "12.2.1",
}}
)
metadata['mons']['node1'] = node1_data
result = common.check_rgw_num_rados_handles('node1', node1_data)
assert result is None
def test_multiple_value_is_larger_than_accepted(self, make_nodes, make_data):
metadata['nodes'] = make_nodes(mons=['node1'])
node1_data = make_data(
{'ceph': {
"sockets": {
"/var/run/ceph/osd1.asok": {'version': {}, 'config': {'rgw_num_rados_handles': 2}},
"/var/run/ceph/osd3.asok": {'version': {}, 'config': {'rgw_num_rados_handles': 3}},
},
"installed": True,
"version": "12.2.1",
}}
)
metadata['mons']['node1'] = node1_data
result = common.check_rgw_num_rados_handles('node1', node1_data)
assert 'osd1.asok' in str(result)
assert 'osd3.asok' in str(result)
class TestMultipleRunningMons(object):
def test_no_multiple_mons_found(self, data):
result = common.check_multiple_running_mons(None, data())
assert result is None
def test_multiple_mons_found(self, data):
fake_data = data()
fake_data['ceph']['sockets'] = {
'/var/lib/ceph/ceph-mon.0.asok': {},
'/var/lib/ceph/ceph-mon.1.asok': {},
'/var/lib/ceph/ceph-mon.2.asok': {},
}
code, message = common.check_multiple_running_mons(None, fake_data)
assert code == 'ECOM10'
assert 'mon.0' in message
assert 'mon.1' in message
assert 'mon.2' in message
class TestColocatedMonsOSDs(object):
def test_no_colocation_found(self, data):
result = common.check_colocated_running_mons_osds(None, data())
assert result is None
def test_no_osds_found(self, data):
fake_data = data()
fake_data['ceph']['sockets'] = {
'/var/lib/ceph/ceph-mon.0.asok': {},
'/var/lib/ceph/ceph-mon.1.asok': {},
'/var/lib/ceph/ceph-mon.2.asok': {},
}
result = common.check_colocated_running_mons_osds(None, fake_data)
assert result is None
def test_multiple_mons_found(self, data):
fake_data = data()
fake_data['ceph']['sockets'] = {
'/var/lib/ceph/ceph-mon.0.asok': {},
'/var/lib/ceph/ceph-mon.1.asok': {},
'/var/lib/ceph/ceph-osd.2.asok': {},
}
code, message = common.check_colocated_running_mons_osds(None, fake_data)
assert code == 'WCOM1'
assert 'osd.2' in message
assert 'mon.1' not in message
assert 'mon.2' not in message
|
11597292
|
from ewah.operators.base import EWAHBaseOperator
from ewah.constants import EWAHConstants as EC
from ewah.hooks.base import EWAHBaseHook as BaseHook
from sshtunnel import SSHTunnelForwarder
from tempfile import NamedTemporaryFile
from datetime import timedelta
from pytz import timezone
import os
from typing import Optional, List, Dict, Any, Union
class EWAHSQLBaseOperator(EWAHBaseOperator):
# Don't overwrite the _NAMES list to avoid accidentally exposing the SQL
# base operator
# _NAMES = []
_ACCEPTED_EXTRACT_STRATEGIES = {
EC.ES_FULL_REFRESH: True,
EC.ES_INCREMENTAL: True,
EC.ES_SUBSEQUENT: True,
}
def __init__(
self,
source_schema_name: Optional[str] = None,
source_table_name: Optional[
str
] = None, # defaults to same as target_table_name
source_database_name: Optional[str] = None, # bigquery: project id
sql_select_statement: Optional[str] = None, # Alternative to specifying table
timestamp_column: Optional[str] = None,
where_clauses: Optional[Union[str, List[str]]] = None,
extra_params: Optional[dict] = None,
batch_size: int = 100000,
*args,
**kwargs
):
source_table_name = source_table_name or kwargs["target_table_name"]
# default subsequent_field to timestamp_column or primary_key_column_name
if kwargs.get("extract_strategy") == EC.ES_SUBSEQUENT:
kwargs["subsequent_field"] = kwargs.get(
"subsequent_field",
timestamp_column or kwargs.get("primary_key_column_name", None),
)
super().__init__(*args, **kwargs)
if isinstance(where_clauses, str):
where_clauses = [where_clauses]
if self.extract_strategy == EC.ES_INCREMENTAL:
assert timestamp_column, "Incremental loading must have timestamp column!"
if not sql_select_statement:
assert source_schema_name
assert source_table_name
if self.columns_definition:
columns_sql = "\n\t {0}{1}{0}".format(
self._SQL_COLUMN_QUOTE,
"{0}\n\t, {0}".format(self._SQL_COLUMN_QUOTE).join(
self.columns_definition.keys()
),
)
else:
columns_sql = "*"
sql_select_statement = self._SQL_BASE.format(
columns=columns_sql,
schema=source_schema_name,
table=source_table_name,
database=source_database_name,
)
self.sql = self._SQL_BASE_SELECT.format(select_sql=sql_select_statement)
self.extra_params = extra_params
self.timestamp_column = timestamp_column
self.where_clauses = where_clauses
self.batch_size = batch_size
def ewah_execute(self, context):
# called, potentially with a data_from and data_until
params = self.extra_params or {}
where_clauses = self.where_clauses or []
if self.data_from and self.timestamp_column:
where_clauses.append(
"{0} >= {1}".format(
self.timestamp_column, self._SQL_PARAMS.format("data_from")
)
)
params["data_from"] = self.data_from
if self.data_until and self.timestamp_column:
where_clauses.append(
"{0} <= {1}".format(
self.timestamp_column, self._SQL_PARAMS.format("data_until")
)
)
params["data_until"] = self.data_until
if self.subsequent_field and self.test_if_target_table_exists():
where_clauses.append(
"{0} > {1}".format(
self.subsequent_field, self._SQL_PARAMS.format("previous_max_value")
)
)
params["previous_max_value"] = self.get_max_value_of_column(
self.subsequent_field
)
where_clauses = where_clauses or ["1 = 1"]
sql = self.sql.format("\n AND ".join(where_clauses))
for batch in self.source_hook.get_data_in_batches(
sql=sql,
params=params or None, # Don't supply empty dict as params!
return_dict=True,
batch_size=self.batch_size,
):
self.upload_data(batch)
|
11597303
|
import pandas as pd
import numpy as np
from pyids import IDS
from pyids.algorithms import mine_CARs
from pyids.data_structures import IDSRuleSet
from pyarc.qcba.data_structures import QuantitativeDataFrame
import random
import logging
import time
logging.basicConfig(level=logging.INFO)
df = pd.read_csv("../../../data/iris0.csv")
cars = mine_CARs(df, 50, sample=False)
ids_ruleset = IDSRuleSet.from_cba_rules(cars).ruleset
quant_dataframe = QuantitativeDataFrame(df)
start = time.time()
ids = IDS(algorithm="RUSM")
ids.fit(
class_association_rules=cars,
quant_dataframe=quant_dataframe,
random_seed=None,
lambda_array=7*[1]
)
end = time.time()
print("time", end - start)
for r in ids.clf.rules:
print(r)
#auc_cba = ids.score_auc(quant_dataframe, order_type="cba")
#auc_f1 = ids.score_auc(quant_dataframe, order_type="f1")
#print(auc_cba, auc_f1)
#print(ids.score(quant_dataframe))
print(ids.score_auc(quant_dataframe))
|
11597305
|
import tensorflow as tf
__all__ = ["gan_discriminator_loss", "gan_generator_loss"]
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def gan_discriminator_loss(real_output, fake_output):
r"""
Args:
real_output (tensor): A tensor representing the real logits of discriminator
fake_output (tensor): A tensor representing the fake logits of discriminator
Return:
a tensor representing the sum of real and fake loss
"""
real_loss = cross_entropy(tf.ones_like(real_output), real_output)
fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
return real_loss + fake_loss
def gan_generator_loss(fake_output):
r"""
Args:
fake_output (tensor): A tensor representing the fake logits of discriminator
Return:
a tensor representing the generator loss
"""
return cross_entropy(tf.ones_like(fake_output), fake_output)
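if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original module): logits as they
    # might come out of a discriminator for one real and one generated sample.
    real_output = tf.constant([[2.0]])   # discriminator logits for a real image
    fake_output = tf.constant([[-1.5]])  # discriminator logits for a generated image
    print("discriminator loss:", float(gan_discriminator_loss(real_output, fake_output)))
    print("generator loss:", float(gan_generator_loss(fake_output)))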
|
11597340
|
from tkinter import *
root=Tk()
s=StringVar()
def disp(x):
    # append the pressed key to the expression shown in the entry widget
    global text
    text+=x
    s.set(text)
def equal():
    # evaluate the accumulated expression and display the result (or the error)
    global text
    text=''
    try:
        text=eval(s.get())
    except Exception as e:
        s.set(e)
        text=''
    else:
        s.set(text)
        text=''
text=''
# root.geometry("300x300")
root.config(bg="purple")
color=['#12ff3f','#12ff67','#67ff67','#11deaf']
e=Entry(root,textvariable=s)
e.pack(side=TOP,expand='yes',fill='both')
for i in ['789/','456*','123+','.0-=']:
f=Frame(root,bg=color.pop())
for j in i:
b=Button(f,text=j,command=lambda x=j:disp(x) if x!='=' else equal())
b.pack(side='left',padx=10,pady=10,expand="yes",fill='both')
f.pack(side='top',padx=10,pady=10,expand="yes",fill='both')
b=Button(root,text="Exit",command=root.destroy)
b.pack(side="bottom")
root.mainloop()
|
11597361
|
from joblib import dump, load
from datetime import date
import mlflow.pyfunc
from mlflow import pyfunc
from util import load_yaml, load_json
class Wrapper(mlflow.pyfunc.PythonModel):
def __init__(self, model=None, preprocessing=None, metrics=None, columns=None):
"""
Constructor
Parameters
----------
model : object
If it's just a model: enter all parameters
if it is more than one model: do not enter parameters and use
the add method to add each of the models
preprocessing : Preprocessamento
Preprocessing used in training
metrics : dict
Dictionary with the metrics of the result of the model
columns : list
list with columns names
Returns
-------
WrapperModel
"""
self.artifacts = dict()
self.artifacts["model"] = model
self.artifacts["preprocessing"] = preprocessing
self.artifacts["metrics"] = metrics
self.artifacts["columns"] = columns
self.artifacts["creation_date"] = date.today()
def predict(self, model_input):
"""
Method that returns the result of the prediction on a dataset
Parameters
----------
        model_input : pd.DataFrame
Data to be predicted
Returns
-------
list
"""
df_processed = model_input.copy()
model = self.artifacts["model"]
columns = self.artifacts["columns"]
return model.predict(df_processed[columns])
def predict_proba(self, model_input, binary=False):
"""
Method that returns the result of the prediction on a dataset
Parameters
----------
        model_input : pd.DataFrame
data to be predicted
Returns
-------
list
"""
df_processed = model_input.copy()
model = self.artifacts["model"]
columns = self.artifacts["columns"]
if binary:
return model.predict_proba(df_processed[columns])[:,1]
else:
return model.predict_proba(df_processed[columns])
    def load(self, path):
        """
        Loads a model object from a specific path
        Parameters
        ----------
        path : str
            path where the model object was saved
        Returns
        -------
        object
        """
        return load(path)
def save_model(self, path):
"""
Saves the model object to a specific path
Parameters
----------
path : str
path where the model object will be saved
Returns
-------
None
"""
dump(self, path)
@staticmethod
def load_model(path):
"""
Loads the model object in a specific path (pyfunc)
Parameters
----------
path : str
path where the model object will be loaded.
Returns
-------
None
"""
model = pyfunc.load_model(path)
return model
def save(self, path):
"""
Save model as a Wrapper class (pyfunc)
Parameters
----------
path : str
path where the model object will be loaded.
Returns
-------
None
"""
path_artifacts = path+'_artifacts.pkl'
dump(self.artifacts, path_artifacts)
content = load_json("config/arquivos.json")
conda_env = load_yaml(content["path_yaml"])
mlflow.pyfunc.save_model(
path= path,
python_model= self,
artifacts= {'model': path_artifacts},
conda_env= conda_env)
def get_metrics(self):
"""
Return metrics
Parameters
----------
self : object Wrapper
Returns
-------
dict
"""
return self.artifacts['metrics']
def get_columns(self):
"""
Return columns
Parameters
----------
self : object Wrapper
Returns
-------
list
"""
return self.artifacts['columns']
def get_model(self):
"""
Return model
Parameters
----------
self : object Wrapper
Returns
-------
dict
"""
return self.artifacts['model']
def get_preprocessing(self):
"""
Return preprocessing instance
Parameters
----------
self : object Wrapper
Returns
-------
Preprocessing instance
"""
return self.artifacts['preprocessing']
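# Illustrative usage sketch (an assumption, not part of the original module; `clf`, `prep`
# and `X` stand for an already fitted estimator, a preprocessing object and a DataFrame):
#   wrapper = Wrapper(model=clf, preprocessing=prep, metrics={"auc": 0.91}, columns=list(X.columns))
#   predictions = wrapper.predict(X)
#   wrapper.save_model("wrapper.pkl")   # plain joblib dump of the wrapper itself
#   wrapper.save("mlflow_model")        # MLflow pyfunc format; needs config/arquivos.json and the conda yaml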
|
11597362
|
import os
import unittest
from django.test import SimpleTestCase, override_settings, tag
from anymail.exceptions import AnymailAPIError
from anymail.message import AnymailMessage
from .utils import AnymailTestMixin, sample_image_path
# For most integration tests, Postmark's sandboxed "POSTMARK_API_TEST" token is used.
# But to test template sends, a real Postmark server token and template id are needed:
ANYMAIL_TEST_POSTMARK_SERVER_TOKEN = os.getenv('ANYMAIL_TEST_POSTMARK_SERVER_TOKEN')
ANYMAIL_TEST_POSTMARK_TEMPLATE_ID = os.getenv('ANYMAIL_TEST_POSTMARK_TEMPLATE_ID')
@tag('postmark', 'live')
@override_settings(ANYMAIL_POSTMARK_SERVER_TOKEN="POSTMARK_API_TEST",
EMAIL_BACKEND="anymail.backends.postmark.EmailBackend")
class PostmarkBackendIntegrationTests(AnymailTestMixin, SimpleTestCase):
"""Postmark API integration tests
These tests run against the **live** Postmark API, but using a
test key that's not capable of sending actual email.
"""
def setUp(self):
super().setUp()
self.message = AnymailMessage('Anymail Postmark integration test', 'Text content',
'<EMAIL>', ['<EMAIL>'])
self.message.attach_alternative('<p>HTML content</p>', "text/html")
def test_simple_send(self):
# Example of getting the Postmark send status and message id from the message
sent_count = self.message.send()
self.assertEqual(sent_count, 1)
anymail_status = self.message.anymail_status
sent_status = anymail_status.recipients['<EMAIL>'].status
message_id = anymail_status.recipients['<EMAIL>'].message_id
self.assertEqual(sent_status, 'sent')
self.assertGreater(len(message_id), 0) # non-empty string
self.assertEqual(anymail_status.status, {sent_status}) # set of all recipient statuses
self.assertEqual(anymail_status.message_id, message_id)
def test_all_options(self):
message = AnymailMessage(
subject="Anymail Postmark all-options integration test",
body="This is the text body",
# Postmark accepts multiple from_email addresses, but truncates to the first on their end
from_email="Test From <<EMAIL>>, <EMAIL>",
to=["<EMAIL>", "Recipient 2 <<EMAIL>>"],
cc=["<EMAIL>", "Copy 2 <<EMAIL>>"],
bcc=["<EMAIL>", "Blind Copy 2 <<EMAIL>>"],
reply_to=["<EMAIL>", "Reply 2 <<EMAIL>>"],
headers={"X-Anymail-Test": "value"},
# no send_at support
metadata={"meta1": "simple string", "meta2": 2},
tags=["tag 1"], # max one tag
track_opens=True,
track_clicks=True,
merge_data={}, # force batch send (distinct message for each `to`)
)
message.attach("attachment1.txt", "Here is some\ntext for you", "text/plain")
message.attach("attachment2.csv", "ID,Name\n1,<NAME>", "text/csv")
cid = message.attach_inline_image_file(sample_image_path())
message.attach_alternative(
"<p><b>HTML:</b> with <a href='http://example.com'>link</a>"
"and image: <img src='cid:%s'></div>" % cid,
"text/html")
message.send()
self.assertEqual(message.anymail_status.status, {'sent'})
self.assertEqual(message.anymail_status.recipients['<EMAIL>'].status, 'sent')
self.assertEqual(message.anymail_status.recipients['<EMAIL>'].status, 'sent')
# distinct messages should have different message_ids:
self.assertNotEqual(message.anymail_status.recipients['<EMAIL>'].message_id,
message.anymail_status.recipients['<EMAIL>'].message_id)
def test_invalid_from(self):
self.message.from_email = 'webmaster@localhost' # Django's default From
with self.assertRaises(AnymailAPIError) as cm:
self.message.send()
err = cm.exception
self.assertEqual(err.status_code, 422)
self.assertIn("Invalid 'From' address", str(err))
@unittest.skipUnless(ANYMAIL_TEST_POSTMARK_SERVER_TOKEN and ANYMAIL_TEST_POSTMARK_TEMPLATE_ID,
"Set ANYMAIL_TEST_POSTMARK_SERVER_TOKEN and ANYMAIL_TEST_POSTMARK_TEMPLATE_ID "
"environment variables to run Postmark template integration tests")
@override_settings(ANYMAIL_POSTMARK_SERVER_TOKEN=ANYMAIL_TEST_POSTMARK_SERVER_TOKEN)
def test_template(self):
message = AnymailMessage(
from_email="<EMAIL>",
to=["<EMAIL>", "Second Recipient <<EMAIL>>"],
template_id=ANYMAIL_TEST_POSTMARK_TEMPLATE_ID,
merge_data={
"<EMAIL>": {"name": "Recipient 1", "order_no": "12345"},
"<EMAIL>": {"order_no": "6789"},
},
merge_global_data={"name": "Valued Customer"},
)
message.send()
self.assertEqual(message.anymail_status.status, {'sent'})
@override_settings(ANYMAIL_POSTMARK_SERVER_TOKEN="Hey, that's not a server token!")
def test_invalid_server_token(self):
with self.assertRaises(AnymailAPIError) as cm:
self.message.send()
err = cm.exception
self.assertEqual(err.status_code, 401)
# Make sure the exception message includes Postmark's response:
self.assertIn("Please verify that you are using a valid token", str(err))
|
11597385
|
import numpy as np
import code
import theano
from theano import config
import theano.tensor as T
from theano import tensor  # used alongside the T alias in build_advers_eval
from theano.tensor.nnet import conv  # conv.conv2d is used in LeNetConvPoolLayer
from theano.ifelse import ifelse
from theano.tensor.extra_ops import fill_diagonal
from collections import OrderedDict
import time
from imagernn.utils import *
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import theano.tensor.nnet as tnnet
class CnnEvaluator:
"""
A multimodal long short-term memory (LSTM) generator
"""
# ========================================================================================
def __init__(self, params,Wemb = None):
self.word_encoding_size = params.get('word_encoding_size', 512)
image_feat_size = params.get('image_feat_size', 512)
aux_inp_size = params.get('aux_inp_size', -1)
self.n_fmaps_psz = params.get('n_fmaps_psz', 100)
self.filter_hs = params.get('filter_hs', [])
# Used for dropout.
self.use_noise = theano.shared(numpy_floatX(0.))
vocabulary_size = params.get('vocabulary_size',-1)
self.sent_enc_size = params.get('sent_encoding_size',-1)# size of CNN vectors hardcoded here
model = OrderedDict()
# Recurrent weights: take x_t, h_{t-1}, and bias unit
# and produce the 3 gates and the input to cell signal
if Wemb == None:
model['Wemb'] = initwTh(vocabulary_size-1, self.word_encoding_size) # word encoder
model['WIemb'] = initwTh(image_feat_size, self.sent_enc_size,inittype='xavier') # image encoder
#model['b_Img'] = np.zeros((self.sent_enc_size)).astype(config.floatX)
model['Wfc_sent'] = initwTh(self.n_fmaps_psz * len(self.filter_hs), self.sent_enc_size,inittype='xavier') # word encoder
#model['bfc_sent'] = np.zeros((self.sent_enc_size)).astype(config.floatX)
#if params['advers_gen']:
# Add a merging layer
#model['Wm_sent'] = initwTh(self.sent_enc_size, params.get('merge_dim',50),inittype='xavier') # word encoder
#model['Wm_img'] = initwTh(self.sent_enc_size, params.get('merge_dim',50),inittype='xavier') # word encoder
#model['b_m'] = np.zeros((params.get('merge_dim',50))).astype(config.floatX)
## Final output weights
#model['W_out'] = initwTh(params.get('merge_dim',50),1, 1.0) # word encoder
# Decoder weights (e.g. mapping to vocabulary)
update_list = ['Wemb','Wfc_sent','WIemb']
self.regularize = ['Wemb','Wfc_sent','WIemb']
if params.get('en_aux_inp',0) and not params['advers_gen']:
model['WIemb_aux'] = initwTh(aux_inp_size, self.sent_enc_size) # image encoder
model['b_Img_aux'] = np.zeros((self.sent_enc_size)).astype(config.floatX)
self.model_th = self.init_tparams(model)
# Share the Word embeddings with the generator model
if Wemb != None:
self.model_th['Wemb'] = Wemb
self.updateP = OrderedDict()
for vname in update_list:
self.updateP[vname] = self.model_th[vname]
# Instantiate a conv layer already so we don't end up creating new weights
if params['advers_gen']:
filter_w = self.word_encoding_size
self.conv_layers = []
max_sent_len = params.get('maxlen',0)
for filter_h in self.filter_hs:
filter_shape = (self.n_fmaps_psz, params['n_gen_samples'], filter_h, filter_w)
pool_size = (max_sent_len-filter_h+1, self.word_encoding_size-filter_w+1)
conv_layer = batch2DConvPoolLayer(filter_shape=filter_shape,
poolsize=pool_size,
non_linear=params['conv_non_linear'])
# flatten all the filter outputs to a single vector
self.conv_layers.append(conv_layer)
self.updateP.update(conv_layer.params)
self.regularize.extend(conv_layer.regularize)
self.model_th.update(conv_layer.params)
# ========================================================================================
def init_tparams(self,params):
tparams = OrderedDict()
for kk, pp in params.iteritems():
tparams[kk] = theano.shared(params[kk], name=kk)
return tparams
# ========================================================================================
    # BUILD CNN evaluator forward propagation model
def build_model(self, tparams, options, xI=None, prior_inp_list = []):
trng = RandomStreams()
rng = np.random.RandomState()
# Used for dropout.
use_noise = theano.shared(numpy_floatX(0.))
xWi = T.matrix('xW', dtype='int64')
# Now input is transposed compared to the generator!!
xW = xWi.T
n_samples = xW.shape[0]
n_words= xW.shape[1]
Words = T.concatenate([tparams['Wemb'], T.alloc(numpy_floatX(0.),1,self.word_encoding_size)],axis=0)
embW = Words[xW.flatten()].reshape([options['batch_size'], 1, n_words, self.word_encoding_size])
if options.get('use_dropout',0):
embW = dropout_layer(embW, use_noise, trng, options['drop_prob_encoder'], shp = embW.shape)
sent_emb, cnn_out , tparams = self.sent_conv_layer(tparams, options, embW, options['batch_size'], use_noise, trng)
if xI == None:
xI = T.matrix('xI', dtype=config.floatX)
xI_is_inp = True
else:
xI_is_inp = False
if options.get('mode','batchtrain') != 'batchtrain':
posSamp = T.ivector('posSamp')
        if xI_is_inp:
            # 'b_Img' is commented out in __init__, so no bias term is added here
            embImg = T.dot(xI, tparams['WIemb'])
        else:
            embImg = xI
if options.get('use_dropout',0):
embImg = dropout_layer(embImg, use_noise, trng, options['drop_prob_encoder'], shp = embImg.shape)
#-------------------------------------------------------------------------------------------------------------#
# Curr prob is computed by applying softmax over (I0,c0), (I0,c1),... (I0,cn-1) pairs
# It could also be computed with (I0,c0), (I1,c0),... (In,c0) pairs, but will lead to different discrimination
# Maybe even sum of the two could be used
#-------------------------------------------------------------------------------------------------------------#
probMatchImg, sim_score = multimodal_cosine_sim_softmax(embImg, sent_emb, tparams, options.get('sim_smooth_factor',1.0))
inp_list = [xWi]
if xI_is_inp:
inp_list.append(xI)
if options.get('en_aux_inp',0):
xAux = T.matrix('xAux', dtype=config.floatX)
embAux = T.dot(xAux, tparams['WIemb_aux']) + tparams['b_Img_aux']
xAuxEmb = dropout_layer(embAux, use_noise, trng, options['drop_prob_aux'], shp = embAux.shape)
inp_list.append(xAux)
probMatchAux, sim_scoreAux = multimodal_cosine_sim_softmax(embAux, sent_emb, tparams, options.get('sim_smooth_factor',1.0))
else:
probMatchAux = T.alloc(numpy_floatX(0.),1,1)
probMatch = (probMatchImg + probMatchAux) / 2.
sortedProb = T.argsort(probMatch,axis=1)
batch_idces = T.arange(probMatch.shape[0])
opponents = T.switch(T.eq(sortedProb[:,-1], batch_idces), sortedProb[:,-2], sortedProb[:,-1])
violator_mask = (probMatch.diagonal() - probMatch[batch_idces,opponents]) < (options.get('cost_margin',0.02))
n_violators = violator_mask.sum()
if options.get('mode','batchtrain') == 'batchtrain':
cost = [-((T.log(probMatch.diagonal())* (1+2.0*violator_mask)).sum())/probMatch.shape[0]]
else:
cost = [-(T.log(probMatch[0,posSamp]).sum())/posSamp.shape[0]]
cost.append(n_violators)
cost.append((probMatch.diagonal() - probMatch[batch_idces,opponents]))
f_pred_sim_prob = theano.function(prior_inp_list + inp_list, [probMatchImg, probMatchAux, probMatch, opponents], name='f_pred_sim_prob')
f_pred_sim_scr = theano.function(prior_inp_list + inp_list[:2], sim_score, name='f_pred_sim_scr')
f_sent_emb = theano.function(inp_list[:1], cnn_out, name='f_sent_emb')
if options.get('mode','batchtrain') != 'batchtrain':
inp_list.append(posSamp)
return use_noise, inp_list, [f_pred_sim_prob, f_pred_sim_scr, f_sent_emb], cost, sim_score, tparams
# ========================================================================================
    # BUILD CNN evaluator forward propagation model taking direct inputs from the lstm generator
def build_advers_eval(self, tparams, options, gen_inp_list=None, gen_out=None, genUpdates = None, genLens = None):
trng = RandomStreams()
#n_words= xWRef.shape[1]
zero_guy = T.alloc(numpy_floatX(0.),1,self.word_encoding_size)
Word_Vecs = T.concatenate([zero_guy, tparams['Wemb']],axis=0)
#Word_Vecs = tparams['Wemb']
#Word_Vecs = tparams['Wemb']
# These are of dimensions B x n_samp x time x Vocab
if gen_out == None:
discrim_inp = T.tensor4(name='disc_inp')
inp_list = [discrim_inp]
n_ref_samps = discrim_inp.shape[0]
else:
refData_inp = tensor.tensor4(name='disc_ref_inp')
n_ref_samps = refData_inp.shape[0]
n_words = refData_inp.shape[2]
n_gen_words = gen_out.shape[2]
z_shape = list(gen_out.shape)
z_shape[2] = n_words - n_gen_words
gen_pad = ifelse(tensor.gt(n_words, n_gen_words), tensor.concatenate([gen_out,
tensor.zeros(z_shape)], axis=2), gen_out)
discrim_inp = tensor.concatenate([refData_inp, gen_pad], axis=0)
inp_list = [refData_inp]
# Embed this input into size B x n_samp x time x word_vec_dim
embW = T.dot(discrim_inp,Word_Vecs)
#embGen = ifelse(tensor.gt(n_words, n_gen_words),tensor.concatenate([gen_out,theano.tensor.alloc(numpy_floatX(0.),n_words-n_gen_words,self.word_encoding_size)], axis=0),gen_out)
#embGen = tensor.shape_padleft(embGen, n_ones=2)
#embWRef = Words[xWRef.flatten()].reshape([options['eval_batch_size'], 1, n_words, self.word_encoding_size])
#embW = tensor.concatenate([embWRef, embGen], axis=0)
max_sent_len = options.get('maxlen',0)
layer1_inputs = []
for i,filter_h in enumerate(self.filter_hs):
pool_size = (max_sent_len-filter_h+1,1)
self.conv_layers[i].build(embW, poolsize = pool_size)
# flatten all the filter outputs to a single vector
cout = self.conv_layers[i].output.flatten(2)
layer1_inputs.append(cout)
layer1_input = T.concatenate(layer1_inputs,axis=1)
        # Now apply dropout on the cnn output
if options.get('use_dropout',0):
cnn_out = dropout_layer(layer1_input, self.use_noise, trng, options['drop_prob_eval'],layer1_input.shape)
else:
cnn_out = layer1_input
# Now transform this into a sent embedding
sent_emb = T.dot(cnn_out, tparams['Wfc_sent'])# + tparams['bfc_sent']
# Add a nonlinearity here
#sent_emb = nonLinLayer(sent_emb, layer_type=options['conv_non_linear'])
# Now to embed the image feature vector and calculate a similarity score
if gen_out == None:
xImg = T.matrix('xI', dtype=config.floatX)
else:
xImg = gen_inp_list[0]
#Compute Image embedding:
embImg = T.dot(xImg, tparams['WIemb'])# + tparams['b_Img']
# Add a nonlinearity here
#embImg = nonLinLayer(embImg, layer_type=options['conv_non_linear'])
#if options.get('use_dropout',0):
# embImg = dropout_layer(embImg, self.use_noise, trng, options['drop_prob_eval'],embImg.shape)
#else:
# embImg = embImg
#m_img = l2norm(tensor.dot(embImg, tparams['Wm_img']))
#m_sent = l2norm(tensor.dot(sent_emb, tparams['Wm_sent']))
m_img = l2norm(embImg)
m_sent = l2norm(sent_emb)
#Now time to merge them
#merge_out = m_img * m_sent + tparams['b_m']
#merge_out = nonLinLayer(merge_out, layer_type=options['conv_non_linear'])
scores = T.dot(m_img, m_sent.T)
#merge_out = nonLinLayer(merge_out, layer_type='sigm')
# Final output layer
#p_out = nonLinLayer(tensor.dot(merge_out, tparams['W_out']), layer_type='sigm')
p_out = (scores.diagonal())
if gen_out !=None:
p_out = T.concatenate([p_out, 0.5*(scores[:,n_ref_samps:].diagonal()+1.0)])
#p_out = nonLinLayer(5.0*scores.diagonal(), layer_type='sigm').flatten()
if gen_out !=None:
for inp in gen_inp_list:
if inp not in inp_list:
inp_list.append(inp)
print inp_list
else:
inp_list.append(xImg)
xTarg = T.fvector('targ')
inp_list.append(xTarg)
#import pdb;pdb.set_trace()
if options.get('eval_loss','contrastive')=='contrastive':
#costEval, ic_s, ic_i = self.contrastive_loss(m_img, m_sent)
probMatch = T.nnet.softmax(scores*2.0)
costEval = -((T.log(probMatch[:,:n_ref_samps].diagonal())*xTarg).sum())
if gen_out !=None:
costGen = -((T.log(probMatch[:,n_ref_samps:].diagonal())).sum())
# Also minimize the probability assigned to the generated fake samples
#costEval += ((T.log(probMatch[:,n_ref_samps:].diagonal())).sum())
else:
costGen = []
ic_s = probMatch
ic_i = probMatch
elif options.get('eval_loss','contrastive')=='wass':
costEval = (scores[:,:n_ref_samps].diagonal()*xTarg).mean() - (scores[:,:n_ref_samps].diagonal()*(1.-xTarg)).mean()
if gen_out !=None:
costGen = -(scores[:,n_ref_samps:].diagonal()).mean()
costEval += costGen
costEval = -costEval
ic_s = costEval
ic_i = costEval
#regularize
if options.get('regc',0.) > 0.:
self.reg_cost = theano.shared(numpy_floatX(0.), name='reg_c')
reg_c = T.as_tensor_variable(numpy_floatX(options['regc']), name='reg_c')
for p in self.regularize:
self.reg_cost = self.reg_cost+(self.model_th[p] ** 2).sum()
self.reg_cost *= 0.5 * reg_c
costEval += (self.reg_cost /options['batch_size'])
f_pred_cost = theano.function(inp_list, costEval, name='f_pred_sim_scr', updates=genUpdates)
f_pred_sim_prob = theano.function(inp_list[:-1], [p_out], name='f_pred_sim_prob', updates=genUpdates)
#f_pred_sim_prob = theano.function(inp_list, [p_out, sent_emb, m_img, m_sent, embW, ic_s, ic_i, self.reg_cost], name='f_pred_sim_prob')
f_sent_emb = theano.function(inp_list[:-1], [m_sent, m_img, scores], name='f_sent_emb', updates=genUpdates)
return inp_list, [f_pred_sim_prob, f_pred_cost, f_sent_emb], [costEval, costGen], p_out, tparams
def contrastive_loss(self, im, s, margin=0.1):
"""
Compute contrastive loss
"""
# compute image-sentence score matrix
scores = T.dot(im, s.T)
diagonal = scores.diagonal()
# compare every diagonal score to scores in its column (i.e, all contrastive images for each sentence)
cost_s = T.maximum(0, margin - diagonal + scores)
# compare every diagonal score to scores in its row (i.e, all contrastive sentences for each image)
cost_im = T.maximum(0, margin - diagonal.reshape((-1, 1)) + scores)
# clear diagonals
cost_s = fill_diagonal(cost_s, 0.)
cost_im = fill_diagonal(cost_im, 0.)
return cost_s.sum() + cost_im.sum(), cost_s, cost_im
# ========================================================================================
####################################################################################
# Defines the convolution layer on sentences.
# -- Input is word embeddings stacked as a n_word * enc_size "image"
# -- Filters are all of width equal to enc_size, height varies (3,4,5 grams etc.)
# -- Also pooling is taking max over entire filter output, i.e each filter output
# is converted to a single number!
# -- Output is stacking all the filter outputs to a single vector,
# sz = (batch-size, n_filters)
####################################################################################
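    # Shape walk-through (illustrative numbers, not from the original code): with
    # embW of shape (batch, 1, n_words=20, enc=512) and a filter of height 3, conv2d
    # yields (batch, n_fmaps_psz, 18, 1); max-pooling over (18, 1) leaves
    # (batch, n_fmaps_psz, 1, 1), and flattening plus concatenating the outputs of all
    # filter heights gives (batch, n_fmaps_psz * len(filter_hs)), the input to Wfc_sent.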
def sent_conv_layer(self, tparams, options, embW, batch_size, use_noise, trng, n_samp=1):
# Used for dropout.
rng = np.random.RandomState()
max_sent_len = options.get('maxlen',0)
filter_shapes = []
self.conv_layers = []
pool_sizes = []
filter_w = self.word_encoding_size
layer1_inputs = []
for filter_h in self.filter_hs:
filter_shapes.append((self.n_fmaps_psz, n_samp, filter_h, filter_w))
if max_sent_len > 0:
image_shape = [batch_size, n_samp, max_sent_len, self.word_encoding_size]
else:
image_shape = None
pool_sizes.append((max_sent_len-filter_h+1, self.word_encoding_size-filter_w+1))
            conv_layer = LeNetConvPoolLayer(rng, input_x=embW, image_shape=image_shape, filter_shape=filter_shapes[-1],
poolsize=pool_sizes[-1], non_linear=options['conv_non_linear'])
# flatten all the filter outputs to a single vector
cout = conv_layer.output.flatten(2)
self.conv_layers.append(conv_layer)
layer1_inputs.append(cout)
self.updateP.update(conv_layer.params)
self.regularize.extend(conv_layer.regularize)
tparams.update(conv_layer.params)
layer1_input = T.concatenate(layer1_inputs,axis=1)
        # Now apply dropout on the cnn output
if options.get('use_dropout',0):
cnn_out = dropout_layer(layer1_input, use_noise, trng, options['drop_prob_cnn'],layer1_input.shape)
else:
cnn_out = layer1_input
        # Now transform this into a sent embedding ('bfc_sent' is commented out in __init__, so no bias)
        sent_emb = T.dot(cnn_out, tparams['Wfc_sent'])
return sent_emb, cnn_out, tparams
# ========================================================================================
class LeNetConvPoolLayer(object):
"""Pool Layer of a convolutional network """
def __init__(self, rng, input_x, filter_shape, image_shape, poolsize=(2, 2), non_linear="tanh"):
"""
Allocate a LeNetConvPoolLayer with shared variable internal parameters.
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dtensor4
:param input: symbolic image tensor, of shape image_shape
:type filter_shape: tuple or list of length 4
:param filter_shape: (number of filters, num input feature maps,
filter height,filter width)
:type image_shape: tuple or list of length 4
:param image_shape: (batch size, num input feature maps,
image height, image width)
:type poolsize: tuple or list of length 2
:param poolsize: the downsampling (pooling) factor (#rows,#cols)
"""
# assert image_shape[1] == filter_shape[1]
self.filter_shape = filter_shape
self.image_shape = image_shape
self.poolsize = poolsize
self.non_linear = non_linear
self.max_pool_method = 'downsamp'
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = np.prod(filter_shape[1:])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) /np.prod(poolsize))
# initialize weights with random weights
if self.non_linear=="none" or self.non_linear=="relu":
self.W = theano.shared(np.asarray(rng.uniform(low=-0.01,high=0.01,size=filter_shape),
dtype=config.floatX),name="W_conv")
else:
W_bound = np.sqrt(6. / (fan_in + fan_out))
self.W = theano.shared(np.asarray(rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
dtype=config.floatX),name="W_conv")
b_values = np.zeros((filter_shape[0],), dtype=config.floatX)
self.b = theano.shared(value=b_values, name="b_conv")
# convolve input feature maps with filters
conv_out = conv.conv2d(input=input_x, filters=self.W,filter_shape=self.filter_shape, image_shape=self.image_shape)
if self.non_linear=="tanh":
conv_out_tanh = T.tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
self.output = myMaxPool(conv_out_tanh, ps=self.poolsize, method=self.max_pool_method)
elif self.non_linear=="relu":
conv_out_tanh = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
self.output = myMaxPool(conv_out_tanh, ps=self.poolsize, method=self.max_pool_method)
else:
pooled_out = myMaxPool(conv_out, ps=self.poolsize, method=self.max_pool_method)
self.output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
self.params = {}
self.params['CNN_W_h' + str(filter_shape[2]) + '_w' +str(filter_shape[3])] = self.W
self.params['CNN_b_h' + str(filter_shape[2]) + '_w' +str(filter_shape[3])] = self.b
self.regularize = ['CNN_W_h' + str(filter_shape[2]) + '_w' +str(filter_shape[3])]
def predict(self, new_data, batch_size):
"""
predict for new data
"""
img_shape = (batch_size, 1, self.image_shape[2], self.image_shape[3])
conv_out = conv.conv2d(input=new_data, filters=self.W, filter_shape=self.filter_shape, image_shape=img_shape)
if self.non_linear=="tanh":
conv_out_tanh = Tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
output = myMaxPool(conv_out_tanh, ps=self.poolsize, method=self.max_pool_method)
if self.non_linear=="relu":
conv_out_tanh = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
output = myMaxPool(conv_out_tanh, ps=self.poolsize, method=self.max_pool_method)
else:
pooled_out = myMaxPool(conv_out, ps=self.poolsize, method=self.max_pool_method)
output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
return output
# ========================================================================================
class batch2DConvPoolLayer(object):
"""Pool Layer of a convolutional network """
def __init__(self, filter_shape, poolsize=(2, 2), non_linear="tanh"):
"""
Allocate a 3D conv layer with shared variable internal parameters.
:type input: theano.tensor.dtensor4
:param input: symbolic image tensor, of shape image_shape
:type filter_shape: tuple or list of length 4
:param filter_shape: (number of filters, num input feature maps,
filter height,filter width)
:type image_shape: tuple or list of length 4
:param image_shape: (batch size, num input feature maps,
image height, image width)
:type poolsize: tuple or list of length 2
:param poolsize: the downsampling (pooling) factor (#rows,#cols)
"""
# assert image_shape[1] == filter_shape[1]
self.filter_shape = filter_shape
self.poolsize = poolsize
self.non_linear = non_linear
self.max_pool_method = 'downsamp'
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = np.prod(filter_shape[1:])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) /np.prod(poolsize))
# initialize weights with random weights
if self.non_linear=="none" or self.non_linear=="relu":
self.W = theano.shared(np.asarray(np.random.uniform(low=-0.01,high=0.01,size=filter_shape),
dtype=config.floatX),name="W_conv")
else:
W_bound = np.sqrt(6. / (fan_in + fan_out))
self.W = theano.shared(np.asarray(np.random.uniform(low=-W_bound, high=W_bound, size=filter_shape),
dtype=config.floatX),name="W_conv")
b_values = np.zeros((filter_shape[0],), dtype=config.floatX)
self.b = theano.shared(value=b_values, name="b_conv")
self.params = {}
self.params['CNN_W_h' + str(filter_shape[2]) + '_w' +str(filter_shape[3])] = self.W
self.params['CNN_b_h' + str(filter_shape[2]) + '_w' +str(filter_shape[3])] = self.b
self.regularize = ['CNN_W_h' + str(filter_shape[2]) + '_w' +str(filter_shape[3])]
def build(self, input_x, poolsize=(2, 2)):
# convolve input feature maps with filters
conv_out = tnnet.conv2d(input=input_x, filters=self.W,filter_shape=self.filter_shape)
if self.non_linear=="tanh":
conv_out_tanh = T.tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
self.output = myMaxPool(conv_out_tanh, ps=self.poolsize, method=self.max_pool_method)
elif self.non_linear=="relu":
conv_out_tanh = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
self.output = myMaxPool(conv_out_tanh, ps=self.poolsize, method=self.max_pool_method)
else:
pooled_out = myMaxPool(conv_out, ps=self.poolsize, method=self.max_pool_method)
self.output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
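# Note (added for clarity): W_bound above follows the Glorot/Bengio uniform
# initialisation, W ~ U(-sqrt(6/(fan_in+fan_out)), +sqrt(6/(fan_in+fan_out))).
# Illustrative numbers (hypothetical shapes, not taken from this code):
# filter_shape = (100, 1, 3, 300) with poolsize = (2, 1) gives
# fan_in = 1*3*300 = 900, fan_out = 100*3*300/2 = 45000,
# so W_bound = sqrt(6/45900) ~= 0.0114.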
|
11597391
|
import pytest
from mitmproxy.net import http
from mitmproxy.net import websockets
class TestUtils:
def test_client_handshake_headers(self):
h = websockets.client_handshake_headers(version='42')
assert h['sec-websocket-version'] == '42'
h = websockets.client_handshake_headers(key='some-key')
assert h['sec-websocket-key'] == 'some-key'
h = websockets.client_handshake_headers(protocol='foobar')
assert h['sec-websocket-protocol'] == 'foobar'
h = websockets.client_handshake_headers(extensions='foo; bar')
assert h['sec-websocket-extensions'] == 'foo; bar'
def test_server_handshake_headers(self):
h = websockets.server_handshake_headers('some-key')
assert h['sec-websocket-accept'] == '8iILEZtcVdtFD7MDlPKip9ec9nw='
assert 'sec-websocket-protocol' not in h
assert 'sec-websocket-extensions' not in h
h = websockets.server_handshake_headers('some-key', 'foobar', 'foo; bar')
assert h['sec-websocket-accept'] == '8iILEZtcVdtFD7MDlPKip9ec9nw='
assert h['sec-websocket-protocol'] == 'foobar'
assert h['sec-websocket-extensions'] == 'foo; bar'
@pytest.mark.parametrize("input,expected", [
([(b'connection', b'upgrade'), (b'upgrade', b'websocket'), (b'sec-websocket-key', b'foobar')], True),
([(b'connection', b'upgrade'), (b'upgrade', b'websocket'), (b'sec-websocket-accept', b'foobar')], True),
([(b'Connection', b'UpgRaDe'), (b'Upgrade', b'WebSocKeT'), (b'Sec-WebSockeT-KeY', b'foobar')], True),
([(b'Connection', b'UpgRaDe'), (b'Upgrade', b'WebSocKeT'), (b'Sec-WebSockeT-AccePt', b'foobar')], True),
([(b'connection', b'foo'), (b'upgrade', b'bar'), (b'sec-websocket-key', b'foobar')], False),
([(b'connection', b'upgrade'), (b'upgrade', b'websocket')], False),
([(b'connection', b'upgrade'), (b'sec-websocket-key', b'foobar')], False),
([(b'upgrade', b'websocket'), (b'sec-websocket-key', b'foobar')], False),
([], False),
])
def test_check_handshake(self, input, expected):
h = http.Headers(input)
assert websockets.check_handshake(h) == expected
@pytest.mark.parametrize("input,expected", [
([(b'sec-websocket-version', b'13')], True),
([(b'Sec-WebSockeT-VerSion', b'13')], True),
([(b'sec-websocket-version', b'9')], False),
([(b'sec-websocket-version', b'42')], False),
([(b'sec-websocket-version', b'')], False),
([], False),
])
def test_check_client_version(self, input, expected):
h = http.Headers(input)
assert websockets.check_client_version(h) == expected
@pytest.mark.parametrize("input,expected", [
('foobar', b'AzhRPA4TNwR6I/riJheN0TfR7+I='),
(b'foobar', b'AzhRPA4TNwR6I/riJheN0TfR7+I='),
])
def test_create_server_nonce(self, input, expected):
assert websockets.create_server_nonce(input) == expected
@pytest.mark.parametrize("input,expected", [
([(b'sec-websocket-extensions', b'foo; bar')], 'foo; bar'),
([(b'Sec-WebSockeT-ExteNsionS', b'foo; bar')], 'foo; bar'),
([(b'sec-websocket-extensions', b'')], ''),
([], None),
])
def test_get_extensions(self, input, expected):
h = http.Headers(input)
assert websockets.get_extensions(h) == expected
@pytest.mark.parametrize("input,expected", [
([(b'sec-websocket-protocol', b'foobar')], 'foobar'),
([(b'Sec-WebSockeT-ProTocoL', b'foobar')], 'foobar'),
([(b'sec-websocket-protocol', b'')], ''),
([], None),
])
def test_get_protocol(self, input, expected):
h = http.Headers(input)
assert websockets.get_protocol(h) == expected
@pytest.mark.parametrize("input,expected", [
([(b'sec-websocket-key', b'foobar')], 'foobar'),
([(b'Sec-WebSockeT-KeY', b'foobar')], 'foobar'),
([(b'sec-websocket-key', b'')], ''),
([], None),
])
def test_get_client_key(self, input, expected):
h = http.Headers(input)
assert websockets.get_client_key(h) == expected
@pytest.mark.parametrize("input,expected", [
([(b'sec-websocket-accept', b'foobar')], 'foobar'),
([(b'Sec-WebSockeT-AccepT', b'foobar')], 'foobar'),
([(b'sec-websocket-accept', b'')], ''),
([], None),
])
def test_get_server_accept(self, input, expected):
h = http.Headers(input)
assert websockets.get_server_accept(h) == expected
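# Background note (added): per RFC 6455, the Sec-WebSocket-Accept value asserted
# above is base64(SHA-1(client_key + magic GUID)); presumably this is what
# websockets.create_server_nonce implements. A minimal standalone sketch of the
# derivation (names below are illustrative, not part of mitmproxy):
import base64
import hashlib

_WS_MAGIC = b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11"

def _example_server_nonce(key: bytes) -> bytes:
    # RFC 6455, section 4.2.2: concatenate key and GUID, SHA-1, then base64-encode.
    return base64.b64encode(hashlib.sha1(key + _WS_MAGIC).digest())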
|
11597414
|
from .testing import GraphQLTestCase
from .utils import (
DJANGO_FILTER_INSTALLED,
camelize,
get_model_fields,
get_reverse_fields,
is_valid_django_model,
maybe_queryset,
)
__all__ = [
"DJANGO_FILTER_INSTALLED",
"get_reverse_fields",
"maybe_queryset",
"get_model_fields",
"camelize",
"is_valid_django_model",
"GraphQLTestCase",
]
|
11597434
|
import lyrebird
import os
import json
import codecs
storage = lyrebird.get_plugin_storage()
CONFIG_FILE = os.path.abspath(os.path.join(storage, 'conf.json'))
DEFAULT_CONF_FILE = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', './default_conf/conf.json'))
class Config:
def __init__(self):
self.base_ssh = None
self.base_path = None
def load():
if not os.path.exists(CONFIG_FILE):
f_from = codecs.open(DEFAULT_CONF_FILE, 'r', 'utf-8')
f_to = codecs.open(CONFIG_FILE, 'w', 'utf-8')
f_to.write(f_from.read())
f_to.close()
f_from.close()
conf_data = json.loads(codecs.open(CONFIG_FILE, 'r', 'utf-8').read())
conf = Config()
conf.__dict__ = conf_data
return conf
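# Illustrative usage (hypothetical; assumes this module is imported as `config`
# from inside a Lyrebird plugin, and that conf.json defines base_ssh/base_path):
#
#   conf = config.load()
#   print(conf.base_ssh, conf.base_path)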
|
11597493
|
import os
import subprocess
import time
def parallel_submitter(cmd_list, log_dir='./logs', logfilename_list=None, num_workers=None):
"""
Run subprocesses in parallel
Import (after installing PMP):
>> from pcmdi_metrics.misc.scripts import parallel_submitter
Inputs:
- cmd_list: python list of command lines, e.g.,
['python abc.py -p ../../test_param.py -m model1',
'python abc.py -p ../../test_param.py -m model2',
:
'python abc.py -p ../../test_param.py -m model100']
- log_dir: string for directory path for log files, e.g.,
'/a/b'
default = './logs'
- logfilename_list: python list of pull path of log files, e.g.,
['log_model1',
'log_model2',
:
'log_model100']
In case it was not given, automatically generated as 'log_process_N' (N: process index number)
- num_workers: integer number that limits how many process to be submitted at one time
default: 20% of all CPUs of the current computer
Outputs:
- processes running in parallel
- log files in log_dir
- Each process generates two log files: stdout and stderr
"""
# ------------------------------------------------------
# some env. setups...
# ------------------------------------------------------
# To avoid below error
# OpenBLAS blas_thread_init: pthread_create failed for thread XX of 96: Resource temporarily unavailable
os.environ['OPENBLAS_NUM_THREADS'] = '1'
# Must be done before any CDAT library is called.
# https://github.com/CDAT/cdat/issues/2213
if 'UVCDAT_ANONYMOUS_LOG' not in os.environ:
os.environ['UVCDAT_ANONYMOUS_LOG'] = 'no'
# ------------------------------------------------------
os.makedirs(log_dir, exist_ok=True)
if num_workers is None:
        num_workers = max(1, int(os.cpu_count() * 0.2))  # default: use ~20% of available CPUs, at least 1
print('Number of employed CPUs for subprocesses:', num_workers)
print("Parallel process start: %s" % time.ctime())
processes = list()
    for index, process in enumerate(cmd_list):
        print(index, ':', process)
        # LOG FILE
        if logfilename_list is None:
            log_file = os.path.join(log_dir, 'log_process_' + str(index))
        else:
            log_file = os.path.join(log_dir, logfilename_list[index])
# SUBMIT PROCESS
with open(log_file+"_stdout.txt", "wb") as out, open(log_file+"_stderr.txt", "wb") as err:
p = subprocess.Popen(process.split(' '), stdout=out, stderr=err)
processes.append(p)
# WAIT FOR NEXT SUBMIT
if len(processes) == num_workers:
wait = True
while wait:
done, num = check_for_done(processes)
if done:
processes.pop(num)
wait = False
if index != len(cmd_list)-1:
print("Launching next process in cmd_list: %s" % time.ctime())
else:
time.sleep(2) # set this so the CPU does not go crazy
# DONE
print("Parallel process completed: %s" % time.ctime())
def check_for_done(processes):
for i, p in enumerate(processes):
if p.poll() is not None:
return True, i # subprocess finished
    return False, False  # subprocess not finished
def main():
cmd_list = ['expr 1 + ' + str(r) for r in range(1, 10)]
logfilename_list = ['log_' + str(r) for r in range(1, 10)]
for (process, log_file) in zip(cmd_list, logfilename_list):
print(process, '\t', log_file)
num_workers = 2
parallel_submitter(cmd_list, logfilename_list=logfilename_list, num_workers=num_workers)
if __name__ == "__main__":
main()
|
11597508
|
import numpy as np
import pyflux as pf
noise = np.random.normal(0,1,40)
data = np.zeros(40)
for i in range(1,len(data)):
data[i] = 0.9*data[i-1] + noise[i]
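# The series above is a simulated AR(1) process, y_t = 0.9 * y_{t-1} + e_t with
# e_t ~ N(0, 1); it is reused by every test below. The asserted latent-variable
# counts (3 for SquaredExponential/OrnsteinUhlenbeck/Periodic, 4 for
# RationalQuadratic) come from the assertions in the tests themselves.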
def test_couple_terms():
"""
    Tests a GPNARX model with 1 AR term and that
the latent variable list length is correct, and that the estimated
latent variables are not nan
"""
model = pf.GPNARX(data=data, ar=1, kernel=pf.SquaredExponential())
x = model.fit()
assert(len(model.latent_variables.z_list) == 3)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_couple_terms_integ():
"""
    Tests a GPNARX model with 1 AR term, integrated once, and that
the latent variable list length is correct, and that the estimated
latent variables are not nan
"""
model = pf.GPNARX(data=data, ar=1, integ=1, kernel=pf.SquaredExponential())
x = model.fit()
assert(len(model.latent_variables.z_list) == 3)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_bbvi():
"""
    Tests a GPNARX model estimated with BBVI and that the length of the latent variable
list is correct, and that the estimated latent variables are not nan
"""
model = pf.GPNARX(data=data, ar=1, kernel=pf.SquaredExponential())
x = model.fit('BBVI',iterations=100)
assert(len(model.latent_variables.z_list) == 3)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_mh():
"""
    Tests a GPNARX model estimated with Metropolis-Hastings and that the length of the
latent variable list is correct, and that the estimated latent variables are not nan
"""
model = pf.GPNARX(data=data, ar=1, kernel=pf.SquaredExponential())
x = model.fit('M-H',nsims=300)
assert(len(model.latent_variables.z_list) == 3)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_pml():
"""
    Tests a GPNARX model estimated with PML (Laplace approximation) and that the length of the
latent variable list is correct, and that the estimated latent variables are not nan
"""
model = pf.GPNARX(data=data, ar=1, kernel=pf.SquaredExponential())
x = model.fit('PML')
assert(len(model.latent_variables.z_list) == 3)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_predict_length():
"""
Tests that the prediction dataframe length is equal to the number of steps h
"""
model = pf.GPNARX(data=data, ar=2, kernel=pf.SquaredExponential())
x = model.fit()
x.summary()
assert(model.predict(h=5).shape[0] == 5)
def test_predict_is_length():
"""
Tests that the prediction IS dataframe length is equal to the number of steps h
"""
model = pf.GPNARX(data=data, ar=2, kernel=pf.SquaredExponential())
x = model.fit()
assert(model.predict_is(h=5).shape[0] == 5)
def test_predict_nans():
"""
Tests that the predictions are not nans
"""
model = pf.GPNARX(data=data, ar=2, kernel=pf.SquaredExponential())
x = model.fit()
x.summary()
assert(len(model.predict(h=5).values[np.isnan(model.predict(h=5).values)]) == 0)
def test_predict_is_nans():
"""
Tests that the in-sample predictions are not nans
"""
model = pf.GPNARX(data=data, ar=2, kernel=pf.SquaredExponential())
x = model.fit()
x.summary()
assert(len(model.predict_is(h=5).values[np.isnan(model.predict_is(h=5).values)]) == 0)
def test_ou_couple_terms():
"""
    Tests a GPNARX model with 1 AR term and that
the latent variable list length is correct, and that the estimated
latent variables are not nan
"""
model = pf.GPNARX(data=data, ar=1, kernel=pf.OrnsteinUhlenbeck())
x = model.fit()
assert(len(model.latent_variables.z_list) == 3)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_ou_couple_terms_integ():
"""
    Tests a GPNARX model with 1 AR term, integrated once, and that
the latent variable list length is correct, and that the estimated
latent variables are not nan
"""
model = pf.GPNARX(data=data, ar=1, integ=1, kernel=pf.OrnsteinUhlenbeck())
x = model.fit()
assert(len(model.latent_variables.z_list) == 3)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_ou_bbvi():
"""
    Tests a GPNARX model estimated with BBVI and that the length of the latent variable
list is correct, and that the estimated latent variables are not nan
"""
model = pf.GPNARX(data=data, ar=1, kernel=pf.OrnsteinUhlenbeck())
x = model.fit('BBVI',iterations=100)
assert(len(model.latent_variables.z_list) == 3)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_ou_mh():
"""
    Tests a GPNARX model estimated with Metropolis-Hastings and that the length of the
latent variable list is correct, and that the estimated latent variables are not nan
"""
model = pf.GPNARX(data=data, ar=1, kernel=pf.OrnsteinUhlenbeck())
x = model.fit('M-H',nsims=300)
assert(len(model.latent_variables.z_list) == 3)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_ou_pml():
"""
    Tests a GPNARX model estimated with PML (Laplace approximation) and that the length of the
latent variable list is correct, and that the estimated latent variables are not nan
"""
model = pf.GPNARX(data=data, ar=1, kernel=pf.OrnsteinUhlenbeck())
x = model.fit('PML')
assert(len(model.latent_variables.z_list) == 3)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_ou_predict_length():
"""
Tests that the prediction dataframe length is equal to the number of steps h
"""
model = pf.GPNARX(data=data, ar=2, kernel=pf.OrnsteinUhlenbeck())
x = model.fit()
x.summary()
assert(model.predict(h=5).shape[0] == 5)
def test_ou_predict_is_length():
"""
Tests that the prediction IS dataframe length is equal to the number of steps h
"""
model = pf.GPNARX(data=data, ar=2, kernel=pf.OrnsteinUhlenbeck())
x = model.fit()
assert(model.predict_is(h=5).shape[0] == 5)
def test_ou_predict_nans():
"""
Tests that the predictions are not nans
"""
model = pf.GPNARX(data=data, ar=2, kernel=pf.OrnsteinUhlenbeck())
x = model.fit()
x.summary()
assert(len(model.predict(h=5).values[np.isnan(model.predict(h=5).values)]) == 0)
def test_ou_predict_is_nans():
"""
Tests that the in-sample predictions are not nans
"""
model = pf.GPNARX(data=data, ar=2, kernel=pf.OrnsteinUhlenbeck())
x = model.fit()
x.summary()
assert(len(model.predict_is(h=5).values[np.isnan(model.predict_is(h=5).values)]) == 0)
def test_rq_couple_terms():
"""
    Tests a GPNARX model with 1 AR term and that
the latent variable list length is correct, and that the estimated
latent variables are not nan
"""
model = pf.GPNARX(data=data, ar=1, kernel=pf.RationalQuadratic())
x = model.fit()
assert(len(model.latent_variables.z_list) == 4)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_rq_couple_terms_integ():
"""
    Tests a GPNARX model with 1 AR term, integrated once, and that
the latent variable list length is correct, and that the estimated
latent variables are not nan
"""
model = pf.GPNARX(data=data, ar=1, integ=1, kernel=pf.RationalQuadratic())
x = model.fit()
assert(len(model.latent_variables.z_list) == 4)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_rq_bbvi():
"""
    Tests a GPNARX model estimated with BBVI and that the length of the latent variable
list is correct, and that the estimated latent variables are not nan
"""
model = pf.GPNARX(data=data, ar=1, kernel=pf.RationalQuadratic())
x = model.fit('BBVI',iterations=100)
assert(len(model.latent_variables.z_list) == 4)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_rq_mh():
"""
    Tests a GPNARX model estimated with Metropolis-Hastings and that the length of the
latent variable list is correct, and that the estimated latent variables are not nan
"""
model = pf.GPNARX(data=data, ar=1, kernel=pf.RationalQuadratic())
x = model.fit('M-H',nsims=300)
assert(len(model.latent_variables.z_list) == 4)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_rq_pml():
"""
    Tests a GPNARX model estimated with PML (Laplace approximation) and that the length of the
latent variable list is correct, and that the estimated latent variables are not nan
"""
model = pf.GPNARX(data=data, ar=1, kernel=pf.RationalQuadratic())
x = model.fit('PML')
assert(len(model.latent_variables.z_list) == 4)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_rq_predict_length():
"""
Tests that the prediction dataframe length is equal to the number of steps h
"""
model = pf.GPNARX(data=data, ar=2, kernel=pf.RationalQuadratic())
x = model.fit()
x.summary()
assert(model.predict(h=5).shape[0] == 5)
def test_rq_predict_is_length():
"""
Tests that the prediction IS dataframe length is equal to the number of steps h
"""
model = pf.GPNARX(data=data, ar=2, kernel=pf.RationalQuadratic())
x = model.fit()
assert(model.predict_is(h=5).shape[0] == 5)
def test_rq_predict_nans():
"""
Tests that the predictions are not nans
"""
model = pf.GPNARX(data=data, ar=2, kernel=pf.RationalQuadratic())
x = model.fit()
x.summary()
assert(len(model.predict(h=5).values[np.isnan(model.predict(h=5).values)]) == 0)
def test_rq_predict_is_nans():
"""
Tests that the in-sample predictions are not nans
"""
model = pf.GPNARX(data=data, ar=2, kernel=pf.RationalQuadratic())
x = model.fit()
x.summary()
assert(len(model.predict_is(h=5).values[np.isnan(model.predict_is(h=5).values)]) == 0)
def test_per_couple_terms():
"""
    Tests a GPNARX model with 1 AR term and that
the latent variable list length is correct, and that the estimated
latent variables are not nan
"""
model = pf.GPNARX(data=data, ar=1, kernel=pf.Periodic())
x = model.fit()
assert(len(model.latent_variables.z_list) == 3)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_per_couple_terms_integ():
"""
    Tests a GPNARX model with 1 AR term, integrated once, and that
the latent variable list length is correct, and that the estimated
latent variables are not nan
"""
model = pf.GPNARX(data=data, ar=1, integ=1, kernel=pf.Periodic())
x = model.fit()
assert(len(model.latent_variables.z_list) == 3)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_per_bbvi():
"""
    Tests a GPNARX model estimated with BBVI and that the length of the latent variable
list is correct, and that the estimated latent variables are not nan
"""
model = pf.GPNARX(data=data, ar=1, kernel=pf.Periodic())
x = model.fit('BBVI',iterations=100)
assert(len(model.latent_variables.z_list) == 3)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_per_mh():
"""
    Tests a GPNARX model estimated with Metropolis-Hastings and that the length of the
latent variable list is correct, and that the estimated latent variables are not nan
"""
model = pf.GPNARX(data=data, ar=1, kernel=pf.Periodic())
x = model.fit('M-H',nsims=300)
assert(len(model.latent_variables.z_list) == 3)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_per_pml():
"""
    Tests a GPNARX model estimated with PML (Laplace approximation) and that the length of the
latent variable list is correct, and that the estimated latent variables are not nan
"""
model = pf.GPNARX(data=data, ar=1, kernel=pf.Periodic())
x = model.fit('PML')
assert(len(model.latent_variables.z_list) == 3)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_per_predict_length():
"""
Tests that the prediction dataframe length is equal to the number of steps h
"""
model = pf.GPNARX(data=data, ar=2, kernel=pf.Periodic())
x = model.fit()
x.summary()
assert(model.predict(h=5).shape[0] == 5)
def test_per_predict_is_length():
"""
Tests that the prediction IS dataframe length is equal to the number of steps h
"""
model = pf.GPNARX(data=data, ar=2, kernel=pf.Periodic())
x = model.fit()
assert(model.predict_is(h=5).shape[0] == 5)
def test_per_predict_nans():
"""
Tests that the predictions are not nans
"""
model = pf.GPNARX(data=data, ar=2, kernel=pf.Periodic())
x = model.fit()
x.summary()
assert(len(model.predict(h=5).values[np.isnan(model.predict(h=5).values)]) == 0)
def test_per_predict_is_nans():
"""
Tests that the in-sample predictions are not nans
"""
model = pf.GPNARX(data=data, ar=2, kernel=pf.Periodic())
x = model.fit()
x.summary()
assert(len(model.predict_is(h=5).values[np.isnan(model.predict_is(h=5).values)]) == 0)
|
11597526
|
from typing import Sequence
from pydantic import conint
from rastervision.pipeline.config import (Config, register_config, Field,
ConfigError, validator)
from rastervision.core.data.raster_source import (RasterSourceConfig,
MultiRasterSource)
@register_config('sub_raster_source')
class SubRasterSourceConfig(Config):
raster_source: RasterSourceConfig = Field(
...,
description=
'A RasterSourceConfig that will provide a subset of the channels.')
target_channels: Sequence[conint(ge=0)] = Field(
...,
description='Channel indices to send each of the channels in this '
'raster source to.')
@validator('target_channels')
def non_empty_target_channels(cls, v):
if len(v) == 0:
raise ConfigError('target_channels should be non-empty.')
return list(v)
def build(self, tmp_dir, use_transformers=True):
rs = self.raster_source.build(tmp_dir, use_transformers)
return rs
@register_config('multi_raster_source')
class MultiRasterSourceConfig(RasterSourceConfig):
raster_sources: Sequence[SubRasterSourceConfig] = Field(
..., description='List of SubRasterSourceConfigs to combine.')
allow_different_extents: bool = Field(
False, description='Allow sub-rasters to have different extents.')
force_same_dtype: bool = Field(
False,
description=
'Force all subchips to be of the same dtype as the first subchip.')
crs_source: conint(ge=0) = Field(
0,
description=
'Use the crs_transformer of the raster source at this index.')
def get_raw_channel_order(self):
# concatenate all target_channels
channel_mappings = sum(
(rs.target_channels for rs in self.raster_sources), [])
# this will be used to index the channel dim of the
# concatenated array to achieve the channel mappings
raw_channel_order = [0] * len(channel_mappings)
for from_idx, to_idx in enumerate(channel_mappings):
raw_channel_order[to_idx] = from_idx
self.validate_channel_mappings(channel_mappings, raw_channel_order)
return raw_channel_order
def validate_channel_mappings(self, channel_mappings: Sequence[int],
raw_channel_order: Sequence[int]):
# validate completeness of mappings
src_inds = set(range(len(channel_mappings)))
tgt_inds = set(channel_mappings)
if src_inds != tgt_inds:
raise ConfigError('Missing mappings for some channels.')
# check compatibility with channel_order, if given
if self.channel_order:
if len(self.channel_order) != len(raw_channel_order):
raise ConfigError(
f'Channel mappings ({raw_channel_order}) and '
f'channel_order ({self.channel_order}) are incompatible.')
@validator('raster_sources')
def validate_raster_sources(cls, v):
if len(v) == 0:
raise ConfigError('raster_sources should be non-empty.')
return v
def build(self, tmp_dir, use_transformers=True):
if use_transformers:
raster_transformers = [t.build() for t in self.transformers]
else:
raster_transformers = []
built_raster_sources = [
rs.build(tmp_dir, use_transformers) for rs in self.raster_sources
]
multi_raster_source = MultiRasterSource(
raster_sources=built_raster_sources,
raw_channel_order=self.get_raw_channel_order(),
force_same_dtype=self.force_same_dtype,
allow_different_extents=self.allow_different_extents,
channel_order=self.channel_order,
crs_source=self.crs_source,
raster_transformers=raster_transformers,
extent_crop=self.extent_crop)
return multi_raster_source
def update(self, pipeline=None, scene=None):
for t in self.transformers:
t.update(pipeline, scene)
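# Worked example of get_raw_channel_order (illustrative values, not from any real
# config): two sub-sources with target_channels [2, 0] and [1] give
# channel_mappings = [2, 0, 1]; filling raw_channel_order[to_idx] = from_idx
# yields [1, 2, 0], so indexing the concatenated chip's channel axis with
# [1, 2, 0] places each sub-source band at the output position it requested.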
|
11597550
|
from decimal import Decimal
from django.db import models
import directory.models as directory
class PriceName(models.Model):
title = models.CharField(max_length=511, unique=True, help_text='Наименование Прайса', db_index=True)
active_status = models.BooleanField(default=True, help_text='Статус активности', db_index=True)
date_start = models.DateField(help_text="Дата начала действия докумена", blank=True, null=True)
date_end = models.DateField(help_text="Дата окончания действия докумена", blank=True, null=True)
research = models.ManyToManyField(directory.Researches, through='PriceCoast', help_text="Услуга-Прайс", blank=True)
def __str__(self):
return "{}".format(self.title)
def status(self):
return self.active_status
class Meta:
verbose_name = 'Прайс - название'
verbose_name_plural = 'Прайс - название'
class PriceCoast(models.Model):
price_name = models.ForeignKey(PriceName, on_delete=models.DO_NOTHING, db_index=True)
research = models.ForeignKey(directory.Researches, on_delete=models.DO_NOTHING, db_index=True)
coast = models.DecimalField(max_digits=10, decimal_places=2)
def __str__(self):
return "{}".format(self.price_name.title)
@staticmethod
def get_coast_from_price(dir_research_loc, price_modifier):
"""
Принимает вид исследования, объект price_modifier: объект прайса, модификатор
на основании прайса получает базовую цену и умножает на модификатор.
Возвращает окончательну цену для записи в issledovaniya
"""
value = 0
if price_modifier:
price_name_loc = price_modifier[0]
price_modifier_loc = price_modifier[1]
try:
d = PriceCoast.objects.values_list('coast').get(price_name=price_name_loc, research_id=dir_research_loc)
res_coast = d[0]
value = (res_coast * price_modifier_loc).quantize(Decimal("1.00"))
except PriceCoast.DoesNotExist:
return value
return value
class Meta:
unique_together = ('price_name', 'research')
verbose_name = 'Прайс - цены'
verbose_name_plural = 'Прайс - цены'
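# Illustrative example of the calculation in PriceCoast.get_coast_from_price
# (hypothetical values): a base price of Decimal('1500.00') with a contract
# modifier of Decimal('1.1') gives
#   (Decimal('1500.00') * Decimal('1.1')).quantize(Decimal('1.00')) == Decimal('1650.00')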
class Contract(models.Model):
title = models.CharField(max_length=511, unique=True, help_text='Наименование организации', db_index=True)
number = models.CharField(max_length=255, blank=True, help_text='Номер договора', db_index=False)
date_start = models.DateField(help_text="Дата начала действия докумена", blank=True, null=True)
date_end = models.DateField(help_text="Дата окончания действия докумена", blank=True, null=True)
price = models.ForeignKey(PriceName, blank=True, null=True, db_index=True, on_delete=models.CASCADE)
modifier = models.DecimalField(max_digits=8, decimal_places=3, default=1, help_text="10000,101")
active_status = models.BooleanField(default=True, help_text='Действующий', db_index=True)
show_in_card = models.BooleanField(default=False, help_text='Показывать в карте пациента', db_index=True)
    main = models.BooleanField(default=False, help_text='По умолчанию действует, если несколько. Можно переназначить', db_index=True)
def __str__(self):
return "{}".format(self.title)
class Company(models.Model):
title = models.CharField(max_length=511, unique=True, help_text='Наименование организации', db_index=True)
short_title = models.CharField(max_length=255, default='', blank=True)
active_status = models.BooleanField(default=True, help_text='Показывать при выборе', db_index=True)
legal_address = models.CharField(max_length=511, default='', blank=True)
fact_address = models.CharField(max_length=511, default='', blank=True)
inn = models.CharField(max_length=12, default=0, blank=True)
ogrn = models.CharField(max_length=13, default=0, blank=True)
kpp = models.CharField(max_length=9, default='', blank=True)
bik = models.CharField(max_length=9, default='', blank=True)
contract = models.ForeignKey(Contract, blank=True, null=True, db_index=True, on_delete=models.CASCADE)
def __str__(self):
return "{}".format(self.title)
def get_price(self):
if self.contract:
return "{}".format(self.contract.price)
else:
return ""
def get_modifier(self):
if self.contract:
return "{}".format(self.contract.modifier)
else:
return ""
|