| id (string, 3-8 chars) | content (string, 100-981k chars) |
|---|---|
11575868
|
import os
import json
from graviteeio_cli.lint.rulesets.gio_apim.functions.gioApimDocumentSchema import gio_apim_Document_Schema
dir_name = os.path.abspath(".") + "/test"
oas_file = "{}/resources/gio_oas/openapi.json".format(dir_name)
def read_api_def(file_name):
with open("{}/lint/gio_oas/{}".format(dir_name, file_name)) as file:
return json.loads(file.read())
def test_gio_apim_api_def():
source = read_api_def("api_def.json")
errors = gio_apim_Document_Schema(source, schema="gio_apim/schemas/schema_gio_apimv3.json")
for error in errors:
print('%s %s' % (error.path, error.message))
assert len(errors) == 0
|
11575889
|
import logging
from flask_jsonpify import jsonify
SERVER_OVERLOADED = 503
NOT_FOUND = 404
TIMEOUT = 504
TOO_MANY_REQUESTS = 429
logger = logging.getLogger(__name__)
def no_content():
return "", 204
def error(status, code, message):
return jsonify({'error': {'code': code, 'message': message}}), status
def not_implemented(message=None):
return error(501, 'not implemented',
message or "Route not implemented yet.")
def model_info_lookup_error(exc):
return bad_request("Model information could not be retrieved for {0}"
.format(exc))
def bad_request(message):
return error(400, 'bad request', message)
def forbidden(message=None):
return error(403, 'forbidden',
message or "This request requires authentication.")
def not_found(message=None):
return error(NOT_FOUND, 'not found',
message or "Nothing found at this location.")
def server_overloaded(message=None):
return error(SERVER_OVERLOADED, 'server overloaded',
message or ("Cannot process your request because the " +
"server is overloaded. Try again in a" +
"few minutes."))
def unknown_error(message):
logger.error(message)
return error(500, 'internal server error', message)
def timeout_error(message=None):
return error(TIMEOUT, 'request_timeout',
message or ("Cannot process your request because the " +
"server timed out."))
def too_many_requests_error(message=None):
return error(TOO_MANY_REQUESTS, 'too_many_requests',
message or ("A limited number of parallel connections per " +
"IP is allowed."))
|
11575894
|
from msl.loadlib import Client64
class Client(Client64):
def __init__(self, module32):
super(Client, self).__init__(module32, timeout=2)
|
11575899
|
import re
from ztag.annotation import Annotation
from ztag.annotation import OperatingSystem
from ztag.annotation import Type
from ztag.annotation import Manufacturer
from ztag import protocols
import ztag.test
class FtpXerox(Annotation):
protocol = protocols.FTP
subprotocol = protocols.FTP.BANNER
port = None
manufact_1_re = re.compile(
"^220 FUJI XEROX Docu((Print)|(Centre))",
re.IGNORECASE
)
product_1_re = re.compile(
"^220 FUJI XEROX ([- _a-zA-Z0-9]+)",
re.IGNORECASE
)
manufact_2_re = re.compile(
"^220 Xerox Phaser",
re.IGNORECASE
)
product_2_re = re.compile(
"^220 Xerox ([- _a-zA-Z0-9]+)",
re.IGNORECASE
)
tests = {
"FtpXerox_1": {
"global_metadata": {
"device_type": Type.GENERIC_PRINTER,
"manufacturer": Manufacturer.XEROX,
"product": "Phaser 6500DN"
}
},
"FtpXerox_2": {
"global_metadata": {
"device_type": Type.GENERIC_PRINTER,
"manufacturer": Manufacturer.XEROX,
"product": "DocuPrint CM305 df",
}
},
}
def process(self, obj, meta):
banner = obj["banner"]
if self.manufact_1_re.search(banner):
meta.global_metadata.device_type = Type.GENERIC_PRINTER
meta.global_metadata.manufacturer = Manufacturer.XEROX
product = self.product_1_re.search(banner).group(1)
meta.global_metadata.product = product
return meta
if self.manufact_2_re.search(banner):
meta.global_metadata.device_type = Type.GENERIC_PRINTER
meta.global_metadata.manufacturer = Manufacturer.XEROX
product = self.product_2_re.search(banner).group(1)
meta.global_metadata.product = product
return meta
""" Tests
"220 Xerox Phaser 6500DN\r\n"
"220 Xerox Phaser 6180MFP-D\r\n"
"220 Xerox Phaser 6500DN\r\n"
"220 FUJI XEROX DocuPrint M355 df\r\n"
"220 Xerox Phaser 6125N\r\n"
"220 Xerox Phaser 6280DN\r\n"
"220 Xerox Phaser 6600N\r\n"
"220 FUJI XEROX DocuPrint CP305 d\r\n"
"220 Xerox Phaser 6600DN\r\n"
"220 FUJI XEROX DocuPrint CM305 df\r\n"
"220 FUJI XEROX DocuPrint 3055\r\n"
"220 Xerox Phaser 3610\r\n"
"220 FUJI XEROX DocuPrint M355 df\r\n"
"220 FUJI XEROX DocuPrint 2065\r\n"
"220 FUJI XEROX DocuPrint CP305 d\r\n"
"220 Xerox Phaser 6280N\r\n"
"""
|
11575919
|
import asyncio
import unittest
from aiter import map_aiter, push_aiter
from .helpers import run, get_n
class test_aitertools(unittest.TestCase):
def test_asyncmap(self):
def make_async_transformation_f(results):
async def async_transformation_f(item):
results.append(item)
return item + 1
return async_transformation_f
results = []
q = push_aiter()
q.push(5, 4, 3)
q.stop()
r = list(q.available_iter())
self.assertEqual(r, [5, 4, 3])
aiter = map_aiter(make_async_transformation_f(results), q)
r = run(get_n(aiter))
self.assertEqual(r, [6, 5, 4])
self.assertEqual(results, [5, 4, 3])
def test_syncmap(self):
def make_sync_transformation_f(results):
def sync_transformation_f(item):
results.append(item)
return item + 1
return sync_transformation_f
results = []
q = push_aiter()
q.push(5, 4, 3)
q.stop()
r = list(q.available_iter())
self.assertEqual(r, [5, 4, 3])
aiter = map_aiter(make_sync_transformation_f(results), q)
r = run(get_n(aiter))
self.assertEqual(r, [6, 5, 4])
self.assertEqual(results, [5, 4, 3])
def test_make_pipe(self):
async def map_f(x):
await asyncio.sleep(x / 100.0)
return x * x
q = push_aiter()
aiter = map_aiter(map_f, q)
for _ in range(4):
q.push(_)
for _ in range(3, 9):
q.push(_)
r = run(get_n(aiter, 10))
q.stop()
r.extend(run(get_n(aiter)))
r1 = sorted([_*_ for _ in range(4)] + [_ * _ for _ in range(3, 9)])
self.assertEqual(r, r1)
|
11575921
|
def gen_prime(num):
res = set()
for val in range(1, num + 1):
if val > 1:
for i in range(2, val):
if val % i == 0:
break
else:
res.add(val)
print(sorted(list(res)))
def gen2(num):
res = []
for i in range(2, num + 1):
for j in range(2, i + 1):
if i % j == 0:
break
if j == i:
res.append(j)
print(res)
# gen_prime(41)
# gen2(41)
def find_all_prime(num):
res = []
for val in range(2, num + 1):
prime = True
for y in range(2, val):
if val % y == 0:
prime = False
if prime:
res.append(val)
return res
res = find_all_prime(40)
print(res)
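# Hedged sanity check (added for illustration): find_all_prime should match the
# known primes below 20.
assert find_all_prime(20) == [2, 3, 5, 7, 11, 13, 17, 19]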
|
11575971
|
from data_importers.ems_importers import BaseDemocracyCountsCsvImporter
class Command(BaseDemocracyCountsCsvImporter):
council_id = "EAY"
addresses_name = "2021-03-02T10:33:59.623219/Democracy Club - Polling Districts.csv"
stations_name = "2021-03-02T10:33:59.623219/Democracy Club - Polling Stations.csv"
elections = ["2021-05-06"]
def address_record_to_dict(self, record):
if record.postcode in ["KA3 6AZ", "KA2 0BQ", "KA1 2SE", "KA18 2JW", "KA18 2NB"]:
return None
return super().address_record_to_dict(record)
def station_record_to_dict(self, record):
if record.stationcode in [
"CDV01",
"CDV02",
"CDV03",
"CDV04",
"CDV05",
"CDV06",
"CDV07",
"CDV08",
"CDV09",
"CDV10",
"CDV11",
"CDV12",
"CDV13",
"CDV14_1",
"CDV14_2",
"CDV15",
"CDV16",
"CDV17_1",
"CDV17_2",
"CDV18",
"CDV19",
"CDV20",
"CDV21",
"CDV22",
"CDV23",
"CDV24",
"CDV25",
"CDV26",
"CDV27",
"CDV28",
"CDV29",
"CDV30",
"CDV31",
"KIV01",
"KIV02",
"KIV03",
"KIV04",
"KIV05",
"KIV06",
"KIV07",
"KIV08_1",
"KIV08_2",
"KIV09",
"KIV10",
"KIV11",
"KIV12",
"KIV13",
"KIV14",
"KIV15",
"KIV16",
"KIV17",
"KIV18",
"KIV19",
"KIV20",
"KIV21",
"KIV22",
"KIV23",
"KIV24",
"KIV25",
"KIV26",
"KIV27",
"KIV28_1",
"KIV28_2",
"KIV29",
"KIV30",
"KIV31",
"KIV32",
"KIV33",
"KIV34",
"KIV35",
"KIV36",
"KIV37",
"KIV38",
"KIV39",
"KIV40",
"KIV41",
"KIV42",
"KIV43",
"KIV44",
"KIV45",
"KIV46",
"KIV47",
"KIV48",
"KIV49",
"KIV50",
"KIV51",
"KIV52",
"KIV53",
"KIV54",
"KIV55",
"KIV56",
"KIV57",
"KIV58",
"KIV59_1",
"KIV59_2",
"KIV60",
"KIV61",
"KIV62_1",
"KIV62_2",
"KIV63",
"KIV64",
]:
return super().station_record_to_dict(record)
else:
return None
|
11575993
|
def extractWorkNwongNet(item):
'''
Parser for 'work.nwong.net'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Poison-Wielding Fugitive', 'Poison-Wielding Fugitive', 'translated'),
('I Said Make My Abilities Average!', 'I Said Make My Abilities Average', 'translated'),
('I Said Make My Abilities Average', 'I Said Make My Abilities Average', 'translated'),
('Head Over Heels from the Scarf I Lent Her', 'Head Over Heels from the Scarf I Lent Her', 'translated'),
('Dimension Wave', 'Dimension Wave', 'translated'),
('Crowbar Nurse', 'Crowbar Nurse', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
11575995
|
import torch.nn as nn
import torch.nn.functional as F
import timm
import torch
from collections import OrderedDict
from .backbone import create_custom_backbone # import the custom backbone factory
def create_backbone(model_name, num_classes, metric=False):
"""
Backbone factory entry point.
Priority: custom backbones > timm models.
model_name: network name
num_classes: size of the network output
metric: False for a classification task, True for metric learning
"""
try:
# load a custom backbone first
model = create_custom_backbone(model_name, num_classes)
except Exception:
# fall back to a timm model
if metric:
model = MetricModel(model_name, pretrained=True, feature_dim=num_classes)
else:
model = timm.create_model(
model_name, pretrained=True, num_classes=num_classes
)
return model
class MetricModel(nn.Module):
"""
Metric learning: wraps a timm-based feature-extraction backbone.
"""
def __init__(self, model_name, pretrained, feature_dim):
super(MetricModel, self).__init__()
# feature extractor
self.features = timm.create_model(
model_name,
pretrained=pretrained,
num_classes=feature_dim, # override the output dimension
)
self.bn = nn.BatchNorm1d(feature_dim)
def forward(self, imgs):
features = self.features(imgs)
features = self.bn(features) # batch-normalize the embedding
features = F.normalize(features, p=2, dim=1) # L2-normalize so each embedding has unit length
return features
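# Hedged usage sketch (illustrative, not part of the original file): build a
# metric-learning backbone and inspect the embeddings. "resnet18" is an arbitrary
# timm model name chosen for this example; if the timm fallback is used, each row
# should have (approximately) unit L2 norm because of F.normalize above.
example_model = create_backbone("resnet18", num_classes=128, metric=True)
example_model.eval()
with torch.no_grad(): example_embeddings = example_model(torch.randn(2, 3, 224, 224))
print(example_embeddings.shape, example_embeddings.norm(dim=1))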
|
11576004
|
from flask_restly.decorator import resource, get, rate
from flask_restly import FlaskRestly
from flask import Flask
from time import time
def test_should_return_429_code_if_too_much_requests():
app = Flask(__name__)
FlaskRestly(app)
requests_limit = 2
_data = dict()
@rate.resolver
def view_rate_limit(key, window, _):
requests = _data.get(key, 0)
_data[key] = requests + 1
return _data.get(key, 0), time() + window
@resource(name='test')
class SomeResource:
@rate.limit(requests=requests_limit)
@get('/<int:id>')
def get_entity(self, id):
return dict(id=id)
with app.app_context():
SomeResource()
with app.test_client() as client:
for _ in range(requests_limit):
response = client.get('/api/rest/v1/test/1')
assert response.status_code == 200
response = client.get('/api/rest/v1/test/1')
assert response.status_code == 429
def test_should_inject_proper_headers():
app = Flask(__name__)
FlaskRestly(app)
requests_limit = 2
time_window = 2
_data = dict()
now = int(time())
@rate.resolver
def view_rate_limit(key, window, _):
requests = _data.get(key, 0)
_data[key] = requests + 1
return _data.get(key, 0), now + window
@resource(name='test')
class SomeResource:
@rate.limit(requests=requests_limit, window=time_window)
@get('/<int:id>')
def get_entity(self, id):
return dict(id=id)
with app.app_context():
SomeResource()
with app.test_client() as client:
response = client.get('/api/rest/v1/test/1')
assert response.headers.get('X-RateLimit-Limit', type=int) == requests_limit
assert response.headers.get('X-RateLimit-Remaining', type=int) == requests_limit - 1
assert response.headers.get('X-RateLimit-Reset', type=int) == now + time_window
def test_should_use_custom_group_name():
app = Flask(__name__)
FlaskRestly(app)
requests_limit = 2
group_name = 'test'
data_group_name = '127.0.0.1__' + group_name
_data = {
data_group_name: requests_limit,
}
@rate.resolver
def view_rate_limit(key, window, _):
requests = _data.get(key, 0)
_data[key] = requests + 1
return _data.get(key, 0), time() + window
@resource(name='test')
class SomeResource:
@rate.limit(requests=requests_limit, group=group_name)
@get('/<int:id>')
def get_entity(self, id):
return dict(id=id)
with app.app_context():
SomeResource()
with app.test_client() as client:
response = client.get('/api/rest/v1/test/1')
assert response.status_code == 429
def test_should_use_custom_key_resolver():
app = Flask(__name__)
FlaskRestly(app)
group_name = 'some_custom_key'
@rate.key_resolver
def _custom_key_resolver(*_):
return group_name
requests_limit = 2
_data = {
group_name: requests_limit,
}
@rate.resolver
def view_rate_limit(key, window, _):
requests = _data.get(key, 0)
_data[key] = requests + 1
return _data.get(key, 0), time() + window
@resource(name='test')
class SomeResource:
@rate.limit(requests=requests_limit)
@get('/<int:id>')
def get_entity(self, id):
return dict(id=id)
with app.app_context():
SomeResource()
with app.test_client() as client:
response = client.get('/api/rest/v1/test/1')
assert response.status_code == 429
|
11576044
|
import os
import pyodbc
from decouple import config
server = config('server')
database = config('database')
username = config('username')
password = config('password')
driver= '{ODBC Driver 17 for SQL Server}'
cnxn = pyodbc.connect('DRIVER='+driver+';SERVER='+server+';PORT=1433;DATABASE='+database+';UID='+username+';PWD='+ password,autocommit=True)
def prepare_database():
try:
cursor = cnxn.cursor()
cursor.execute("""IF (EXISTS (SELECT * FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_SCHEMA = 'dbo'
AND TABLE_NAME = 'Orders'))
BEGIN
DROP TABLE [dbo].[Orders];
END
IF (EXISTS (SELECT * FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_SCHEMA = 'dbo'
AND TABLE_NAME = 'Inventory'))
BEGIN
DROP TABLE [dbo].[Inventory];
END
CREATE TABLE [dbo].[Orders] (ID int PRIMARY KEY, ProductID int, OrderDate datetime);
CREATE TABLE [dbo].[Inventory] (ProductID int PRIMARY KEY, QuantityInStock int
CONSTRAINT CHK_QuantityInStock CHECK (QuantityInStock>-1));
-- Fill up with some sample values
INSERT INTO dbo.Orders VALUES (1,1,getdate());
INSERT INTO dbo.Inventory VALUES (1,0);
""")
except pyodbc.DatabaseError as err:
print("Couldn't prepare database tables")
def execute_transaction():
try:
cnxn.autocommit = False
cursor = cnxn.cursor()
cursor.execute("INSERT INTO Orders VALUES (2,1,getdate());")
cursor.execute("UPDATE Inventory SET QuantityInStock=QuantityInStock-1 WHERE ProductID=1")
except pyodbc.DatabaseError as err:
cnxn.rollback()
print("Transaction rolled back: " + str(err))
else:
cnxn.commit()
print("Transaction committed!")
finally:
cnxn.autocommit = True
prepare_database()
execute_transaction()
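# Hedged follow-up check (illustrative, not in the original script): ProductID 1
# starts at QuantityInStock = 0, so the UPDATE above violates CHK_QuantityInStock
# and the whole transaction, including the new order row, is rolled back.
check_cursor = cnxn.cursor()
check_cursor.execute("SELECT COUNT(*) FROM dbo.Orders;")
print("orders after execute_transaction:", check_cursor.fetchone()[0])  # expected: 1
check_cursor.execute("SELECT QuantityInStock FROM dbo.Inventory WHERE ProductID = 1;")
print("quantity in stock:", check_cursor.fetchone()[0])  # expected: 0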
|
11576080
|
class FilterModule(object):
def filters(self):
return {
'parse_re_response':
self.parse_re_response
}
# end filters
# sample response: {"route-engine-information": {"route-engine":[]}}
@staticmethod
def parse_re_response(device_response):
"""
:param device_response:
"""
if device_response:
re_resp = device_response.get('route-engine-information', {})
re_details = re_resp.get('route-engine', {})
if re_details:
if type(re_details) is dict:
return ['re0']
elif type(re_details) is list:
return ['re0', 're1']
return []
# end parse_re_response
# end FilterModule
# For UT
if __name__ == "__main__":
fc = FilterModule()
device_response = None
print(fc.parse_re_response(device_response))
device_response = {'route-engine-information': {}}
print(fc.parse_re_response(device_response))
device_response = {'route-engine-information': {'route-engine': {}}}
print(fc.parse_re_response(device_response))
device_response = {'route-engine-information': {'route-engine': {
'slot': 0}}}
print(fc.parse_re_response(device_response))
device_response = {'route-engine-information': {'route-engine': [{
'slot': 0}, {'slot': 1}]}}
print(fc.parse_re_response(device_response))
|
11576108
|
from unittest_reinvent.running_modes.lib_invent_tests.learning_strategy_tests.test_learning_strategy_sdap_strategy \
import TestLearningStrategySdapStrategy
from unittest_reinvent.running_modes.lib_invent_tests.learning_strategy_tests.test_learning_strategy_dap_strategy \
import TestLearningStrategyDapStrategy
from unittest_reinvent.running_modes.lib_invent_tests.learning_strategy_tests.test_learning_strategy_mauli_strategy \
import TestLearningStrategyMauliStrategy
from unittest_reinvent.running_modes.lib_invent_tests.learning_strategy_tests.test_learning_strategy_mascof_strategy \
import TestLearningStrategyMascofStrategy
|
11576135
|
import datetime
import logging
import decimal
from django.core.management.base import BaseCommand, CommandError
from pyExcelerator import parse_xls
from market.models import MarketPlace, MarketCategory, MarketSubCategory
from django.template.defaultfilters import slugify
def clean_subcategory(subcategory):
subcategory = subcategory.replace("Colonials - ", "").strip()
if '-' in subcategory:
x, y = subcategory.split('-', 1)
x = x.strip()
y = y.strip()
if x == y:
return x
return subcategory
class Command(BaseCommand):
args = '<poll_id poll_id ...>'
help = 'Closes the specified poll for voting'
def handle(self, *args, **options):
from inventory.models import Coin
marketplace = MarketPlace.objects.get(slug="greatcoins")
today = datetime.datetime.today()
spreadsheet = open(args[0], 'r')
for sheet_name, values in parse_xls(spreadsheet, 'utf8'): #'cp1251'):
if sheet_name == "Sheet1" or sheet_name == "Sheet2":
products = [[]]
property_keys = []
print ('Parsing Sheet = "%s"' % sheet_name.encode('cp866', 'backslashreplace'))
for row_idx, col_idx in sorted(values.keys()):
if row_idx == 0:
property_keys.append(values[(row_idx, col_idx)])
continue
v = values[(row_idx, col_idx)]
if isinstance(v, unicode): v = v.encode('utf8') #'cp866', 'backslashreplace')
else: v = str(v)
last_row, last_col = len(products), len(products[-1])
while last_row < row_idx:
products.extend([[]])
last_row = len(products)
while last_col < col_idx:
products[-1].extend([''])
last_col = len(products[-1])
products[-1].extend([v])
print (property_keys)
sort_order_idx = 14
sub_category_idx = 2
category_idx = 1
subcategories = set()
categories = set()
category_tree = {}
category_objs = {}
subcategory_objs = {}
counter = 0
for i, product in enumerate(products[1:]):
line = i + 3
# counter += 1
# if counter == 600: break
if len(product) < 6:
print "Line %d: invalid row" % (line)
continue
try:
pcgs_number = int(decimal.Decimal(product[5]))
except ValueError:
print "Line %d : invalid PCGS" % (line)
continue
except Exception as e:
print("Line %d, %s. Could not recognize PCGS Number, line will not be saved.\n >> %s" % (line, e, product))
continue
if len(product) < sub_category_idx + 1:
print "Line %d: sub category is missing" % (line)
continue
if len(product) < 15:
print "Line %d: sort order value don't exist. line will not be saved.\n >> %s" % (line, product)
continue
category = product[category_idx]
subcategory = clean_subcategory(product[sub_category_idx])
if category == '':
print "Line %d: category is missing" % (line)
print product
continue
if category == "Hawaii":
print "Line %d: Hawaii category found, break." % (line)
break
category_obj = category_objs.get(category, None)
if category_obj is None:
try:
category_obj = MarketCategory.objects.get(slug=slugify(category))
except MarketCategory.DoesNotExist:
category_obj = MarketCategory.objects.get_or_create(marketplace=marketplace, name=category)[0]
category_objs[category] = category_obj
if subcategory == '':
subcategory_obj = None
else:
subcategory_obj = subcategory_objs.get(category + '_' + subcategory, None)
if subcategory_obj is None:
try:
subcategory_obj = MarketSubCategory.objects.get(parent=category_obj, slug=slugify(subcategory))
except MarketSubCategory.DoesNotExist:
subcategory_obj = MarketSubCategory.objects.get_or_create(marketplace=marketplace,
parent=category_obj,
name=subcategory)[0]
subcategory_objs[category + '_' + subcategory] = subcategory_obj
category_tree.setdefault(category, set())
category_tree[category].add(subcategory)
coin, created = Coin.objects.get_or_create(pcgs_number=pcgs_number)
if not created: #and today < coin.last_update:
#already updated today
#print "Line %d: coin already saved. %s" % (line, coin)
continue
coin.category = category_obj
coin.subcategory = subcategory_obj
coin.country_code = 'us'
coin.pcgs_number = pcgs_number
coin.description = product[6]
coin.year_issued = product[7]
coin.actual_year = product[8]
coin.denomination = product[9]
coin.major_variety = product[10]
coin.die_variety = product[11]
coin.prefix = product[12]
coin.suffix = product[13]
if len(product) > 14:
coin.sort_order = product[14]
coin.heading = subcategory
if len(product) > 16:
coin.holder_variety = product[16]
if len(product) > 17:
coin.holder_variety_2 = product[17]
if len(product) > 18:
coin.additional_data = product[18]
coin.save()
|
11576141
|
import pygame
from pygame.sprite import Sprite
# This class represents the bar at the bottom that the player controls
class Player(pygame.sprite.Sprite):
# Set speed vector
change_x=0
change_y=0
# Constructor function
def __init__(self,x,y, filename):
# Call the parent's constructor
pygame.sprite.Sprite.__init__(self)
# Set height, width
self.image = pygame.image.load(filename).convert()
# Make our top-left corner the passed-in location.
self.rect = self.image.get_rect()
self.rect.top = y
self.rect.left = x
self.prev_x = x
self.prev_y = y
# Clear the speed of the player
def prevdirection(self):
self.prev_x = self.change_x
self.prev_y = self.change_y
# Change the speed of the player
def changespeed(self,x,y):
self.change_x+=x
self.change_y+=y
# Find a new position for the player
def update(self,walls,gate):
# Get the old position, in case we need to go back to it
old_x=self.rect.left
new_x=old_x+self.change_x
prev_x=old_x+self.prev_x
self.rect.left = new_x
old_y=self.rect.top
new_y=old_y+self.change_y
prev_y=old_y+self.prev_y
# Did this update cause us to hit a wall?
x_collide = pygame.sprite.spritecollide(self, walls, False)
if x_collide:
# Whoops, hit a wall. Go back to the old position
self.rect.left=old_x
# self.rect.top=prev_y
# y_collide = pygame.sprite.spritecollide(self, walls, False)
# if y_collide:
# # Whoops, hit a wall. Go back to the old position
# self.rect.top=old_y
# print('a')
else:
self.rect.top = new_y
# Did this update cause us to hit a wall?
y_collide = pygame.sprite.spritecollide(self, walls, False)
if y_collide:
# Whoops, hit a wall. Go back to the old position
self.rect.top=old_y
# self.rect.left=prev_x
# x_collide = pygame.sprite.spritecollide(self, walls, False)
# if x_collide:
# # Whoops, hit a wall. Go back to the old position
# self.rect.left=old_x
# print('b')
if gate != False:
gate_hit = pygame.sprite.spritecollide(self, gate, False)
if gate_hit:
self.rect.left=old_x
self.rect.top=old_y
|
11576173
|
from gensim.models.keyedvectors import KeyedVectors
import psycopg2
from psycopg2.extensions import register_adapter
from psycopg2.extras import Json, execute_values
import bz2
import numpy as np
def adapt_numpy_ndarray(numpy_ndarray):
"""
Adapt a NumPy array to JSON for the jsonb column
"""
return Json(numpy_ndarray.tolist())
def connect_db():
connection = psycopg2.connect("host=postgres user=postgres port=5432")
register_adapter(np.ndarray, adapt_numpy_ndarray)
cursor = connection.cursor()
return connection, cursor
def create_embed_table(conn,cursor):
try:
cursor.execute('DROP TABLE embeddings')
print('dropping table')
except:
print('no table to drop')
try:
cursor.execute('CREATE TABLE embeddings (key varchar, embedding jsonb)')
conn.commit()
except:
print("DB already created")
if __name__==("__main__"):
conn,cursor = connect_db()
create_embed_table(conn,cursor)
print("loading embeding")
model_path ='/data/GoogleNews-vectors-negative300.bin'
model = KeyedVectors.load_word2vec_format(model_path,binary=True)
print("embedding loaded")
done = 0
vocab = list(set(model.index2word))
embeds = [model[key] for key in vocab ]
data = list(zip(vocab,embeds))
print('running inserts')
insert_query = "INSERT INTO EMBEDDINGS (key,embedding) VALUES %s"
execute_values(cursor,insert_query,data,page_size=10)
print('running commit')
conn.commit()
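# Hedged read-back sketch (illustrative, not in the original script): psycopg2
# returns jsonb values as Python lists, so a stored embedding can be rebuilt as
# a NumPy vector. "king" is a hypothetical vocabulary key.
cursor.execute("SELECT embedding FROM embeddings WHERE key = %s", ("king",))
row = cursor.fetchone()
vec = np.array(row[0]) if row else None
print("fetched embedding shape:", None if vec is None else vec.shape)  # (300,) for GoogleNews vectors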
|
11576177
|
def comb(*sequences):
'''
combinations of multiple sequences so you don't have
to write nested for loops
>>> from pprint import pprint as pp
>>> pp(comb(['Guido','Larry'], ['knows','loves'], ['Phyton','Purl']))
[['Guido', 'knows', 'Phyton'],
['Guido', 'knows', 'Purl'],
['Guido', 'loves', 'Phyton'],
['Guido', 'loves', 'Purl'],
['Larry', 'knows', 'Phyton'],
['Larry', 'knows', 'Purl'],
['Larry', 'loves', 'Phyton'],
['Larry', 'loves', 'Purl']]
>>>
'''
combinations = [[seq] for seq in sequences[0]]
for seq in sequences[1:]:
combinations = [comb+[item]
for comb in combinations
for item in seq ]
return combinations
def comb2(*sequences):
'''
Generator of combinations of multiple sequences so you don't have
to write nested for loops
Note: rightmost sequence changes the quickest as if it were the inner loop.
>>> for x in comb2(['Guido','Larry'], ['knows','loves','hates'], ['Phyton','Purl','Wuby','Auk']):
... print(x)
...
['Guido', 'knows', 'Phyton']
['Guido', 'knows', 'Purl']
['Guido', 'knows', 'Wuby']
['Guido', 'knows', 'Auk']
['Guido', 'loves', 'Phyton']
['Guido', 'loves', 'Purl']
['Guido', 'loves', 'Wuby']
['Guido', 'loves', 'Auk']
['Guido', 'hates', 'Phyton']
['Guido', 'hates', 'Purl']
['Guido', 'hates', 'Wuby']
['Guido', 'hates', 'Auk']
['Larry', 'knows', 'Phyton']
['Larry', 'knows', 'Purl']
['Larry', 'knows', 'Wuby']
['Larry', 'knows', 'Auk']
['Larry', 'loves', 'Phyton']
['Larry', 'loves', 'Purl']
['Larry', 'loves', 'Wuby']
['Larry', 'loves', 'Auk']
['Larry', 'hates', 'Phyton']
['Larry', 'hates', 'Purl']
['Larry', 'hates', 'Wuby']
['Larry', 'hates', 'Auk']
>>>
The algorithm relies on noting the following and generalising:
If you had three sequences of length 2, 3, and 4 left-to-right,
Then the indices of the *elements* for all combinations can be
generated with:
for x in range(2*3*4):
print((x//(3*4*1))%2, (x//(4*1))%3, (x//(1))%4)
'''
import operator
from functools import reduce  # reduce is a builtin only on Python 2
lengths = [len(seq) for seq in sequences]
range_len_seq = range(len(sequences))
max_count = reduce(operator.mul, lengths)
_tmp = lengths + [1] # append multiplicative identity
dividers = [reduce(operator.mul, _tmp[-x-1:]) for x in range_len_seq][::-1]
modulos = lengths
for n in range(max_count):
yield [sequences[r][(n // dividers[r]) % modulos[r]] for r in range_len_seq]
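# Hedged equivalence check (added for illustration): both implementations should
# enumerate the same combinations in the same order.
_seqs = (['Guido', 'Larry'], ['knows', 'loves'], ['Phyton', 'Purl'])
assert list(comb2(*_seqs)) == comb(*_seqs)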
|
11576251
|
from fastai.basic_train import LearnerCallback
import fastai.tabular.data
from fastai.torch_core import *
from fast_rl.agents.ddpg_models import DDPGModule
from fast_rl.core.agent_core import ExperienceReplay, ExplorationStrategy, Experience
from fast_rl.core.basic_train import AgentLearner
from fast_rl.core.data_block import MDPDataBunch, MDPStep, FEED_TYPE_STATE, FEED_TYPE_IMAGE
class DDPGLearner(AgentLearner):
def __init__(self, data: MDPDataBunch, model, memory, exploration_method, trainers, opt=optim.Adam,
**kwargs):
self.memory: Experience = memory
self.exploration_method: ExplorationStrategy = exploration_method
super().__init__(data=data, model=model, opt=opt, **kwargs)
self.ddpg_trainers = listify(trainers)
for t in self.ddpg_trainers: self.callbacks.append(t(self))
def predict(self, element, **kwargs):
with torch.no_grad():
training = self.model.training
if element.shape[0] == 1: self.model.eval()
pred = self.model(element)
if training: self.model.train()
return self.exploration_method.perturb(pred.detach().cpu().numpy(), self.data.action.action_space)
def interpret_q(self, item):
with torch.no_grad():
return self.model.interpret_q(item).cpu().numpy().item()
class BaseDDPGTrainer(LearnerCallback):
def __init__(self, learn):
super().__init__(learn)
self.max_episodes = 0
self.episode = 0
self.iteration = 0
self.copy_over_frequency = 3
@property
def learn(self) -> DDPGLearner:
return self._learn()
def on_train_begin(self, n_epochs, **kwargs: Any):
self.max_episodes = n_epochs
def on_epoch_begin(self, epoch, **kwargs: Any):
self.episode = epoch
self.iteration = 0
def on_loss_begin(self, **kwargs: Any):
"""Performs tree updates, exploration updates, and model optimization."""
if self.learn.model.training: self.learn.memory.update(item=self.learn.data.x.items[-1])
self.learn.exploration_method.update(self.episode, max_episodes=self.max_episodes, explore=self.learn.model.training)
if not self.learn.warming_up:
samples: List[MDPStep] = self.memory.sample(self.learn.data.bs)
post_optimize = self.learn.model.optimize(samples)
if self.learn.model.training:
self.learn.memory.refresh(post_optimize=post_optimize)
self.learn.model.target_copy_over()
self.iteration += 1
def create_ddpg_model(data: MDPDataBunch, base_arch: DDPGModule, layers=None, ignore_embed=False, channels=None,
opt=torch.optim.RMSprop, loss_func=None, **kwargs):
bs, state, action = data.bs, data.state, data.action
nc, w, h, n_conv_blocks = -1, -1, -1, [] if state.mode == FEED_TYPE_STATE else ifnone(channels, [32, 32, 32])
if state.mode == FEED_TYPE_IMAGE: nc, w, h = state.s.shape[3], state.s.shape[2], state.s.shape[1]
_layers = ifnone(layers, [400, 200] if len(n_conv_blocks) == 0 else [200, 200])
if ignore_embed or np.any(state.n_possible_values == np.inf) or state.mode == FEED_TYPE_IMAGE: emb_szs = []
else: emb_szs = [(d+1, int(fastai.tabular.data.emb_sz_rule(d))) for d in state.n_possible_values.reshape(-1, )]
ao = int(action.taken_action.shape[1])
model = base_arch(ni=state.s.shape[1], ao=ao, layers=_layers, emb_szs=emb_szs, n_conv_blocks=n_conv_blocks,
nc=nc, w=w, h=h, opt=opt, loss_func=loss_func, **kwargs)
return model
ddpg_config = {
DDPGModule: BaseDDPGTrainer
}
def ddpg_learner(data: MDPDataBunch, model, memory: ExperienceReplay, exploration_method: ExplorationStrategy,
trainers=None, **kwargs):
trainers = ifnone(trainers, ddpg_config[model.__class__])
return DDPGLearner(data, model, memory, exploration_method, trainers, **kwargs)
|
11576282
|
import numpy as np
import os
import matplotlib.pyplot as plt
from matplotlib import gridspec
import seaborn as sns
import colorsys
from sis import coeff_determination_metric, find_sub_list, retokenize_annotation
ASPECT_TO_COLOR = { 0: 'red',
1: 'blue',
2: 'green' }
def rgb_to_hsl(r, g, b):
r /= 255.0
g /= 255.0
b /= 255.0
h, l, s = colorsys.rgb_to_hls(r, g, b)
h *= 360.0
s *= 100.0
l *= 100.0
return (h, s, l)
def highlight_annot(review, rationale, index_to_token, aspect, hsl=True,
underline_annots=True):
review_len = review.get_num_tokens()
words = review.to_text(index_to_token, str_joiner=None)
aspects = [None for i in range(review_len)]
if hsl:
hsl_values = [None for i in range(review_len)]
# linearly-spaced interval, rounded
hsl_interval = np.rint(np.linspace(60, 95, num=len(rationale),
endpoint=True))
# if coloring text, should use [15, 85] range for interval
if underline_annots:
is_annot = [False for i in range(review_len)]
annot_idxs = []
for start, end in review.get_annotation_idxs():
for i in range(start, end + 1):
is_annot[i] = True
for rank, i in enumerate(rationale):
aspects[i] = aspect
if hsl:
# want 0 --> first element of rationale.elms (most important word)
# rank = K - 1 - rank
hsl_values[i] = hsl_interval[rank]
formatted_words = []
for i, w in enumerate(words):
style = ''
if aspects[i] is not None:
asp = aspects[i]
if hsl:
color = ASPECT_TO_COLOR[asp]
if color == 'red':
hsl_maker = lambda x: 'hsl(0, 100%%, %d%%)' % int(x)
background_color = 'hsl(0, 100%, 96%)';
elif color == 'green':
hsl_maker = lambda x: 'hsl(120, 100%%, %d%%)' % int(x)
background_color = 'hsl(120, 100%, 96%)';
else: # color == 'blue'
hsl_maker = lambda x: 'hsl(240, 100%%, %d%%)' % int(x)
background_color = 'hsl(240, 100%, 96%)';
style += 'background-color:%s;' % (hsl_maker(hsl_values[i]))
else:
style += 'color:%s;' % (ASPECT_TO_COLOR[asp])
if underline_annots and is_annot[i]:
style += 'text-decoration:underline; text-decoration-color:%s;' % \
(ASPECT_TO_COLOR[aspect])
formatted_w = '<span style="%s">%s</span>' % (style, w)
formatted_words.append(formatted_w)
html_out = ' '.join(formatted_words)
return html_out
def highlight_multi_rationale(review, rationales, index_to_token,
color_palette=sns.color_palette('dark'),
hsl=True, underline_annots=True,
underline_color='black'):
review_len = review.get_num_tokens()
words = review.to_text(index_to_token, str_joiner=None)
word_to_rationale = [None for i in range(review_len)]
if hsl:
hsl_values = [None for i in range(review_len)]
if underline_annots:
is_annot = [False for i in range(review_len)]
annot_idxs = []
for start, end in review.get_annotation_idxs():
for i in range(start, end + 1):
is_annot[i] = True
for i, rationale in enumerate(rationales):
if hsl:
# linearly-spaced interval, rounded
hsl_interval = np.rint(np.linspace(60, 85, num=len(rationale),
endpoint=True))
for rank, c in enumerate(rationale):
assert(word_to_rationale[c] is None) # rationales must be disjoint
word_to_rationale[c] = i
if hsl:
hsl_values[c] = hsl_interval[rank]
formatted_words = []
for i, w in enumerate(words):
style = ''
if word_to_rationale[i] is not None:
rationale_idx = word_to_rationale[i]
try:
color = color_palette[rationale_idx]
except: # ran out of colors in palette, default to black/gray
color = sns.color_palette('gray')[1]
if hsl:
h, s, l = rgb_to_hsl(*color)
hsl_maker = lambda x: 'hsl(%.4f, %.4f%%, %d%%)' % (h, s, int(x))
style += 'background-color:%s;' % (hsl_maker(hsl_values[i]))
else:
style += 'color:rgb(%.4f, %.4f, %.4f);' % \
(color[0]*255.0, color[1]*255.0, color[2]*255.0)
if underline_annots and is_annot[i]:
style += 'text-decoration:underline; text-decoration-color:%s;' % \
(underline_color)
formatted_w = '<span style="%s">%s</span>' % (style, w)
formatted_words.append(formatted_w)
html_out = ' '.join(formatted_words)
return html_out
def highlight_annot_tf(decoded_seq, rationale, color='red', shading=True,
joiner='', hide_elms=[]):
in_rationale = [False for i in range(len(decoded_seq))]
if shading:
hsl_values = [None for i in range(len(decoded_seq))]
# linearly-spaced interval, rounded
hsl_interval = np.rint(np.linspace(60, 95, num=len(rationale),
endpoint=True))
# if coloring text, should use [15, 85] range for interval
for rank, i in enumerate(rationale):
in_rationale[i] = True
if shading:
# want 0 --> first element of rationale.elms (most important word)
# rank = K - 1 - rank
hsl_values[i] = hsl_interval[rank]
formatted_seq = []
for i, elem in enumerate(decoded_seq):
if i in hide_elms:
continue
style = ''
if in_rationale[i]:
if shading:
if color == 'red':
hsl_maker = lambda x: 'hsl(0, 100%%, %d%%)' % int(x)
background_color = 'hsl(0, 100%, 96%)'
elif color == 'green':
hsl_maker = lambda x: 'hsl(120, 100%%, %d%%)' % int(x)
background_color = 'hsl(120, 100%, 96%)'
else: # color == 'blue'
hsl_maker = lambda x: 'hsl(240, 100%%, %d%%)' % int(x)
background_color = 'hsl(240, 100%, 96%)'
style += 'background-color:%s;' % (hsl_maker(hsl_values[i]))
else:
style += 'color:%s;' % (color)
formatted_elem = '<span style="%s">%s</span>' % (style, elem)
formatted_seq.append(formatted_elem)
html_out = joiner.join(formatted_seq)
return html_out
def make_legend(num_sis, color_palette=sns.color_palette(), labels=None):
html = ''
def make_legend_li(rgb, text):
html = ''
html += '''<li style="float:left; margin-right:10px;">
<span style="border:none;float:left;width:25px;height:16px;margin:3px;
background-color:rgb(%.4f, %.4f, %.4f)"></span> %s</li>
''' % (rgb[0], rgb[1], rgb[2], text)
return html
html += '<p><ul style="list-style:none;">'
for i, color in enumerate(color_palette[:num_sis]):
rgb = np.array(list(color)) * 255.0
if labels is not None:
text = labels[i]
else:
text = 'SIS %d' % (i+1)
html += make_legend_li(rgb, text)
html += '</ul></p>'
return html
def save_html(html, filepath, header=None):
with open(filepath, 'w') as outfile:
outfile.write('<html>\n<body>\n')
if header:
outfile.write(header + '<hr>\n')
outfile.write(html)
outfile.write('\n</body>\n</html>')
def plot_predictive_dist(dist, bins=25, vertlines=[], title='', savepath=None):
plt.hist(dist, bins=bins)
for x in vertlines:
plt.axvline(x=x, c='black')
if title != '':
plt.title(title)
plt.xlabel('Predicted Score')
plt.ylabel('Frequency')
if savepath is not None:
plt.savefig(savepath, bbox_inches='tight')
plt.clf()
plt.cla()
plt.close()
else:
plt.show()
# `data` is list of (vals, bins, label) tuples
def plot_hist(data, title='', xlabel='', ylabel='', normed=True,
savepath=None, legend_loc='upper right'):
for vals, bins, label in data:
plt.hist(vals, bins=bins, density=normed, alpha=0.5, label=label)
if xlabel != '':
plt.xlabel(xlabel)
if ylabel != '':
plt.ylabel(ylabel)
if title != '':
plt.title(title)
plt.legend(loc=legend_loc)
if savepath is not None:
plt.savefig(savepath, bbox_inches='tight')
plt.clf()
plt.cla()
plt.close()
else:
plt.show()
def plot_scatter(x, y, xlabel='', ylabel='', title='', upper_lim=None,
savepath=None):
assert(len(x) == len(y))
plt.figure(figsize=(5, 5))
plt.scatter(x, y, s=50, alpha=0.2)
if xlabel != '':
plt.xlabel(xlabel)
if ylabel != '':
plt.ylabel(ylabel)
if title != '':
plt.title(title)
if upper_lim is None:
upper_lim = max(np.max(x), np.max(y))
plt.xlim(0, upper_lim)
plt.ylim(0, upper_lim)
plt.plot([0, upper_lim], [0, upper_lim], '--', c='black')
plt.gca().set_aspect('equal', adjustable='box')
plt.draw()
if savepath is not None:
plt.savefig(savepath, bbox_inches='tight')
plt.clf()
plt.cla()
plt.close()
else:
plt.show()
# For visualizing weights per feature (e.g. from integrated gradients)
def plot_bar_weights(weights, xs=None, title='', xlabel='', ylabel='',
savepath=None):
if xs is None:
xs = list(range(len(weights)))
plt.bar(xs, weights)
if xlabel != '':
plt.xlabel(xlabel)
if ylabel != '':
plt.ylabel(ylabel)
if title != '':
plt.title(title)
if savepath is not None:
plt.savefig(savepath, bbox_inches='tight')
else:
plt.show()
# Visualize all SISes for some examples
# `rows` is a list of image grids
def visualize_mnist_sis_collection(rows, title=None, savepath=None):
nrow = len(rows)
ncol = max((len(r) for r in rows))+1
fig = plt.figure(figsize=((ncol+3)/2.0, (nrow+2)/2.0))
gs = gridspec.GridSpec(nrow, ncol,
wspace=0.05, hspace=0.05,
top=1.-0.5/(nrow+1), bottom=0.5/(nrow+1),
left=0.5/(ncol+1), right=1-0.5/(ncol+1))
for r, images in enumerate(rows):
for c, img in enumerate(images):
if c != 0:
c += 1
ax = plt.subplot(gs[r,c])
ax.imshow(img, cmap='gray')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
ax.grid(False)
if title is not None:
plt.suptitle(title, y=1.025, size=22)
if savepath is not None:
plt.savefig(savepath, dpi=600, bbox_inches='tight')
plt.show()
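# Hedged usage sketch (illustrative): render a two-entry legend with the helpers
# above and write it to a standalone HTML file (the file and label names are arbitrary).
example_legend = make_legend(2, labels=['rationale A', 'rationale B'])
save_html(example_legend, 'legend_example.html', header='<h3>Legend demo</h3>')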
|
11576344
|
import ubjson
data = {
"name": "python-ubjson",
"versions": ["1", "2"],
"group": {
"is_a_package": True,
"value": 42
}
}
serialized = ubjson.dumpb(data)
print(serialized)
with open("/tmp/data.json", "wb") as f:
f.write(serialized)
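# Hedged round-trip check (illustrative): ubjson.loadb should reproduce the
# original dictionary from the serialized bytes.
restored = ubjson.loadb(serialized)
assert restored == data
print(restored)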
|
11576352
|
from pybullet_utils.logger import Logger
logger = Logger()
logger.configure_output_file("e:/mylog.txt")
for i in range (10):
logger.log_tabular("Iteration", 1)
Logger.print2("hello world")
logger.print_tabular()
logger.dump_tabular()
|
11576381
|
import merge
import unittest
from cs.CsDatabag import CsDataBag
class TestCsDatabag(unittest.TestCase):
def setUp(self):
merge.DataBag.DPATH = "."
def test_init(self):
csdatabag = CsDataBag("koffie")
self.assertTrue(csdatabag is not None)
if __name__ == '__main__':
unittest.main()
|
11576384
|
import requests
url = 'http://photon.bits-goa.ac.in/lms/login/index.php'
values = {'username': '31120150591',
'password': <PASSWORD>'}
r = requests.post(url, data=values)
print (r.content)
|
11576406
|
from typing import Optional
from sqlalchemy.exc import IntegrityError
from db.models import session_creator, Team, Members
class TeamService:
@staticmethod
def edit_team(name, project) -> bool:
"""Takes a name, and a project description"""
# TODO: does this need a try/catch??
session = session_creator()
team = session.query(Team).filter(Team.team_name == name).first()
if team is not None:
team.project = project
session.commit()
session.close()
return True
else:
session.commit()
session.close()
return False
@staticmethod
def get_team_by_name(name, session=None) -> Optional[Team]:
"""Returns the team with the given name, or none if it doesn't exist"""
sess_flag = False
if session is None:
session = session_creator()
sess_flag = True
team = session.query(Team).filter(Team.team_name == name).first()
if sess_flag:
session.commit()
session.close()
return team
@staticmethod
def get_team_by_id(id, session=None) -> Optional[Team]:
"""Returns the team with the given id, or none if it doesn't exist"""
sess_flag = False
if session is None:
session = session_creator()
sess_flag = True
team = session.query(Team).filter(Team.id == id).first()
if sess_flag:
session.commit()
session.close()
return team
@staticmethod
def get_teams_by_name(name, session=None) -> list:
"""Returns a list of teams that match the given name"""
sess_flag = False
if session is None:
session = session_creator()
sess_flag = True
teams = session.query(Team).filter(Team.team_name.contains(name)).all()
if sess_flag:
session.commit()
session.close()
return teams
@staticmethod
def get_team_by_join_message_id(id, session=None) -> Optional[Team]:
"""Returns the team with the given join message id, or none if it doesn't exist"""
sess_flag = False
if session is None:
session = session_creator()
sess_flag = True
team = session.query(Team).filter(Team.join_message_id == id).first()
if sess_flag:
session.commit()
session.close()
return team
@staticmethod
def get_teams_by_member(member, session=None) -> Optional[list]:
"""Returns a list of all teams the member is a part of"""
sess_flag = False
if session is None:
session = session_creator()
sess_flag = True
team_ids = session.query(Members).filter(Members.member_id == str(member)).all()
teams = []
for t in team_ids:
teams.append(session.query(Team).filter(Team.id == t.team_id).first())
if sess_flag:
session.commit()
session.close()
return teams
@staticmethod
def delete_team_by_name(name) -> bool:
# TODO: Confirm that member references are deleted as well
"""Deletes team with given id"""
session = session_creator()
team = session.query(Team).filter(Team.team_name == name).first()
if team is not None:
# if team.members is not None:
# members = team.members
# for member in members:
# session.delete(member)
session.delete(team)
session.commit()
session.close()
return True
else:
session.commit()
session.close()
return False
@staticmethod
def get_all_teams(session=None) -> list:
"""Returns a list of team objects"""
sess_flag = False
if session is None:
session = session_creator()
sess_flag = True
teams = session.query(Team).all()
if sess_flag:
session.commit()
session.close()
return teams
@staticmethod
def add_team(name, tc, join_message, project=None) -> bool:
"""Add a new team"""
try:
session = session_creator()
session.add(
Team(
team_name=name,
tc_id=tc,
join_message_id=join_message,
project=project,
)
)
session.commit()
session.close()
return True
except IntegrityError:
return False
@staticmethod
def add_member(team, user_id, session=None):
sess_flag = False
if session is None:
session = session_creator()
sess_flag = True
session.add(team)
team.members.append(Members(member_id=user_id))
if sess_flag:
session.commit()
session.close()
@staticmethod
def remove_member(team, user_id, session=None):
sess_flag = False
if session is None:
session = session_creator()
sess_flag = True
session.add(team)
session.query(Members).filter(
Members.member_id == user_id, Members.team_id == team.id
).delete()
if sess_flag:
session.commit()
session.close()
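# Hedged usage sketch (illustrative, assumes a configured database): the team
# name, text-channel id and join-message id below are hypothetical.
created = TeamService.add_team('demo-team', tc=1234, join_message=5678, project='demo project')
if created: print(TeamService.get_team_by_name('demo-team'))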
|
11576409
|
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from typing import Union
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import List
class Client(BaseClient):
def associate_certificate(self, Arn: str) -> Dict:
pass
def can_paginate(self, operation_name: str = None):
pass
def cancel_job(self, Id: str) -> Dict:
pass
def create_job(self, Role: str, Settings: Dict, AccelerationSettings: Dict = None, BillingTagsSource: str = None, ClientRequestToken: str = None, JobTemplate: str = None, Queue: str = None, StatusUpdateInterval: str = None, UserMetadata: Dict = None) -> Dict:
pass
def create_job_template(self, Name: str, Settings: Dict, AccelerationSettings: Dict = None, Category: str = None, Description: str = None, Queue: str = None, StatusUpdateInterval: str = None, Tags: Dict = None) -> Dict:
pass
def create_preset(self, Name: str, Settings: Dict, Category: str = None, Description: str = None, Tags: Dict = None) -> Dict:
pass
def create_queue(self, Name: str, Description: str = None, PricingPlan: str = None, ReservationPlanSettings: Dict = None, Tags: Dict = None) -> Dict:
pass
def delete_job_template(self, Name: str) -> Dict:
pass
def delete_preset(self, Name: str) -> Dict:
pass
def delete_queue(self, Name: str) -> Dict:
pass
def describe_endpoints(self, MaxResults: int = None, Mode: str = None, NextToken: str = None) -> Dict:
pass
def disassociate_certificate(self, Arn: str) -> Dict:
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
pass
def get_job(self, Id: str) -> Dict:
pass
def get_job_template(self, Name: str) -> Dict:
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
pass
def get_preset(self, Name: str) -> Dict:
pass
def get_queue(self, Name: str) -> Dict:
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
pass
def list_job_templates(self, Category: str = None, ListBy: str = None, MaxResults: int = None, NextToken: str = None, Order: str = None) -> Dict:
pass
def list_jobs(self, MaxResults: int = None, NextToken: str = None, Order: str = None, Queue: str = None, Status: str = None) -> Dict:
pass
def list_presets(self, Category: str = None, ListBy: str = None, MaxResults: int = None, NextToken: str = None, Order: str = None) -> Dict:
pass
def list_queues(self, ListBy: str = None, MaxResults: int = None, NextToken: str = None, Order: str = None) -> Dict:
pass
def list_tags_for_resource(self, Arn: str) -> Dict:
pass
def tag_resource(self, Arn: str, Tags: Dict) -> Dict:
pass
def untag_resource(self, Arn: str, TagKeys: List = None) -> Dict:
pass
def update_job_template(self, Name: str, AccelerationSettings: Dict = None, Category: str = None, Description: str = None, Queue: str = None, Settings: Dict = None, StatusUpdateInterval: str = None) -> Dict:
pass
def update_preset(self, Name: str, Category: str = None, Description: str = None, Settings: Dict = None) -> Dict:
pass
def update_queue(self, Name: str, Description: str = None, ReservationPlanSettings: Dict = None, Status: str = None) -> Dict:
pass
|
11576433
|
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
from django.conf import settings
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_blog.settings')
app = Celery('django_blog')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
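# Hedged usage note (illustrative): once a broker is configured in the Django
# settings, the task can be queued from application code, e.g.
#   from django_blog.celery import debug_task
#   debug_task.delay()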
|
11576454
|
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("-i","--intxt",nargs="+",required=True)
parser.add_argument("-o","--outtxt",required=True)
args = parser.parse_args()
outf = open(args.outtxt,"w")
for f in args.intxt :
with open(f) as txt :
outf.write(txt.read()+"\n")
outf.close()
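# Hedged usage note (illustrative): concatenates the given text files into one
# output file, e.g. (the file names and script name are hypothetical):
#   python concat_txt.py -i part1.txt part2.txt -o combined.txt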
|
11576563
|
from __future__ import absolute_import, print_function, unicode_literals
# NOTE : No fix provided for "Admin actions are no longer collected from base ModelAdmin classes" for now, since it seems no one really relied on this behaviour?
|
11576572
|
import asyncio
from io import BytesIO
from twisted.web.http import CACHED, NOT_MODIFIED, PRECONDITION_FAILED
from twisted.web.test.requesthelper import DummyRequest as TwistedDummyRequest
class DummyApplication:
finished = False
fail_to_create = False
def create_application_instance(self, protocol, scope):
if self.fail_to_create:
raise Exception()
self.scope = scope
self.protocol = protocol
self.queue = asyncio.Queue()
return self.queue
def finish_protocol(self, protocol):
self.finished = True
class DummyRequest(TwistedDummyRequest):
_isSecure = False
startedWriting = 0
etag = None
channel = True
def __init__(self, *args, **kwargs):
self.content = BytesIO()
TwistedDummyRequest.__init__(self, *args, **kwargs)
def setETag(self, etag):
if etag:
self.etag = etag
tags = self.getHeader(b"if-none-match")
if tags:
tags = tags.split()
if (etag in tags) or (b"*" in tags):
self.setResponseCode(
((self.method in (b"HEAD", b"GET")) and NOT_MODIFIED)
or PRECONDITION_FAILED
)
return CACHED
return None
def write(self, data):
if not self.startedWriting:
self.startedWriting = 1
if self.etag is not None:
self.responseHeaders.setRawHeaders(b"ETag", [self.etag])
return super(DummyRequest, self).write(data)
def isSecure(self):
return self._isSecure
|
11576595
|
import math
import torch
import encoding
import torch.nn as nn
import torch.nn.functional as F
from modules import RFBlock, ContextEncodeDropInplaceABN
from modules import InPlaceABN, InPlaceABNWrapper
from modules.misc import InvertedResidual, conv_bn
from collections import OrderedDict
from functools import partial
class MobileNetV2Context(nn.Module):
def __init__(self, n_class=19, in_size=(448, 896), width_mult=1.,
out_sec=256, context=(32, 4), aspp_sec=(12, 24, 36), norm_act=InPlaceABN):
"""
MobileNetV2Plus: MobileNetV2 based Semantic Segmentation
:param n_class: (int) Number of classes
:param in_size: (tuple or int) Size of the input image feed to the network
:param width_mult: (float) Network width multiplier
:param out_sec: (tuple) Number of the output channels of the ASPP Block
:param context: (tuple) K and reduction
"""
super(MobileNetV2Context, self).__init__()
self.n_class = n_class
# setting of inverted residual blocks
self.interverted_residual_setting = [
# t, c, n, s, d
[1, 16, 1, 1, 1], # 1/2
[6, 24, 2, 2, 1], # 1/4
[6, 32, 3, 2, 1], # 1/8
[6, 64, 4, 1, 2], # 1/8
[6, 96, 3, 1, 4], # 1/8
[6, 160, 3, 1, 8], # 1/8
[6, 320, 1, 1, 16], # 1/8
]
# building first layer
assert in_size[0] % 8 == 0
assert in_size[1] % 8 == 0
self.input_size = in_size
input_channel = int(32 * width_mult)
self.mod1 = nn.Sequential(OrderedDict([("conv1", conv_bn(inp=3, oup=input_channel, stride=2))]))
# building inverted residual blocks
mod_id = 0
for t, c, n, s, d in self.interverted_residual_setting:
output_channel = int(c * width_mult)
# Create blocks for module
blocks = []
for block_id in range(n):
if block_id == 0 and s == 2:
blocks.append(("block%d" % (block_id + 1), InvertedResidual(inp=input_channel,
oup=output_channel,
stride=s,
dilate=1,
expand_ratio=t)))
else:
blocks.append(("block%d" % (block_id + 1), InvertedResidual(inp=input_channel,
oup=output_channel,
stride=1,
dilate=d,
expand_ratio=t)))
input_channel = output_channel
self.add_module("mod%d" % (mod_id + 2), nn.Sequential(OrderedDict(blocks)))
mod_id += 1
# building last several layers
org_last_chns = (self.interverted_residual_setting[0][1] +
self.interverted_residual_setting[1][1] +
self.interverted_residual_setting[2][1] +
self.interverted_residual_setting[3][1] +
self.interverted_residual_setting[4][1] +
self.interverted_residual_setting[5][1] +
self.interverted_residual_setting[6][1])
self.last_channel = int(org_last_chns * width_mult) if width_mult > 1.0 else org_last_chns
self.context1 = ContextEncodeDropInplaceABN(channel=self.last_channel, K=context[0],
reduction=context[1], norm_act=norm_act)
self.se_loss1 = nn.Sequential(OrderedDict([("linear", nn.Linear(int(self.last_channel / context[1]) *
context[0], self.n_class))]))
self.rfblock = nn.Sequential(RFBlock(in_chs=self.last_channel, out_chs=out_sec,
scale=1.0, feat_res=(int(in_size[0] / 8), int(in_size[1] / 8)),
up_ratio=2, aspp_sec=aspp_sec, norm_act=norm_act))
if self.n_class != 0:
in_stag2_up_chs = self.interverted_residual_setting[1][1] + self.interverted_residual_setting[0][1]
self.context2 = ContextEncodeDropInplaceABN(channel=(out_sec + in_stag2_up_chs), K=context[0],
reduction=context[1], norm_act=norm_act)
self.se_loss2 = nn.Sequential(OrderedDict([("linear", nn.Linear(int((out_sec + in_stag2_up_chs)
/ context[1]) * context[0],
self.n_class))]))
self.score = nn.Sequential(OrderedDict([("norm.1", norm_act(out_sec + in_stag2_up_chs)),
("conv.1", nn.Conv2d(out_sec + in_stag2_up_chs, out_sec,
kernel_size=3, stride=1, padding=2,
dilation=2, bias=False)),
("norm.2", norm_act(out_sec)),
("conv.2", nn.Conv2d(out_sec, self.n_class,
kernel_size=1, stride=1, padding=0,
bias=True)),
("up1", nn.Upsample(size=in_size, mode='bilinear'))]))
"""
self.score = nn.Sequential(OrderedDict([("norm", norm_act(out_sec + in_stag2_up_chs)),
("conv", nn.Conv2d(out_sec + in_stag2_up_chs, self.n_class,
kernel_size=1, stride=1, padding=0,
bias=True)),
("up1", nn.Upsample(size=in_size, mode='bilinear'))]))
"""
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def forward(self, x):
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# 1. Encoder: feature extraction
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
stg1 = self.mod1(x) # (N, 32, 224, 448) 1/2
stg1 = self.mod2(stg1) # (N, 16, 224, 448) 1/2 -> 1/4 -> 1/8
stg2 = self.mod3(stg1) # (N, 24, 112, 224) 1/4 -> 1/8
stg3 = self.mod4(stg2) # (N, 32, 56, 112) 1/8
stg4 = self.mod5(stg3) # (N, 64, 56, 112) 1/8 dilation=2
stg5 = self.mod6(stg4) # (N, 96, 56, 112) 1/8 dilation=4
stg6 = self.mod7(stg5) # (N, 160, 56, 112) 1/8 dilation=8
stg7 = self.mod8(stg6) # (N, 320, 56, 112) 1/8 dilation=16
stg1_1 = F.max_pool2d(input=stg1, kernel_size=3, stride=2, ceil_mode=True) # 1/4
stg1_2 = F.max_pool2d(input=stg1_1, kernel_size=3, stride=2, ceil_mode=True) # 1/8
stg2_1 = F.max_pool2d(input=stg2, kernel_size=3, stride=2, ceil_mode=True) # 1/8
# (N, 712, 56, 112) 1/8 (16+24+32+64+96+160+320)
enc1, stg8 = self.context1(torch.cat([stg3, stg4, stg5, stg6, stg7, stg1_2, stg2_1], dim=1))
stg8 = self.rfblock(stg8)
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# 2. Decoder: multi-scale feature fusion
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
if self.n_class != 0:
enc2, stg8 = self.context2(torch.cat([stg8, stg2, stg1_1], dim=1))
return self.se_loss1(enc1), self.se_loss2(enc2), self.score(stg8)
else:
return stg8
if __name__ == '__main__':
import os
import torch
from torch.autograd import Variable
os.environ["CUDA_VISIBLE_DEVICES"] = "1,0"
dummy_in = Variable(torch.randn(1, 3, 448, 448).cuda(), requires_grad=True)
model = MobileNetV2Context(n_class=19, in_size=(448, 448), width_mult=1., out_sec=256, context=(32, 4),
norm_act=partial(InPlaceABNWrapper, activation="leaky_relu", slope=0.1)).cuda()
enc1, enc2, dummy_out = model(dummy_in)
print("ok!!!")
|
11576596
|
import sys
import networkzero as nw0
try:
# Python 2.7 compat
input = raw_input
except NameError:
pass
print("Looking for chat hub")
hub = nw0.discover("chat-hub")
if not hub:
print("Unable to find chat hub after 60s")
raise SystemExit
print("Chat hub found at", hub)
def main(name=None):
name = name or input("Name: ")
nw0.send_message_to(hub, ["JOIN", name])
try:
while True:
try:
message = input("Message: ")
except KeyboardInterrupt:
message = None
if not message:
break
nw0.send_message_to(hub, ["SPEAK", (name, message)])
finally:
nw0.send_message_to(hub, ["LEAVE", name])
if __name__ == '__main__':
main(*sys.argv[1:])
|
11576657
|
import pandas as pd
import sys
from collections import defaultdict
from Representation.rnn import DKTnet
import numpy as np
class DKT:
def __init__(self, input_data, repr_type="rnn", dkt_type="rnn"):
self.repr_type = repr_type
self.dkt_type = dkt_type
data = input_data
d_skill = dict(zip(data["problem_id"].map(str), data["skill_name"].map(str)))
data = data[["user_id", "problem_id", "correct"]]
problems = sorted(list(set(data["problem_id"].map(str))))
problem_correct = []
p = {}
up = {}
for j, i in enumerate(problems):
problem_correct.extend([i+"0", i+"1"])
p[i+"0"] = 2*j
p[i+"1"] = 2*j + 1
up[i] = j
self.p = p
self.up = up
g = list(data.groupby(["user_id"]))
responses = []
for i in range(len(g)):
responses.append(len(g[i][1]))
self.max_responses = max(responses)-1
def dkt_data(self, data):
users = len(set(data["user_id"]))
g = list(data.groupby(["user_id"]))
p = self.p
up = self.up
max_responses = self.max_responses
input_shape = (users, max_responses, len(p))
x_train = np.zeros((users, max_responses, len(p)), dtype=np.uint8)
y_train = -np.ones((users, max_responses, 1), dtype=np.int8)
y_train_order = np.zeros((users, max_responses, len(up)), dtype=np.int8)
from datetime import datetime
st = datetime.now()
for i in range(len(g)):
temp_data = g[i][1]
counter = 0
responses = max_responses# min(max_responses, len(g[i][1])-1)
x1 = np.zeros((responses, len(p)))
y1 = np.zeros((responses, len(up)))
yy1 = np.zeros((responses, len(up)))
for j in range(len(temp_data)-1):
# x1[j, p[str(temp_data.iloc[j]["problem_id"])+str(temp_data.iloc[j]["correct"])] ] = 1
x_train[i, j, p[str(temp_data.iloc[j]["problem_id"])+str(temp_data.iloc[j]["correct"])] ] = 1
y_train_order[i, j, up[str(temp_data.iloc[j+1]["problem_id"])] ] = 1
y_train[i, j, 0] = int(temp_data.iloc[j+1]["correct"])
counter += 1
return x_train, y_train, y_train_order
def build_model(self, data, val_data, activation, hidden_layer_size=200):
x_train, y_train, y_train_order = self.dkt_data(data)
x_train_val, y_train_val, y_train_order_val = self.dkt_data(val_data)
        input_dim = len(self.p)
        input_dim_order = len(self.up)
model = DKTnet(input_dim, input_dim_order, hidden_layer_size, activation, x_train, y_train, y_train_order, x_train_val, y_train_val, y_train_order_val)
model = model.build()
return model
def dkt_representation(self, data, activation, hidden_layer_size):
repr_type = self.repr_type
d_skill = dict(zip(data["problem_id"].map(str), data["skill_name"].map(str)))
data = data[["user_id", "problem_id", "correct"]]
problems = sorted(list(set(data["problem_id"].map(str))))
problem_correct = []
p = {}
up = {}
for j, i in enumerate(problems):
problem_correct.extend([i+"0", i+"1"])
p[i+"0"] = 2*j
p[i+"1"] = 2*j + 1
up[i] = j
g = list(data.groupby(["user_id"]))
responses = []
for i in range(len(g)):
responses.append(len(g[i][1]))
max_responses = max(responses)-1
users = len(g)
input_shape = (users, max_responses, len(p))
        x_train = np.zeros((users, max_responses, len(p)), dtype=bool)
y_train = np.zeros((users, max_responses, 1), dtype=np.uint8)
y_train_order = np.zeros((users, max_responses, len(up)), dtype=np.int8)
from datetime import datetime
st = datetime.now()
for i in range(len(g)):
temp_data = g[i][1]
counter = 0
responses = min(max_responses, len(g[i][1])-1)
x1 = np.zeros((responses, len(p)))
y1 = np.zeros((responses, len(up)))
yy1 = np.zeros((responses, len(up)))
for j in range(responses):
x1[j, p[str(temp_data.iloc[j]["problem_id"])+str(temp_data.iloc[j]["correct"])] ] = 1
y_train_order[i, j, up[str(temp_data.iloc[j+1]["problem_id"])] ] = 1
y_train[i, j, 0] = int(temp_data.iloc[j+1]["correct"])
counter += 1
if max_responses >= len(temp_data):
x2 = np.zeros((max_responses-len(temp_data)+1, len(p)))-np.ones((max_responses-len(temp_data)+1, len(p)))
x_train[i] = np.concatenate((x1, x2))
else:
x_train[i] = x1
print ("Shapes of x_train, y_train, order for dkt:", np.shape(x_train), np.shape(y_train), np.shape(y_train_order))
en = datetime.now()
input_dim = len(p)
input_dim_order = len(up)
model = DKTnet(input_dim, input_dim_order, hidden_layer_size, activation, x_train, y_train, y_train_order)
model = model.build()
repr_matrix = 0
for i in model.layers:
for j in ((i.get_weights())):
# print (i, j.shape, input_dim_order, hidden_layer_size)
if repr_type=="dense" and list(np.shape(j)) == [input_dim_order, hidden_layer_size]:
repr_matrix = j
break
if (not repr_type=="dense") and list(np.shape(j)) == [input_dim, hidden_layer_size]:
repr_matrix = j
break
vector, problem_ids = [], []
for i, j in up.items():
problem_ids.append(i)
if repr_type=="correct-incorrect":
vector.append(list(repr_matrix[p[i+"0"]])+list(repr_matrix[p[i+"1"]]))
elif repr_type=="correct":
vector.append(list(repr_matrix[p[i+"1"]]))
elif repr_type=="incorrect":
vector.append(list(repr_matrix[p[i+"0"]]))
elif repr_type=="dense":
vector.append(list(repr_matrix[up[i]]))
else:
print ("Error")
pass
skill_vec = list(map(lambda x:d_skill[x], problem_ids))
X = pd.DataFrame({"problem_id":problem_ids, "vector":vector, "skill_name":skill_vec})
print ('Evaluation Done for dkt')
return X
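# A minimal usage sketch (hedged): the CSV path below is a placeholder; the only
# real requirement, taken from the constructor above, is that the frame contains
# user_id, problem_id, correct and skill_name columns.
#   data = pd.read_csv("assistments.csv")
#   dkt = DKT(data)
#   x_train, y_train, y_train_order = dkt.dkt_data(data)
#   # x_train: one-hot (users, max_responses, 2 * n_problems); y_train holds next-step correctness.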
|
11576674
|
from acpc_python_client.data.base_data_object import BaseDataObject
from acpc_python_client.data.state import State
class MatchState(BaseDataObject):
"""State of the match as perceived by agent."""
def __init__(self, wrapper, game):
super().__init__(wrapper)
self._state = State(self._data_holder.state, game)
def get_state(self):
"""State of the game.
Returns:
MatchState: State of the game.
"""
return self._state
def get_viewing_player(self):
"""Return index of the player that is viewing this state.
Returns:
int: Index of the player that is viewing this state.
"""
return self._data_holder.viewingPlayer
|
11576680
|
from typing import Any, Dict, List, Optional
from asyncpg.exceptions import UniqueViolationError
from fastapi import APIRouter, HTTPException
from orm.exceptions import NoMatch
from starlette.status import (
HTTP_200_OK,
HTTP_201_CREATED,
HTTP_400_BAD_REQUEST,
HTTP_404_NOT_FOUND,
)
from app.models import Ingredient
from app.schemas import IngredientCreate, IngredientDB, IngredientUpdate
router = APIRouter()
@router.get("/", response_model=List[IngredientDB], status_code=HTTP_200_OK)
async def get_ingredients(query: Optional[str] = None):
ingredients = await (
Ingredient.objects.filter(ingredient__contains=query).all()
if query
else Ingredient.objects.all()
)
if not ingredients:
raise HTTPException(HTTP_404_NOT_FOUND, "No ingredients found")
return ingredients
@router.post("/", response_model=IngredientDB, status_code=HTTP_201_CREATED)
async def add_ingredient(payload: IngredientCreate):
try:
return await Ingredient.objects.create(ingredient=payload.ingredient)
except UniqueViolationError as err:
raise HTTPException(HTTP_400_BAD_REQUEST, "Ingredient exists") from err
@router.get("/{ingred_id}/", response_model=IngredientDB, status_code=HTTP_200_OK)
async def get_ingredient(ingred_id: int):
try:
return await Ingredient.objects.get(id=ingred_id)
except NoMatch as err:
raise HTTPException(HTTP_404_NOT_FOUND, "Ingredient not found") from err
@router.put("/{ingred_id}/", response_model=IngredientDB, status_code=HTTP_200_OK)
async def update_ingredient(ingred_id: int, payload: IngredientUpdate):
ingredient = await get_ingredient(ingred_id)
updates: Dict[str, Any] = {k: v for k, v in payload.dict().items() if v is not None}
await ingredient.update(**updates)
return await get_ingredient(ingred_id)
@router.delete("/{ingred_id}/", response_model=IngredientDB, status_code=HTTP_200_OK)
async def remove_ingredient(ingred_id: int):
ingredient = await get_ingredient(ingred_id)
await ingredient.delete()
return ingredient
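# Hedged usage sketch: mounting this router on an application. The prefix and tags
# are illustrative choices, not defined anywhere in this module.
#   from fastapi import FastAPI
#   app = FastAPI()
#   app.include_router(router, prefix="/ingredients", tags=["ingredients"])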
|
11576715
|
from moshmosh.extension import Extension
from moshmosh.ast_compat import ast
# https://github.com/python/cpython/blob/master/Parser/Python.asdl#L102
opname_map = {
"+": 'Add',
'-': 'Sub',
'*': 'Mult',
'/': 'Div',
'%': 'Mod',
'**': 'Pow',
'<<': 'LShift',
'>>': 'RShift',
'|': 'BitOr',
'^': 'BitXor',
'&': 'BitAnd',
'//': 'FloorDiv'
}
class ScopedOperatorVisitor(ast.NodeTransformer):
"""
`a op b -> func(a, b)`, recursively.
The `op => func` pair is specified by users.
"""
def __init__(self, activation, op_name: str, func_name: str):
self.pair = (op_name, func_name)
self.activation = activation
def visit_BinOp(self, n: ast.BinOp):
if n.lineno in self.activation:
name = n.op.__class__.__name__
pair = self.pair
if name == pair[0]:
fn = ast.Name(pair[1], ast.Load())
return ast.Call(
fn,
[self.visit(n.left), self.visit(n.right)],
[],
lineno=n.lineno,
col_offset=n.col_offset
)
return self.generic_visit(n)
class ScopedOperator(Extension):
identifier = "scoped-operator"
def __init__(self, op_name: str, func_name: str):
self.op_name = opname_map.get(op_name, op_name)
self.func_name = func_name
self.visitor = ScopedOperatorVisitor(
self.activation,
self.op_name,
self.func_name
)
def rewrite_ast(self, node):
return self.visitor.visit(node)
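# Hedged usage sketch of the visitor alone (bypassing the Extension plumbing): the
# activation only needs to support `in`, so a plain set of line numbers works here;
# `my_add` is an illustrative name.
#   import ast as _ast
#   tree = _ast.parse("c = a + b")
#   visitor = ScopedOperatorVisitor(activation={1}, op_name="Add", func_name="my_add")
#   tree = _ast.fix_missing_locations(visitor.visit(tree))
#   # line 1's `a + b` is now `my_add(a, b)`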
|
11576716
|
import sys
from psutil import virtual_memory
mem = virtual_memory()
mem.total  # total physical memory in bytes
GB = float(1024**3)  # float so the division below also works under Python 2
if mem.total / GB < 3.8:
print("Warning: building native modules may fail due to not enough physical memory.")
print("You have {:.1f} GB available.\n".format(mem.total/GB))
print("\tContext: The C++ compiler needs lots of RAM. 4GB seem sufficient (Nov 2016)\n")
print("\tIf you are on OSX / Windows, and using Docker, try this")
print("""\t\tdocker-machine stop
\t\tVBoxManage modifyvm default --memory 4096
\t\tdocker-machine start\n""")
print("\tSource: http://stackoverflow.com/questions/32834082/how-to-increase-docker-machine-memory-mac")
print("\nIf you are using OSX, and use Docker >= 1.13, then click")
print("on the Docker item, then -> Preferences -> Advanced and")
print("adjust the memory to be >= 4.0GB")
sys.exit(1)
|
11576725
|
import os
import sys
try:
    nbconvert = sys.argv[1]
    notebook = sys.argv[2]
except IndexError:
    print("usage: python convert_notebook.py /path/to/nbconvert.py /path/to/notebook_file.ipynb")
    sys.exit(-1)
# convert notebook
os.system('%s -f blogger-html %s' % (nbconvert, notebook))
# get out filenames
outfile_root = os.path.splitext(notebook)[0]
body_file = outfile_root + '.html'
header_file = outfile_root + '_header.html'
# read the files
body = open(body_file).read()
header = open(header_file).read()
# replace the highlight tags
body = body.replace('class="highlight"', 'class="highlight-ipynb"')
header = header.replace('highlight', 'highlight-ipynb')
# specify <pre> tags
body = body.replace('<pre', '<pre class="ipynb"')
header = header.replace('html, body', '\n'.join(('pre.ipynb {',
' color: black;',
' background: #f7f7f7;',
' border: 0;',
' box-shadow: none;',
' margin-bottom: 0;',
' padding: 0;'
'}\n',
'html, body')))
# create a special div for notebook
body = '<div class="ipynb">\n\n' + body + "\n\n</div>"
header = header.replace('body {', 'div.ipynb {')
# specialize headers
header = header.replace('html, body,',
'\n'.join((('h1.ipynb h2.ipynb h3.ipynb '
'h4.ipynb h5.ipynb h6.ipynb {'),
'h1.ipynb h2.ipynb ... {',
' margin: 0;',
' padding: 0;',
' border: 0;',
' font-size: 100%;',
' font: inherit;',
' vertical-align: baseline;',
'}\n',
'html, body,')))
for h in '123456':
body = body.replace('<h%s' % h, '<h%s class="ipynb"' % h)
# comment out document-level formatting
header = header.replace('html, body,',
'/*html, body,*/')
header = header.replace('h1, h2, h3, h4, h5, h6,',
'/*h1, h2, h3, h4, h5, h6,*/')
#----------------------------------------------------------------------
# Write the results to file
open(body_file, 'w').write(body)
open(header_file, 'w').write(header)
|
11576768
|
import cv2
import numpy as np
import math
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", required=True, help="path to input video")
ap.add_argument("-d", "--dir", required=True, help="path to output frames")
ap.add_argument("-n", "--name", required=True, help="nameing convention")
ap.add_argument("-i", "--interval", required=True, help="interval of frame slicing in seconds")
args = vars(ap.parse_args())
name=args["name"]
dir=args["dir"]
cap= cv2.VideoCapture(args["video"])
frameRate = cap.get(5)  # frames per second (CAP_PROP_FPS)
x=1
while True:
    frameId = cap.get(1)  # current frame index (CAP_PROP_POS_FRAMES)
    ret, frame = cap.read()
    if not ret:  # stop once the video runs out of frames
        break
    #uncomment if you want to rotate the frames
    #frame=cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
    if (frameId % (math.floor(frameRate)*int(args["interval"])) == 0):
        filename = dir + name + str(int(x)) + ".jpg"
        x += 1
        cv2.imwrite(filename, frame)
cap.release()
print ("Done!")
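# Example invocation (script, file and directory names are placeholders; the output
# directory should end with a path separator because it is concatenated with the name):
#   python slice_frames.py --video input.mp4 --dir frames/ --name clip_ --interval 2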
|
11576790
|
import transformers
from packaging import version
from kobert_transformers import get_tokenizer
tokenizer = get_tokenizer()
def test_transformers_version():
assert version.parse("3.0") <= version.parse(transformers.__version__) < version.parse("5.0")
def test_tokenization():
sample_text = "[CLS] 한국어 모델을 공유합니다. [SEP]"
tokens = tokenizer.tokenize(sample_text)
assert tokens == ["[CLS]", "▁한국", "어", "▁모델", "을", "▁공유", "합니다", ".", "[SEP]"]
encoded_ids = tokenizer.convert_tokens_to_ids(tokens)
assert encoded_ids == [2, 4958, 6855, 2046, 7088, 1050, 7843, 54, 3]
def test_tokenizer_attribute():
assert tokenizer.unk_token_id == 0
assert tokenizer.pad_token_id == 1
assert tokenizer.cls_token_id == 2
assert tokenizer.sep_token_id == 3
assert tokenizer.mask_token_id == 4
assert tokenizer.unk_token == "[UNK]"
assert tokenizer.pad_token == "[PAD]"
assert tokenizer.cls_token == "[CLS]"
assert tokenizer.sep_token == "[SEP]"
assert tokenizer.mask_token == "[MASK]"
assert tokenizer.model_max_length == 512
assert tokenizer.max_len_single_sentence == 510
assert tokenizer.max_len_sentences_pair == 509
assert sorted(tokenizer.all_special_tokens) == [
"[CLS]",
"[MASK]",
"[PAD]",
"[SEP]",
"[UNK]",
]
assert sorted(tokenizer.all_special_ids) == [0, 1, 2, 3, 4]
assert tokenizer.vocab_size == 8002
|
11576802
|
import torch
import typing
#NOTE: code from https://github.com/vchoutas/smplify-x
__all__ = ["InitTranslation"]
class InitTranslation(torch.nn.Module):
def __init__(self,
torso_edge_indices: typing.Sequence[typing.Tuple[int, int]]=[(5, 12), (2, 9)],
focal_length: float=5000.0,
):
super(InitTranslation, self).__init__()
self.torso_edge_indices = torso_edge_indices
self.focal_length = float(focal_length)
def forward(self,
joints3d: torch.Tensor,
joints2d: torch.Tensor,
) -> torch.Tensor:
diff3d = []
diff2d = []
for edge in self.torso_edge_indices:
diff3d.append(joints3d[:, edge[0]] - joints3d[:, edge[1]])
diff2d.append(joints2d[:, edge[0]] - joints2d[:, edge[1]])
diff3d = torch.stack(diff3d, dim=1)
diff2d = torch.stack(diff2d, dim=1)
length_2d = diff2d.pow(2).sum(dim=-1).sqrt()
length_3d = diff3d.pow(2).sum(dim=-1).sqrt()
height2d = length_2d.mean(dim=1)
height3d = length_3d.mean(dim=1)
est_d = self.focal_length * (height3d / height2d)
b = joints3d.shape[0]
return torch.cat([
torch.zeros([b, 2], device=est_d.device), est_d.expand(b, 1)
], dim=1)
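# Hedged usage sketch: batch size and joint count are arbitrary here; the module only
# requires that joint indices 2, 5, 9 and 12 exist in the joint dimension.
#   init_t = InitTranslation(focal_length=5000.0)
#   joints3d = torch.randn(2, 25, 3)   # (batch, joints, xyz)
#   joints2d = torch.randn(2, 25, 2)   # (batch, joints, uv)
#   t = init_t(joints3d, joints2d)     # -> (2, 3): [0, 0, estimated depth]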
|
11576841
|
from overrides import overrides
from collections import Counter
from allennlp.training.metrics.metric import Metric
from dygie.training.f1 import compute_f1
def _invert_arguments(arguments, triggers):
"""
For scoring the argument, we don't need the trigger spans to match exactly. We just need the
trigger label corresponding to the predicted trigger span to be correct.
"""
# Can't use a dict because multiple triggers could share the same argument.
inverted = set()
for k, v in arguments.items():
if k[0] in triggers: # If it's not, the trigger this arg points to is null. TODO(dwadden) check.
trigger_label = triggers[k[0]]
to_append = (k[1], trigger_label, v)
inverted.add(to_append)
return inverted
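# Hedged example of the inversion above (spans and labels are made up):
#   triggers  = {(3, 3): "Attack"}
#   arguments = {((3, 3), (5, 6)): "Victim", ((9, 9), (5, 6)): "Target"}
#   _invert_arguments(arguments, triggers)
#   # -> {((5, 6), "Attack", "Victim")}; the second entry is dropped because its
#   #    trigger key (9, 9) is not among the triggers.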
class EventMetrics(Metric):
"""
Computes precision, recall, and micro-averaged F1 for triggers and arguments.
"""
def __init__(self):
self.reset()
@overrides
def __call__(self, predicted_events_list, metadata_list):
for predicted_events, metadata in zip(predicted_events_list, metadata_list):
# Trigger scoring.
predicted_triggers = predicted_events["trigger_dict"]
gold_triggers = metadata["trigger_dict"]
self._score_triggers(predicted_triggers, gold_triggers)
# Argument scoring.
predicted_arguments = _invert_arguments(predicted_events["argument_dict"], predicted_triggers)
gold_arguments = _invert_arguments(metadata["argument_dict"], gold_triggers)
self._score_arguments(predicted_arguments, gold_arguments)
def _score_triggers(self, predicted_triggers, gold_triggers):
self._gold_triggers += len(gold_triggers)
self._predicted_triggers += len(predicted_triggers)
for token_ix, label in predicted_triggers.items():
# Check whether the offsets match, and whether the labels match.
if token_ix in gold_triggers:
self._matched_trigger_ids += 1
if gold_triggers[token_ix] == label:
self._matched_trigger_classes += 1
def _score_arguments(self, predicted_arguments, gold_arguments):
self._gold_arguments += len(gold_arguments)
self._predicted_arguments += len(predicted_arguments)
for prediction in predicted_arguments:
ix, trigger, arg = prediction
gold_id_matches = {entry for entry in gold_arguments
if entry[0] == ix
and entry[1] == trigger}
if gold_id_matches:
self._matched_argument_ids += 1
gold_class_matches = {entry for entry in gold_id_matches if entry[2] == arg}
if gold_class_matches:
self._matched_argument_classes += 1
@overrides
def get_metric(self, reset=False):
res = {}
# Triggers
res["trig_id_precision"], res["trig_id_recall"], res["trig_id_f1"] = compute_f1(
self._predicted_triggers, self._gold_triggers, self._matched_trigger_ids)
res["trig_class_precision"], res["trig_class_recall"], res["trig_class_f1"] = compute_f1(
self._predicted_triggers, self._gold_triggers, self._matched_trigger_classes)
# Arguments
res["arg_id_precision"], res["arg_id_recall"], res["arg_id_f1"] = compute_f1(
self._predicted_arguments, self._gold_arguments, self._matched_argument_ids)
res["arg_class_precision"], res["arg_class_recall"], res["arg_class_f1"] = compute_f1(
self._predicted_arguments, self._gold_arguments, self._matched_argument_classes)
# Reset counts if at end of epoch.
if reset:
self.reset()
return res
@overrides
def reset(self):
self._gold_triggers = 0
self._predicted_triggers = 0
self._matched_trigger_ids = 0
self._matched_trigger_classes = 0
self._gold_arguments = 0
self._predicted_arguments = 0
self._matched_argument_ids = 0
self._matched_argument_classes = 0
class ArgumentStats(Metric):
"""
Compute the fraction of predicted event arguments that are associated with multiple triggers.
"""
def __init__(self):
self.reset()
@overrides
def __call__(self, predicted_events_list):
for predicted_events in predicted_events_list:
predicted_arguments = _invert_arguments(predicted_events["argument_dict"],
predicted_events["trigger_dict"])
# Count how many times each span appears as an argument.
span_counts = Counter()
for prediction in predicted_arguments:
span_counts[prediction[0]] += 1
# Count how many spans appear more than once.
repeated = {k: v for k, v in span_counts.items() if v > 1}
self._total_arguments += len(span_counts)
self._repeated_arguments += len(repeated)
@overrides
def get_metric(self, reset=False):
# Fraction of event arguments associated with multiple triggers.
args_multiple = (self._repeated_arguments / self._total_arguments
if self._total_arguments
else 0)
if reset:
self.reset()
res = dict(args_multiple=args_multiple)
return res
@overrides
def reset(self):
self._total_arguments = 0
self._repeated_arguments = 0
|
11576861
|
class Dynamic(object):
kws = {'passing': ['arg=None'],
'failing': ['message'],
'logging': ['message', 'level=INFO'],
'returning': None,
'kwargs': ['expected', '**kws']}
def get_keyword_names(self):
return list(self.kws)
def run_keyword(self, name, args, kwargs=None):
kw = globals()[name]
return kw(*args, **(kwargs or {}))
def get_keyword_arguments(self, name):
return self.kws[name]
def passing(arg=None):
assert not arg or '=' not in arg
def failing(message):
raise AssertionError(message)
def logging(message, level='INFO'):
print('*%s* %s' % (level, message))
def returning():
return 'Hello, world!'
def kwargs(expected, **kws):
actual = ', '.join('%s: %s' % (k, kws[k]) for k in sorted(kws))
assert actual == expected
if __name__ == '__main__':
import sys
from robotremoteserver import RobotRemoteServer
RobotRemoteServer(Dynamic(), '127.0.0.1', *sys.argv[1:])
|
11576869
|
import os.path
from flask import Flask, redirect, request, url_for
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.storage import get_default_storage_class
from flask.ext.uploads import delete, init, save, Upload
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
app.config['DEFAULT_FILE_STORAGE'] = 'filesystem'
app.config['UPLOADS_FOLDER'] = os.path.realpath('.') + '/static/'
app.config['FILE_SYSTEM_STORAGE_FILE_VIEW'] = 'static'
db = SQLAlchemy(app)
Storage = get_default_storage_class(app)
init(db, Storage)
db.create_all()
@app.route('/')
def index():
"""List the uploads."""
uploads = Upload.query.all()
return (
'<a href="/upload">New Upload</a><br>' +
u''.join(
u'<a href="%s">%s</a>'
u'<form action="/delete/%s" method="POST">'
u' <button type="submit">Delete</button>'
u'</form><br>'
% (Storage().url(u.name), u.name, u.id)
for u in uploads
)
)
@app.route('/upload', methods=['GET', 'POST'])
def upload():
"""Upload a new file."""
if request.method == 'POST':
        print('saving')
save(request.files['upload'])
return redirect(url_for('index'))
return (
u'<form method="POST" enctype="multipart/form-data">'
u' <input name="upload" type="file">'
u' <button type="submit">Upload</button>'
u'</form>'
)
@app.route('/delete/<int:id>', methods=['POST'])
def remove(id):
"""Delete an uploaded file."""
upload = Upload.query.get_or_404(id)
delete(upload)
return redirect(url_for('index'))
if __name__ == '__main__':
app.run(port=8000)
|
11576892
|
from PyPDF2 import PdfFileReader, PdfFileWriter
import pathlib
from pathlib import Path
from reportlab.pdfgen import canvas
import io
import os
import csv
import numpy as np
def getPDF(lngths, perimeters, CSA, APW, MLW):
"""
creates a PDF file containing information about the limb in correct
locations on the page
then merges the PDF file with the existing template to create the output
file
Returns
-------
The file path to the PDF
"""
path = pathlib.Path(__file__).parent.absolute()
my_file = Path(path, "Measurements Template.pdf")
try:
my_abs_path = my_file.resolve(strict=True)
except:
return(1)
packet = io.BytesIO()
c = canvas.Canvas(packet)
for i in range(1, len(lngths)-1):
stringl = "{}".format(abs(round(lngths[i],1)))
stringp = "{}".format(abs(round(perimeters[i],1)))
c.drawString(360+((i-1)*27), 474-((i-1)*41.5), stringl)
c.drawString(88, 524.5- ((i-1)*74.5), stringp)
stringmaxZ = "{}".format(abs(round(lngths[len(lngths)-1],1)))
c.drawString(514, 419, stringmaxZ)
c.setFont("Courier-Bold", 12)
c.drawString(65, 575, "Perimeter / cm")
c.drawString(400, 520, "Distance / cm")
c.showPage()
c.drawImage("ant.png", 38,225, 256,256)
c.drawImage("lat.png", 300,225,256,256)
c.drawImage("figure.png", -2.5,-50, 334,200)
for i in range(1,len(CSA),2):
sCSA = "{}".format(round(CSA[i],1))
sAPW = "{}".format(round(APW[i],1))
sMLW = "{}".format(round(MLW[i],1))
c.drawString(403, 145-((i-1)*11.5), sCSA)
c.drawString(465, 145-((i-1)*11.5), sAPW)
c.drawString(520, 145-((i-1)*11.5), sMLW)
c.save()
packet.seek(0)
newpdf = PdfFileReader(packet)
template = PdfFileReader(open(os.path.join(path, "Measurements Template.pdf"), "rb"))
t2 = PdfFileReader(open(os.path.join(path, "Output Template.pdf"), "rb"))
output = PdfFileWriter()
page = t2.getPage(0)
page.mergePage(newpdf.getPage(1))
page2 = template.getPage(0)
page2.mergePage(newpdf.getPage(0))
output.addPage(page)
output.addPage(page2)
output_file_path = os.path.join(get_downloads_folder(), "ampscanReport.pdf")
outputStream = open(output_file_path, "wb")
output.write(outputStream)
outputStream.close()
return output_file_path
def get_downloads_folder():
"""Gets the downloads folder in a relatively platform independent way"""
# Get user dir
downloads_path = os.path.join(os.path.expanduser("~"), "Downloads")
if not os.path.exists(downloads_path): # If downloads folder doesn't exist, create it
os.mkdir(downloads_path)
return downloads_path
def generateRegBinsCsv(file, regObject, numBins, scalarMin, scalarMax):
"""
Generates a CSV file output of scalar values put into bins
:param file: The open file to save csv output to. Should be open with newline=''
:param regObject: The reg object with scalar values
:param numBins: Number of bins for scalar values
:param scalarMin: The min scalar value to look for
:param scalarMax: The max scalar value to look for
:return: None
"""
writer = csv.writer(file)
binSize = (scalarMax - scalarMin) / numBins
bins = []
binValues = []
for i in range(numBins):
binValues.append(scalarMin + binSize * i)
bins.append(0)
for point in regObject.values:
bin = int((point - scalarMin) / binSize)
if bin < 0:
bins[0] += 1
elif bin >= len(bins):
bins[-1] += 1
else:
bins[bin] += 1
l = len(regObject.values)
for i in range(numBins):
writer.writerow([scalarMin+binSize*i, bins[i] / l])
def generateRegCsv(file, regObject):
"""
Generates a CSV file output of scalar values put into bins
:param file: The open file to save csv output to. Should be open with newline=''
:param regObject: The reg object with scalar values
:return: None
"""
writer = csv.writer(file)
for i in regObject.values:
writer.writerow([i])
def generate_spec(file, regObject):
"""
This function automatically generates specific output for the specified
registration object.
Parameters
----------
regObject: AmpObject
the registration object.
Returns
-------
none
"""
absmean = np.mean(np.abs(regObject.values))
absstd = np.std(np.abs(regObject.values))
mean = np.mean(regObject.values)
std = np.std(regObject.values)
valuemin = np.min(regObject.values)
valuemax = np.max(regObject.values)
idxleft = np.where(regObject.vert[:,0] >= 0)
idxright = np.where(regObject.vert[:,0] <= 0)
valueleft = regObject.values[idxleft]
valueright = regObject.values[idxright]
meanleft = np.mean(valueleft)
stdleft = np.std(valueleft)
meanright = np.mean(valueright)
stdright = np.std(valueright)
gap = np.where(regObject.values > 0)[0]
integratedgap = np.sum(regObject.values[gap])/regObject.values.shape[0]
gap = (gap.shape[0])/(regObject.values.shape[0])*100
pressure = np.where(regObject.values < -3)[0]
integratedHP = np.sum(regObject.values[pressure])/regObject.values.shape[0]
pressure = (pressure.shape[0])/(regObject.values.shape[0])*100
outdict = {'mean distance':mean,
'standard deviation':std,
'minimum distance':valuemin,
'maximum distance':valuemax,
'mean absolute distance':absmean,
'absolute standard deviation':absstd,
'Left mean distance':meanleft,
'Left standard deviation':stdleft,
'Right mean distance':meanright,
'Right standard deviation':stdright,
'percentage of gap area':gap,
'percentage of high pressure area':pressure,
'percentage of area within range':100-pressure-gap,
'integrated value of gap area':integratedgap,
'integrated value of high pressure area':integratedHP}
with open(os.getcwd()+file, 'w', newline='') as myfile:
writer = csv.DictWriter(myfile, fieldnames=['Name', 'Value'])
for i in outdict:
writer.writerow({'Name':i, 'Value':outdict[i]})
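# Hedged usage sketch: regObject.values is assumed to be a 1-D array of signed
# surface distances, as used throughout this module; the file name is a placeholder.
#   with open("reg_bins.csv", "w", newline="") as f:
#       generateRegBinsCsv(f, regObject, numBins=10, scalarMin=-5.0, scalarMax=5.0)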
|
11576977
|
import os
import hashlib
import csv
import json
from bs4 import BeautifulSoup
from constants import *
import shsJsonApi
import shsHtmlApi
def writeLine(file, *messages):
n = "\n"
file.write(n.join(messages) + n)
def hashString(toBeHashed):
return hashlib.md5(bytes(toBeHashed, "utf-8")).hexdigest()
stringNone = str(None)
def fetchFileContents(fileName):
with open(fileName, 'r') as file:
contents = file.read()
if contents == '' or contents == stringNone:
return None
else:
return contents
def writeFileContents(fileName, contents):
with open(fileName, 'w') as file:
file.write(str(contents))
def requestWorkSearch(songName, authorCredits=""):
fileName = SHS_SEARCH_CACHE_DIR + hashString("work+++" + songName + "+++" + authorCredits)
if os.path.isfile(fileName):
contents = fetchFileContents(fileName)
response = json.loads(contents) if contents is not None else None
else:
response = shsJsonApi.searchWork(songName, authorCredits)
writeValue = json.dumps(response) if response is not None else stringNone
writeFileContents(fileName, writeValue)
if response is None:
return (None, SearchStatus.FAIL)
elif len(response["resultPage"]) < 1:
return (response, SearchStatus.EMPTY)
elif len(response["resultPage"]) > 1:
return (response, SearchStatus.MANY)
else:
return (response, SearchStatus.SUCCESS)
def requestPerformanceSearch(songName, performer=""):
fileName = SHS_SEARCH_CACHE_DIR + hashString("performance+++" + songName + "+++" + performer)
if os.path.isfile(fileName):
contents = fetchFileContents(fileName)
response = json.loads(contents) if contents is not None else None
else:
response = shsJsonApi.searchPerformance(songName, performer)
writeValue = json.dumps(response) if response is not None else stringNone
writeFileContents(fileName, writeValue)
if response is None:
return (None, SearchStatus.FAIL)
elif len(response["resultPage"]) < 1:
return (response, SearchStatus.EMPTY)
elif len(response["resultPage"]) > 1:
originalPerformances = [p for p in response["resultPage"] if p["isOriginal"]]
if len(originalPerformances) > 0:
response["resultPage"] = originalPerformances[:1]
return (response, SearchStatus.SUCCESS)
else:
return (response, SearchStatus.MANY)
else:
return (response, SearchStatus.SUCCESS)
def requestVersions(url):
fileName = SHS_SCRAPE_CACHE_DIR + hashString(url)
if os.path.isfile(fileName):
contents = fetchFileContents(fileName)
response = contents if contents is not None else None
else:
response = shsHtmlApi.makeRequest(url)
writeValue = response if response is not None else stringNone
writeFileContents(fileName, writeValue)
if response is None:
return (response, SearchStatus.FAIL)
else:
return (response, SearchStatus.SUCCESS)
# run a search for a work on secondhandsongs, and pull the versions down too
def searchSongVersions(songName, artistCredits=""):
# default uses the work search
isPerfSearch = False
searchResponse, status = requestWorkSearch(songName, artistCredits)
if status is SearchStatus.FAIL or status is SearchStatus.EMPTY:
# if work search doesn't pan out, use the performance search
isPerfSearch = True
searchResponse, status = requestPerformanceSearch(songName, artistCredits)
if status is SearchStatus.FAIL:
print ("Search failed")
writeLine(FILE_DEBUG_SEARCH, "Failed search:", songName, artistCredits, "")
return None
elif status is SearchStatus.EMPTY:
print("Search returned no results")
writeLine(FILE_DEBUG_SEARCH, "No Results:", songName, artistCredits, "")
return None
if status is SearchStatus.MANY:
print("Search returned more than one result")
writeLine(FILE_DEBUG_SEARCH, "Too many results:", songName, artistCredits)
writeLine(FILE_DEBUG_SEARCH, json.dumps(searchResponse, indent=1), "")
workInfo = searchResponse["resultPage"][0]
songPage, status = requestVersions(workInfo["uri"] + "/versions")
if status is SearchStatus.FAIL:
print("Song versions page request failed")
writeLine(FILE_DEBUG_SEARCH, "No versions page:", workInfo["uri"])
return None
soupObj = BeautifulSoup(songPage)
songData = workInfo.copy()
songData.update(shsHtmlApi.parseMetaData(soupObj))
itemData = shsHtmlApi.parseWorkData(soupObj) if not isPerfSearch else shsHtmlApi.parsePerformanceData(soupObj)
songData.update(itemData)
songData["versions"] = shsHtmlApi.parseWorkVersions(soupObj)
return songData
sourceList = csv.DictReader(FILE_SONG_SOURCE)
songInfoList = []
clearOpenFile(FILE_DEBUG_SEARCH)
clearOpenFile(FILE_SONG_OUTPUT)
for sourceItem in sourceList:
title = sourceItem["title"]
originalArtist = sourceItem["original_artist"]
credits = originalArtist if originalArtist != '' else sourceItem["artist"]
print(title, credits)
searchResults = searchSongVersions(title, credits)
if searchResults is not None:
songInfoList.append(searchResults)
json.dump(songInfoList, FILE_SONG_OUTPUT, indent=1)
|
11577031
|
from bitmovin_api_sdk.encoding.configurations.audio.vorbis.customdata.customdata_api import CustomdataApi
|
11577032
|
import numpy as np
import pytransform3d.rotations as pr
from ._base import DMPBase
from ._forcing_term import ForcingTerm
from ._canonical_system import canonical_system_alpha
from ._dmp import (dmp_open_loop, dmp_imitate, ridge_regression,
DMP_STEP_FUNCTIONS, DEFAULT_DMP_STEP_FUNCTION)
def dmp_step_quaternion_python(
last_t, t,
current_y, current_yd,
goal_y, goal_yd, goal_ydd,
start_y, start_yd, start_ydd,
goal_t, start_t, alpha_y, beta_y,
forcing_term,
coupling_term=None,
coupling_term_precomputed=None,
int_dt=0.001):
"""Integrate quaternion DMP for one step with Euler integration.
Parameters
----------
last_t : float
Time at last step.
t : float
Time at current step.
    current_y : array, shape (4,)
        Current orientation (quaternion). Will be modified.
    current_yd : array, shape (3,)
        Current angular velocity. Will be modified.
    goal_y : array, shape (4,)
        Goal orientation (quaternion).
    goal_yd : array, shape (3,)
        Goal angular velocity.
    goal_ydd : array, shape (3,)
        Goal angular acceleration.
    start_y : array, shape (4,)
        Start orientation (quaternion).
    start_yd : array, shape (3,)
        Start angular velocity.
    start_ydd : array, shape (3,)
        Start angular acceleration.
goal_t : float
Time at the end.
start_t : float
Time at the start.
alpha_y : float
Constant in transformation system.
beta_y : float
Constant in transformation system.
forcing_term : ForcingTerm
Forcing term.
coupling_term : CouplingTerm, optional (default: None)
Coupling term. Must have a function coupling(y, yd) that returns
additional velocity and acceleration.
coupling_term_precomputed : tuple
A precomputed coupling term, i.e., additional velocity and
acceleration.
int_dt : float, optional (default: 0.001)
Time delta used internally for integration.
Raises
------
ValueError
If goal time is before start time.
"""
if start_t >= goal_t:
raise ValueError("Goal must be chronologically after start!")
if t <= start_t:
return np.copy(start_y), np.copy(start_yd), np.copy(start_ydd)
execution_time = goal_t - start_t
current_ydd = np.empty_like(current_yd)
current_t = last_t
while current_t < t:
dt = int_dt
if t - current_t < int_dt:
dt = t - current_t
current_t += dt
if coupling_term is not None:
cd, cdd = coupling_term.coupling(current_y, current_yd)
else:
cd, cdd = np.zeros(3), np.zeros(3)
if coupling_term_precomputed is not None:
cd += coupling_term_precomputed[0]
cdd += coupling_term_precomputed[1]
f = forcing_term(current_t).squeeze()
current_ydd[:] = (
alpha_y * (beta_y * pr.compact_axis_angle_from_quaternion(
pr.concatenate_quaternions(
goal_y, pr.q_conj(current_y)))
- execution_time * current_yd)
+ f + cdd) / execution_time ** 2
current_yd += dt * current_ydd + cd / execution_time
current_y[:] = pr.concatenate_quaternions(
pr.quaternion_from_compact_axis_angle(dt * current_yd), current_y)
CARTESIAN_DMP_STEP_FUNCTIONS = {
"python": dmp_step_quaternion_python
}
try:
from ..dmp_fast import dmp_step_quaternion
CARTESIAN_DMP_STEP_FUNCTIONS["cython"] = dmp_step_quaternion
DEFAULT_CARTESIAN_DMP_STEP_FUNCTION = "cython"
except ImportError:
DEFAULT_CARTESIAN_DMP_STEP_FUNCTION = "python"
class CartesianDMP(DMPBase):
"""Cartesian dynamical movement primitive.
The Cartesian DMP handles orientation and position separately. The
orientation is represented by a quaternion. The quaternion DMP is
implemented according to
<NAME>, <NAME>, <NAME>, <NAME>:
Orientation in Cartesian space dynamic movement primitives (2014),
IEEE International Conference on Robotics and Automation (ICRA),
pp. 2997-3004, doi: 10.1109/ICRA.2014.6907291,
https://ieeexplore.ieee.org/document/6907291
While the dimension of the state space is 7, the dimension of the
velocity, acceleration, and forcing term is 6.
Parameters
----------
execution_time : float
Execution time of the DMP.
dt : float, optional (default: 0.01)
Time difference between DMP steps.
n_weights_per_dim : int, optional (default: 10)
Number of weights of the function approximator per dimension.
int_dt : float, optional (default: 0.001)
Time difference for Euler integration.
Attributes
----------
dt_ : float
Time difference between DMP steps. This value can be changed to adapt
the frequency.
"""
def __init__(
self, execution_time, dt=0.01, n_weights_per_dim=10, int_dt=0.001):
super(CartesianDMP, self).__init__(7, 6)
self.execution_time = execution_time
self.dt_ = dt
self.n_weights_per_dim = n_weights_per_dim
self.int_dt = int_dt
alpha_z = canonical_system_alpha(
0.01, self.execution_time, 0.0, self.int_dt)
self.forcing_term_pos = ForcingTerm(
3, self.n_weights_per_dim, self.execution_time, 0.0, 0.8,
alpha_z)
self.forcing_term_rot = ForcingTerm(
3, self.n_weights_per_dim, self.execution_time, 0.0, 0.8,
alpha_z)
self.alpha_y = 25.0
self.beta_y = self.alpha_y / 4.0
def step(self, last_y, last_yd, coupling_term=None,
step_function=DMP_STEP_FUNCTIONS[DEFAULT_DMP_STEP_FUNCTION],
quaternion_step_function=CARTESIAN_DMP_STEP_FUNCTIONS[
DEFAULT_CARTESIAN_DMP_STEP_FUNCTION]):
"""DMP step.
Parameters
----------
last_y : array, shape (7,)
Last state.
last_yd : array, shape (6,)
Last time derivative of state (velocity).
coupling_term : object, optional (default: None)
Coupling term that will be added to velocity.
step_function : callable, optional (default: RK4)
DMP integration function.
quaternion_step_function : callable, optional (default: cython code if available)
DMP integration function.
Returns
-------
        y : array, shape (7,)
            Next state.
        yd : array, shape (6,)
            Next time derivative of state (velocity).
"""
assert len(last_y) == 7
assert len(last_yd) == 6
self.last_t = self.t
self.t += self.dt_
# TODO tracking error
self.current_y[:], self.current_yd[:] = last_y, last_yd
step_function(
self.last_t, self.t,
self.current_y[:3], self.current_yd[:3],
self.goal_y[:3], self.goal_yd[:3], self.goal_ydd[:3],
self.start_y[:3], self.start_yd[:3], self.start_ydd[:3],
self.execution_time, 0.0,
self.alpha_y, self.beta_y,
self.forcing_term_pos,
coupling_term=coupling_term,
int_dt=self.int_dt)
quaternion_step_function(
self.last_t, self.t,
self.current_y[3:], self.current_yd[3:],
self.goal_y[3:], self.goal_yd[3:], self.goal_ydd[3:],
self.start_y[3:], self.start_yd[3:], self.start_ydd[3:],
self.execution_time, 0.0,
self.alpha_y, self.beta_y,
self.forcing_term_rot,
coupling_term=coupling_term,
int_dt=self.int_dt)
return np.copy(self.current_y), np.copy(self.current_yd)
def open_loop(self, run_t=None, coupling_term=None,
step_function=DEFAULT_DMP_STEP_FUNCTION,
quaternion_step_function=DEFAULT_CARTESIAN_DMP_STEP_FUNCTION):
"""Run DMP open loop.
Parameters
----------
run_t : float, optional (default: execution_time)
Run time of DMP. Can be shorter or longer than execution_time.
coupling_term : object, optional (default: None)
Coupling term that will be added to velocity.
step_function : str, optional (default: 'rk4')
DMP integration function. Possible options: 'rk4', 'euler',
'euler-cython', 'rk4-cython'.
quaternion_step_function : str, optional (default: 'cython' if available)
DMP integration function. Possible options: 'python', 'cython'.
Returns
-------
T : array, shape (n_steps,)
Time for each step.
Y : array, shape (n_steps, 7)
State at each step.
"""
try:
step_function = DMP_STEP_FUNCTIONS[step_function]
except KeyError:
raise ValueError(
f"Step function must be in "
f"{DMP_STEP_FUNCTIONS.keys()}.")
T, Yp = dmp_open_loop(
self.execution_time, 0.0, self.dt_,
self.start_y[:3], self.goal_y[:3],
self.alpha_y, self.beta_y,
self.forcing_term_pos,
coupling_term,
run_t, self.int_dt,
step_function=step_function)
try:
quaternion_step_function = CARTESIAN_DMP_STEP_FUNCTIONS[
quaternion_step_function]
except KeyError:
raise ValueError(
f"Step function must be in "
f"{CARTESIAN_DMP_STEP_FUNCTIONS.keys()}.")
_, Yr = dmp_open_loop_quaternion(
self.execution_time, 0.0, self.dt_,
self.start_y[3:], self.goal_y[3:],
self.alpha_y, self.beta_y,
self.forcing_term_rot,
coupling_term,
run_t, self.int_dt,
quaternion_step_function)
return T, np.hstack((Yp, Yr))
def imitate(self, T, Y, regularization_coefficient=0.0,
allow_final_velocity=False):
"""Imitate demonstration.
Parameters
----------
T : array, shape (n_steps,)
Time for each step.
Y : array, shape (n_steps, 7)
State at each step.
regularization_coefficient : float, optional (default: 0)
Regularization coefficient for regression.
allow_final_velocity : bool, optional (default: False)
Allow a final velocity.
"""
self.forcing_term_pos.weights[:, :] = dmp_imitate(
T, Y[:, :3],
n_weights_per_dim=self.n_weights_per_dim,
regularization_coefficient=regularization_coefficient,
alpha_y=self.alpha_y, beta_y=self.beta_y,
overlap=self.forcing_term_pos.overlap,
alpha_z=self.forcing_term_pos.alpha_z,
allow_final_velocity=allow_final_velocity)[0]
self.forcing_term_rot.weights[:, :] = dmp_quaternion_imitation(
T, Y[:, 3:],
n_weights_per_dim=self.n_weights_per_dim,
regularization_coefficient=regularization_coefficient,
alpha_y=self.alpha_y, beta_y=self.beta_y,
overlap=self.forcing_term_rot.overlap,
alpha_z=self.forcing_term_rot.alpha_z,
allow_final_velocity=allow_final_velocity)[0]
self.configure(start_y=Y[0], goal_y=Y[-1])
def get_weights(self):
"""Get weight vector of DMP.
Returns
-------
weights : array, shape (6 * n_weights_per_dim,)
Current weights of the DMP.
"""
return np.concatenate((self.forcing_term_pos.weights.ravel(),
self.forcing_term_rot.weights.ravel()))
def set_weights(self, weights):
"""Set weight vector of DMP.
Parameters
----------
weights : array, shape (6 * n_weights_per_dim,)
New weights of the DMP.
"""
n_pos_weights = self.forcing_term_pos.weights.size
self.forcing_term_pos.weights[:, :] = weights[:n_pos_weights].reshape(
-1, self.n_weights_per_dim)
self.forcing_term_rot.weights[:, :] = weights[n_pos_weights:].reshape(
-1, self.n_weights_per_dim)
def dmp_quaternion_imitation(
T, Y, n_weights_per_dim, regularization_coefficient, alpha_y, beta_y,
overlap, alpha_z, allow_final_velocity):
"""Compute weights and metaparameters of quaternion DMP.
Parameters
----------
T : array, shape (n_steps,)
Time of each step.
Y : array, shape (n_steps, 4)
Orientation at each step.
n_weights_per_dim : int
Number of weights per dimension.
regularization_coefficient : float, optional (default: 0)
Regularization coefficient for regression.
alpha_y : float
Parameter of the transformation system.
beta_y : float
Parameter of the transformation system.
overlap : float
At which value should radial basis functions of the forcing term
overlap?
alpha_z : float
Parameter of the canonical system.
allow_final_velocity : bool
Whether a final velocity is allowed. Will be set to 0 otherwise.
Returns
-------
weights : array, shape (3, n_weights_per_dim)
Weights of the forcing term.
start_y : array, shape (4,)
Start orientation.
start_yd : array, shape (3,)
Start velocity.
start_ydd : array, shape (3,)
Start acceleration.
goal_y : array, shape (4,)
Final orientation.
goal_yd : array, shape (3,)
Final velocity.
goal_ydd : array, shape (3,)
Final acceleration.
"""
if regularization_coefficient < 0.0:
raise ValueError("Regularization coefficient must be >= 0!")
forcing_term = ForcingTerm(
3, n_weights_per_dim, T[-1], T[0], overlap, alpha_z)
F, start_y, start_yd, start_ydd, goal_y, goal_yd, goal_ydd = \
determine_forces_quaternion(T, Y, alpha_y, beta_y,
allow_final_velocity) # n_steps x n_dims
X = forcing_term.design_matrix(T) # n_weights_per_dim x n_steps
return (ridge_regression(X, F, regularization_coefficient),
start_y, start_yd, start_ydd, goal_y, goal_yd, goal_ydd)
def determine_forces_quaternion(T, Y, alpha_y, beta_y, allow_final_velocity):
"""Determine forces that the forcing term should generate.
Parameters
----------
T : array, shape (n_steps,)
Time of each step.
    Y : array, shape (n_steps, 4)
        Orientation (quaternion) at each step.
alpha_y : float
Parameter of the transformation system.
beta_y : float
Parameter of the transformation system.
allow_final_velocity : bool
Whether a final velocity is allowed. Will be set to 0 otherwise.
Returns
-------
F : array, shape (n_steps, n_dims)
Forces.
start_y : array, shape (4,)
Start orientation.
start_yd : array, shape (3,)
Start velocity.
start_ydd : array, shape (3,)
Start acceleration.
goal_y : array, shape (4,)
Final orientation.
goal_yd : array, shape (3,)
Final velocity.
goal_ydd : array, shape (3,)
Final acceleration.
"""
n_dims = 3
DT = np.gradient(T)
Yd = pr.quaternion_gradient(Y) / DT[:, np.newaxis]
if not allow_final_velocity:
Yd[-1, :] = 0.0
Ydd = np.empty_like(Yd)
for d in range(n_dims):
Ydd[:, d] = np.gradient(Yd[:, d]) / DT
Ydd[-1, :] = 0.0
execution_time = T[-1] - T[0]
goal_y = Y[-1]
F = np.empty((len(T), n_dims))
for t in range(len(T)):
F[t, :] = (
execution_time ** 2 * Ydd[t]
- alpha_y * (beta_y * pr.compact_axis_angle_from_quaternion(
pr.concatenate_quaternions(
goal_y, pr.q_conj(Y[t])))
- Yd[t] * execution_time))
return F, Y[0], Yd[0], Ydd[0], Y[-1], Yd[-1], Ydd[-1]
def dmp_open_loop_quaternion(
goal_t, start_t, dt, start_y, goal_y, alpha_y, beta_y, forcing_term,
coupling_term=None, run_t=None, int_dt=0.001,
quaternion_step_function=CARTESIAN_DMP_STEP_FUNCTIONS[
DEFAULT_CARTESIAN_DMP_STEP_FUNCTION]):
"""Run Cartesian DMP without external feedback.
Parameters
----------
goal_t : float
Time at the end.
start_t : float
Time at the start.
    dt : float
        Time difference between DMP steps.
    start_y : array, shape (4,)
        Start orientation (quaternion).
    goal_y : array, shape (4,)
        Goal orientation (quaternion).
alpha_y : float
Constant in transformation system.
beta_y : float
Constant in transformation system.
forcing_term : ForcingTerm
Forcing term.
coupling_term : CouplingTerm, optional (default: None)
Coupling term. Must have a function coupling(y, yd) that returns
additional velocity and acceleration.
run_t : float, optional (default: goal_t)
Time at which the DMP will be stopped.
int_dt : float, optional (default: 0.001)
Time delta used internally for integration.
quaternion_step_function : callable, optional (default: cython code if available)
DMP integration function.
"""
t = start_t
y = np.copy(start_y)
yd = np.zeros(3)
T = [start_t]
Y = [np.copy(y)]
if run_t is None:
run_t = goal_t
while t < run_t:
last_t = t
t += dt
quaternion_step_function(
last_t, t, y, yd,
goal_y=goal_y, goal_yd=np.zeros_like(yd),
goal_ydd=np.zeros_like(yd),
start_y=start_y, start_yd=np.zeros_like(yd),
start_ydd=np.zeros_like(yd),
goal_t=goal_t, start_t=start_t, alpha_y=alpha_y, beta_y=beta_y,
forcing_term=forcing_term, coupling_term=coupling_term,
int_dt=int_dt)
T.append(t)
Y.append(np.copy(y))
return np.asarray(T), np.asarray(Y)
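# Hedged usage sketch: the 7-D state layout [x, y, z, qw, qx, qy, qz] follows the
# position/quaternion split documented above; the demonstration itself is synthetic.
#   dmp = CartesianDMP(execution_time=1.0, dt=0.01, n_weights_per_dim=10)
#   T = np.linspace(0.0, 1.0, 101)
#   Y = np.zeros((101, 7))
#   Y[:, 0] = np.linspace(0.0, 0.5, 101)   # translate along x
#   Y[:, 3] = 1.0                          # identity quaternion (w, x, y, z)
#   dmp.imitate(T, Y)
#   T_replay, Y_replay = dmp.open_loop()   # (n_steps,), (n_steps, 7)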
|
11577089
|
import os
import pytest
from oanda_bot import Bot
import time
@pytest.fixture(scope="module", autouse=True)
def scope_module():
class MyBot(Bot):
def strategy(self):
rsi = self.rsi(period=10)
ema = self.ema(period=20)
self.buy_entry = (rsi < 30) & (self.df.C < ema)
self.sell_entry = (rsi > 70) & (self.df.C > ema)
self.sell_exit = ema > self.df.C
self.buy_exit = ema < self.df.C
self.units = 10000
self.take_profit = 0
self.stop_loss = 0
yield MyBot(
account_id=os.environ["OANDA_BOT_ID"],
access_token=os.environ["OANDA_BOT_TOKEN"],
environment="practice",
instrument="USD_JPY",
granularity="M1",
trading_time=Bot.SUMMER_TIME,
slack_webhook_url=os.environ["SLACK_WEBHOOK_URL"],
line_notify_token=os.environ["LINE_NOTIFY_TOKEN"],
discord_webhook_url=os.environ["DISCORD_WEBHOOK_URL"],
)
@pytest.fixture(scope="function", autouse=True)
def bot(scope_module):
time.sleep(0.5)
yield scope_module
# @pytest.mark.skip
def test_error(bot):
bot._error("oanda bot error test")
# @pytest.mark.skip
def test_backtest(bot):
bot.backtest()
# @pytest.mark.skip
def test_report(bot):
bot.report()
|
11577149
|
import base64
import os
import time
import logging
from datetime import datetime
import unittest
from tonclient.client import TonClient
from tonclient.errors import TonException
from tonclient.test.helpers import async_core_client, sync_core_client, \
SAMPLES_DIR, send_grams, GIVER_ADDRESS, tonos_punch
from tonclient.types import ParamsOfQueryCollection, OrderBy, SortDirection, \
ParamsOfWaitForCollection, ParamsOfQuery, ParamsOfSubscribeCollection, \
SubscriptionResponseType, ResultOfSubscription, ClientError, Abi, \
ParamsOfEncodeMessage, Signer, DeploySet, CallSet, ParamsOfProcessMessage,\
ParamsOfFindLastShardBlock, ParamsOfAggregateCollection, \
FieldAggregation, AggregationFn, ParamsOfBatchQuery, \
ParamsOfQueryOperation, ParamsOfQueryCounterparties, \
ParamsOfQueryTransactionTree, MessageNode, TransactionNode, \
ParamsOfCreateBlockIterator, ParamsOfIteratorNext, \
ParamsOfResumeBlockIterator
class TestTonNetAsyncCore(unittest.TestCase):
def test_query_collection(self):
q_params = ParamsOfQueryCollection(
collection='messages', result='id', limit=1)
result = async_core_client.net.query_collection(params=q_params)
self.assertGreater(len(result.result), 0)
q_params = ParamsOfQueryCollection(
collection='accounts', result='id balance', limit=5)
result = async_core_client.net.query_collection(params=q_params)
self.assertEqual(5, len(result.result))
q_params = ParamsOfQueryCollection(
collection='messages', result='body created_at', limit=10,
filter={'created_at': {'gt': 1562342740}},
order=[OrderBy(path='created_at', direction=SortDirection.ASC)])
result = async_core_client.net.query_collection(params=q_params)
self.assertGreater(result.result[0]['created_at'], 1562342740)
with self.assertRaises(TonException):
q_params = ParamsOfQueryCollection(
collection='messages', result='id balance')
async_core_client.net.query_collection(params=q_params)
def test_wait_for_collection(self):
now = int(datetime.now().timestamp())
tonos_punch()
q_params = ParamsOfWaitForCollection(
collection='transactions', result='id now',
filter={'now': {'gt': now}})
result = async_core_client.net.wait_for_collection(params=q_params)
self.assertGreater(result.result['now'], now)
with self.assertRaises(TonException):
q_params = ParamsOfWaitForCollection(
collection='transactions', result='', timeout=1)
async_core_client.net.wait_for_collection(params=q_params)
def test_subscribe_collection(self):
results = []
def __callback(response_data, response_type, *args):
if response_type == SubscriptionResponseType.OK:
result = ResultOfSubscription(**response_data)
results.append(result.result)
if response_type == SubscriptionResponseType.ERROR:
raise TonException(error=ClientError(**response_data))
now = int(datetime.now().timestamp())
q_params = ParamsOfSubscribeCollection(
collection='messages', result='created_at',
filter={'created_at': {'gt': now}})
subscription = async_core_client.net.subscribe_collection(
params=q_params, callback=__callback)
while True:
if len(results) > 0 or int(datetime.now().timestamp()) > now + 30:
async_core_client.net.unsubscribe(params=subscription)
break
tonos_punch()
time.sleep(5)
self.assertGreater(len(results), 0)
def test_query(self):
tonos_punch()
q_params = ParamsOfQuery(
query='query($time: Float){messages(filter:{created_at:{ge:$time}}limit:5){id}}',
variables={'time': int(datetime.now().timestamp()) - 60})
result = async_core_client.net.query(params=q_params)
self.assertGreater(len(result.result['data']['messages']), 0)
def test_suspend_resume(self):
# Data for contract deployment
keypair = async_core_client.crypto.generate_random_sign_keys()
abi = Abi.from_path(path=os.path.join(SAMPLES_DIR, 'Hello.abi.json'))
with open(os.path.join(SAMPLES_DIR, 'Hello.tvc'), 'rb') as fp:
tvc = base64.b64encode(fp.read()).decode()
signer = Signer.Keys(keys=keypair)
deploy_set = DeploySet(tvc=tvc)
call_set = CallSet(function_name='constructor')
# Prepare deployment params
encode_params = ParamsOfEncodeMessage(
abi=abi, signer=signer, deploy_set=deploy_set, call_set=call_set)
encode = async_core_client.abi.encode_message(params=encode_params)
# Subscribe for address deploy transaction status
transactions = []
def __callback(response_data, response_type, *args):
if response_type == SubscriptionResponseType.OK:
result = ResultOfSubscription(**response_data)
transactions.append(result.result)
self.assertEqual(encode.address, result.result['account_addr'])
if response_type == SubscriptionResponseType.ERROR:
logging.info(ClientError(**response_data).__str__())
subscribe_params = ParamsOfSubscribeCollection(
collection='transactions', result='id account_addr',
filter={'account_addr': {'eq': encode.address}, 'status_name': {'eq': 'Finalized'}})
subscribe = async_core_client.net.subscribe_collection(
params=subscribe_params, callback=__callback)
# Send grams to new account to create first transaction
send_grams(address=encode.address)
# Give some time for subscription to receive all data
time.sleep(2)
# Suspend subscription
async_core_client.net.suspend()
time.sleep(2) # Wait a bit for suspend
# Deploy to create second transaction.
# Use another client, because of error: Fetch first block failed:
# Can not use network module since it is suspended
second_config = async_core_client.config
second_client = TonClient(config=second_config)
process_params = ParamsOfProcessMessage(
message_encode_params=encode_params, send_events=False)
second_client.processing.process_message(params=process_params)
second_client.destroy_context()
# Check that second transaction is not received when
# subscription suspended
self.assertEqual(1, len(transactions))
# Resume subscription
async_core_client.net.resume()
time.sleep(2) # Wait a bit for resume
# Run contract function to create third transaction
call_set = CallSet(function_name='touch')
encode_params = ParamsOfEncodeMessage(
abi=abi, signer=signer, address=encode.address, call_set=call_set)
process_params = ParamsOfProcessMessage(
message_encode_params=encode_params, send_events=False)
async_core_client.processing.process_message(params=process_params)
# Give some time for subscription to receive all data
time.sleep(2)
# Check that third transaction is now received after resume
self.assertEqual(2, len(transactions))
self.assertNotEqual(transactions[0]['id'], transactions[1]['id'])
# Unsubscribe
async_core_client.net.unsubscribe(params=subscribe)
def test_find_last_shard_block(self):
find_params = ParamsOfFindLastShardBlock(address=GIVER_ADDRESS)
result = async_core_client.net.find_last_shard_block(
params=find_params)
self.assertIsInstance(result.block_id, str)
# def test_endpoints(self):
# config = ClientConfig()
# config.network.endpoints = [
# 'cinet.tonlabs.io',
# 'cinet2.tonlabs.io/'
# ]
# client = TonClient(config=config)
#
# # Fetch/set endpoints
# endpoint_set = client.net.fetch_endpoints()
# client.net.set_endpoints(params=endpoint_set)
def test_get_endpoints(self):
result = async_core_client.net.get_endpoints()
self.assertGreaterEqual(len(result.endpoints), 1)
def test_aggregate_collection(self):
fields = [
FieldAggregation(field='', fn=AggregationFn.COUNT)
]
params = ParamsOfAggregateCollection(
collection='accounts', fields=fields)
result = async_core_client.net.aggregate_collection(params=params)
count = int(result.values[0])
self.assertGreater(count, 0)
params.filter = {'workchain_id': {'eq': -1}}
result = async_core_client.net.aggregate_collection(params=params)
count = int(result.values[0])
self.assertGreaterEqual(count, 0)
def test_batch_query(self):
operations = [
ParamsOfQueryOperation.QueryCollection(
params=ParamsOfQueryCollection(
collection='blocks_signatures', result='id', limit=1)),
ParamsOfQueryOperation.AggregateCollection(
params=ParamsOfAggregateCollection(
collection='accounts',
fields=[
FieldAggregation(field='', fn=AggregationFn.COUNT)
])),
ParamsOfQueryOperation.WaitForCollection(
params=ParamsOfWaitForCollection(
collection='transactions', filter={'now': {'gt': 20}},
result='id now'))
]
params = ParamsOfBatchQuery(operations=operations)
result = async_core_client.net.batch_query(params=params)
self.assertEqual(3, len(result.results))
# def test_query_counterparties(self):
# params = ParamsOfQueryCounterparties(
# account='-1:7777777777777777777777777777777777777777777777777777777777777777',
# first=5, result='counterparty last_message_id cursor')
# result = async_core_client.net.query_counterparties(params=params)
# counterparties_1 = result.result
# self.assertIsInstance(counterparties_1, list)
#
# if len(counterparties_1):
# params.after = counterparties_1[-1]['cursor']
# result = async_core_client.net.query_counterparties(params=params)
# counterparties_2 = result.result
# self.assertNotEqual(counterparties_1, counterparties_2)
def test_query_transaction_tree(self):
query_params = ParamsOfQueryCollection(
collection='messages', filter={'msg_type': {'eq': 1}}, limit=5,
result='id dst dst_transaction {id aborted out_messages {id dst msg_type_name dst_transaction {id aborted out_messages {id dst msg_type_name dst_transaction {id aborted}}}}}')
query_result = async_core_client.net.query_collection(
params=query_params)
abi_registry = [
Abi.from_path(path=os.path.join(SAMPLES_DIR, 'Hello.abi.json'))
]
for message in query_result.result:
tree_params = ParamsOfQueryTransactionTree(
in_msg=message['id'], abi_registry=abi_registry)
tree_result = async_core_client.net.query_transaction_tree(
params=tree_params)
self.assertIsInstance(tree_result.messages, list)
self.assertIsInstance(tree_result.messages[0], MessageNode)
self.assertIsInstance(tree_result.transactions, list)
self.assertIsInstance(tree_result.transactions[0], TransactionNode)
# TODO: Not working on TONOS SE
# def test_block_iterator(self):
# params = ParamsOfCreateBlockIterator(
# shard_filter=['0:8000000000000000'])
# iterator = async_core_client.net.create_block_iterator(params=params)
#
# items = []
# state = None
# params_next = ParamsOfIteratorNext(
# iterator=iterator.handle, return_resume_state=True)
# for i in range(10):
# result = async_core_client.net.iterator_next(params=params_next)
# items += result.items
# state = result.resume_state
# self.assertEqual(10, len(items))
# async_core_client.net.remove_iterator(params=iterator)
#
# params_resume = ParamsOfResumeBlockIterator(resume_state=state)
# resumed = async_core_client.net.resume_block_iterator(params=params_resume)
# params_next.iterator = resumed.handle
# params_next.return_resume_state = False
# result = async_core_client.net.iterator_next(params=params_next)
# items += result.items
# self.assertEqual(11, len(items))
# async_core_client.net.remove_iterator(params=resumed)
class TestTonNetSyncCore(unittest.TestCase):
""" Sync core is not recommended to use, so make just a couple of tests """
def test_query_collection(self):
q_params = ParamsOfQueryCollection(
collection='blocks', result='id', limit=1)
result = sync_core_client.net.query_collection(params=q_params)
self.assertGreater(len(result.result), 0)
q_params = ParamsOfQueryCollection(
collection='accounts', result='id balance', limit=5)
result = sync_core_client.net.query_collection(params=q_params)
self.assertEqual(5, len(result.result))
q_params = ParamsOfQueryCollection(
collection='messages', filter={'created_at': {'gt': 1562342740}},
result='body created_at', limit=10,
order=[OrderBy(path='created_at', direction=SortDirection.ASC)])
result = sync_core_client.net.query_collection(params=q_params)
self.assertGreater(result.result[0]['created_at'], 1562342740)
with self.assertRaises(TonException):
q_params = ParamsOfQueryCollection(
collection='messages', result='')
sync_core_client.net.query_collection(params=q_params)
def test_wait_for_collection(self):
now = int(datetime.now().timestamp())
tonos_punch()
q_params = ParamsOfWaitForCollection(
collection='transactions', filter={'now': {'gt': now}},
result='id now')
result = sync_core_client.net.wait_for_collection(params=q_params)
self.assertGreater(result.result['now'], now)
with self.assertRaises(TonException):
q_params = ParamsOfWaitForCollection(
collection='transactions', filter={'now': {'gt': now}},
result='')
sync_core_client.net.wait_for_collection(params=q_params)
|
11577151
|
import geopandas as gdp
import cartoframes
import pandas as pd
APIKEY = "<KEY>"
cc = cartoframes.CartoContext(base_url='https://lokiintelligent.carto.com/',
api_key=APIKEY)
from shapely.geometry import Point
from shapely.wkb import loads
arenas_df = cc.read('arenas_nba')
shp = r"C:\Data\US_States\US_States.shp"
states_df = gdp.read_file(shp)
data = []
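# Brute-force point-in-polygon join: for each NBA arena, find the state
# polygon that its location intersects and record that state's name.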
for index, ref in arenas_df.iterrows():
check = 0
for index2, orig in states_df.iterrows():
if loads(ref['the_geom'], hex=True).intersects(orig['geometry']):
print(orig['STATE'], ref['team'])
data.append(orig['STATE'])
check = 1
if check == 0:
data.append(None)
arenas_df['state'] = data
cc.write(arenas_df,'arenas_nba', overwrite=True)
|
11577161
|
import setuptools
INSTALL_REQUIREMENTS = ["termcolor", "opencv-python"]
setuptools.setup(
name="cpu",
url="https://github.com/serend1p1ty/core-pytorch-utils.git",
description="Core APIs for deep learning.",
version="1.0.0",
author="serend1p1ty",
author_email="<EMAIL>",
packages=setuptools.find_packages(),
install_requires=INSTALL_REQUIREMENTS,
)
|
11577201
|
import sys
import time
import logging
from watchdog.observers import Observer
from watchdog.events import LoggingEventHandler, FileSystemEventHandler
from blinker import signal
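# Forward every non-directory filesystem event as a blinker 'watch_change'
# signal so that listeners elsewhere can react to source/template changes.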
class WatchEventHandler(FileSystemEventHandler):
def on_any_event(self, event):
if not event.is_directory:
sig = signal('watch_change')
sig.send(event)
class Watcher(object):
def __init__(self, input_dir, templates_dir):
paths = [input_dir, templates_dir]
threads = []
try:
observer = Observer()
event_handler = WatchEventHandler()
for i in paths:
targetPath = str(i)
observer.schedule(event_handler, targetPath, recursive=True)
threads.append(observer)
observer.start()
signal_watch_init = signal('watch_init')
signal_watch_init.send(self)
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
wrangler._reporter.log("Stopping with grace and poise", "green")
observer.stop()
observer.join()
except:
return None
|
11577219
|
import logging
from logging.handlers import SysLogHandler
import os.path
import sys
import traceback
import platform
import os
DEBUG = logging.DEBUG
INFO = logging.INFO
WARNING = logging.WARNING
ERROR = logging.ERROR
CRITICAL = logging.CRITICAL
_pid = os.getpid()
syslog_socket_paths = {
'Darwin': '/var/run/syslog',
'Linux': '/dev/log'
}
def quiet_boto_logging():
"""
Boto's debug logs are full dumps of the XML that was passed between the
client and server. This can be annoying. This is a simple function to
hide those dumps whenever you put your code into debug.
"""
logging.getLogger('boto').setLevel(logging.CRITICAL)
def quiet_paramiko_logging():
""" Paramiko is really noisy when set to INFO or below.
This sets the paramiko logger to only send WARNING or above messages.
"""
logging.getLogger('paramiko').setLevel(logging.WARNING)
def quiet_requests_connpool_logging():
""" Paramiko is really noisy when set to INFO or below.
This sets the paramiko logger to only send WARNING or above messages.
"""
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(
logging.WARNING)
def get_syslog_path():
system_os = platform.system()
try:
return syslog_socket_paths[system_os]
except KeyError:
raise ValueError("Unable to find syslog unix domain socket for os "
"'%s'." % (system_os))
DEFAULT_FORMAT = ('pid:' + str(_pid) + ' %(levelname)s %(name)s '
'%(module)s(%(funcName)s):%(lineno)d - %(message)s')
def setup_root_logger(stdout=INFO, filename=None, file_level=INFO,
file_mode='w', syslog=None,
syslog_facility=SysLogHandler.LOG_LOCAL7,
syslog_socket_path=None, syslog_tag=None,
time_format="%Y/%m/%d %H:%M:%S %Z",
message_format=DEFAULT_FORMAT):
"""Setup basic logging, including stdout, file, and syslog logging.
Sets up the root logger, deleting any previously configured handlers. It
does this to make sure that we don't have multiples of the same handler
being attached to the root logger, resulting in multiple messages of the
same type.
This should be called in the main script/command/daemon itself, and never
inside libraries unless you really know what you're doing.
:type stdout: int
:param stdout: The logging level to send to stdout. Can be any of the
logging.* constants (logging.DEBUG, etc) or logutil constants
(logutil.DEBUG, etc) which are just pointers to the logging constants.
If set to None or False, disable stdout logging.
Default: logging.INFO
:type filename: string
:param filename: The path to a file to log to. Setting to None disables
file logging.
Default: None
:type file_level: int
:param file_level: The logging level to send to the file given in the
'filename' parameter. Can be any of the logging.* or logutil.*
constants like the 'stdout' parameter.
Default: logging.INFO
:type file_mode: string
:param file_mode: The mode to open the file at the 'filename' parameter
with.
Default: 'w'
:type syslog: int
:param syslog: The logging level to send to syslog. Can be any of the
logging.* or logutil.* constants. Set to None to disable syslog
logging.
Default: None
:type syslog_facility: int
:param syslog_facility: The syslog facility to send messages to if syslog
is enabled. Can be any of the SysLogHandler.LOG_* facility constants.
Default: SysLogHandler.LOG_LOCAL7
:type syslog_socket_path: string
:param syslog_socket_path: The path to the unix domain socket used by
syslog if syslog is enabled. If not given, will automatically try to
determine the correct path.
Default: None
:type syslog_tag: string
:param syslog_tag: The tag to be pre-pended to syslog messages. If not
given it will try to determine the name of the command that was
called, and use that.
Default: None
:type time_format: string
:param time_format: A time.strftime formatted string to use for the
timestamp format. This will be prepended to stdout and logfiles, but
        not to syslog (since syslog has its own timestamp system)
:type message_format: string
:param message_format: A logging.Formatter formatted string to use for
the output of log messages. See the following for variables:
http://docs.python.org/2/library/logging.html#logrecord-attributes
"""
base_format = message_format
timed_format = '[%(asctime)s] ' + base_format
timed_formatter = logging.Formatter(timed_format, datefmt=time_format)
logger = logging.getLogger()
# Delete all previous handlers.
for h in logger.handlers:
logger.removeHandler(h)
# Used to track what levels are being used by handlers.
levels = []
if stdout:
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setFormatter(timed_formatter)
stdout_handler.setLevel(stdout)
levels.append(stdout)
logger.addHandler(stdout_handler)
if filename:
file_handler = logging.FileHandler(filename, mode=file_mode)
file_handler.setFormatter(timed_formatter)
file_handler.setLevel(file_level)
levels.append(file_level)
logger.addHandler(file_handler)
if syslog:
if not syslog_socket_path:
syslog_socket_path = get_syslog_path()
syslog_handler = SysLogHandler(syslog_socket_path,
facility=syslog_facility)
if not syslog_tag:
syslog_tag = os.path.basename(sys.argv[0])
syslog_format = syslog_tag + ": " + base_format
syslog_handler.setFormatter(logging.Formatter(syslog_format))
syslog_handler.setLevel(syslog)
levels.append(syslog)
logger.addHandler(syslog_handler)
# Set the logger level to the level of the lowest leveled handler
logger.setLevel(min(levels))
return logger
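# Illustrative usage (not part of the original module): log INFO to stdout and
# DEBUG to a file using the function above.
#
#   logger = setup_root_logger(stdout=INFO, filename='/tmp/myapp.log',
#                              file_level=DEBUG)
#   logger.info('service started')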
def log_exception(message=None, logger=logging):
"""
Used to produce more cleanly readable exceptions in syslog by breaking
the exception up over multiple logging calls.
"""
if message:
logger.error(message)
logger.error('Exception output: ')
exc_msg = traceback.format_exc().split('\n')
for line in exc_msg:
logger.error(' %s' % (line,))
|
11577223
|
import pytest
def test_get_int():
from util import get_int
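    # get_int is expected to decode the boarding-pass string as binary
    # (B/R = 1, F/L = 0), e.g. "BFFFBBFRRR" -> 0b1000110111 = 567.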
assert 567 == get_int("BFFFBBFRRR")
assert 119 == get_int("FFFBBBFRRR")
assert 820 == get_int("BBFFBBFRLL")
|
11577231
|
from __future__ import annotations
import time
import typing
from typing_extensions import TypedDict
if typing.TYPE_CHECKING:
import asyncio
from ctc import binary
from ctc import rpc
from ctc import spec
async def async_get_block(
block: spec.BlockReference,
include_full_transactions: bool = False,
provider: spec.ProviderSpec = None,
) -> spec.Block:
if spec.is_block_number_reference(block):
block_data = await rpc.async_eth_get_block_by_number(
block_number=binary.standardize_block_number(block),
provider=provider,
include_full_transactions=include_full_transactions,
)
from ctc import db
await db.async_intake_block(
block=block_data,
network=rpc.get_provider_network(provider),
)
return block_data
elif spec.is_block_hash(block):
return await rpc.async_eth_get_block_by_hash(
block_hash=block,
provider=provider,
include_full_transactions=include_full_transactions,
)
else:
raise Exception('unknown block specifier: ' + str(block))
async def async_get_blocks(
blocks: typing.Sequence[spec.BlockReference],
include_full_transactions: bool = False,
chunk_size: int = 500,
provider: spec.ProviderSpec = None,
) -> list[spec.Block]:
provider = rpc.add_provider_parameters(provider, {'chunk_size': chunk_size})
if all(spec.is_block_number_reference(block) for block in blocks):
standardized = [
binary.standardize_block_number(block) for block in blocks
]
blocks_data = await rpc.async_batch_eth_get_block_by_number(
block_numbers=standardized,
include_full_transactions=include_full_transactions,
provider=provider,
)
from ctc import db
await db.async_intake_blocks(
blocks=blocks_data,
network=rpc.get_provider_network(provider),
)
return blocks_data
elif all(spec.is_block_hash(block) for block in blocks):
return await rpc.async_batch_eth_get_block_by_hash(
block_hashes=blocks,
include_full_transactions=include_full_transactions,
provider=provider,
)
else:
raise Exception(
'blocks should be all block number references or all block hashes'
)
class LatestBlockCacheEntry(TypedDict, total=False):
request_time: float
response_time: float
block_number: int
_latest_block_cache: typing.MutableMapping[str, LatestBlockCacheEntry] = {}
_latest_block_lock: typing.MutableMapping[str, asyncio.Lock | None] = {
'lock': None
}
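# Cache the latest block number per network so repeated calls within
# `cache_time` seconds reuse the previous RPC response instead of re-querying.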
async def async_get_latest_block_number(
provider: spec.ProviderSpec = None,
use_cache: bool = True,
cache_time: int | float = 1,
) -> int:
if not use_cache:
return await rpc.async_eth_block_number(provider=provider)
else:
# must initialize asyncio.Lock within a running event loop
# see https://stackoverflow.com/a/55918049
lock = _latest_block_lock['lock']
if lock is None:
import asyncio
lock = asyncio.Lock()
_latest_block_lock['lock'] = lock
async with lock:
network = rpc.get_provider_network(provider)
request_time = time.time()
network_cache = _latest_block_cache.get(network)
if (
network_cache is not None
and request_time - network_cache['request_time'] < cache_time
):
return network_cache['block_number']
result = await rpc.async_eth_block_number(provider=provider)
response_time = time.time()
_latest_block_cache[network] = {
'request_time': request_time,
'response_time': response_time,
'block_number': result,
}
return result
|
11577239
|
import copy
import os
import numpy as np
from ppqm import chembridge, constants, linesio, shell
from ppqm.calculator import BaseCalculator
MNDO_CMD = "mndo"
MNDO_ATOMLINE = "{atom:2s} {x} {opt_flag} {y} {opt_flag} {z} {opt_flag}"
class MndoCalculator(BaseCalculator):
def __init__(self, cmd=MNDO_CMD, scr=constants.SCR, method="PM3"):
super().__init__(scr=scr)
self.cmd = cmd
self.method = method
# TODO should be a parameter
self.read_params = False
# Constants
self.atomline = MNDO_ATOMLINE
self.filename = "_tmp_mndo.inp"
def optimize(
self,
molobj,
return_copy=True,
return_properties=False,
read_params=False,
):
header = (
"{self.method} MULLIK PRECISE charge={charge} jprint=5\n" "nextmol=-1\nTITLE {title}"
)
if return_copy:
molobj = copy.deepcopy(molobj)
result_properties = self.calculate(molobj, header, optimize=True)
for i, properties in enumerate(result_properties):
if "coord" not in properties:
pass
# TODO What need to happen here? @anders
properties["coord"]
# TODO Set coord on conformer
return molobj
def optimize_axyzc(self, atoms, coord, charge, title=""):
""""""
        header = (
            "{self.method} MULLIK PRECISE charge={charge} "
            "jprint=5\nnextmol=-1\nTITLE {title}"
        ).format(self=self, charge=charge, title=title)
        properties_ = self.calculate_axyzc(atoms, coord, header, optimize=True)
return properties_
def calculate(self, molobj, header, optimize=False):
input_string = self._get_input_from_molobj(
molobj,
            header,
read_params=self.read_params,
optimize=optimize,
)
filename = os.path.join(self.scr, self.filename)
with open(filename, "w") as f:
f.write(input_string)
calculations = self._run_mndo_file()
for output_lines in calculations:
properties = get_properties(output_lines)
yield properties
return
def calculate_axyzc(self, atoms, coords, header, optimize=False):
input_txt = get_input(atoms, coords, header, optimize=optimize)
filename = os.path.join(self.scr, self.filename)
with open(filename, "w") as f:
f.write(input_txt)
calculations = self._run_mndo_file()
for output_lines in calculations:
properties = get_properties(output_lines)
yield properties
return
def _run_mndo_file(self):
runcmd = f"{self.cmd} < {self.filename}"
lines = shell.stream(runcmd, cwd=self.scr)
molecule_lines = []
for line in lines:
molecule_lines.append(line.strip("\n"))
if "STATISTICS FOR RUNS WITH MANY MOLECULES" in line:
return
if "COMPUTATION TIME" in line:
yield molecule_lines
molecule_lines = []
return
def _get_input_from_molobj(self, molobj, header, read_params=False, optimize=False, title=""):
""""""
# TODO Switch from header to options
atoms, _, charge = chembridge.molobj_to_axyzc(molobj, atom_type="str")
n_confs = molobj.GetNumConformers()
# Create input
txt = []
for i in range(n_confs):
coord = chembridge.molobj_to_coordinates(molobj, idx=i)
            # Resolve the header template ({self.method}, {charge}, {title})
            # for this conformer before building the input text.
            header_prime = header.format(
                self=self, charge=charge, title=f"{title}_Conf_{i}"
            )
            tx = get_input(
                atoms,
                coord,
                header_prime,
read_params=self.read_params,
optimize=optimize,
)
txt.append(tx)
txt = "".join(txt)
return txt
def _set_input_file(self, input_str):
# TODO Set in scr
return
def __repr__(self):
return "MndoCalc(cmd={self.cmd},scr={self.scr}method={self.method})"
def get_input(atoms, coords, header, read_params=False, optimize=False):
"""
# note: internal coordinates are assumed for three-atom systems
"""
n_atoms = len(atoms)
txt = header
if read_params:
txt = txt.split("\n")
txt[0] += " iparok=1"
txt = "\n".join(txt)
txt += "\n"
if n_atoms <= 3:
txt += get_internal_coordinates(atoms, coords, optimize=optimize)
txt += "\n"
return txt
opt_flag = 0
if optimize:
opt_flag = 1
for atom, coord in zip(atoms, coords):
fmt = {
"atom": atom,
"x": coord[0],
"y": coord[1],
"z": coord[2],
"opt_flag": opt_flag,
}
line = MNDO_ATOMLINE.format(**fmt)
txt += line + "\n"
txt += "\n"
    return txt
def get_internal_coordinates(atoms, coord, optimize=False):
"""
:param atoms: List[Str]
:param coord: Array[]
:param optimize: Boolean
:return Str:
"""
n_atoms = len(atoms)
opt_flag = 0
if optimize:
opt_flag = 1
output = ""
if n_atoms == 3:
ba = coord[1] - coord[0]
bc = coord[1] - coord[2]
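        # bond angle (degrees) at the central atom atoms[1], between the
        # 0-1 and 1-2 bond vectors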
cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
angle = np.arccos(cosine_angle) / np.pi * 180.0
norm_ba = np.linalg.norm(ba)
norm_bc = np.linalg.norm(bc)
output += f"{atoms[0]}\n"
output += f"{atoms[1]} {norm_ba} {opt_flag}\n"
output += f"{atoms[2]} {norm_bc} {opt_flag} {angle} {opt_flag}\n"
elif n_atoms == 2:
ba = coord[1] - coord[0]
norm_ba = np.linalg.norm(ba)
output += f"{atoms[0]}\n"
output += f"{atoms[1]} {norm_ba} {opt_flag}\n"
elif n_atoms == 1:
output += f"{atoms[0]}\n"
return output
def run_mndo_file(filename, scr=None, mndo_cmd=MNDO_CMD):
# TODO Needed here? or force people to use class
return
def get_properties(output):
""""""
if isinstance(output, str):
output = output.split("\n")
result = get_properties_optimize(output)
# TODO Read keywords to detect property type
# TODO Detect failures
return result
def get_properties_1scf(lines):
properties = {}
    # Check whether the input coordinates are internal
# INPUT IN INTERNAL COORDINATES
# INPUT IN CARTESIAN COORDINATES
idx = linesio.get_index(lines, "INPUT IN")
line = lines[idx]
is_internal = "INTERNAL" in line
keywords = [
"CORE HAMILTONIAN MATRIX.",
"NUCLEAR ENERGY",
"IONIZATION ENERGY",
"INPUT GEOMETRY",
]
idx_keywords = linesio.get_rev_indexes(lines, keywords)
# SCF energy
idx_core = idx_keywords[0]
if idx_core is None:
e_scf = float("nan")
properties["e_scf"] = e_scf
else:
idx = idx_core
idx -= 9
line = lines[idx]
if "SCF CONVERGENCE HAS BEE" in line:
idx -= 2
line = lines[idx]
# NOTE This should never happen, but better safe than sorry
line = line.split()
if len(line) < 2:
e_scf = float("nan")
else:
value = line[1]
e_scf = float(value)
properties["e_scf"] = e_scf # ev
# Nuclear energy
if idx_keywords[1] is None:
e_nuc = float("nan")
properties["e_nuc"] = e_nuc
else:
idx = idx_keywords[1]
line = lines[idx]
line = line.split()
value = line[2]
e_nuc = float(value)
properties["e_nuc"] = e_nuc # ev
# eisol
eisol = dict()
idxs = linesio.get_indexes_with_stop(lines, "EISOL", "IDENTIFICATION")
for idx in idxs:
line = lines[idx]
line = line.split()
atom = int(line[0])
value = line[2]
eisol[atom] = float(value) # ev
# # Enthalpy of formation
idx_hof = linesio.get_index(lines, "SCF HEAT OF FORMATION")
line = lines[idx_hof]
line = line.split("FORMATION")
line = line[1]
line = line.split()
value = line[0]
value = float(value)
properties["h"] = value # kcal/mol
# ionization
# idx = get_rev_index(lines, "IONIZATION ENERGY")
idx = idx_keywords[2]
if idx is None:
e_ion = float("nan")
properties["e_ion"] = e_ion
else:
line = lines[idx]
value = line.split()[-2]
e_ion = float(value) # ev
properties["e_ion"] = e_ion
# # Dipole
# idx = get_rev_index(lines, "PRINCIPAL AXIS")
# line = lines[idx]
# line = line.split()
# value = line[-1]
# value = float(value) # Debye
# properties["mu"] = value
# input coords
atoms = []
coord = []
if is_internal:
idx_atm = 1
idx_x = 2
idx_y = 3
idx_z = 4
idx_coord = linesio.get_index(lines, "INITIAL CARTESIAN COORDINATES")
idx_coord += 5
j = idx_coord
# continue until we hit a blank line
while not lines[j].isspace() and lines[j].strip():
line = lines[j].split()
atom = line[idx_atm]
atom = int(atom)
x = float(line[idx_x])
y = float(line[idx_y])
z = float(line[idx_z])
atoms.append(atom)
coord.append([x, y, z])
j += 1
else:
idx_atm = 1
idx_x = 2
idx_y = 3
idx_z = 4
idx = idx_keywords[3]
idx += 6
j = idx
# continue until we hit a blank line
while not lines[j].isspace() and lines[j].strip():
line = lines[j].split()
atoms.append(int(line[idx_atm]))
x = line[idx_x]
y = line[idx_y]
z = line[idx_z]
xyz = [x, y, z]
xyz = [float(c) for c in xyz]
coord.append(xyz)
j += 1
# calculate energy
e_iso = [eisol[a] for a in atoms]
e_iso = np.sum(e_iso)
energy = e_nuc + e_scf - e_iso
properties["energy"] = energy
return properties
def get_properties_optimize(lines):
"""
TODO Read how many steps
"""
properties = {}
# # Enthalpy of formation
idx_hof = linesio.get_index(lines, "SCF HEAT OF FORMATION")
line = lines[idx_hof]
line = line.split("FORMATION")
line = line[1]
line = line.split()
value = line[0]
value = float(value)
properties["h"] = value # kcal/mol
# optimized coordinates
i = linesio.get_rev_index(lines, "CARTESIAN COORDINATES")
idx_atm = 1
idx_x = 2
idx_y = 3
idx_z = 4
n_skip = 4
if i < idx_hof:
i = linesio.get_rev_index(lines, "X-COORDINATE")
idx_atm = 1
idx_x = 2
idx_y = 4
idx_z = 6
n_skip = 3
j = i + n_skip
symbols = []
coord = []
# continue until we hit a blank line
while not lines[j].isspace() and lines[j].strip():
line = lines[j].split()
symbols.append(int(line[idx_atm]))
x = line[idx_x]
y = line[idx_y]
z = line[idx_z]
xyz = [x, y, z]
xyz = [float(c) for c in xyz]
coord.append(xyz)
j += 1
coord = np.array(coord)
properties["coord"] = coord
properties["atoms"] = symbols
    return properties
def get_properties_gradient():
""""""
return
|
11577250
|
from .command import Command
class Placeholder(Command):
"""
    A command that holds an overwritable reference to another command.
It is used for most UI navigation, in order to rebind navigation keys to
other values.
"""
def __init__(self, parent, hook):
Command.__init__(self, parent, "placeholder")
# hook is the command that will get overwritten.
self.hook = hook
def generate(self):
return str(self.hook)
|
11577286
|
from __future__ import print_function
import sys
import os
import unittest
import logging
from itertools import product
os.environ['ENABLE_CNNL_TRYCATCH'] = 'OFF' # pylint: disable=C0413
import torch
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(cur_dir + "/../../")
from common_utils import testinfo, TestCase # pylint: disable=C0413, C0411
logging.basicConfig(level=logging.DEBUG)
torch.manual_seed(2)
class TestLinspaceOp(TestCase):
# @unittest.skip("not test")
@testinfo()
def test_linspace(self):
start_list = [1, 3, 3.5, 3.5, 4.1, 8.9, 11]
end_list = [2, 5, 2.5, 10.5, 11.3, 99.1, 121]
steps_list = [0, 3, 1, 11, 6, 100, 121]
type_list = [(torch.float, torch.half), (torch.float, torch.float)]
err = 1e-7
for t1, t2 in type_list: # pylint: disable=C0200
for start, end, steps in product(start_list, end_list, steps_list): # pylint: disable=C0200
                # default supports fp32 and fp16
if t2 == torch.half:
err = 1e-1
x = torch.linspace(start, end, steps=steps, device="cpu", dtype=t1)
x_mlu = torch.linspace(start, end, steps=steps, device="mlu", dtype=t2)
self.assertTensorsEqual(x, x_mlu.cpu(), err, use_MSE=True)
    # @unittest.skip("not test")
@testinfo()
def test_linspace_out(self):
start_list = [1, 3, 3.5, 3.5, 4.1, 8.9, 11]
end_list = [2, 5, 2.5, 10.5, 11.3, 99.1, 121]
steps_list = [0, 3, 1, 1, 11, 6, 121]
type_l = [torch.float, torch.half]
err = 1e-7
for t in type_l:
for start, end, steps in product(start_list, end_list, steps_list): # pylint: disable=C0200
in1 = torch.randn(1).to(t)
in1_mlu = in1.to('mlu')
if t == torch.half:
in1 = torch.randn(1)
err = 1e-1
x = torch.linspace(start, end, steps=steps,
device="cpu", out=in1)
x_mlu = torch.linspace(start, end, steps=steps,
out=in1_mlu)
self.assertTensorsEqual(x, x_mlu.cpu(), err, use_MSE=True)
# @unittest.skip("not test")
@testinfo()
def test_linspace_exception(self):
ref_msg = "number of steps must be non-negative"
with self.assertRaisesRegex(RuntimeError, ref_msg):
torch.linspace(1, 10, -1, device='mlu')
if __name__ == '__main__':
unittest.main()
|
11577336
|
import os
import numpy as np
from Bio.Alphabet import IUPAC, Gapped
from BConverters.Converters import convert_alignment, convert_tree
from RouToolPa.Parsers.R import get_indices_from_names
def generate_partition_finder_control_file(alignment_file_name,
genes_file,
output_dir=None,
genes_names_list=None,
branchlengths="linked",
models="raxml",
model_selection="AIC",
search="greedy"):
#supported models: all, raxml, mrbayes, beast
#supported model_selection: AIC, AICc, BIC
#supported search: all, greedy, rcluster, hcluster, user
print("Generating config file for PartionFinder")
output_filename = "partition_finder.cfg"
if output_dir:
output_filename = output_dir + "/partition_finder.cfg"
fd_genes = open(genes_file, "r")
fd_genes.readline()
coordinates_list = []
for line in fd_genes:
line_list = [int(x) for x in line.strip().split("\t")]
coordinates_list.append(line_list)
if not line_list[-1]:
del(line_list[-1])
fd_genes.close()
fd = open(output_filename, "w")
fd.write("""
## ALIGNMENT FILE ##
alignment = %s;
## BRANCHLENGTHS: linked | unlinked ##
branchlengths = %s;
## MODELS OF EVOLUTION for PartitionFinder: all | raxml | mrbayes | beast | <list> ##
## for PartitionFinderProtein: all_protein | <list> ##
models = %s;
# MODEL SELECTION: AIC | AICc | BIC #
model_selection = %s;
## DATA BLOCKS: see manual for how to define ##
[data_blocks]\n
""" % (alignment_file_name, branchlengths, models, model_selection))
for i in range(0, len(coordinates_list)):
name = "part%i" % (i + 1)
if genes_names_list:
name = genes_names_list[i]
fd.write("%s_pos1 = %i-%i\\3;\n" % (name, coordinates_list[i][1], coordinates_list[i][2]))
fd.write("%s_pos2 = %i-%i\\3;\n" % (name, coordinates_list[i][1] + 1, coordinates_list[i][2]))
fd.write("%s_pos3 = %i-%i\\3;\n" % (name, coordinates_list[i][1] + 2, coordinates_list[i][2]))
fd.write("""
## SCHEMES, search: all | greedy | rcluster | hcluster | user ##
[schemes]
search = %s;
#user schemes go here if search=user. See manual for how to define.#
""" % search)
fd.close()
def find_best_partion_model(work_dir, raxml_threads=6, partition_finder_path="PartitionFinder.py"):
os.system("%s --raxml -p 1 --cmdline-extras '-T %i' %s" % (partition_finder_path, raxml_threads, work_dir))
def generate_raxml_partitions_file(partion_finder_output_file, output_filename):
fd = open(partion_finder_output_file, "r")
while fd.readline().strip() != "RaxML-style partition definitions":
pass
fd_raxml = open(output_filename, "w")
for line in fd:
fd_raxml.write(line)
fd.close()
fd_raxml.close()
def construct_raxml_tree(alignment_file,
partition_file,
model="GTRGAMMAI",
bootstrap=10000,
threads=6,
tree_prefix="raxml_tree",
path_to_raxml=None,
outgroup=None):
raxml_path = "raxml"
if path_to_raxml:
raxml_path = path_to_raxml
ougroup_string = ""
if outgroup:
ougroup_string = "-o " + outgroup
os.system("%s -f a -m %s -p 12345 -x 12345 -T %i -s %s -# %i -q %s -n %s %s" %
(raxml_path, model, threads, alignment_file, bootstrap, partition_file, tree_prefix, ougroup_string))
def add_outgroup(sequences_file, outgroup_file, output_file):
os.system("cat %s %s > %s" % (sequences_file, outgroup_file, output_file))
def get_distances(taxa_dict, row_names, table):
distances = np.array([])
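    # For each taxon, collect the pairwise distances between its records
    # (upper triangle of the sub-matrix, diagonal excluded).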
for taxa in taxa_dict:
record_list = []
for record_id in taxa_dict[taxa]:
if record_id in row_names:
record_list.append(record_id)
if len(record_list) < 2:
continue
#print(record_list)
indices = get_indices_from_names(row_names, record_list)
temp_table = table[np.array(indices), :][:, np.array(indices)]
temp_table = temp_table[np.triu_indices(np.shape(temp_table)[0], 1)]
distances = np.hstack((distances, temp_table))
return distances
def merge_alignment_and_tree_nexus(alignment_file,
alignment_format,
tree_file,
tree_format,
output_file,
alphabet=Gapped(IUPAC.ambiguous_dna)):
output_filetype = "nexus"
if alignment_format == "nexus":
os.system("/bin/cp -rf alignment %s" % output_file)
else:
convert_alignment(alignment_file, alignment_format, output_file, output_filetype, alphabet=alphabet)
tmp_file = "tmp_tree.nex"
convert_tree(tree_file, tree_format, tmp_file, "nexus")
with open(tmp_file, "r") as tmp_fd:
tmp_fd.readline()
with open(output_file, "a") as out_fd:
for line in tmp_fd:
out_fd.write(line)
os.system("rm -rf %s" % tmp_file)
#regular expression to convert tree to cladogramm :\d+\.[\dE-]+([\,\)]) \1
|
11577361
|
import editor
from markdown2 import markdown
import ui
TEMPLATE = '''
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width">
<title>Preview</title>
<style type="text/css">
body {
font-family: helvetica;
font-size: 15px;
margin: 10px;
}
</style>
</head>
<body>{{CONTENT}}</body>
</html>
'''
welcome_md = '''
# Welcome to Pythonista
Thank you for downloading Pythonista! You now have everything you need to build and run Python scripts directly on your iPhone or iPad.
To give you an idea of what you can do with the app, various sample scripts are included in the *Examples* folder. Feel free to use them as starting points for your own experiments. To share your creations, get help with coding problems, or just to meet fellow Pythonistas, please visit our [community forum](http://forum.omz-software.com).
# Getting Started
If you're new to Pythonista, here are some tips to help you get up and running:
* To create a new script, first tap `≡` to reveal the library, then `+` (at the bottom). You can also use left and right swipe gestures to switch between the file browser, editor, and console panels.
* The settings ("gear" button in the file browser) contain useful options to customize the editor font, color theme, indentation type (tabs/spaces), and much more.
* Swipe left to show the **console** panel. This is where text output appears, and you can use the prompt at the bottom to evaluate individual lines of Python code directly.
* You'll also find the included **documentation** in the console panel; simply tap the `(?)` button to open it in a separate tab. Reference documentation is also available while you're editing code -- simply select a word (e.g. a function name), and choose *Help…* from the menu.
* For easier navigation in long scripts, tap the file name at the top to show a list of classes and functions. This is also where you can rename the current file.
* If you enjoy coding in Pythonista, please consider leaving a rating or [review in the App Store][review]. Thank you!
💚
# Tips
* Tap and hold the run (▷) button for some additional options, e.g. to pass arguments (`sys.argv`) to your scripts, or to run the integrated PEP8 style checker.
* Tap the *Edit* button in the "wrench" menu to add your own script shortcuts there. You can use this to launch your favorite scripts more quickly, or to extend the editor's functionality with the `editor` module.
* A lot of keys on Pythonista's extra keyboard row have multiple mappings. For example, you can tap and hold the tab key to get an unindent option.
* Tap with two fingers in the editor to select an entire line of code.
* You can run Pythonista scripts directly within other apps that support the standard iOS share sheet. To get started, open the share sheet in a supported app (e.g. Safari, Notes, Maps...) and select "More..." to add the Pythonista action extension. You can use the `appex` module to access data that was passed to the share sheet (e.g. the current URL in Safari, location data in Maps, etc.).
* If you use Pythonista with an external (Bluetooth) keyboard, you can show a list of available shortcuts by pressing and holding the `Cmd` key.
* Swipe left on a file in the script library to open it in a new tab or move it to the trash.
# What's New in 3.2
For full release notes, and to see what was added in previous releases, please refer to the "What's New in Pythonista" page in the documentation. You can also open the release notes from an empty tab. The following are just the highlights:
* You can now sync your scripts via iCloud Drive, and open Python files from other apps.
* Pythonista now uses Python 3.6. Among other things, this enables the use of f-strings for easier string formatting.
* Files in the script library can be moved via drag'n'drop (requires iOS 11).
* The "Convert Tabs" feature is now called "Reformat Code", and supports applying coding style guidelines, in addition to converting indentation.
* Improved search in the script library and documentation.
* Improved support for the iPhone X.
* The tab bar UI is now the same on iPhone and iPad.
* The Python interpreter is now built with IPv6 support (this should fix connectivity issues users were experiencing with some cellular providers).
# Feedback
I hope you enjoy coding in Pythonista. If you have any feedback, please send an email to <<EMAIL>>, or visit the [community forum][forum] to share code and get help with your programming questions. You can also find me on Twitter: [@olemoritz][twitter].
---
[forum]: https://forum.omz-software.com
[twitter]: http://twitter.com/olemoritz
[review]: itms-apps://itunes.apple.com/app/id1085978097?action=write-review
'''
def main():
text = welcome_md
converted = markdown(text)
html = TEMPLATE.replace('{{CONTENT}}', converted)
webview = ui.WebView(name='Markdown Preview')
webview.load_html(html)
webview.present()
if __name__ == '__main__':
main()
|
11577364
|
import io
from drawille import Canvas
from PIL import Image
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, RendererBase)
from matplotlib.backends.backend_agg import RendererAgg, FigureCanvasAgg
class RendererDrawille(RendererAgg):
def __init__(self, width, height, dpi):
super(RendererDrawille, self).__init__(width, height, dpi)
self.texts = []
def clear(self):
super(RendererDrawille, self).clear()
self.texts = []
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
# super(RendererDrawille, self).draw_text(gc, x, y, s, prop, angle, ismath=ismath, mtext=mtext)
self.texts.append((x,y,s))
def show():
try:
for manager in Gcf.get_all_fig_managers():
canvas = manager.canvas
canvas.draw()
string = canvas.to_txt()
print(string)
# display(HTML("<div style=\"font-size:2px; line-height:90%;\"><tt>" + string + "</tt></div>"))
finally:
#if close and Gcf.get_all_fig_managers():
# matplotlib.pyplot.close('all')
pass
class FigureCanvasDrawille(FigureCanvasAgg):
def get_renderer(self, cleared=False):
l, b, w, h = self.figure.bbox.bounds
key = w, h, self.figure.dpi
reuse_renderer = (hasattr(self, "renderer")
and getattr(self, "_lastKey", None) == key)
if not reuse_renderer:
self.renderer = RendererDrawille(w, h, self.figure.dpi)
self._lastKey = key
elif cleared:
self.renderer.clear()
return self.renderer
def to_txt(self, sep="\n", tw=240, invert=False, threshold=200):
# import pdb; pdb.set_trace()
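        # Render the figure to PNG, resize it to `tw` pixels wide, convert to
        # grayscale, and set a drawille dot for every pixel past the threshold;
        # finally overlay any text captured by the renderer.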
buf = io.BytesIO()
self.print_png(buf)
buf.seek(0)
i = Image.open(buf)
w, h = i.size
ratio = tw / float(w)
w = tw
h = int(h * ratio)
i = i.resize((w, h), Image.ANTIALIAS)
i = i.convert(mode="L")
can = Canvas()
for y in range(h):
for x in range(w):
pix = i.getpixel((x,y))
if invert:
if pix > threshold:
can.set(x, y)
else:
if pix < threshold:
can.set(x, y)
for x,y,s in self.renderer.texts:
can.set_text(int(x*ratio), int(y*ratio), s)
return can.frame()
FigureCanvas = FigureCanvasDrawille
FigureManager = FigureManagerBase
|
11577421
|
from ecdsa.curves import SECP256k1
import threshold_library as threshold
from unittest import TestCase
class ThresholdTestCase(TestCase):
def setUp(self):
self.message = 55555
self.t = 10
self.n = 25
def test_encryption_decryption(self):
""" Tests that a message can be encypted and decrypted.
"""
(s_key, p_key, s, F) = threshold.generate_threshold_parameters(self.t, self.n)
r_key = threshold.reconstruct_key(s, self.t)
cipher = threshold.encrypt(p_key, self.message)
_message = threshold.decrypt(r_key, cipher)
self.assertEqual(self.message, _message)
def test_secret_shares(self):
""" Tests that each generated when splitting the secret is valid.
"""
(s_key, p_key, s, F) = threshold.generate_threshold_parameters(self.t, self.n)
for i in range(self.n):
self.assertTrue(threshold.verify_secret_share(s[i], i, F))
def test_key_reconstruction(self):
""" Tests that a secret key can be reconstructed.
"""
(s_key, p_key, s, F) = threshold.generate_threshold_parameters(self.t, self.n)
r_key = threshold.reconstruct_key(s, self.t)
self.assertEqual(r_key, s_key)
def test_file_write_read(self):
""" Tests that parameters being generated to file can be
imported again.
"""
a = threshold.save_params_file(self.t, self.n)
b = threshold.load_params_file()
self.assertEqual(a[0], b[0])
def test_custom_secret_shares(self):
(s_k, p_k, s, F) = threshold.load_params_file()
for i in range(len(s)):
self.assertTrue(threshold.verify_secret_share(s[i], i, F))
|
11577425
|
from unittest import TestCase
import six
from sdb import telnet
class TestTelnet(TestCase):
def setUp(self):
self.stdin = six.StringIO()
self.stdout = six.StringIO()
class t(telnet):
sent = six.StringIO()
def _send(self, line):
self.sent.write(line.decode('utf-8'))
self.t = t(6899, self.stdin, self.stdout)
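    # Simulate a single keypress: append the character to the fake stdin,
    # rewind to it, and let the telnet handler consume it via send().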
def char(self, char):
pos = self.stdin.tell()
self.stdin.write(char)
self.stdin.seek(pos)
self.t.send()
def test_simple_command(self):
for x in 'list':
self.char(x)
self.char('\n')
assert self.t.sent.getvalue() == 'list\n'
self.t.recv('<list output>'.encode('utf-8'))
assert self.stdout.getvalue() == 'list\n<list output>'
assert self.t.history == ['list']
def test_history(self):
for word in ('list', 'next', 'continue'):
for x in word:
self.char(x)
self.char('\n')
assert self.t.history == ['list', 'next', 'continue']
assert self.t.line_buff == ''
self.char('\x1b[A')
assert self.t.line_buff == 'continue'
self.char('\x1b[A')
assert self.t.line_buff == 'next'
self.char('\x1b[A')
assert self.t.line_buff == 'list'
self.char('\x1b[A')
self.char('\x1b[A')
self.char('\x1b[A')
self.char('\x1b[A')
assert self.t.line_buff == ''
self.char('\x1b[B')
assert self.t.line_buff == 'list'
self.char('\x1b[B')
assert self.t.line_buff == 'next'
self.char('\x1b[B')
assert self.t.line_buff == 'continue'
self.char('\x1b[B')
assert self.t.line_buff == ''
self.char('\x1b[B')
self.char('\x1b[B')
self.char('\x1b[B')
self.char('\x1b[B')
self.char('\x1b[A')
assert self.t.line_buff == 'continue'
def test_backspace(self):
for x in 'list':
self.char(x)
self.char('\x7f')
self.char('\x7f')
self.char('\x7f')
self.char('\n')
assert self.t.sent.getvalue() == 'l\n'
self.t.recv('<list output>'.encode('utf-8'))
def test_single_tab_complete(self):
self.char('l')
self.char('i')
self.char('\t')
assert self.t.sent.getvalue() == 'li<!TAB!>\n'
assert self.t.completing == 'li'
self.t.recv('list'.encode('utf-8'))
assert self.t.line_buff == 'list'
assert self.stdout.getvalue() == 'li\x1b[2K\rlist'
def test_multi_tab_complete(self):
self.char('l')
self.char('i')
self.char('\t')
assert self.t.sent.getvalue() == 'li<!TAB!>\n'
assert self.t.completing == 'li'
self.t.recv('list lit live'.encode('utf-8'))
assert self.t.line_buff == 'list'
|
11577444
|
from django.contrib import admin
from django_use_email_as_username.admin import BaseUserAdmin
from .models import User
admin.site.register(User, BaseUserAdmin)
|
11577477
|
import os
import unittest
import torch
from meddlr.config.config import get_cfg
from meddlr.engine.defaults import init_reproducible_mode
from meddlr.utils import env
class TestDefaultSetup(unittest.TestCase):
"""Test that default setup and initialization works as expected."""
_env = None
@classmethod
def setUpClass(cls):
cls._env = dict(os.environ)
@classmethod
def tearDownClass(cls):
cls._reset_env_vars()
@classmethod
def _reset_env_vars(cls):
os.environ.clear()
os.environ.update(cls._env)
def _reset_var(self, env_var, value, force=False):
if force:
os.environ[env_var] = value
return
if value == "":
os.environ.pop(env_var, None)
else:
os.environ[env_var] = value
def test_init_reproducible_mode(self):
"""Test that we properly initialize reproducibility."""
base_cfg = get_cfg()
base_cfg.defrost()
base_cfg.SEED = -1
base_cfg.DATALOADER.SUBSAMPLE_TRAIN.SEED = -1
base_cfg.freeze()
os.environ["MEDDLR_REPRO"] = ""
cfg = base_cfg.clone()
init_reproducible_mode(cfg, eval_only=False)
assert cfg.SEED > 0
assert cfg.DATALOADER.SUBSAMPLE_TRAIN.SEED > 0
assert torch.backends.cudnn.deterministic
assert not torch.backends.cudnn.benchmark
assert env.is_repro()
self._reset_env_vars()
cfg = base_cfg.clone()
cfg.defrost()
cfg.SEED = 1000
cfg.freeze()
init_reproducible_mode(cfg, eval_only=False)
assert cfg.SEED == 1000
assert cfg.DATALOADER.SUBSAMPLE_TRAIN.SEED > 0
assert torch.backends.cudnn.deterministic
assert not torch.backends.cudnn.benchmark
self._reset_env_vars()
cfg = base_cfg.clone()
cfg.defrost()
cfg.CUDNN_BENCHMARK = True
cfg.freeze()
init_reproducible_mode(cfg, eval_only=True)
assert torch.backends.cudnn.benchmark
self._reset_env_vars()
cfg = base_cfg.clone()
cfg.defrost()
cfg.CUDNN_BENCHMARK = True
cfg.freeze()
init_reproducible_mode(cfg, eval_only=False)
assert not torch.backends.cudnn.benchmark
self._reset_env_vars()
if __name__ == "__main__":
unittest.main()
|
11577494
|
import numpy as np
import chainer
import chainer.links as L
import argparse
import time
def main():
parser = argparse.ArgumentParser(
        description='generate 2d processing operator output')
parser.add_argument('--with-ideep', action='store_true', help='enable ideep')
parser.add_argument('--input', type=str, help='input file path')
args = parser.parse_args()
with open(args.input, "r") as f:
dims_num = int(f.readline())
shape = tuple(int(d) for d in f.readline().strip().split(" "))
raw_data = [np.float32(d) for d in f.readline().strip().split(" ")]
x = np.array(raw_data).reshape(shape)
chainer.config.train = False
model = L.VGG16Layers()
if args.with_ideep:
chainer.config.use_ideep = "auto"
model.to_intel64()
start = time.process_time()
y = model(x)
end = time.process_time()
print((end - start) * 1000)
if __name__ == "__main__":
main()
|
11577523
|
from eval.cap_eval_utils import calc_pr_ovr_noref
import numpy as np
def compute_map(all_logits, all_labels):
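    # One-vs-rest average precision per class; classes with no positive
    # samples are skipped before taking the mean.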
num_classes = all_logits.shape[1]
APs = []
for cid in range(num_classes):
this_logits = all_logits[:, cid]
this_labels = (all_labels == cid).astype('float32')
if np.sum(this_labels) == 0:
print('No positive videos for class {}. Ignoring...'.format(cid))
continue
_, _, _, ap = calc_pr_ovr_noref(this_labels, this_logits)
APs.append(ap)
mAP = np.mean(APs)
return mAP, APs
|
11577527
|
import pandas as pd
import numpy as np
from scipy import sparse
import matplotlib.pylab as plt
import scipy as sp
resume_text = pd.read_csv("data/cleaned_resume.csv", index_col=0)
job_text = pd.read_csv("~/data/full_requisition_data_tokenized.csv").fillna('')
resume_text['Last Recruiting Stage'].value_counts()
resume_text['Latest Recruiting Step'].value_counts()
resume_text.drop_duplicates(subset=['Req ID','Candidate ID'], keep='last', inplace=True)
###
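# Map recruiting stages to an ordinal engagement score
# (0 = review only, 3 = offer/hire stage).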
interaction_dict = {'Review': 0
, 'Screen': 1
, 'Interview': 2
, 'Ready for Hire': 3
, 'Offer': 3
, 'Background Check': 3}
resume_text['interaction'] = resume_text['Last Recruiting Stage'].map(interaction_dict)
interaction_matrix = resume_text.pivot(index='Req ID'
, columns='Candidate ID'
, values='interaction')
interaction_matrix = interaction_matrix.fillna(0)
interaction_sparse = sparse.csr_matrix(interaction_matrix.values)
sparse.save_npz('data/interaction_v1.npz', interaction_sparse)
##### smaller interaction matrix with NRNC dropped ####
resume_subset = resume_text.drop(resume_text[resume_text['Latest Recruiting Step'] == 'Not Reviewed Not Considered'].index)
resume_subset['interaction'] = resume_subset['Last Recruiting Stage'].map(interaction_dict)
interaction_matrix_small = resume_subset.pivot(index='Req ID'
, columns='Candidate ID'
, values='interaction')
interaction_matrix_small = interaction_matrix_small.fillna(0)
interaction_sparse_small = sparse.csr_matrix(interaction_matrix_small.values)
sparse.save_npz('data/interaction_small.npz', interaction_sparse_small)
#####
interaction_dict_v2 = {'Not Reviewed Not Considered': 0
, 'Hiring Restrictions': 0
, 'Hiring Policy': 0
, 'Voluntary Withdrew' : 0
, 'Position Cancelled': 1
, 'Selected other more qualified candidate' : 1
, 'Basic Qualifications' : 1
, 'Salary Expectations too high' : 1
, 'Review' : 2
, 'Skills or Abilities' : 2
, 'Phone Screen' : 3
, 'Schedule Interview' : 3
, 'Schedule interview' : 3
, 'No Show (Interview / First Day)' : 3
, 'Second Round Interview' : 4
, 'Final Round Interview' : 4
, 'Completion' : 5
, 'Offer' : 5
, 'Offer Rejected' : 5
, 'Revise Offer' : 5
, 'Background Check' : 5}
resume_text['interaction'] = resume_text['Latest Recruiting Step'].map(interaction_dict_v2)
interaction_matrix = resume_text.pivot(index='Req ID'
, columns='Candidate ID'
, values='interaction')
# interaction_matrix.fillna(0)
interaction_sparse = sparse.csr_matrix(interaction_matrix.values)
sparse.save_npz('data/interaction_v2.npz', interaction_sparse)
#####
resume_text['interaction'] = resume_text['Last Recruiting Stage'].map(interaction_dict)
interaction_matrix = resume_text.pivot(index='Req ID'
, columns='Candidate ID'
, values='interaction')
interaction_sparse = sparse.csr_matrix(interaction_matrix.values)
sparse.save_npz('data/interaction_v3.npz', interaction_sparse)
#####
interaction_dict_v4 = {'Not Reviewed Not Considered': 0
, 'Hired For Another Job': 0
, 'Hiring Restrictions': 0
, 'Hiring Policy': 0
, 'Voluntary Withdrew' : 0
, 'Position Cancelled': 0
, 'Selected other more qualified candidate' : 0
, 'Basic Qualifications' : 0
, 'Salary Expectations too high' : 0
, 'Skills or Abilities' : 0
, 'Review' : 1
, 'Phone Screen' : 2
, 'Schedule Interview' : 3
, 'Schedule interview' : 3
, 'No Show (Interview / First Day)' : 3
, 'Second Round Interview' : 4
, 'Final Round Interview' : 4
, 'Completion' : 5
, 'Offer' : 5
, 'Offer Rejected' : 5
, 'Revise Offer' : 5
, 'Background Check' : 5}
resume_text['interaction'] = resume_text['Latest Recruiting Step'].map(interaction_dict_v4)
resume_text = resume_text.sort_values('Req ID')
interaction_matrix = resume_text.pivot(index='Req ID'
, columns='Candidate ID'
, values='interaction')
interaction_matrix = interaction_matrix.sort_index()
interaction_sparse = sparse.csr_matrix(interaction_matrix.values)
#interaction_sparse2 = sparse.coo_matrix(interaction_matrix.values)
sparse.save_npz('data/interaction_v4.npz', interaction_sparse)
##### binary
job_text = pd.read_csv("~/data/full_requisition_data_tokenized.csv").fillna('')
job_text.drop('Job Description Clean',axis=1, inplace=True)
resume_text = pd.read_csv("~/data/Candidate Report_tokenized.csv").fillna('')
#drop all rows that do not have a resume
resume_text = resume_text[resume_text['Resume Text'] != '[\'nan\']']
# keep job IDs that (1) had at least one candidate with a resume looked at,
# and (2) had at least 5 applicants with resumes
jobs_reviewed_atleast_once = ['Review', 'Completion', 'Phone Screen',
'Schedule Interview', 'Offer Rejected',
'Schedule interview',
'No Show (Interview / First Day)', 'Offer',
'Second Round Interview',
'Background Check', 'Revise Offer',
'Final Round Interview']
temp_df = resume_text[resume_text['Latest Recruiting Step'].isin(jobs_reviewed_atleast_once)]
temp_df = temp_df[temp_df['Resume Text'] != '[\'nan\']']
x = temp_df.merge(job_text, how='left',on='Req ID')
x = x['Req ID'].value_counts()
x = x[x >= 5]
jobIDs = x.index
temp_df= resume_text[resume_text['Req ID'].isin(jobIDs)]
#drop duplicates
temp_df.drop_duplicates(subset=['Req ID','Candidate ID'], keep='last', inplace=True)
interaction_dict_binary = {'Not Reviewed Not Considered': 0
, 'Hiring Restrictions': 0
, 'Hiring Policy': 0
, 'Voluntary Withdrew' : 0
, 'Position Cancelled': 0
, 'Skills or Abilities': 0
, 'Selected other more qualified candidate' : 0
, 'Basic Qualifications' : 0
, 'Salary Expectations too high' : 0
, 'Hired For Another Job' : 0
, 'Review' : 1
, 'Phone Screen' : 1
, 'Schedule Interview' : 1
, 'Schedule interview' : 1
, 'No Show (Interview / First Day)' : 1
, 'Second Round Interview' : 1
, 'Final Round Interview' : 1
, 'Completion' : 1
, 'Offer' : 1
, 'Offer Rejected' : 1
, 'Revise Offer' : 1
, 'Background Check' : 1}
temp_df['interaction'] = temp_df['Latest Recruiting Step'].map(interaction_dict_binary)
interaction_matrix = temp_df.pivot(index='Req ID', columns='Candidate ID', values='interaction')
interaction_matrix = interaction_matrix.fillna(0).astype(int)
interaction_sparse = sparse.csr_matrix(interaction_matrix.values)
sparse.save_npz('data/interaction_v_binary.npz', interaction_sparse)
interaction_sparse.data = np.nan_to_num(interaction_sparse.data, nan=0, copy=False)
plt.grid(b=None)
plt.spy(interaction_sparse, aspect='auto', markersize=0.001)
plt.spy(interaction_sparse, aspect='auto', precision=0.1, markersize=1,marker=',')
plt.spy(interaction_sparse, aspect='auto', precision=0.1, markersize=1,marker='_')
##### updated with client feedback
interaction_dict_v5 = {'Not Reviewed Not Considered': 0
, 'Hired For Another Job': 0
, 'Hiring Restrictions': 0
, 'Hiring Policy': 0
, 'Voluntary Withdrew' : 1
, 'Position Cancelled': 0
, 'Selected other more qualified candidate' : 0
, 'Basic Qualifications' : 0
, 'Salary Expectations too high' : 1
, 'Skills or Abilities' : 2
, 'Review' : 1
, 'Phone Screen' : 2
, 'Schedule Interview' : 3
, 'Schedule interview' : 3
, 'No Show (Interview / First Day)' : 3
, 'Second Round Interview' : 4
, 'Final Round Interview' : 4
, 'Completion' : 5
, 'Offer' : 5
, 'Offer Rejected' : 5
, 'Revise Offer' : 5
, 'Background Check' : 5}
resume_text['interaction'] = resume_text['Last Recruiting Stage'].map(interaction_dict_v5)
interaction_matrix = resume_text.pivot(index='Req ID'
, columns='Candidate ID'
, values='interaction')
interaction_sparse = sparse.csr_matrix(interaction_matrix.values)
sparse.save_npz('data/interaction_v5.npz', interaction_sparse)
plt.grid(b=None)
plt.spy(interaction_sparse, aspect='auto', precision=0.1, markersize=1,marker=',')
|
11577536
|
import os
import pickle
from types import SimpleNamespace
import numpy.testing as npt
import pandas as pd
from pandas._testing import assert_frame_equal
# from fluxpart import flux_partition
from fluxpart import fvs_partition, fpread
TESTDIR = os.path.dirname(os.path.realpath(__file__))
def test_flux_partition():
"""Integrated test of highest level flux_partition function.
In test below, the "desired" results are not exact or even
necessarily correct. They were obtained with an independent (matlab)
code, and there should be reasonable agreement.
"""
wue_data = {
"meas_ht": 7.11,
"canopy_ht": 4.42,
"ppath": "C3",
"ci_mod": "const_ppm",
"diff_ratio": 1 / 0.7,
}
# soln exists for this data without any wavelet filtering
fname = os.path.join(
TESTDIR, "data/TOA5_6843.ts_Above_2012_06_07_1300.dat"
)
matlab_fluxes = SimpleNamespace(
Fcp=-1.02807378762793,
Fcr=0.402683101177712,
Fqe=0.00500869036088203,
Fqt=0.145615860044424,
Fcp_mol=-23.3600042633021,
Fcr_mol=9.14980916104776,
LEe=12.1870591763138,
LEt=354.310004313924,
)
fvsp = fvs_partition(fname, wue_options=wue_data, hfd_format="ec-TOA5")
npt.assert_allclose(fvsp.df["fvsp_solution"]["var_cp"], 18.9272, atol=1)
assert_flux_components(fvsp.df["fluxes"], matlab_fluxes)
# test reading heights and temperature from file
wue_data = {
"heights": os.path.join(TESTDIR, "data/heights.csv"),
"leaf_temper": os.path.join(TESTDIR, "data/leaf_temper.csv"),
"ppath": "C3",
"ci_mod": "const_ppm",
"diff_ratio": 1 / 0.7,
}
fvsp = fvs_partition(fname, wue_options=wue_data, hfd_format="ec-TOA5")
npt.assert_allclose(fvsp.df["fvsp_solution"]["var_cp"], 18.9272, atol=1)
assert_flux_components(fvsp.df["fluxes"], matlab_fluxes)
# soln is obtained after some wavelet filtering
fname = os.path.join(
TESTDIR, "data/TOA5_6843.ts_Above_2012_06_07_1245.dat"
)
wue_data = {
"meas_ht": 7.11,
"canopy_ht": 4.42,
"ppath": "C3",
"ci_mod": "const_ppm",
"diff_ratio": 1 / 0.7,
}
matlab_fluxes = SimpleNamespace(
Fcp=-0.866856083109642,
Fcr=0.353428894620522,
Fqe=0.0124697200158396,
Fqt=0.117438136138301,
Fcp_mol=-23.1074540435422,
Fcr_mol=10.6590633820467,
LEe=35.6007693518818,
LEt=335.283229492226,
)
fvsp = fvs_partition(fname, wue_options=wue_data, hfd_format="ec-TOA5")
npt.assert_allclose(
fvsp.df.iloc[0]["fvsp_solution"]["var_cp"], 15.2944, atol=1
)
assert_flux_components(fvsp.df.iloc[0]["fluxes"], matlab_fluxes)
def test_fpread(tmpdir):
"""Test of writing and reading partitioning results."""
wue_data = {
"meas_ht": 7.11,
"canopy_ht": 4.42,
"ppath": "C3",
"ci_mod": "const_ratio",
}
fname = os.path.join(
TESTDIR, "data/TOA5_6843.ts_Above_2012_06_07_1300.dat"
)
fvsp = fvs_partition(
fname, wue_options=wue_data, hfd_format="ec-TOA5", interval="1min"
)
results_file = os.path.join(tmpdir, "foo.pkl")
fvsp.save(results_file)
readfvsp = fpread(results_file)
assert_frame_equal(fvsp.df, readfvsp.df)
fvsp.save_pickle(results_file)
readfvsp = fpread(results_file)
assert_frame_equal(fvsp.df, readfvsp.df)
fvsp.save_csv(results_file)
readfvsp = fpread(results_file)
#TODO: not testing wave_lvl, it shouldn't be a tuple
assert_frame_equal(
fvsp.df.drop("wave_lvl", axis=1, level=1),
readfvsp.df.drop("wave_lvl", axis=1, level=1),
)
def assert_flux_components(calc, desired):
npt.assert_allclose(calc.Fcp, desired.Fcp, atol=0.5)
npt.assert_allclose(calc.Fcr, desired.Fcr, atol=0.1)
npt.assert_allclose(calc.Fqe, desired.Fqe, atol=0.01)
npt.assert_allclose(calc.Fqt, desired.Fqt, atol=0.05)
npt.assert_allclose(calc.Fcp_mol, desired.Fcp_mol, atol=10)
npt.assert_allclose(calc.Fcr_mol, desired.Fcr_mol, atol=10)
npt.assert_allclose(calc.LEe, desired.LEe, atol=10)
npt.assert_allclose(calc.LEt, desired.LEt, atol=50)
if __name__ == "__main__":
test_flux_partition()
|
11577548
|
import os
import sys
import argparse
import struct
import tensorflow as tf
from tensorflow.core.example import example_pb2
END_TOKENS = frozenset(['.', '!', '?', '...', "'", "`", '"', ")"]) # acceptable ways to end a sentence
def fix_missing_period(line):
"""Adds a period to a line that is missing a period"""
if "@highlight" in line: return line
if line=="": return line
if line[-1] in END_TOKENS: return line
return line + " ."
def get_art_abs(input_string):
lines = input_string.splitlines()
lines = [line.lower() for line in lines]
lines = [fix_missing_period(line) for line in lines]
article_lines = []
highlights = []
next_is_highlight = False
for idx,line in enumerate(lines):
if line == "":
            continue  # skip empty lines
elif line.startswith("@highlight"):
next_is_highlight = True
elif next_is_highlight:
highlights.append(line)
else:
article_lines.append(line)
# To a string
article = ' '.join(article_lines)
return article
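# convert_to_bin writes the article as a single length-prefixed record:
# an 8-byte native-endian length (struct 'q') followed by the serialized
# tf.Example protobuf containing the article text.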
def convert_to_bin(input_string, out_file):
with open(out_file, 'wb') as writer:
# start to write .bin file
article = get_art_abs(input_string)
article=tf.compat.as_bytes(article, encoding='utf-8')
# tf.Example write
tf_example = example_pb2.Example()
tf_example.features.feature['article'].bytes_list.value.extend([article])
tf_example_str = tf_example.SerializeToString()
str_len = len(tf_example_str)
writer.write(struct.pack('q', str_len))
writer.write(struct.pack('%ds' % str_len, tf_example_str))
|
11577556
|
import tensorflow as tf
from ..settings import ztypes
def generate_1d_grid(data, num_grid_points, absolute_boundary=0.0, relative_boundary=0.05):
minimum = tf.math.reduce_min(data)
maximum = tf.math.reduce_max(data)
space_width = maximum - minimum
outside_borders = tf.maximum(relative_boundary * space_width, absolute_boundary)
return tf.linspace(minimum - outside_borders, maximum + outside_borders, num=num_grid_points)
def bin_1d(binning_method, data, grid, weights=None):
if binning_method == 'simple':
return bin_1d_simple(data, grid, weights)
elif binning_method == 'linear':
return bin_1d_linear(data, grid, weights)
else:
raise ValueError(f"Binning method '{binning_method}' not supported, only 'simple' or 'linear'.")
def bin_1d_simple(data, grid, weights=None):
if weights is None:
bincount = tf.cast(
tf.histogram_fixed_width(data, [tf.math.reduce_min(grid), tf.math.reduce_max(grid)], tf.size(grid)),
ztypes.float)
else:
bincount = _bin_1d_weighted(data, grid, weights, 'simple')
return bincount
def bin_1d_linear(data, grid, weights=None):
return _bin_1d_weighted(data, grid, weights)
def _bin_1d_weighted(data, grid, weights, method='linear'):
if weights is None:
weights = tf.ones_like(data, ztypes.float)
weights = weights / tf.reduce_sum(weights)
grid_size = tf.size(grid)
grid_min = tf.math.reduce_min(grid)
grid_max = tf.math.reduce_max(grid)
num_intervals = tf.math.subtract(grid_size, tf.constant(1))
dx = tf.math.divide(tf.math.subtract(grid_max, grid_min), tf.cast(num_intervals, ztypes.float))
transformed_data = tf.math.divide(tf.math.subtract(data, grid_min), dx)
# Compute the integral and fractional part of the data
# The integral part is used for lookups, the fractional part is used
# to weight the data
integral = tf.math.floor(transformed_data)
fractional = tf.math.subtract(transformed_data, integral)
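    # Linear binning: a weight of (1 - frac) is assigned to the grid point at
    # floor(x) and a weight of frac to the next grid point up.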
if method == 'simple':
fractional = tf.cast(fractional > 0.5, fractional.dtype) * fractional
# Compute the weights for left and right side of the linear binning routine
frac_weights = tf.math.multiply(fractional, weights)
neg_frac_weights = tf.math.subtract(weights, frac_weights)
# tf.math.bincount only works with tf.int32
bincount_left = tf.roll(tf.concat(
tf.math.bincount(tf.cast(integral, tf.int32), weights=frac_weights, minlength=grid_size, maxlength=grid_size),
tf.constant(0)), shift=1, axis=0)
bincount_right = tf.math.bincount(tf.cast(integral, tf.int32), weights=neg_frac_weights, minlength=grid_size,
maxlength=grid_size)
bincount = tf.cast(tf.add(bincount_left, bincount_right), ztypes.float)
return bincount
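# Illustrative usage sketch, not part of the original module. Because `ztypes` is imported
# relatively, this module is used from inside its package; assuming `ztypes.float`
# resolves to tf.float64, a typical call sequence would look roughly like:
#
#     data = tf.constant([0.1, 0.4, 0.45, 0.9], dtype=tf.float64)
#     grid = generate_1d_grid(data, num_grid_points=8)
#     counts = bin_1d('linear', data, grid)
#
# With no explicit weights, `counts` holds normalised weights spread linearly over the
# two grid points neighbouring each data point, so it sums to approximately 1.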
|
11577566
|
from pypy.rpython.lltypesystem.rffi import CConstant, CExternVariable, INT
from pypy.rpython.lltypesystem import lltype, ll2ctypes
from pypy.translator.tool.cbuild import ExternalCompilationInfo
from pypy.rlib.rarithmetic import intmask
class CConstantErrno(CConstant):
# these accessors are used when calling get_errno() or set_errno()
# on top of CPython
def __getitem__(self, index):
assert index == 0
try:
return ll2ctypes.TLS.errno
except AttributeError:
raise ValueError("no C function call occurred so far, "
"errno is undefined")
def __setitem__(self, index, value):
assert index == 0
ll2ctypes.TLS.errno = value
errno_eci = ExternalCompilationInfo(
includes=['errno.h']
)
_get_errno, set_errno = CExternVariable(INT, 'errno', errno_eci,
CConstantErrno, sandboxsafe=True)
def get_errno():
return intmask(_get_errno())
|
11577609
|
from tests import TestCase
class TestWifidog(TestCase):
def test_login_without_gw_id(self):
response = self.client.get('/wifidog/login/')
self.assertEqual(404, response.status_code)
def test_login_with_invalid_gw_id(self):
response = self.client.get('/wifidog/login/?gw_id=foobar')
self.assertEqual(404, response.status_code)
def test_login_with_valid_gw(self):
response = self.client.get('/wifidog/login/?gw_id=main-gateway1')
self.assertEqual(200, response.status_code)
def test_portal_without_gw_id(self):
response = self.client.get('/wifidog/portal/')
self.assertEqual(404, response.status_code)
def test_portal_with_invalid_gw_id(self):
response = self.client.get('/wifidog/portal/?gw_id=foobar')
self.assertEqual(404, response.status_code)
def test_portal_with_valid_gw(self):
response = self.client.get('/wifidog/portal/?gw_id=main-gateway1')
self.assertEqual(200, response.status_code)
|
11577614
|
import re
import json
import socket
import requests
import threading
from decorators import validate_payload, parse_results
from sseclient import SSEClient
class FirebaseEvents(object):
CHILD_CHANGED = 0
CHILD_ADDED = 2
CHILD_DELETED = 1
@staticmethod
def id(event_name):
ev = None
mapping = {
'child_changed': FirebaseEvents.CHILD_CHANGED,
'child_added': FirebaseEvents.CHILD_ADDED,
'child_deleted': FirebaseEvents.CHILD_DELETED
}
try:
ev = mapping.get(event_name)
finally:
return ev
class ClosableSSEClient(SSEClient):
def __init__(self, *args, **kwargs):
self.should_connect = True
super(ClosableSSEClient, self).__init__(*args, **kwargs)
def _connect(self):
if self.should_connect:
super(ClosableSSEClient, self)._connect()
else:
raise StopIteration()
def close(self):
self.should_connect = False
self.retry = 0
self.resp.raw._fp.fp._sock.shutdown(socket.SHUT_RDWR)
self.resp.raw._fp.fp._sock.close()
class EventSourceClient(threading.Thread):
def __init__(self, url, event_name, callback):
self.url = url
self.event_name = event_name
self.callback = callback
super(EventSourceClient, self).__init__()
def run(self):
try:
self.sse = ClosableSSEClient(self.url)
for msg in self.sse:
event = msg.event
if event is not None and event in ('put', 'patch'):
response = json.loads(msg.data)
if response is not None:
# Default to CHILD_CHANGED event
occurred_event = FirebaseEvents.CHILD_CHANGED
if response['data'] is None:
occurred_event = FirebaseEvents.CHILD_DELETED
# Get the event I'm trying to listen to
ev = FirebaseEvents.id(self.event_name)
if occurred_event == ev or ev == FirebaseEvents.CHILD_CHANGED:
self.callback(event, response)
except socket.error:
pass
class FirebaseReference(object):
def __new__(cls, *args):
if len(args) == 2:
connector = args[0]
if isinstance(connector, Firebase):
if args[1] is None or FirebaseReference.is_valid(args[1]):
return super(FirebaseReference, cls).__new__(cls)
return None
def __init__(self, connector, reference=None):
self.connector = connector
self.current = reference or ''
def child(self, reference):
if not FirebaseReference.is_valid(reference):
raise ValueError("Invalid reference value")
self.current = "{}/{}".format(self.current, reference)
return self
@parse_results
@validate_payload
def push(self, payload):
return requests.post(self.current_url, json=payload)
@parse_results
def get(self):
return requests.get(self.current_url)
@parse_results
@validate_payload
def set(self, payload):
return requests.put(self.current_url, json=payload)
@parse_results
@validate_payload
def update(self, payload):
return requests.patch(self.current_url, json=payload)
@parse_results
def delete(self):
return requests.delete(self.current_url)
@staticmethod
def is_valid(reference):
pattern = re.compile('^[a-zA-Z0-9_-]+(\/[a-zA-Z0-9_-]+)*$')
matches = pattern.match(reference)
if matches:
return True
return False
@property
def current_url(self):
base = self.connector.FIREBASE_URL
return "{}/{}.json".format(base, self.current)
def patch_url(self):
if self.current == '':
return self.current_url
base = self.connector.FIREBASE_URL
return "{}/{}/.json".format(base, self.current)
def on(self, event_name, **kwargs):
url = self.patch_url()
callback = kwargs.get('callback', None)
if event_name is None or callback is None:
raise AttributeError(
                'No event name or callback parameter provided'
)
if FirebaseEvents.id(event_name) is None:
raise AttributeError(
'Unsupported event'
)
# Start Event Source Listener on this ref on a new thread
self.client = EventSourceClient(url, event_name, callback)
self.client.start()
return True
def off(self):
try:
# Close Event Source Listener
self.client.sse.close()
self.client.join()
return True
except Exception:
print "Error while trying to end the thread. Try again!"
class Firebase(object):
FIREBASE_URL = None
def __new__(cls, *args):
if len(args) == 1:
if Firebase.is_valid_firebase_url(args[0]):
return super(Firebase, cls).__new__(cls)
return None
def __init__(self, url):
self.FIREBASE_URL = url.strip('/')
@staticmethod
def is_valid_firebase_url(url):
pattern = re.compile(
r'^https://[a-zA-Z0-9_\-]+\.firebaseio(-demo)?\.com/?$'
)
matches = pattern.match(url)
if matches:
return True
return False
def ref(self, reference=None):
ref = FirebaseReference(self, reference)
if ref is None:
raise Exception(
"Something went wrong when trying to create your ref"
)
return ref
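# Illustrative usage sketch, not part of the original module. The project URL and paths
# are hypothetical; constructing the connector and reference only builds URLs locally,
# while set()/get()/on() would issue real HTTP or SSE requests.
if __name__ == '__main__':
    fb = Firebase('https://example-project.firebaseio.com')
    ref = fb.ref('users').child('alice')
    print(ref.current_url)  # https://example-project.firebaseio.com/users/alice.json
    # ref.set({'name': 'Alice'})  # would PUT the payload to that URL
    # ref.on('child_changed', callback=lambda event, data: None)  # would start an SSE listener thread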
|
11577615
|
import discord
import textgenrnn
import kaizen85modules
class Module(kaizen85modules.ModuleHandler.Module):
name = "KaizenAI"
desc = "Пожилой киберсыч нового поколения!"
async def run(self, bot: kaizen85modules.KaizenBot):
class AICommand(kaizen85modules.ModuleHandler.Command):
title = ""
file_name = ""
color = 0
model: textgenrnn.textgenrnn
model_loaded = True
name = ""
desc = "ИИ %s"
args = "[prefix=<prefix>]"
def __init__(self, title, file_name, command_name, color):
self.title = title
self.file_name = file_name
self.name = command_name
self.color = color
self.desc = self.desc % title
try:
self.model = textgenrnn.textgenrnn(weights_path="%s_weights.hdf5" % file_name,
vocab_path="%s_vocab.json" % file_name,
config_path="%s_config.json" % file_name)
except FileNotFoundError:
bot.logger.log("Could not find model files for AI %s. The command will not work." % title,
bot.logger.PrintColors.FAIL)
self.model_loaded = False
else:
bot.logger.log("Initialized AI with name %s." % self.title, bot.logger.PrintColors.OKBLUE)
async def run(self, message: discord.Message, args, keys):
if not self.model_loaded:
await bot.send_error_embed(message.channel, "Модель данного ИИ не загружена.")
return True
prefix = None
if len(args) > 0 and args[0].startswith("prefix="):
prefix = " ".join(args)[7:]
embed = bot.get_special_embed(self.color, "ИИ %s" % self.title)
embed.description = \
self.model.generate(temperature=bot.module_handler.params["aiTemp"], return_as_list=True,
prefix=prefix)[0]
await message.channel.send(embed=embed)
return True
bot.module_handler.add_param("aiTemp", 0.9)
models = [AICommand("Kaizen", "kaizen", "kz", 0x64E3E1), AICommand("Icarus", "icarus", "ic", 0xFFD700)]
for model in models:
bot.module_handler.add_command(model, self)
|
11577637
|
from qcircuits.state import qubit, zeros, ones, bitstring
from qcircuits.state import positive_superposition, bell_state
from qcircuits.state import State
from qcircuits.operators import Identity, PauliX, PauliY, PauliZ
from qcircuits.operators import Hadamard, Phase, PiBy8, SqrtNot
from qcircuits.operators import Rotation, RotationX, RotationY, RotationZ
from qcircuits.operators import CNOT, Toffoli, Swap, SqrtSwap
from qcircuits.operators import ControlledU, U_f
from qcircuits.operators import Operator
from qcircuits.density_operator import DensityOperator
__version__ = '0.6.0'
|
11577642
|
from setuptools import setup
def readme():
with open("README.md") as f:
return f.read()
setup(
name="pyzorsocket",
version="0.1",
license="MIT",
author="<NAME>",
author_email="<EMAIL>",
url = "https://github.com/cgt/rspamd-plugins/tree/master/pyzor/pyzorsocket",
description="Expose pyzor on a socket",
long_description=readme(),
py_modules=["pyzorsocket"],
entry_points={
"console_scripts": [
"pyzorsocket=pyzorsocket:main",
],
},
install_requires=[
"pyzor>=1.0.0",
],
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3 :: Only",
],
keywords="pyzor spam",
)
|
11577650
|
from django.conf import settings
from django.urls import path
from . import views
app_name = "beanstalk_worker"
urlpatterns = [path("task/", views.task, name="task"), path("cron/", views.cron, name="cron")]
if settings.DEBUG:
urlpatterns.append(path("run_all/", views.run_all, name="run_all"))
|
11577673
|
import unittest
from os.path import join
from praatio import audio
from praatio.utilities import utils
from test.praatio_test_case import PraatioTestCase
class AudioTests(PraatioTestCase):
def test_get_audio_duration(self):
"""Tests that the two audio duration methods output the same value."""
wavFN = join(self.dataRoot, "bobby.wav")
durationA = utils.getWavDuration(wavFN)
durationB = audio.getDuration(wavFN)
        self.assertEqual(durationA, durationB)
if __name__ == "__main__":
unittest.main()
|
11577693
|
import requests
from allauth.socialaccount.adapter import get_adapter
from allauth.socialaccount.models import SocialAccount, SocialLogin
from allauth.socialaccount.providers.oauth2.views import OAuth2Adapter, OAuth2LoginView, OAuth2CallbackView
from django.conf import settings
from .provider import ChaHubProvider
BASE_URL = settings.SOCIAL_AUTH_CHAHUB_BASE_URL
class ChaHubOAuth2Adapter(OAuth2Adapter):
provider_id = ChaHubProvider.id
api_url = '{}/api/v1/'.format(BASE_URL)
authorize_url = '{}/oauth/authorize/'.format(BASE_URL)
access_token_url = '{}/oauth/token/'.format(BASE_URL)
identity_url = "{}my_profile/".format(api_url)
supports_state = True
def complete_login(self, request, app, token, **kwargs):
extra_data = self.get_data(token.token)
uid = str(extra_data['id'])
user = get_adapter().populate_new_user(
email=extra_data.get('email'),
            username=extra_data.get('username'),
name=extra_data.get('name')
)
account = SocialAccount(
user=user,
uid=uid,
extra_data=extra_data,
provider=self.provider_id
)
return SocialLogin(account)
def get_data(self, token):
data = requests.get(self.identity_url, headers={'Authorization': 'Bearer {}'.format(token)})
data = data.json()
return {
'username': data.get('username'),
'email': data.get('email'),
'name': data.get('name', ''),
'id': data.get('id'),
'github_info': data.get('github_info', {})
}
oauth2_login = OAuth2LoginView.adapter_view(ChaHubOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(ChaHubOAuth2Adapter)
|
11577704
|
import asyncio
from datetime import datetime
from decimal import Decimal
import logging
import os
from hexbytes import HexBytes
import pytz
from tenacity import retry, stop_after_attempt, wait_fixed, wait_random
from web3 import Web3
from web3.exceptions import BlockNotFound
import beneath
LATEST_COUNT = 24
STABLE_AFTER = 12
POLL_SECONDS = 1
WEB3_PROVIDER_URL = os.getenv("WEB3_PROVIDER_URL", default=None)
w3 = Web3(Web3.HTTPProvider(WEB3_PROVIDER_URL))
SCHEMA = """
type Block @schema {
" Block number "
number: Int! @key
" Block timestamp "
timestamp: Timestamp!
" Block hash "
hash: Bytes32!
" Hash of parent block "
parent_hash: Bytes32!
" Address of block miner "
miner: Bytes20!
" Size of block in bytes "
size: Int!
" Number of transactions in block "
transactions: Int!
" Block difficulty "
difficulty: Numeric!
" Total difficulty of the chain until this block "
total_difficulty: Numeric!
" Limit on the amount of gas that can be consumed in a single block at the time of this block "
gas_limit: Int!
" Total amount of gas consumed by transactions in this block "
gas_used: Int!
" Extra data embedded in the block by its miner "
extra_data: Bytes!
" Extra data parsed as a UTF-8 encoded string, if possible "
extra_data_text: String
" Proof-of-work for this block "
nonce: Bytes!
" Root value of the receipts trie at this block "
receipts_root: Bytes32!
" Root value of the state trie at this block "
state_root: Bytes32!
" Root value of the transactions trie at this block "
transactions_root: Bytes32!
" Bloom filter of logs emitted in this block "
logs_bloom: Bytes256!
" SHA3 hash of the uncles in the block "
sha3_uncles: Bytes32!
}
"""
retry_logger = logging.getLogger("tenacity")
def log_retry(retry_state):
retry_logger.info(
"get_block_retry attempt=%d outcome=%s",
retry_state.attempt_number,
retry_state.outcome,
)
def safe_to_utf8(val):
try:
return val.decode("utf-8")
except UnicodeDecodeError:
return None
@retry(
before_sleep=log_retry,
reraise=True,
stop=stop_after_attempt(5),
wait=wait_fixed(2) + wait_random(0, 2),
)
def get_block(num):
# pylint: disable=no-member
try:
block = w3.eth.getBlock(num)
except BlockNotFound:
return None
return {
"number": block["number"],
"timestamp": datetime.utcfromtimestamp(block["timestamp"]).replace(
tzinfo=pytz.utc
),
"hash": bytes(block["hash"]),
"parent_hash": bytes(block["parentHash"]),
"miner": bytes(HexBytes(block["miner"])),
"size": block["size"],
"transactions": len(block["transactions"]),
"difficulty": Decimal(block["difficulty"]),
"total_difficulty": Decimal(block["totalDifficulty"]),
"gas_limit": block["gasLimit"],
"gas_used": block["gasUsed"],
"extra_data": bytes(block["extraData"]),
"extra_data_text": safe_to_utf8(bytes(block["extraData"])),
"nonce": bytes(block["nonce"]),
"receipts_root": bytes(block["receiptsRoot"]),
"state_root": bytes(block["stateRoot"]),
"transactions_root": bytes(block["transactionsRoot"]),
"logs_bloom": bytes(block["logsBloom"]),
"sha3_uncles": bytes(block["sha3Uncles"]),
}
async def generate_blocks(p: beneath.Pipeline):
checkpoint = await p.checkpoints.get(
"checkpoint",
default={
"next_stable": 0,
"next_unstable": 0,
"latest_hashes": [],
},
)
cached_blocks = []
while True:
# get next block
unstable_block = get_block(checkpoint["next_unstable"])
if not unstable_block:
await asyncio.sleep(POLL_SECONDS)
continue
# reprocess previous block if parent hash doesn't match
if (len(checkpoint["latest_hashes"]) > 0) and (
unstable_block["parent_hash"] != checkpoint["latest_hashes"][-1]
):
checkpoint["latest_hashes"].pop()
p.logger.info("fork next_number=%d", checkpoint["next_unstable"])
checkpoint["next_unstable"] -= 1
if len(cached_blocks) > 0:
cached_blocks.pop()
if checkpoint["next_unstable"] < checkpoint["next_stable"]:
checkpoint["next_stable"] = checkpoint["next_unstable"]
p.logger.info(
"fork_before_stable next_number=%d", checkpoint["next_unstable"]
)
continue
# process unstable_block
yield ("unstable", unstable_block)
p.logger.info(
"write_unstable number=%d hash=%s",
unstable_block["number"],
unstable_block["hash"].hex(),
)
checkpoint["next_unstable"] += 1
cached_blocks.append(unstable_block)
# track in latest hashes (and keep it trimmed)
checkpoint["latest_hashes"].append(unstable_block["hash"])
if len(checkpoint["latest_hashes"]) >= LATEST_COUNT:
checkpoint["latest_hashes"] = checkpoint["latest_hashes"][1:]
# get and write stable if necessary
while (checkpoint["next_stable"] + STABLE_AFTER) < checkpoint["next_unstable"]:
# get stable block from cached_blocks or using get_block
stable_block = None
if (len(cached_blocks) > 0) and cached_blocks[0]["number"] == checkpoint[
"next_stable"
]:
stable_block = cached_blocks[0]
cached_blocks = cached_blocks[1:]
p.logger.info(
"get_stable_cache_hit number=%d", checkpoint["next_stable"]
)
else:
p.logger.info(
"get_stable_cache_miss number=%d", checkpoint["next_stable"]
)
stable_block = get_block(checkpoint["next_stable"])
if stable_block is None:
raise Exception(
"get_block should not return a null value for a stable block"
)
# write stable block
yield ("stable", stable_block)
p.logger.info(
"write_stable number=%d hash=%s",
stable_block["number"],
stable_block["hash"].hex(),
)
checkpoint["next_stable"] += 1
# write updated checkpoint
await p.checkpoints.set("checkpoint", checkpoint)
async def filter_stable(record):
(key, block) = record
if key == "stable":
return block
async def filter_unstable(record):
(key, block) = record
if key == "unstable":
return block
if __name__ == "__main__":
p = beneath.Pipeline(parse_args=True)
blocks = p.generate(generate_blocks)
stable = p.apply(blocks, filter_stable)
unstable = p.apply(blocks, filter_unstable)
p.write_table(
unstable,
"blocks-unstable",
schema=SCHEMA,
description=(
"Blocks loaded in real-time from the Ethereum mainnet. "
"Blocks are loaded without delay, so forks will frequently occur in this table."
),
)
p.write_table(
stable,
"blocks-stable",
schema=SCHEMA,
description=(
"Blocks loaded in real-time from the Ethereum mainnet. "
"Blocks are loaded with a 12-block delay to minimize the chance of forks."
),
)
p.main()
|
11577716
|
import datetime
import pytz
import flask
import flask.json
from flaskext.csrf import csrf_exempt
import sqlalchemy
import common.time
from common import utils
from common.config import config
from www import server
from www import login
MILESTONES = [
("Multi-Gift Subscriptions", config["timezone"].localize(datetime.datetime(2018, 8, 9, 9, 0))),
("Gift Subscriptions", config["timezone"].localize(datetime.datetime(2017, 11, 15, 9, 0))),
("Twitch Prime", config["timezone"].localize(datetime.datetime(2016, 9, 30, 12, 0))),
("LoadingReadyLive premiere", config["timezone"].localize(datetime.datetime(2016, 5, 14, 17, 0))),
("Pre-PreRelease premiere", config["timezone"].localize(datetime.datetime(2016, 3, 26, 12, 0))),
("YRR of LRR finale", config["timezone"].localize(datetime.datetime(2014, 12, 29, 18, 30))),
("YRR of LRR launch", config["timezone"].localize(datetime.datetime(2014, 1, 7))),
("Twitch partnership", config["timezone"].localize(datetime.datetime(2013, 8, 31, 10, 0))),
("First Twitch stream", config["timezone"].localize(datetime.datetime(2012, 1, 14, 21, 0))),
("LoadingReadyRun launch", config["timezone"].localize(datetime.datetime(2003, 10, 13))),
]
def get_events():
events = server.db.metadata.tables['events']
recent_events = []
query = sqlalchemy.select([events.c.id, events.c.event, events.c.data, events.c.time, sqlalchemy.func.current_timestamp() - events.c.time]) \
.where(events.c.time > sqlalchemy.func.current_timestamp() - datetime.timedelta(days=2)) \
.where(events.c.event.in_({'twitch-subscription', 'twitch-resubscription', 'twitch-subscription-mysterygift', 'twitch-message', 'twitch-cheer', 'patreon-pledge'})) \
.order_by(events.c.time.desc())
with server.db.engine.begin() as conn:
for id, event, data, time, duration in conn.execute(query):
if not data.get('ismulti'):
data['time'] = time
recent_events.append({
'id': id,
'event': event,
'data': data,
'duration': common.time.nice_duration(duration, 2)
})
last_event_id = conn.execute(sqlalchemy.select([sqlalchemy.func.max(events.c.id)])).first()
last_event_id = last_event_id[0] if last_event_id is not None else 0
return last_event_id, recent_events
def get_milestones():
now = datetime.datetime.now(config['timezone'])
for name, dt in MILESTONES:
months = (now.year - dt.year) * 12 + now.month - dt.month
if (now.day, now.hour, now.minute, now.second) > (dt.day, dt.hour, dt.minute, dt.second):
months += 1
yield name, dt.strftime("%Y-%m-%d"), months
@server.app.route('/notifications')
@login.with_session
def notifications(session):
last_event_id, events = get_events()
patreon_users = server.db.metadata.tables['patreon_users']
users = server.db.metadata.tables['users']
with server.db.engine.begin() as conn:
name = conn.execute(sqlalchemy.select([patreon_users.c.full_name])
.select_from(users.join(patreon_users))
.where(users.c.name == config['channel'])
).first()
if name:
name = name[0]
return flask.render_template('notifications.html', events=events, last_event_id=last_event_id, session=session, patreon_creator_name=name, milestones=get_milestones())
# Compatibility shim
@server.app.route('/notifications/events')
def events():
return flask.redirect(flask.url_for("api_v2.events", **flask.request.args), 301)
|
11577756
|
import os
import re
import logging
from django.core.management.base import BaseCommand
from django.core import exceptions
from search.read_csv_data_from_file import read_csv_data_from_file
from search.models import TaskServiceSimilarityScore
from human_services.services.models import Service
LOGGER = logging.getLogger(__name__)
class Command(BaseCommand):
help = ('Given a path to a directory, this script reads all CSV files from that '
'directory as manual recommendations of services for topics. Format: The '
            'filenames must match corresponding topic ids; each file is a CSV file '
            'with a column headed "service_id" and another column headed '
            '"Include/Exclude". Values from these columns are used to create recommended '
'service records for the given topic. All such records will have similarity '
'scores of 1.0. All other columns are ignored. If the "Include/Exclude" column '
'contains "Exclude", then records are not created, instead any existing '
'recommendation record for the given topic and service is removed, so that '
'the given service will not be recommended for the given topic.')
def add_arguments(self, parser):
parser.add_argument('path',
metavar='path',
help='path to folder containing per-topic files with recommendations')
parser.add_argument('--reset_recommendations', action='store_true',
help='Remove all existing recommendations from database before importing')
parser.add_argument('--region', metavar='region', help='Add regional postfix to service and topic primary keys')
def handle(self, *args, **options):
path = options['path']
reset_recommendations = options['reset_recommendations']
region = options['region']
if reset_recommendations:
reset_all_existing_recommendations()
csv_filenames = get_all_csv_filenames_from_folder(path)
for filename in csv_filenames:
try:
handle_recommendation_file(filename, region)
except exceptions.ValidationError as error:
self.print_error(filename, error)
except ValueError as error:
self.print_error(filename, error)
def print_error(self, filename, error):
error = '{filename}: {error_message}'.format(
            filename=filename, error_message=str(error))
self.stdout.write(self.style.ERROR(error))
def reset_all_existing_recommendations():
TaskServiceSimilarityScore.objects.all().delete()
def get_all_csv_filenames_from_folder(path):
result = []
directory = os.fsencode(path)
for file in os.listdir(directory):
filename = os.fsdecode(file)
if filename.endswith(".csv"):
            result.append(os.path.join(path, filename))
return result
def handle_recommendation_file(filename, region):
topic_id = get_topic_id_from_filename(filename)
csv_data = read_csv_data_from_file(filename)
change_records = parse_csv_data(topic_id, region, csv_data)
save_changes_to_database(change_records)
def parse_csv_data(topic_id, region, csv_data):
header = csv_data[0]
rows = csv_data[1:]
valid_rows = remove_row_count_line(rows)
service_id_index = get_index_for_header(header, 'service_id')
exclude_index = get_index_for_header(header, 'Include/Exclude')
return build_change_records(topic_id, service_id_index, exclude_index, region, valid_rows)
def get_topic_id_from_filename(path):
filename = os.path.basename(path)
return filename.split('.')[0]
def get_index_for_header(header_row, expected_header):
return header_row.index(expected_header)
def build_change_records(topic_id, service_id_index, exclude_index, region, rows):
def add_region(region, the_id):
if region:
return f'{the_id}_{region}'
return the_id
def make_record(line): return {
'topic_id': add_region(region, topic_id),
'service_id': add_region(region, line[service_id_index]),
'exclude': line[exclude_index],
}
return list(map(make_record, rows))
def remove_row_count_line(rows):
invalid_line_pattern = "\\(\\d+ rows\\)"
def is_valid(row): return not re.match(invalid_line_pattern, str(row[0]))
return filter(is_valid, rows)
def save_changes_to_database(change_records):
valid_records = validate_records(change_records)
for record in filter_excluded_records(valid_records):
remove_record(record)
for record in filter_included_records(valid_records):
if validate_service_id(record):
save_record(record)
def validate_records(change_records):
for record in change_records:
exclude = record['exclude']
if exclude != 'Exclude' and exclude != 'Include':
raise exceptions.ValidationError(exclude + ': Invalid value in the Include/Exclude column')
return change_records
def validate_service_id(record):
try:
Service.objects.get(id=record['service_id'])
    except Exception:
LOGGER.warning('%s: Invalid service id', record['service_id'])
return False
return True
def filter_included_records(change_records):
return filter(lambda record: record['exclude'] != 'Exclude', change_records)
def filter_excluded_records(change_records):
return filter(lambda record: record['exclude'] == 'Exclude', change_records)
def remove_record(record):
(TaskServiceSimilarityScore.objects.
filter(task_id__exact=record['topic_id']).
filter(service_id__exact=record['service_id']).
delete())
def save_record(record):
manual_similarity_score = 1.0
TaskServiceSimilarityScore.objects.update_or_create(
task_id=record['topic_id'],
service_id=record['service_id'],
defaults={
'similarity_score': manual_similarity_score
}
)
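# Illustrative example, not part of the command: a recommendation file named after its
# topic, e.g. "find-housing.csv", is expected to look roughly like this (extra columns
# and a trailing "(N rows)" line are ignored):
#
#     service_id,Include/Exclude,notes
#     12345,Include,good fit for this topic
#     67890,Exclude,should never be recommended
#     (2 rows)
#
# With --region bc, parse_csv_data() would produce change records such as
# {'topic_id': 'find-housing_bc', 'service_id': '12345_bc', 'exclude': 'Include'};
# "Include" rows are saved with similarity score 1.0 and "Exclude" rows delete any
# existing recommendation for that topic/service pair.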
|
11577757
|
ALERT_RESP = {
"primary_id": 3232,
"alert_type": {
"id": 1793,
"created_at": "2019-05-25T19:40:09.132456Z",
"updated_at": "2019-08-12T18:40:12.132456Z",
"type_id": "8916-1b5d68c0519f",
"category": "Host",
"detail_fields": [
"username"
],
"is_default": False,
"is_internal": True,
"name": "HX",
"summary_fields": [
"malwaretype",
"virus"
],
"source": [
"agenthostname",
"agentip"
],
"destination": [],
"created_by": "id",
"updated_by": "id"
},
"assigned_to": None,
"context": None,
"created_by": {
"id": "id",
"avatar": "avatar",
"name": "System User",
"username": "system_user",
"primary_email": "<EMAIL>"
},
"events_count": 2,
"notes_count": 0,
"queues": [
"Default Queue"
],
"source_url": "https://url",
"updated_by": {
"id": "id",
"avatar": "avatar",
"name": "George",
"username": "<EMAIL>",
"primary_email": "<EMAIL>"
},
"organization": "demisto",
"created_at": "2019-03-30T19:40:16.132456Z",
"updated_at": "2019-10-20T12:35:02.132456Z",
"id": 123,
"alert_threat": "Unknown",
"alert_type_details": {
"source": "siem",
"detail": {
"username": "demon",
"processpath": "c:\\windows\\microsoft.net\\framework\\v7.0.30319\\csc.exe",
"confidence": "high",
"sha1": "sha1",
"agenthostname": "siem",
"pid": 11,
"objecttype": "file",
"hostname": "helix.apps.fireeye.com",
"bytes": 35,
"meta_deviceid": "deviceID",
"agentip": "192.168.0.1",
"virus": "gen:variant.ursu",
"result": "quarantined",
"malwaretype": "malware",
"createdtime": "2019-03-30T14:07:53.667Z",
"lastmodifiedtime": "2019-03-31T14:07:53.778Z",
"filename": "c:\\users\\demon\\appdata\\local\\temp",
"accountdomain": "siem",
"method": "oas",
"lastaccessedtime": "2019-03-30T14:07:53.217Z",
"md5": "md5"
},
"summary": {
"virus": "gen:variant.ursu",
"malwaretype": "malware"
}
},
"assigned_at": None,
"classification": 30,
"closed_reason": "",
"closed_state": "Unknown",
"confidence": "High",
"description": "FireEye HX detected and quarantined malware on this system.",
"distinguisher_key": "quarantined",
"distinguishers": {
"virus": "gen:variant.ursu",
"agentid": "4fkds",
"result": "quarantined",
"malwaretype": "malware"
},
"emailed_at": 7371,
"events_threshold": 1,
"external_id": "",
"first_event_at": "2019-03-30T14:07:34.132456ZZ",
"last_event_at": "2019-03-31T14:08:07.132456ZZ",
"external_ips": [],
"external_ips_count": 0,
"info_links": [],
"internal_ips": [],
"internal_ips_count": 0,
"is_suppressed": False,
"is_threat": False,
"is_tuned": False,
"kill_chain": [
"5 - Installation"
],
"last_sync_ms": 15535426,
"message": "FIREEYE H",
"metaclasses": {
"ids,antivirus": 2
},
"mongo_id": "5c99",
"origin_id": "map_rule",
"products": {
"hx": 2
},
"risk": "Medium",
"risk_order": 2,
"search": "class=fireeye_hx_alert eventlog=mal result=quarantined NOT srcipv4:$exclusions.global.srcipv4",
"seconds_threshold": 60,
"severity": "Medium",
"source_revision": 0,
"state": "Open",
"tags": [
"fireeye"
],
"threat_changed_at": None,
"threat_type": 50,
"trigger_id": "2615",
"trigger_revision": 0,
"tuning_search": "",
"type": "fireeye_rule"
}
ALERTS_RESP = {
"meta": {
"count": 115,
"previous": None,
"limit": 2,
"offset": 0,
"next": ""
},
"results": [
{
"primary_id": 3232,
"alert_type": {
"id": 1793,
"created_at": "2019-05-25T19:40:09.132456Z",
"updated_at": "2019-08-12T18:40:12.132456Z",
"type_id": "8916-1b5d68c0519f",
"category": "Host",
"detail_fields": [
"username"
],
"is_default": False,
"is_internal": True,
"name": "HX",
"summary_fields": [
"malwaretype",
"virus"
],
"source": [
"agenthostname",
"agentip"
],
"destination": [],
"created_by": "id",
"updated_by": "id"
},
"assigned_to": None,
"context": None,
"created_by": {
"id": "id",
"avatar": "avatar",
"name": "System User",
"username": "system_user",
"primary_email": "<EMAIL>"
},
"events_count": 2,
"notes_count": 0,
"queues": [
"Default Queue"
],
"source_url": "https://url",
"updated_by": {
"id": "id",
"avatar": "avatar",
"name": "George",
"username": "<EMAIL>",
"primary_email": "<EMAIL>"
},
"organization": "demisto",
"created_at": "2019-03-30T19:40:16.132456Z",
"updated_at": "2019-10-20T12:35:02.132456Z",
"id": 123,
"alert_threat": "Unknown",
"alert_type_details": {
"source": "siem",
"detail": {
"username": "demon",
"processpath": "c:\\windows\\microsoft.net\\framework\\v7.0.30319\\csc.exe",
"confidence": "high",
"sha1": "sha1",
"agenthostname": "siem",
"pid": 11,
"objecttype": "file",
"hostname": "helix.apps.fireeye.com",
"bytes": 35,
"meta_deviceid": "deviceID",
"agentip": "192.168.0.1",
"virus": "gen:variant.ursu",
"result": "quarantined",
"malwaretype": "malware",
"createdtime": "2019-03-30T14:07:53.667Z",
"lastmodifiedtime": "2019-03-31T14:07:53.778Z",
"filename": "c:\\users\\demon\\appdata\\local\\temp",
"accountdomain": "siem",
"method": "oas",
"lastaccessedtime": "2019-03-30T14:07:53.217Z",
"md5": "md5"
},
"summary": {
"virus": "gen:variant.ursu",
"malwaretype": "malware"
}
},
"assigned_at": None,
"classification": 30,
"closed_reason": "",
"closed_state": "Unknown",
"confidence": "High",
"description": "FireEye HX detected and quarantined malware on this system.",
"distinguisher_key": "quarantined",
"distinguishers": {
"virus": "gen:variant.ursu",
"agentid": "4fkds",
"result": "quarantined",
"malwaretype": "malware"
},
"emailed_at": 7371,
"events_threshold": 1,
"external_id": "",
"first_event_at": "2019-03-30T14:07:34.132456ZZ",
"last_event_at": "2019-03-31T14:08:07.132456ZZ",
"external_ips": [],
"external_ips_count": 0,
"info_links": [],
"internal_ips": [],
"internal_ips_count": 0,
"is_suppressed": False,
"is_threat": False,
"is_tuned": False,
"kill_chain": [
"5 - Installation"
],
"last_sync_ms": 15535426,
"message": "FIREEYE H",
"metaclasses": {
"ids,antivirus": 2
},
"mongo_id": "5c99",
"origin_id": "map_rule",
"products": {
"hx": 2
},
"risk": "Medium",
"risk_order": 2,
"search": "class=fireeye_hx_alert eventlog=mal result=quarantined NOT srcipv4:$exclusions.global.srcipv4",
"seconds_threshold": 60,
"severity": "Medium",
"source_revision": 0,
"state": "Open",
"tags": [
"fireeye"
],
"threat_changed_at": None,
"threat_type": 50,
"trigger_id": "2615",
"trigger_revision": 0,
"tuning_search": "",
"type": "fireeye_rule"
},
{
"primary_id": 23,
"alert_type": {
"id": 18,
"created_at": "2019-03-25T10:40:09.132456Z",
"updated_at": "2019-09-10T18:40:13.132456Z",
"type_id": "03e1099a-38d8",
"category": "Host",
"detail_fields": [
"eventtime"
],
"is_default": False,
"is_internal": True,
"name": "HX",
"summary_fields": [
"result",
"iocnames"
],
"source": [
"agenthostname",
"agentip"
],
"destination": [],
"created_by": "ab",
"updated_by": "ab"
},
"assigned_to": None,
"context": None,
"created_by": {
"id": "ab",
"avatar": "avatar",
"name": "System User",
"username": "system_user",
"primary_email": "<EMAIL>"
},
"events_count": 2,
"notes_count": 0,
"queues": [
"Default Queue"
],
"source_url": "https://source_url.com",
"updated_by": {
"id": "e7",
"avatar": "avatar",
"name": "George",
"username": "<EMAIL>",
"primary_email": "<EMAIL>"
},
"organization": "",
"created_at": "2019-03-30T19:40:17.132456Z",
"updated_at": "2019-10-23T20:35:02.132456Z",
"id": 32,
"alert_threat": "Unknown",
"alert_type_details": {
"source": "siem",
"detail": {
"username": "system",
"processpath": "c:\\windows\\system32\\cmd.exe",
"eventtime": "2019-03-30T14:11:31.000Z",
"hostname": "helix.apps.fireeye.com",
"iocnames": "cobalt strike",
"process": "cmd.exe",
"args": "cmd.exe /c echo zhfrlb",
"pid": 99,
"agentip": "192.168.0.1",
"meta_deviceid": "86",
"result": "alert",
"starttime": "2019-03-30T14:11:20.002Z",
"pprocess": "services.exe",
"ppid": 66,
"agenthostname": "siem",
"md5": "md5"
},
"summary": {
"result": "alert",
"iocnames": "cobalt strike"
}
},
"assigned_at": None,
"classification": 2,
"closed_reason": "",
"closed_state": "Unknown",
"confidence": "High",
"description": "This rule alerts on IOC.",
"distinguisher_key": "cobalt strike",
"distinguishers": {
"agentid": "fw",
"iocnames": "cobalt strike"
},
"emailed_at": 737100,
"events_threshold": 1,
"external_id": "",
"first_event_at": "2019-03-25T14:09:45.132456Z",
"last_event_at": "2019-03-25T14:11:31.132456Z",
"external_ips": [],
"external_ips_count": 0,
"info_links": [],
"internal_ips": [],
"internal_ips_count": 0,
"is_suppressed": False,
"is_threat": False,
"is_tuned": False,
"kill_chain": [
"5 - Installation"
],
"last_sync_ms": 1553542006849,
"message": "FIREEYE HX [IOC Process Event]",
"metaclasses": {
"ids": 2
},
"mongo_id": "5c",
"origin_id": "map_rule",
"products": {
"hx": 2
},
"risk": "Medium",
"risk_order": 2,
"search": "class=fireeye_hx_alert eventlog=ioc eventtype=processevent NOT srcipv4:$exclusions.global.srcipv4",
"seconds_threshold": 60,
"severity": "Medium",
"source_revision": 0,
"state": "Open",
"tags": [
"fireeye",
"helixhxrule"
],
"threat_changed_at": None,
"threat_type": 50,
"trigger_id": "42399",
"trigger_revision": 0,
"tuning_search": "",
"type": "fireeye_rule"
}
]
}
CASES_BY_ALERT_RESP = {
"meta": {
"count": 1,
"previous": None,
"limit": 30,
"offset": 0,
"next": None
},
"results": [
{
"assigned_to": None,
"created_at": "created_at",
"created_by": {
"id": "id",
"avatar": "avatar",
"name": "name",
"username": "username",
"primary_email": "primary_email"
},
"description": "",
"events_count": 10,
"id": 35,
"info_links": [],
"name": "demisto test case",
"notes_count": 0,
"priority": "Critical",
"priority_order": 4,
"severity": 10,
"state": "Testing",
"status": "Declared",
"tags": [],
"total_days_unresolved": "16 23:52:09.819390",
"updated_at": "updated_at",
"updated_by": {
"id": "id",
"avatar": "avatar",
"name": "name",
"username": "username",
"primary_email": "primary_email"
}
}
]
}
ENDPOINTS_BY_ALERT_RESP = {
"meta": {
"count": 1,
"previous": None,
"limit": 30,
"offset": 0,
"next": None
},
"results": {
"status": "completed",
"endpoints": [
{
"id": 191,
"customer_id": "demisto",
"agent_id": "agent_id",
"containment_queued": False,
"containment_state": "normal",
"created_at": "created_at",
"device_id": "device_id",
"domain": "WORKGROUP",
"hostname": "Demisto",
"mac_address": "mac_address",
"operating_system": "Windows 10 Pro",
"primary_ip_address": "primary_ip_address",
"updated_at": "updated_at",
"timezone": "timezone",
"hash": "hash",
"source_url": "source_url"
}
]
}
}
EVENTS_BY_ALERT_RESP = {
"meta": {
"count": 10,
"previous": None,
"limit": 1,
"offset": 0,
"next": ""
},
"results": [
{
"username": "admin",
"_eventid": "",
"process": "net1",
"agenturi": "/hx/api/v3/hosts/f9zsksax",
"pid": 404,
"matched_at": "2019-08-11t06:51:40.000z",
"pprocesspath": "c:\\windows\\system32\\net1",
"result": "alert",
"meta_ts": "2019-09-11T06:51:40.000Z",
"processpath": "c:\\windows\\system32\\net1.exe",
"_errors": [],
"meta_agenturi": "/hx/api/v3/hosts/f9zsksax",
"meta_rule": "fireeye_hx_alert",
"indicator": {
"category": "custom",
"display_name": "tactic",
"url": "/hx/api/v3/indicators/custom/f9zsksax",
"signature": None,
"_id": "f9zsksax",
"uri_name": "f9zsksax"
},
"uuid": "f9zsksax",
"eventlog": "ioc",
"reported_at": "2019-09-13t06:53:08.000",
"eventtype": "processevent",
"msr_ruleids": [],
"agentstatus": "normal",
"condition": {
"indicators": [
{
"category": "custom",
"name": "tactic",
"signature": None
}
]
},
"hx_alert_id": 859,
"detect_rulematches": [
{
"confidence": "high",
"severity": "medium",
"ruleid": "99",
"tags": [
"fireeye",
"helixhxrule",
"ioc"
],
"rulename": "fireeye hx",
"revision": 0
},
{
"confidence": "medium",
"severity": "medium",
"ruleid": "1",
"tags": [],
"rulename": "test",
"revision": 0
}
],
"alerturi": "f9zsksax==",
"ppid": 142,
"metaclass": "ids",
"eventid": "101",
"eventtime": "2019-09-13T06:51:59.000Z",
"iocnames": "tactic",
"md5values": [
"md5"
],
"uri_parsed": "uri",
"args": "c:\\windows\\system32\\net1",
"detect_ruleids": [
"99"
],
"agentdetails": {
"containmentState": "normal",
"appStarted": "2019-09-10t05:41:17z",
"regOwner": "george",
"ProRemSvcStatus": "running",
"ProcessTrackerStatus": "disabled",
"configId": "sljlx==",
"timezone": "",
"productID": "00311",
"totalphysical": "170053200",
"ExdPluginStatus": "running",
"uptime": "pt3514s",
"installDate": "2019-07-08t13:28:00z",
"MalwareProtectionStatus": "running",
"@created": "2019-09-13t06:22:12z",
"KernelServices": {
"Status": "loaded"
},
"procConfigInfo": {
"lpcDevice": "intel",
"iommu": "enabled",
"virtualization": "enabled",
"vmGuest": "no"
},
"appVersion": "30.0",
"machine": "desktop",
"platform": "win",
"configChannel": "6430f3d0aea8",
"stateAgentStatus": "ok",
"intelVersion": "101",
"biosInfo": {
"biosVersion": "dell inc.",
"biosDate": "05/09/2009",
"biosType": "uefi"
},
"appCreated": "2019-07-21t16:00:05z",
"networkArray": {
"networkInfo": [
{
"ipArray": {
"ipInfo": [
{
"ipv6Address": "1:1:1:1"
},
{
"ipAddress": "192.168.0.1"
}
]
},
"MAC": "MAC",
"adapter": "{adapter}",
"description": "pangp virtual #2"
},
{
"ipArray": {
"ipInfo": [
{
"ipv6Address": "1:1:1:1"
},
{
"subnetMask": "255.255.0.0",
"ipAddress": "192.168.0.1"
}
]
},
"MAC": "mac",
"adapter": "{}",
"description": "npcap loopback adapter"
},
{
"ipArray": {
"ipInfo": [
{
"ipv6Address": "1:1:1:1"
},
{
"subnetMask": "255.255.255.0",
"ipAddress": "192.168.0.1"
}
]
},
"MAC": "mac",
"adapter": "{}",
"description": "virtualbox host"
},
{
"ipArray": {
"ipInfo": [
{
"ipv6Address": "1:1:1:1"
},
{
"ipAddress": "192.168.0.1"
}
]
},
"MAC": "mac",
"adapter": "{}",
"description": "microsoft wi-fi"
},
{
"ipArray": {
"ipInfo": [
{
"ipv6Address": "1:1:1:1"
},
{
"ipAddress": "192.168.0.1"
}
]
},
"MAC": "mac",
"adapter": "{}",
"description": "microsoft wi-fi"
},
{
"dhcpLeaseObtained": "2019-09-13t06:50:36z",
"description": "vmware virtual ethernet",
"adapter": "{}",
"MAC": "mac",
"dhcpServerArray": {
"dhcpServer": [
"192.168.0.1"
]
},
"dhcpLeaseExpires": "2019-09-13t07:23:36z",
"ipArray": {
"ipInfo": [
{
"ipv6Address": "1:1:1:1"
},
{
"subnetMask": "255.255.255.0",
"ipAddress": "192.168.0.1"
}
]
}
},
{
"dhcpLeaseObtained": "2019-09-11t11:18:59z",
"ipGatewayArray": {
"ipGateway": [
"192.168.0.1"
]
},
"description": "intel(r) dual band",
"adapter": "{}",
"MAC": "mac",
"dhcpServerArray": {
"dhcpServer": [
"192.168.0.1"
]
},
"dhcpLeaseExpires": "2019-01-19t16:18:59z",
"ipArray": {
"ipInfo": [
{
"subnetMask": "255.255.255.0",
"ipAddress": "192.168.0.1"
}
]
}
},
{
"ipArray": {
"ipInfo": [
{
"ipv6Address": "1:1:1:1"
},
{
"ipAddress": "192.168.0.1"
}
]
},
"MAC": "mac",
"adapter": "{}",
"description": "bluetooth device"
},
{
"ipArray": {
"ipInfo": [
{
"ipv6Address": "1:1:1:1"
},
{
"ipAddress": "192.168.0.1"
}
]
},
"adapter": "{}",
"description": "software loopback interface 1"
}
]
},
"drives": "c:,g:",
"intelTimestamp": "2019-01-12t06:51:20z",
"malware": {
"mg": {
"engine": {
"version": "30.19"
},
"content": {
"updated": "2019-01-16t06:12:55z",
"version": "14"
}
},
"UserFPExclusionsContentVersion": "0.0.0",
"DTIExclusionsContentVersion": "1.13.5",
"UserFPExclusionsSchemaVersion": "1.0.0",
"version": "30.17.0",
"QuarantineStatus": "cleanenabled",
"av": {
"engine": {
"version": "11.0"
},
"content": {
"updated": "2019-09-11t04:52:56z",
"version": "7"
}
},
"config": {
"mg": {
"status": "enabled",
"quarantine": {
"status": "enabled"
}
},
"av": {
"status": "enabled",
"quarantine": {
"status": "cleanenabled"
}
}
},
"DTIExclusionsSchemaVersion": "1.0.0"
},
"buildNumber": "18",
"FIPS": "disabled",
"user": "system",
"date": "2019-09-13T06:52:57.000Z",
"productName": "windows 10 home",
"gmtoffset": "+p",
"intelETag": "v1",
"ExdPlugin": {
"engine": {
"version": "300"
},
"content-rules": {
"version": "3.6"
},
"content-whitelist": {
"version": "1.6"
},
"version": "30.6"
},
"OSbitness": "64-bit",
"procType": "multiprocessor free",
"primaryIpv4Address": "192.168.0.1",
"timezoneDST": "",
"EventorStatus": "running",
"availphysical": "4666",
"timezoneStandard": "",
"configETag": "v1/156",
"directory": "c:\\windows\\system32",
"processor": "intel(r) core(tm)",
"clockSkew": "+pts"
},
"meta_deviceid": "",
"agentdomain": "workgroup",
"pprocess": "net.exe",
"is_false_positive": False,
"class": "fireeye_hx_alert",
"agentos": "windows 10 home 18362",
"md5": "md5",
"agentmac": "mac",
"__metadata__": {
"raw_batch_id": "ed5b3525b0c4",
"data_type": "passthrough",
"disable_index": False,
"dynamic_taxonomy": True,
"num_events": 1,
"source_type": "json",
"target_index": "alerts",
"batch_id": "ee7d3ebbed5b3525b0c4",
"customer_id": "",
"id": "9-09-12",
"sequence_number": 0
},
"agentloggedonusers": "font driver host",
"conditionid": "jjvnefleq==",
"uri": "",
"detect_rulenames": [
"fireeye hx [ioc process event]",
"test"
],
"agentip": "192.168.0.1",
"subtype": "None",
"deviceid": "759c",
"starttime": "2019-09-13T06:51:59.276Z",
"agentid": "hcldmjf9zfmwxov9",
"agenthostname": "dm1ps9",
"meta_agentid": "hczFMWXOV9",
"event_values": {
"processEvent/processCmdLine": "c:\\windows\\system32\\net1",
"processEvent/parentPid": 14,
"processEvent/md5": "md5",
"processEvent/processPath": "c:\\windows\\system32\\net1",
"processEvent/parentProcess": "net",
"processEvent/timestamp": "2019-09-13t06:51:59.276z",
"processEvent/startTime": "2019-09-13t06:51:59.276z",
"processEvent/process": "net1.exe",
"processEvent/username": "desktop-54m",
"processEvent/pid": 400,
"processEvent/parentProcessPath": "c:\\windows\\system32\\net.exe",
"processEvent/eventType": "start"
}
}
]
}
NOTES_GET_RESP = {
"meta": {
"count": 2,
"previous": None,
"limit": 30,
"offset": 0,
"next": None
},
"results": [
{
"created_by": {
"id": "a",
"avatar": "avatar",
"name": "George",
"username": "<EMAIL>",
"primary_email": "<EMAIL>"
},
"created_at": "2019-10-28T07:41:30.396000Z",
"id": 9,
"updated_at": "2019-10-28T07:41:42.000123Z",
"note": "This is a note test"
},
{
"created_by": {
"id": "a",
"avatar": "avatar",
"name": "George",
"username": "<EMAIL>",
"primary_email": "<EMAIL>"
},
"created_at": "2019-10-24T13:52:19.021299Z",
"id": 91,
"updated_at": "2019-10-24T13:52:19.021399Z",
"note": "What a great note this is"
}
]
}
NOTES_CREATE_RESP = {
"created_by": {
"id": "a",
"avatar": "avatar",
"name": "George",
"username": "<EMAIL>",
"primary_email": "<EMAIL>"
},
"created_at": "2019-10-28T07:41:30.396000Z",
"id": 9,
"updated_at": "2019-10-28T07:41:42.000123Z",
"note": "This is a note test"
}
LIST_SINGLE_ITEM_RESP = {
"id": 163,
"value": "aTest list",
"type": "misc",
"risk": "Medium",
"notes": "test ok",
"list": 3232
}
LIST_ITEMS_RESP = {
"meta": {
"count": 1,
"previous": None,
"limit": 30,
"offset": 0,
"next": None
},
"results": [
{
"id": 163,
"value": "Test list",
"type": "misc",
"risk": "Low",
"notes": "",
"list": 3232
}
]
}
SEARCH_MULTI_RESP = {
"dsl": {
"from": 0,
"aggs": {
"groupby:subject": {
"meta": {
"field": "subject",
"type": "groupby"
},
"terms": {
"field": "subject.raw",
"order": {
"_count": "desc"
},
"min_doc_count": 1,
"size": 50
}
}
},
"terminate_after": 1,
"directives": {
"scroll_id": "",
"page_size": 2,
"start": "2019-10-28T08:00:00.000Z",
"highlight_terms": [],
"limit": 1,
"timeout": 120000,
"offset": 0,
"indices": [
"events",
"alerts",
"appliance_health"
],
"end": "2019-10-29T08:36:16.947Z",
"search_customer_ids": [
"demisto"
],
"customer_id": "demisto",
"scroll": False
},
"timeout": "120000ms",
"query": {
"bool": {
"filter": [
{
"range": {
"meta_ts": {
"gte": "2019-10-28T08:00:00.000Z",
"lte": "2019-10-29T08:36:16.947Z"
}
}
},
{
"common": {
"domain": {
"cutoff_frequency": 0.001,
"query": "google.com",
"high_freq_operator": "and",
"low_freq_operator": "and"
}
}
}
]
}
},
"size": 2
},
"highlight_terms": None,
"options": {
"disable_regex": False,
"default_timestamp": "meta_ts",
"analyzer_impl": "legacy",
"indices": [
"events",
"alerts",
"appliance_health"
],
"quick_mode": True,
"filters": [],
"offset": 0,
"default_field": "rawmsg",
"use_terminate_after": True,
"scroll": False,
"page_size": 10,
"groupby": {
"threshold": 1,
"separator": "|%$,$%|",
"size": 50
},
"search_customer_ids": [
"demisto"
],
"limit": -1,
"list_type": "indicator",
"es6_compatible": True,
"use_limit_filters": False,
"customer_id": "demisto",
"script_impl": "native"
},
"mql": "domain:google.com and meta_ts>=2019-10-25T09:07:43.810Z {page_size:2 offset:1 limit:1} | groupby subject sep=`|%$,$%|`", # noqa: E501
"results": {
"hits": {
"hits": [
{
"_score": 0.0,
"_type": "event",
"_id": "demisto",
"_source": {
"status": "delivered",
"domain": "mx.google.com",
"_eventid": "demisto",
"rawmsg": "raw_msg",
"meta_cbname": "helix-etp_stats",
"srcipv4": "8.8.8.8",
"meta_ts": "2019-10-28T10:49:27.210Z",
"srclongitude": -122.0785140991211,
"size": "21.23",
"srccountry": "united states",
"eventtype": "trace",
"srccity": "mountain view",
"to": "<EMAIL>",
"srclatitude": 37.40599060058594,
"subject": "google",
"metaclass": "email",
"eventid": "demisto",
"inreplyto": "demisto",
"eventtime": "2019-10-28T10:43:11.000Z",
"srcregion": "california",
"meta_oml": 1036,
"class": "fireeye_etp",
"mailfrom": "<EMAIL>",
"rawmsghostname": "helix-etp_stats-demisto-etp_stats",
"__metadata__": {
"raw_batch_id": "demisto",
"data_type": "passthrough",
"disable_index": False,
"dynamic_taxonomy": False,
"num_events": 1,
"source_type": "json",
"target_index": "",
"batch_id": "demisto",
"customer_id": "demisto",
"id": "demisto",
"sequence_number": 0
},
"srcdomain": "google.com",
"srcisp": "google llc",
"srcusagetype": "dch",
"srccountrycode": "us",
"meta_rts": "2019-10-28T10:49:27.000Z",
"meta_cbid": 99999
},
"_index": "2019-10-28t00:00:00.000z"
},
{
"_score": 0.0,
"_type": "event",
"_id": "demisto",
"_source": {
"status": "delivered",
"domain": "gmr-mx.google.com",
"_eventid": "demisto",
"rawmsg": "demisto",
"meta_cbname": "helix-etp_stats",
"srcipv4": "8.8.8.8",
"meta_ts": "2019-10-29T05:13:24.009Z",
"srclongitude": -122.0785140991211,
"size": "315.29",
"srccountry": "united states",
"eventtype": "trace",
"srccity": "mountain view",
"to": "<EMAIL>",
"srclatitude": 37.40599060058594,
"subject": "Demisto subj",
"metaclass": "email",
"eventid": "demisto",
"inreplyto": "<EMAIL>",
"eventtime": "2019-10-29T05:08:39.000Z",
"srcregion": "california",
"meta_oml": 1178,
"class": "fireeye_etp",
"mailfrom": "<EMAIL>",
"rawmsghostname": "helix-etp_stats-demisto-etp_stats",
"__metadata__": {
"raw_batch_id": "demisto",
"data_type": "passthrough",
"disable_index": False,
"dynamic_taxonomy": False,
"num_events": 4,
"source_type": "json",
"target_index": "",
"batch_id": "demisto",
"customer_id": "demisto",
"id": "demisto",
"sequence_number": 1
},
"srcdomain": "google.com",
"srcisp": "google llc",
"srcusagetype": "dch",
"srccountrycode": "us",
"meta_rts": "2019-10-29T05:13:24.000Z",
"meta_cbid": 99999
},
"_index": "2019-10-29t00:00:00.000z"
}
],
"total": 11,
"max_score": 0.0
},
"_shards": {
"successful": 66,
"failed": 0,
"total": 66
},
"took": 3046,
"aggregations": {
"groupby:subject": {
"buckets": [
{
"key": "google alert - gold",
"doc_count": 3
},
{
"key": "accepted: meeting",
"doc_count": 1
},
{
"key": "invitation: Declined",
"doc_count": 1
}
],
"meta": {
"field": "subject",
"type": "groupby"
},
"sum_other_doc_count": 0,
"doc_count_error_upper_bound": 0
}
},
"metrics": {
"load": 2.8539999999999996,
"regex": False,
"list": False,
"aggregation": True,
"subsearch": False
},
"terminated_early": True,
"timed_out": False,
"failures": []
}
}
SEARCH_ARCHIVE_RESP = {
"meta": {
"totalCount": 2,
"limit": 30,
"offset": 0
},
"data": [
{
"_createdBy": {
"id": "demisto",
"avatar": "avatar",
"name": "George",
"username": "<EMAIL>",
"primary_email": "<EMAIL>"
},
"_updatedBy": {
"id": "demisto",
"avatar": "avatar",
"name": "George",
"username": "<EMAIL>",
"primary_email": "<EMAIL>"
},
"completeAfterCount": 0,
"completeAfterDuration": 0,
"createDate": "2019-10-09T11:19:38.253848Z",
"customer_id": "demisto",
"emailNotify": False,
"errors": [],
"id": "82",
"is_part_of_report": False,
"name": "",
"numResults": 457,
"percentComplete": 100.0,
"query": "domain:[google,com] | groupby eventtype",
"queryAST": "{}",
"searchEndDate": "2019-10-09T11:19:00Z",
"searchStartDate": "2019-10-09T11:19:00Z",
"sourceBucket": "",
"state": "completed",
"timeRemaining": 0.0,
"updateDate": "2019-10-09T11:19:00.686503Z"
},
{
"_createdBy": {
"id": "demisto",
"avatar": "avatar",
"name": "George",
"username": "<EMAIL>",
"primary_email": "<EMAIL>"
},
"_updatedBy": {
"id": "demisto",
"avatar": "avatar",
"name": "George",
"username": "<EMAIL>",
"primary_email": "<EMAIL>"
},
"completeAfterCount": 0,
"completeAfterDuration": 0,
"createDate": "2019-10-09T11:18:52.250000Z",
"customer_id": "demisto",
"emailNotify": False,
"errors": [],
"id": "83",
"is_part_of_report": False,
"name": "",
"numResults": 20,
"percentComplete": 100.0,
"query": "domain:[google] | groupby eventtype",
"queryAST": "{}",
"searchEndDate": "2019-10-09T11:18:28Z",
"searchStartDate": "2019-10-09T11:18:28Z",
"sourceBucket": "",
"state": "completed",
"timeRemaining": 0.0,
"updateDate": "2019-10-09T11:19:21.916006Z"
}
]
}
SEARCH_AGGREGATIONS_SINGLE_RESP = {
"groupby:subject": {
"buckets": [
{
"key": "Test 1",
"doc_count": 1
},
{
"key": "Test 2",
"doc_count": 2
},
{
"key": "Test 3",
"doc_count": 3
},
{
"key": "Test 4",
"doc_count": 4
}
],
"meta": {
"field": "subject",
"type": "groupby"
}
}
}
SEARCH_ARCHIVE_RESULTS_RESP = {
"data": [
{
"_createdBy": {
"id": "demisto",
"avatar": "demisto",
"name": "George",
"username": "<EMAIL>",
"primary_email": "<EMAIL>"
},
"_updatedBy": {
"id": "demisto",
"avatar": "demisto",
"name": "George",
"username": "<EMAIL>",
"primary_email": "<EMAIL>"
},
"completeAfterCount": 0,
"completeAfterDuration": 0,
"createDate": "2019-10-06T11:18:38.253848Z",
"customer_id": "demisto",
"emailNotify": False,
"_errors": [],
"errors": [],
"id": "82",
"is_part_of_report": False,
"name": "",
"numResults": 457,
"percentComplete": 100.0,
"query": "domain:[google,com] | groupby eventtype",
"queryAST": "{}",
"searchEndDate": "2019-10-06T11:18:28Z",
"searchStartDate": "2019-10-05T11:18:28Z",
"sourceBucket": "",
"state": "completed",
"timeRemaining": 0.0,
"updateDate": "2019-10-06T11:18:54.686503Z"
}
],
"results": {
"dsl": {
"from": 0,
"aggs": {
"groupby:eventtype": {
"meta": {
"field": "eventtype",
"type": "groupby"
},
"terms": {
"field": "eventtype",
"order": {
"_count": "desc"
},
"min_doc_count": 1,
"size": 50
}
}
},
"terminate_after": -1,
"directives": {
"scroll_id": "",
"page_size": 10,
"start": "2019-10-28T15:00:00.000Z",
"highlight_terms": [],
"limit": -1,
"timeout": 120000,
"offset": 0,
"indices": [
"events",
"alerts",
"appliance_health"
],
"end": "2019-10-29T15:40:48.571Z",
"search_customer_ids": None,
"customer_id": "",
"scroll": False
},
"timeout": "120000ms",
"query": {
"bool": {
"filter": [
{
"range": {
"meta_ts": {
"gte": "2019-10-28T15:00:00.000Z",
"lte": "2019-10-29T15:40:48.571Z"
}
}
}
],
"minimum_should_match": 1,
"should": [
{
"common": {
"domain": {
"cutoff_frequency": 0.001,
"query": "google",
"high_freq_operator": "and",
"low_freq_operator": "and"
}
}
},
{
"common": {
"domain": {
"cutoff_frequency": 0.001,
"query": "com",
"high_freq_operator": "and",
"low_freq_operator": "and"
}
}
}
]
}
},
"size": 10
},
"mql": "domain:[google,com] | groupby eventtype sep=`|%$,$%|`",
"results": {
"hits": {
"stored": 457,
"hits": [
{
"_type": "event",
"_id": "demisto",
"_source": {
"status": "delivered",
"domain": "domain.com",
"_eventid": "demsito",
"rawmsg": "{}",
"meta_cbname": "helix-etp",
"srcipv4": "8.8.8.8",
"meta_ts": "2019-10-06T10:55:26.103Z",
"srclongitude": -0.1257400,
"size": "40.04",
"srccountry": "",
"eventtype": "trace",
"srccity": "london",
"to": "<EMAIL>",
"srclatitude": 51.8594,
"subject": "dictation users",
"metaclass": "email",
"eventid": "evenid",
"inreplyto": "squidward <<EMAIL>>",
"eventtime": "2019-10-06T10:48:13.000Z",
"srcregion": "",
"meta_oml": 908,
"class": "fireeye_etp",
"mailfrom": "<EMAIL>",
"rawmsghostname": "helix-etp_stats-etp_stats",
"__metadata__": {
"raw_batch_id": "",
"data_type": "passthrough",
"disable_index": False,
"dynamic_taxonomy": False,
"num_events": 10,
"source_type": "json",
"target_index": "",
"batch_id": "",
"customer_id": "",
"id": "",
"sequence_number": 1
},
"srcdomain": "",
"srcisp": "",
"srcusagetype": "",
"srccountrycode": "",
"meta_rts": "2019-10-06T10:55:26.000Z",
"meta_cbid": 99999
},
"_index": "archive"
},
{
"_type": "event",
"_id": "demisto",
"_source": {
"status": "delivered",
"domain": "demisto.com",
"_eventid": "",
"rawmsg": "{}",
"meta_cbname": "helix-etp_stats",
"srcipv4": "8.8.8.8",
"meta_ts": "2019-10-06T11:09:25.946Z",
"srclongitude": -75.19625,
"size": "10.75",
"srccountry": "",
"eventtype": "trace",
"srccity": "cha",
"to": "<EMAIL>",
"srclatitude": 40.282958,
"subject": "meet world",
"metaclass": "email",
"eventid": "demisto",
"inreplyto": "\"squidward\" <<EMAIL>>",
"eventtime": "2019-10-06T11:02:01.000Z",
"srcregion": "penn",
"meta_oml": 1160,
"class": "fireeye_etp",
"mailfrom": "<EMAIL>",
"rawmsghostname": "helix-etp_stats-etp_stats",
"__metadata__": {
"raw_batch_id": "demisto",
"data_type": "passthrough",
"disable_index": False,
"dynamic_taxonomy": False,
"num_events": 5,
"source_type": "json",
"target_index": "",
"batch_id": "",
"customer_id": "",
"id": "",
"sequence_number": 0
},
"srcdomain": "squidward.com",
"srcisp": "squidward",
"srcusagetype": "com",
"srccountrycode": "us",
"meta_rts": "2019-10-06T11:09:25.000Z",
"meta_cbid": 99999
},
"_index": "archive"
},
{
"_type": "event",
"_id": "demisto",
"_source": {
"status": "delivered",
"domain": "demisto.com",
"_eventid": "demiostop",
"rawmsg": "{}",
"meta_cbname": "helix-etp_stats",
"srcipv4": "8.8.8.8",
"meta_ts": "2019-10-06T11:09:25.946Z",
"srclongitude": -93.119,
"size": "26.92",
"srccountry": "united states",
"eventtype": "trace",
"srccity": "",
"to": "<EMAIL>",
"srclatitude": 33.50,
"subject": "fw: reminder",
"metaclass": "email",
"eventid": "dwasdkffv",
"inreplyto": "squidward <<EMAIL>>",
"eventtime": "2019-10-06T11:02:18.000Z",
"srcregion": "lo",
"meta_oml": 1065,
"class": "fireeye_etp",
"mailfrom": "<EMAIL>",
"rawmsghostname": "helix-etp_etp_stats",
"__metadata__": {
"raw_batch_id": "sdfdsfdsdfvbvd",
"data_type": "passthrough",
"disable_index": False,
"dynamic_taxonomy": False,
"num_events": 5,
"source_type": "json",
"target_index": "",
"batch_id": "afasvjbjhsde4",
"customer_id": "",
"id": "outg85cgj5",
"sequence_number": 1
},
"srcdomain": "demisto.com",
"srcisp": "demistos",
"srcusagetype": "dch",
"srccountrycode": "us",
"meta_rts": "2019-10-06T11:09:25.000Z",
"meta_cbid": 99999
},
"_index": "archive"
},
{
"_type": "event",
"_id": "squidsdaasfwardsasd",
"_source": {
"status": "delivered",
"domain": "demisto.com",
"_eventid": "jjdpse3",
"rawmsg": "{}",
"meta_cbname": "helix-etp_stats",
"srcipv4": "8.8.8.8",
"meta_ts": "2019-10-06T11:09:27.091Z",
"srclongitude": -84.377,
"size": "16.46",
"srccountry": "united states",
"eventtype": "trace",
"srccity": "at",
"to": "<EMAIL>",
"srclatitude": 33.770843,
"subject": "magic link",
"metaclass": "email",
"eventid": "93730",
"inreplyto": "geroge <<EMAIL>>",
"eventtime": "2019-10-06T11:03:00.000Z",
"srcregion": "georga",
"meta_oml": 1100,
"class": "fireeye_etp",
"mailfrom": "<EMAIL>",
"rawmsghostname": "helix-etp_s",
"__metadata__": {
"raw_batch_id": "ssas7",
"data_type": "passthrough",
"disable_index": False,
"dynamic_taxonomy": False,
"num_events": 5,
"source_type": "json",
"target_index": "",
"batch_id": "94gfjs83",
"customer_id": "",
"id": "skdjf8723d",
"sequence_number": 2
},
"srcdomain": "demisto.com",
"srcisp": "the demisto group",
"srcusagetype": "com",
"srccountrycode": "us",
"meta_rts": "2019-10-06T11:09:27.000Z",
"meta_cbid": 99999
},
"_index": "archive"
}
],
"total": 457
},
"aggregations": {
"groupby:eventtype": {
"limited": False,
"buckets": [
{
"key": "trace",
"doc_count": 452
},
{
"key": "dnslookupevent",
"doc_count": 5
}
],
"doc_count_error_upper_bound": 0,
"sum_other_doc_count": 0
}
},
"took": 4605
}
}
}
RULE_RESP = {
"rules": [
{
"customer_id": "demisto",
"id": "1.1.1",
"_rulePack": "1.1.1",
"assertions": [],
"assertionsCount": 0,
"alertType": "demisto",
"dependencies": [],
"dependenciesCount": 0,
"description": "demisto",
"internal": True,
"deleted": False,
"enabled": True,
"supported": False,
"createDate": "2019-03-30T19:25:00.11113Z",
"_createdBy": {
"id": "demisto",
"avatar": "avatar",
"name": "Demisto",
"username": "demisto",
"primary_email": "<EMAIL>"
},
"updateDate": "2019-10-30T20:07:27.330083Z",
"_updatedBy": {
"id": "demisto",
"avatar": "avatar",
"name": "Demisto",
"username": "demisto",
"primary_email": "<EMAIL>"
},
"classification": 40,
"confidence": "Medium",
"disabledReason": "",
"distinguishers": [
"srcipv4",
"srcipv6",
"category"
],
"eventsThreshold": 1,
"hash": "demisto",
"infoLinks": [],
"isTuned": False,
"protected": False,
"killChain": [
"6 - C2"
],
"message": "demisto",
"output": [
"alert"
],
"playbooks": [],
"queues": [
"Default Queue"
],
"risk": "Medium",
"search": "demisto",
"searches": [
{
"header": "demisto",
"category": "",
"search": "demisto",
"relativeTime": 860
},
{
"header": "demisto",
"category": "",
"search": "class=demisto msg=<%=msg%> | groupby [srcipv4]",
"relativeTime": 864
}
],
"secondsThreshold": 60,
"severity": "Medium",
"sourceRevision": 0,
"tags": [
"demisto",
"malware",
"http",
"md-info"
],
"threatType": 5,
"type": "alert",
"tuningEventsThreshold": 0,
"tuningSearch": "",
"tuningSecondsThreshold": 0,
"revisions": [
{
"enabled": True,
"_updatedBy": {
"id": "demisto",
"avatar": "avatar",
"name": "Demisto",
"username": "demisto",
"primary_email": "<EMAIL>"
},
"updateDate": "2019-10-29T30:07:27.380007Z"
},
{
"enabled": False,
"_updatedBy": {
"id": "demisto",
"avatar": "avatar",
"name": "Demisto",
"username": "demisto",
"primary_email": "<EMAIL>"
},
"updateDate": "2019-10-29T23:07:14.560140Z"
},
{
"updateDate": "2019-08-19T23:38:19.518212Z",
"_updatedBy": {
"id": "demisto",
"avatar": "avatar",
"name": "Demisto",
"username": "demisto",
"primary_email": "<EMAIL>"
},
"distinguishers": "[\"srcipv4\", \"srcipv6\", \"category\"]"
}
],
"revision": 3
}
],
"meta": {
"count": 2,
"previous": None,
"offset": 1,
"limit": 30,
"next": None
}
}
SEARCH_AGGREGATIONS_MULTI_RESP = {
"groupby:srcipv4_to_subject": {
"buckets": [
{
"key": "192.168.0.1|%$,$%|<EMAIL>|%$,$%|accepted",
"doc_count": 1
},
{
"key": "192.168.0.2|%$,$%|<EMAIL>|%$,$%|resume",
"doc_count": 2
},
{
"key": "192.168.0.3|%$,$%|<EMAIL>.com|%$,$%|position",
"doc_count": 3
}
],
"meta": {
"fields": [
"srcipv4",
"to",
"subject"
],
"type": "multi_groupby"
}
}
}
|
11577759
|
from .instance.config import *
import requests
import json
import time
import redis
def get_whats_new(country):
if country in supported_countries:
url = netflix_url
querystring = {"q": "get:new7:{}".format(
country), "p": "1", "t": "ns", "st": "adv"}
headers = netflix_headers
try:
response = requests.request(
"GET", url, headers=headers, params=querystring)
data = json.loads(response.text)['ITEMS']
        except Exception:
return False
final_result = []
for item in data:
synopsis = item['synopsis'].split("<br>")
result = {
"netflixid": item['netflixid'],
"title": item['title'],
"image": item['image'],
"synopsis": synopsis[0],
"rating": item['rating'],
"type": item['type'],
"release_date": item['released'],
"time": item['runtime'],
"download": item['download'],
"large_image": item['largeimage'],
"link": "https://www.netflix.com/title/" + item['netflixid']
}
final_result.append(result)
return final_result
else:
        # Unsupported country: recursively fall back to the default ('uk')
return get_whats_new('uk')
def redis_content(country):
named_tuple = time.localtime() # get struct_time
time_string = time.strftime("%m/%d/%Y", named_tuple)
time_string += country
try:
r = redis.from_url(redis_url)
if r.exists(time_string):
unpacked_json = json.loads(r.get(time_string))
return unpacked_json
else:
final_result = get_whats_new(country)
jsonify = json.dumps(final_result)
r.set(time_string, jsonify)
return final_result
    except Exception:
return get_whats_new(country)
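
# Hypothetical usage sketch (assumes redis_url, netflix_url, netflix_headers and
# supported_countries are all provided by instance/config.py):
#   titles = redis_content('gb')
#   print(titles[0]['title'] if titles else 'no new titles')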
|
11577775
|
from itertools import islice, permutations
import re
def circular_window(seq, n=2):
it = iter(seq + seq[:n - 1])
result = tuple(islice(it, n))
yield result
for elem in it:
result = result[1:] + (elem,)
yield result
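
# For example, list(circular_window([1, 2, 3], 2)) == [(1, 2), (2, 3), (3, 1)]:
# the final window wraps around to the start of the sequence, which is what makes
# the seating arrangement circular.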
def calculate_happiness_change(graph, order):
return sum(graph[m][l] + graph[m][r]
for l, m, r in circular_window(order, 3))
def find_max_happiness_change(graph):
return max(calculate_happiness_change(graph, o)
for o in permutations(graph))
def read_input(fileobj):
linepattern = re.compile(
r'(\w+) would (gain|lose) (\d+) happiness units '
r'by sitting next to (\w+).')
graph = {}
for line in fileobj:
match = linepattern.search(line)
if not match:
continue
name1, direction, change, name2 = match.groups()
change = int(change) if direction == 'gain' else -int(change)
graph.setdefault(name1, {})[name2] = change
return graph
if __name__ == '__main__':
import os.path
import sys
filename = sys.argv[-1]
with open(filename) as inf:
graph = read_input(inf)
print('Part 1:', find_max_happiness_change(graph))
for name in graph:
graph[name]['Myself'] = 0
graph['Myself'] = dict.fromkeys(graph, 0)
print('Part 2:', find_max_happiness_change(graph))
if '--graph' in sys.argv:
dirname, basename = os.path.split(filename)
output = os.path.join(dirname, os.path.splitext(basename)[0] + '.dot')
order = max((o for o in permutations(graph)),
key=lambda o: calculate_happiness_change(graph, o))
with open(output, 'w') as df:
df.write('graph advent_seating {\nlayout="circo";\n')
for name, toright in circular_window(order, 2):
df.write(
'n{0} [shape=circle, label="{0}", width=1]\n'
'n{0} -- n{1} '
'[headlabel="{2}", taillabel="{3}", '
'labeldistance=2];\n'.format(
name, toright,
graph[name][toright], graph[toright][name]))
df.write('}\n')
print('Written graph to', output)
|
11577781
|
import logging
from typing import Any, List, Union
from sacrerouge.data import EvalInstance
from sacrerouge.data.dataset_readers import DatasetReader
from sacrerouge.data.fields import DocumentsField, Fields, SummaryField
from sacrerouge.io import JsonlReader
logger = logging.getLogger(__name__)
def flatten_document(document: Any) -> Union[List[str], str]:
""""
If the document is a single string, nothing is changed. If the document is a nested list of strings,
it will be flattened to be one list of strings without any nesting.
"""
if isinstance(document, str):
return document
flat = []
for item in document:
if isinstance(item, str):
flat.append(item)
else:
flat.extend(flatten_document(item))
return flat
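
# For example, flatten_document([["a", "b"], ["c", ["d"]]]) == ["a", "b", "c", "d"],
# while a plain string such as flatten_document("abc") is returned unchanged.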
def flatten_documents(documents: List[Any]) -> List[Union[List[str], str]]:
return [flatten_document(document) for document in documents]
@DatasetReader.register('document-based')
class DocumentBasedDatasetReader(DatasetReader):
def read(self, input_jsonl: str) -> List[EvalInstance]:
logger.info(f'Loading evaluation instances from {input_jsonl}')
instances = []
with JsonlReader(input_jsonl) as f:
for data in f:
fields = {}
fields['summary'] = SummaryField(data['summary']['text'])
if 'document' in data:
fields['documents'] = DocumentsField([data['document']['text']])
else:
fields['documents'] = DocumentsField([document['text'] for document in data['documents']])
fields = Fields(fields)
instance = EvalInstance(
data['instance_id'],
data['summarizer_id'],
data['summarizer_type'],
fields
)
instances.append(instance)
logger.info(f'Loaded {len(instances)} instances')
return instances
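
# A line of input_jsonl for DocumentBasedDatasetReader is expected to look roughly
# like the following (hypothetical values; either "document" or "documents" may be present):
#   {"instance_id": "d0601", "summarizer_id": "1", "summarizer_type": "peer",
#    "summary": {"text": "..."}, "documents": [{"text": "..."}]}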
@DatasetReader.register('split-document-based')
class SplitDocumentBasedDatasetReader(DatasetReader):
def read(self, documents_jsonl: str, summaries_jsonl) -> List[EvalInstance]:
logger.info(f'Loading documents from {documents_jsonl}')
documents_dict = {}
with JsonlReader(documents_jsonl) as f:
for data in f:
instance_id = data['instance_id']
if 'document' in data:
documents = [data['document']['text']]
else:
documents = [document['text'] for document in data['documents']]
documents = flatten_documents(documents)
documents_dict[instance_id] = DocumentsField(documents)
logger.info(f'Loaded {len(documents_dict)} document sets')
logger.info(f'Loading summaries from {summaries_jsonl}')
instances = []
with JsonlReader(summaries_jsonl) as f:
for data in f:
fields = {}
fields['summary'] = SummaryField(data['summary']['text'])
instance_id = data['instance_id']
fields['documents'] = documents_dict[instance_id]
fields = Fields(fields)
instance = EvalInstance(
data['instance_id'],
data['summarizer_id'],
data['summarizer_type'],
fields
)
instances.append(instance)
logger.info(f'Loaded {len(instances)} instances')
return instances
|
11577891
|
import numpy as np
import matplotlib.pyplot as plt

# `pca_10_transformer` is assumed to be an already-fitted sklearn PCA(n_components=10).
plt.scatter(range(1, 11), np.cumsum(pca_10_transformer.explained_variance_ratio_))
plt.xlabel("PCA Dimension")
plt.ylabel("Total Variance Captured")
plt.title("Variance Explained by PCA");
|
11577948
|
from collections import OrderedDict
import theano
from numpy.testing import assert_allclose
from theano import tensor
from blocks.algorithms import BasicMomentum
from blocks_extras.algorithms import BasicNesterovMomentum, NesterovMomentum
from blocks.utils import shared_floatx
def test_basic_nesterov_momentum():
a = shared_floatx([3, 4])
cost = (a ** 2).sum()
steps, updates = BasicNesterovMomentum(0.5).compute_steps(
OrderedDict([(a, tensor.grad(cost, a))]))
f = theano.function([], [steps[a]], updates=updates)
steps_classic, updates_classic = BasicMomentum(0.5).compute_steps(
OrderedDict([(a, tensor.grad(cost, a))]))
f_classic = theano.function([], [steps_classic[a]],
updates=updates_classic)
f_classic() # One call for the "peek ahead" of the Nesterov momentum.
assert_allclose(f()[0], f_classic()[0])
assert_allclose(f()[0], f_classic()[0])
assert_allclose(f()[0], f_classic()[0])
def test_nesterov_momentum():
a = shared_floatx([3, 4])
cost = (a ** 2).sum()
steps, updates = NesterovMomentum(0.1, 0.5).compute_steps(
OrderedDict([(a, tensor.grad(cost, a))]))
f = theano.function([], [steps[a]], updates=updates)
assert_allclose(f()[0], [0.9, 1.2])
assert_allclose(f()[0], [1.05, 1.4])
assert_allclose(f()[0], [1.125, 1.5])
|
11577964
|
import re
from typing import Tuple, List
import dynet_config
dynet_config.set(autobatch=1, mem="2048")
from itertools import chain
import dynet as dy
import numpy as np
from data.reader import DataReader
from eval.bleu.eval import BLEU
from planner.planner import Planner
from scorer.scorer import get_relations
from utils.delex import concat_entity
from utils.dynet_model_executer import Vocab, DynetModelExecutor, BaseDynetModel, arg_sample
from utils.graph import Graph, readable_edge
from utils.tokens import tokenize
class Model(BaseDynetModel):
def __init__(self, embedding_size=20, entity_dropout=0.3, relation_dropout=0.3, max_edges=10):
super().__init__()
self.vocab = None
self.embedding_size = embedding_size
self.counter_size = 5
self.entity_dropout = entity_dropout
self.relation_dropout = relation_dropout
self.decoder = None
self.counters = Vocab(list(range(max_edges + 1)))
def set_vocab(self, in_vocab: Vocab, out_vocab: Vocab):
self.vocab = out_vocab # Use same vocab for both the input and the output
def init_params(self):
super().init_params()
self.entity_encoder = self.pc.add_parameters((self.embedding_size, self.embedding_size * 3)) # e N e
self.relation_encoder = self.pc.add_parameters((self.embedding_size, self.embedding_size * 3)) # N e N
self.no_ent = self.pc.add_parameters(self.embedding_size)
self.vocab.create_lookup(self.pc, self.embedding_size)
self.counters.create_lookup(self.pc, self.counter_size)
self.decoder = dy.LSTMBuilder(3, self.embedding_size + self.counter_size * 4, self.embedding_size, self.pc)
def fix_out(self, plan: str):
if not plan:
return None
for d, r in get_relations(plan):
plan = plan.replace(d + ' ' + r + ' [ ', d + '_' + r + '_')
return plan.split(" ")
def eval(self, predictions, truth):
print("predictions", predictions[-1])
predictions = [" ".join(chain.from_iterable(p)) for p in predictions]
print("predictions", predictions[-1])
print("truth", truth[-1])
return BLEU(predictions, truth, single_ref=True)[0]
def forward(self, g: Graph, out: str = None, greedy=True):
out_tokens = self.fix_out(out)
# Encoding
nodes = {n: self.vocab.lookup(self.word_dropout(n, self.entity_dropout if out else 0))
for n in g.nodes}
unique_edges = set(chain.from_iterable(g.edges.values()))
edges = {e: self.vocab.lookup(self.word_dropout(e, self.relation_dropout if out else 0))
for e in unique_edges}
node_connections = {node: ([], []) for node in g.nodes}
for ((n1, n2), es) in g.edges.items():
for e in es:
edge_rep = edges[e]
node_connections[n2][0].append(edge_rep)
node_connections[n1][1].append(edge_rep)
ne_rep = lambda e: dy.average(e) if len(e) > 0 else self.no_ent
node_reps = {n: self.entity_encoder *
dy.concatenate([ne_rep(node_connections[n][0]), nodes[n], ne_rep(node_connections[n][1])])
for n in g.nodes}
edge_reps = [self.relation_encoder * dy.concatenate([node_reps[n1], edges[e], node_reps[n2]])
for ((n1, n2), es) in g.edges.items() for e in es]
# In decoding time we will remove 1 RDF at a time until none is left.
rdfs = {((n1, n2), e): edge_reps[i] for i, ((n1, n2), es) in enumerate(g.edges.items()) for e in es}
# Decoding
nodes_stack = []
edges_coverage = {"yes": 0, "no": len(g.edges), "current": 0}
counter_vec = lambda: dy.concatenate([self.counters.lookup(c) for c in
[len(nodes_stack)] + list(edges_coverage.values())])
c_vec = counter_vec()
initial_input = dy.concatenate([dy.average(edge_reps), c_vec])
decoder = self.decoder.initial_state().add_input(initial_input)
def choose(item):
if out_tokens:
out_tokens.pop(0)
if item[0] == "pop":
nodes_stack.pop()
res = [item[1]]
if len(nodes_stack) == 0:
edges_coverage["current"] = 0
elif item[0] == "node":
nodes_stack.append(item[1])
res = [item[1]]
elif item[0] == "edge":
edges_coverage["yes"] += 1
edges_coverage["current"] += 1
edges_coverage["no"] -= 1
_, d, e, n = item
prev_node = nodes_stack[-1]
nodes_stack.append(n)
res = [d, e, "[", n]
if d == ">":
del rdfs[(prev_node, n), e]
elif d == "<":
del rdfs[(n, prev_node), e]
else:
raise ValueError("direction can only be > or <. got " + d)
else:
raise ValueError("type can only be: pop, node, edge. got " + item[0])
c_vec = counter_vec()
for w in res:
if w in node_reps:
vec = node_reps[w]
elif w in edges:
vec = edges[w]
else:
vec = self.vocab.lookup(w)
decoder.add_input(dy.concatenate([vec, c_vec]))
return res
is_pop = False
while len(rdfs) > 0:
# Possible vocab
if len(nodes_stack) == 0:
is_pop = False
vocab = {("node", n): node_reps[n] for n in
set(chain.from_iterable([ns for ns, e in rdfs.keys()]))}
else:
last_node = nodes_stack[-1]
f_edges = {("edge", ">", e, n2): rep for ((n1, n2), e), rep in rdfs.items() if n1 == last_node}
b_edges = {("edge", "<", e, n1): rep for ((n1, n2), e), rep in rdfs.items() if n2 == last_node}
vocab = {**f_edges, **b_edges}
if is_pop:
# What node are we popping to. To help neighboring facts
pop_node = self.no_ent if len(nodes_stack) == 1 else node_reps[nodes_stack[-2]]
pop_char = "." if len(nodes_stack) == 1 else "]"
vocab[("pop", pop_char)] = dy.esum([self.vocab.lookup(pop_char), pop_node])
is_pop = True # next iteration is popable
vocab_list = list(vocab.items())
vocab_index = ["_".join(i[1:]) for i, _ in vocab_list]
try:
if len(vocab_list) == 1:
if out:
assert out_tokens[0] == vocab_index[0]
choice = choose(vocab_list[0][0])
if not out:
yield choice
continue
vocab_matrix = dy.transpose(dy.concatenate_cols([rep for _, rep in vocab_list]))
pred_vec = vocab_matrix * decoder.output()
if out:
best_i = vocab_index.index(out_tokens[0])
choose(vocab_list[best_i][0])
yield dy.pickneglogsoftmax(pred_vec, best_i)
else:
if greedy:
best_i = int(np.argmax(pred_vec.npvalue()))
else:
best_i = arg_sample(list(dy.softmax(pred_vec).npvalue()))
yield choose(vocab_list[best_i][0])
except Exception as e:
print()
print("is_pop", is_pop)
print("out", out)
print("out tokens", out_tokens)
print("vocab_index", vocab_index)
print("original_rdf", g.as_rdf())
print("rdf", list(rdfs.keys()))
print()
raise e
if not out:
yield ["]"]
class NeuralPlanner(Planner):
re_plan = True
    def __init__(self):
self.executor = None
def convert_relation(self, r: str):
return "_".join(tokenize(readable_edge(r)))
def convert_graph(self, g: Graph):
rdf = [(concat_entity(s), self.convert_relation(r), concat_entity(o)) for s, r, o in g.as_rdf()]
return Graph(rdf)
def convert_plan(self, p: str):
relations = get_relations(p)
for d, r in relations:
p = p.replace(r, self.convert_relation(r))
while "]]" in p:
p = p.replace("]]", "] ]")
p = p.replace("].", "] .")
return re.sub("\[(\w)", r"[ \1", p)
def convert_set(self, reader: DataReader):
return [(self.convert_graph(d.graph), self.convert_plan(d.plan)) for d in reader.copy().data]
def learn(self, train_reader: DataReader, dev_reader: DataReader):
train_set = self.convert_set(train_reader)
dev_set = self.convert_set(dev_reader)
model = Model()
self.executor = DynetModelExecutor(model, train_set, dev_set)
for batch_exponent in range(0, 3):
self.executor.train(5, batch_exponent)
return self
def score(self, g: Graph, plan: str):
error = self.executor.calc_error(self.convert_graph(g), self.convert_plan(plan))
return 1 / error # To make less error score better
def model_plan(self, g: Graph, greedy=True):
predict = list(self.executor.predict([self.convert_graph(g)], greedy=greedy))[0]
plan = " ".join(chain.from_iterable(predict))
for d, r in get_relations(plan):
plan = plan.replace(d + " " + r, d + " " + r.replace("_", " "))
return plan
def plan_random(self, g: Graph, amount: int):
return [self.model_plan(g, greedy=False) for _ in range(amount)]
def plan_best(self, g: Graph, ranker_plans=None):
if ranker_plans:
raise NotImplementedError("Planner.plan_best is not implemented when ranker_plans is defined")
return self.model_plan(g, greedy=True)
|
11577993
|
import os
from uuid import uuid4
import itertools
from contextlib import contextmanager
from .code_element import CodeElement
from .fixture import Fixture
from .generator_fixture import GeneratorFixture
from .nonmethod_test import NonMethodTest
from .test_class import Class
from .test_container import SuiteWriterTestContainer
class File(SuiteWriterTestContainer, CodeElement):
def __init__(self, suite, relpath=None):
super(File, self).__init__(suite)
self._classes = []
self._fixtures = []
if relpath is None:
relpath = 'test_{}.py'.format(self.id)
self._relpath = relpath
@property
def name(self):
return os.path.basename(self._relpath)
def add_hook_event(self, hook_name, extra_args=(), evt_name='evt'):
self.append_line('@slash.hooks.{}.register'.format(hook_name))
event_code = '{}_{}'.format(evt_name, uuid4())
self.append_line('def _hook():')
self.append_line(' __ut__.events.add({!r}, {})'.format(
event_code, ', '.join(extra_args)))
return event_code
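
    # For example, add_hook_event('session_start') appends code roughly like:
    #     @slash.hooks.session_start.register
    #     def _hook():
    #         __ut__.events.add('evt_<uuid>', )
    # and returns the generated 'evt_<uuid>' event code.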
@property
def classes(self):
return list(self._classes)
def add_fixture(self, **kw):
returned = Fixture(self.suite, self, **kw)
self._fixtures.append(returned)
return returned
def add_generator_fixture(self):
returned = GeneratorFixture(self.suite, self)
self._fixtures.append(returned)
return returned
def get_relative_path(self):
return self._relpath
def add_class(self):
cls = Class(self.suite, self)
self._classes.append(cls)
return cls
def get_last_class(self):
if not self._classes:
return None
return self._classes[-1]
def add_function_test(self):
returned = NonMethodTest(self.suite, self)
self._tests.append(returned)
self.suite.notify_test_added(returned)
return returned
@contextmanager
def _body_context(self, code_formatter):
with super(File, self)._body_context(code_formatter):
if self.suite.debug_info:
code_formatter.writeln('import __ut__')
code_formatter.writeln('import slash')
code_formatter.writeln()
yield None
def _write_body(self, code_formatter):
super(File, self)._write_body(code_formatter)
for thing in itertools.chain(self._classes, self._tests, self._fixtures):
thing.write(code_formatter)
|
11578000
|
import logging
import queue
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor
from typing import Dict
import time
from analysis.tools.aws_ec2_instance import EC2Instance
from analysis.tools.experiment import Experiment
from analysis.tools.stats import Stats
from analysis.tools.test_instance import TestInstance
class TestInstanceStatus(object):
def __init__(self):
self.total = 0
self.queued = 0
self.running = 0
self.done = 0
def status(self):
return dict(
total=self.total,
queued=self.queued,
running=self.running,
done=self.done
)
def __add__(self, other):
result = TestInstanceStatus()
result.total = self.total + other.total
result.queued = self.queued + other.queued
result.running = self.running + other.running
result.done = self.done + other.done
return result
class ExperimentExecutor(object):
def __init__(self, experiment: Experiment, num_instances: int = 1):
self._logger = logging.getLogger()
self._stats = Stats()
self._timestamp = time.time_ns()
self._tags = set()
self._experiment = experiment
self._terminated = False
self._test_instances: Dict[TestInstance, TestInstanceStatus] = defaultdict(lambda: TestInstanceStatus())
self._instances = []
self._free_instances = queue.SimpleQueue()
self._num_instances = num_instances
self._pool = ThreadPoolExecutor(max_workers=5000)
def _create_instance(self):
start_ns = time.time_ns()
instance = EC2Instance(self._stats)
self._instances.append(instance)
instance.wait_for_ready()
self._free_instances.put(instance)
self._stats.log("bootstrap", time.time_ns() - start_ns)
def _execute_test_instance(self, ti: TestInstance):
self._test_instances[ti].total += 1
self._test_instances[ti].queued += 1
start_ns = time.time_ns()
while True:
try:
instance = self._free_instances.get(timeout=1)
break
except queue.Empty:
if self._terminated:
return
self._stats.log("waiting", time.time_ns() - start_ns)
self._logger.info(f"{ti.get_test_tag()} execution starting...")
start_ns = time.time_ns()
try:
self._test_instances[ti].queued -= 1
self._test_instances[ti].running += 1
tag = instance.execute(ti, self._timestamp)
self._test_instances[ti].running -= 1
self._test_instances[ti].done += 1
self._tags.add(tag)
self._logger.info(f"{ti} execution completed...")
except Exception as e:
self._logger.error(e)
finally:
self._free_instances.put(instance)
self._stats.log("execution", time.time_ns() - start_ns)
def _execute_experiment(self, experiment: Experiment):
self._logger.info(f"starting experiment with {self._num_instances} machines")
try:
fs = []
for _ in range(self._num_instances):
fs.append(self._pool.submit(self._create_instance))
for ti in experiment.get_test_instances():
count = ti._gtest_repeat
ti._gtest_repeat = 1
for _ in range(count):
fs.append(self._pool.submit(self._execute_test_instance, ti))
self._logger.info("all tests scheduled, waiting for completion...")
while fs:
done_futures = [x for x in fs if x.done()]
                fs = [x for x in fs if x not in done_futures]
[x.result() for x in done_futures] # will throw any exception propagated from future
time.sleep(1)
self._logger.info("all tests completed")
self._stats.dump()
assert 1 == len(self._tags), self._tags
tag = list(self._tags)[0]
filename = experiment.analyze(tag)
self._logger.info(f"done for tag={tag}, results in {filename}")
finally:
for instance in self._instances:
instance.terminate()
self._terminated = True
def run_async(self):
return self._pool.submit(self._execute_experiment, self._experiment)
def status(self):
tests = []
status_total = TestInstanceStatus()
for (ti, tis) in self._test_instances.items():
status_total += tis
tests.append((ti.get_test_tag(), tis.status()))
return dict(
stats=self._stats.to_dict(),
instances=dict([instance.status() for instance in self._instances]),
tests_status_total=status_total.status(),
tests=[(ti.get_test_tag(), tis.status()) for (ti, tis) in self._test_instances.items()]
)
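
# Hypothetical usage sketch (assumes an Experiment instance has already been built):
#   executor = ExperimentExecutor(experiment, num_instances=4)
#   future = executor.run_async()
#   while not future.done():
#       print(executor.status())
#       time.sleep(30)
#   future.result()  # re-raise anything that failed inside the experiment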
|
11578023
|
from anago.tagger import Tagger
from anago.trainer import Trainer
from anago.wrapper import Sequence
|
11578060
|
from tests.policies.commenting import CommentingPolicy
class Comment:
__policy_class__ = CommentingPolicy
def __init__(self, id):
self.id = id
|
11578068
|
import pytest
from ldap_filter import Filter
class TestFilterAttributes:
def test_present(self):
filt = Filter.attribute('attr').present()
string = filt.to_string()
assert string == '(attr=*)'
def test_equal_to(self):
filt = Filter.attribute('attr').equal_to('value')
string = filt.to_string()
assert string == '(attr=value)'
def test_contains(self):
filt = Filter.attribute('attr').contains('value')
string = filt.to_string()
assert string == '(attr=*value*)'
def test_starts_with(self):
filt = Filter.attribute('attr').starts_with('value')
string = filt.to_string()
assert string == '(attr=value*)'
def test_ends_with(self):
filt = Filter.attribute('attr').ends_with('value')
string = filt.to_string()
assert string == '(attr=*value)'
def test_approx(self):
filt = Filter.attribute('attr').approx('value')
string = filt.to_string()
assert string == '(attr~=value)'
def test_greater_than(self):
filt = Filter.attribute('attr').gte('value')
string = filt.to_string()
assert string == '(attr>=value)'
def test_lesser_than(self):
filt = Filter.attribute('attr').lte('value')
string = filt.to_string()
assert string == '(attr<=value)'
def test_raw(self):
filt = Filter.attribute('attr').raw('value*value')
string = filt.to_string()
assert string == '(attr=value*value)'
class TestFilterEscapes:
def test_escape(self):
string = Filter.escape('a * (complex) \\value')
assert string == 'a \\2a \\28complex\\29 \\5cvalue'
def test_unescape(self):
string = Filter.unescape('a \\2a \\28complex\\29 \\5cvalue')
assert string == 'a * (complex) \\value'
def test_filter_escape(self):
filt = Filter.attribute('escaped').equal_to('a * (complex) \\value')
string = filt.to_string()
assert string == '(escaped=a \\2a \\28complex\\29 \\5cvalue)'
def test_filter_convert_int(self):
filt = Filter.attribute('number').equal_to(1000)
string = filt.to_string()
assert string == '(number=1000)'
def test_filter_convert_float(self):
filt = Filter.attribute('number').equal_to(10.26)
string = filt.to_string()
assert string == '(number=10.26)'
def test_filter_convert_negative(self):
filt = Filter.attribute('number').equal_to(-10)
string = filt.to_string()
assert string == '(number=-10)'
class TestFilterAggregates:
def test_and_aggregate(self):
filt = Filter.AND([
Filter.attribute('givenName').equal_to('bilbo'),
Filter.attribute('sn').equal_to('baggens')
])
string = filt.to_string()
assert string == '(&(givenName=bilbo)(sn=baggens))'
def test_or_aggregate(self):
filt = Filter.OR([
Filter.attribute('givenName').equal_to('bilbo'),
Filter.attribute('sn').equal_to('baggens')
])
string = filt.to_string()
assert string == '(|(givenName=bilbo)(sn=baggens))'
def test_not_aggregate(self):
filt = Filter.NOT([
Filter.attribute('givenName').equal_to('bilbo')
])
string = filt.to_string()
assert string == '(!(givenName=bilbo))'
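
# Hypothetical composition sketch (not one of the original tests): aggregates can be
# nested, e.g.
#   Filter.AND([
#       Filter.attribute('objectClass').equal_to('person'),
#       Filter.NOT([Filter.attribute('sn').equal_to('baggens')]),
#   ])
# would be expected to render as '(&(objectClass=person)(!(sn=baggens)))'.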
|
11578085
|
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from navigation import Link
link_api = Link(
icon='fa fa-plug', tags='new_window', text=_('REST API'),
view='rest_api:api_root'
)
link_api_documentation = Link(
icon='fa fa-book', tags='new_window', text=_('API Documentation'),
view='django.swagger.base.view'
)
|
11578143
|
from __future__ import annotations
from django_rq import job
from jcasts.podcasts.models import Podcast, Recommendation
from jcasts.shared.typedefs import User
from jcasts.users.emails import send_user_notification_email
@job("mail")
def send_recommendations_email(user: User) -> None:
"""Sends email with 2 or 3 recommended podcasts, based on:
- favorites
- follows
- play history
- play queue
    Podcasts should only be recommended once to each user.
"""
recommendations = (
Recommendation.objects.for_user(user)
.order_by("-frequency", "-similarity")
.values_list("recommended", flat=True)
)
podcasts = Podcast.objects.filter(pk__in=list(recommendations)).distinct()[:3]
if len(podcasts) not in range(2, 7):
return
# save recommendations
if podcasts:
user.recommended_podcasts.add(*podcasts)
send_user_notification_email(
user,
f"Hi {user.username}, here are some new podcasts you might like!",
"podcasts/emails/recommendations.txt",
"podcasts/emails/recommendations.html",
{
"podcasts": podcasts,
},
)
|
11578182
|
import copy
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import entmax
"""
Currently it contains three encoder layers: EncoderLayer, RATEncoderLayer, and EncoderLayerWithLatentRelations.
"""
# Adapted from
# https://github.com/tensorflow/tensor2tensor/blob/0b156ac533ab53f65f44966381f6e147c7371eee/tensor2tensor/layers/common_attention.py
def relative_attention_logits(query, key, relation):
# We can't reuse the same logic as tensor2tensor because we don't share relation vectors across the batch.
# In this version, relation vectors are shared across heads.
# query: [batch, heads, num queries, depth].
# key: [batch, heads, num kvs, depth].
# relation: [batch, num queries, num kvs, depth].
# qk_matmul is [batch, heads, num queries, num kvs]
qk_matmul = torch.matmul(query, key.transpose(-2, -1))
# q_t is [batch, num queries, heads, depth]
q_t = query.permute(0, 2, 1, 3)
# r_t is [batch, num queries, depth, num kvs]
r_t = relation.transpose(-2, -1)
# [batch, num queries, heads, depth]
# * [batch, num queries, depth, num kvs]
# = [batch, num queries, heads, num kvs]
# For each batch and query, we have a query vector per head.
# We take its dot product with the relation vector for each kv.
q_tr_t_matmul = torch.matmul(q_t, r_t)
# qtr_t_matmul_t is [batch, heads, num queries, num kvs]
q_tr_tmatmul_t = q_tr_t_matmul.permute(0, 2, 1, 3)
# [batch, heads, num queries, num kvs]
return (qk_matmul + q_tr_tmatmul_t) / math.sqrt(query.shape[-1])
# Sharing relation vectors across batch and heads:
# query: [batch, heads, num queries, depth].
# key: [batch, heads, num kvs, depth].
# relation: [num queries, num kvs, depth].
#
# Then take
# query reshaped
# [num queries, batch * heads, depth]
# relation.transpose(-2, -1)
# [num queries, depth, num kvs]
# and multiply them together.
#
# Without sharing relation vectors across heads:
# query: [batch, heads, num queries, depth].
# key: [batch, heads, num kvs, depth].
# relation: [batch, heads, num queries, num kvs, depth].
#
# Then take
# query.unsqueeze(3)
# [batch, heads, num queries, 1, depth]
# relation.transpose(-2, -1)
# [batch, heads, num queries, depth, num kvs]
# and multiply them together:
# [batch, heads, num queries, 1, depth]
# * [batch, heads, num queries, depth, num kvs]
# = [batch, heads, num queries, 1, num kvs]
# and squeeze
# [batch, heads, num queries, num kvs]
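
# Illustrative sketch (not part of the original model): relative attention logits for
# the un-shared case described above, where `relation` carries a separate vector per
# head with shape [batch, heads, num queries, num kvs, depth].
def relative_attention_logits_per_head(query, key, relation):
    # qk_matmul is [batch, heads, num queries, num kvs]
    qk_matmul = torch.matmul(query, key.transpose(-2, -1))
    # [batch, heads, num queries, 1, depth]
    # * [batch, heads, num queries, depth, num kvs]
    # = [batch, heads, num queries, 1, num kvs], squeezed to [batch, heads, num queries, num kvs]
    q_r_matmul = torch.matmul(query.unsqueeze(3), relation.transpose(-2, -1)).squeeze(3)
    return (qk_matmul + q_r_matmul) / math.sqrt(query.shape[-1])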
def relative_attention_values(weight, value, relation):
# In this version, relation vectors are shared across heads.
# weight: [batch, heads, num queries, num kvs].
# value: [batch, heads, num kvs, depth].
# relation: [batch, num queries, num kvs, depth].
# wv_matmul is [batch, heads, num queries, depth]
wv_matmul = torch.matmul(weight, value)
# w_t is [batch, num queries, heads, num kvs]
w_t = weight.permute(0, 2, 1, 3)
# [batch, num queries, heads, num kvs]
# * [batch, num queries, num kvs, depth]
# = [batch, num queries, heads, depth]
w_tr_matmul = torch.matmul(w_t, relation)
# w_tr_matmul_t is [batch, heads, num queries, depth]
w_tr_matmul_t = w_tr_matmul.permute(0, 2, 1, 3)
return wv_matmul + w_tr_matmul_t
# Adapted from The Annotated Transformer
def clones(module_fn, N):
return nn.ModuleList([module_fn() for _ in range(N)])
def attention(query, key, value, mask=None, dropout=None):
"Compute 'Scaled Dot Product Attention'"
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e9)
p_attn = F.softmax(scores, dim=-1)
if dropout is not None:
p_attn = dropout(p_attn)
# return torch.matmul(p_attn, value), scores.squeeze(1).squeeze(1)
return torch.matmul(p_attn, value), p_attn
def sparse_attention(query, key, value, alpha, mask=None, dropout=None):
"Use sparse activation function"
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e9)
if alpha == 2:
p_attn = entmax.sparsemax(scores, -1)
elif alpha == 1.5:
p_attn = entmax.entmax15(scores, -1)
else:
raise NotImplementedError
if dropout is not None:
p_attn = dropout(p_attn)
# return torch.matmul(p_attn, value), scores.squeeze(1).squeeze(1)
return torch.matmul(p_attn, value), p_attn
def attention_with_relations(
query, key, value, relation_k, relation_v, mask=None, dropout=None
):
"Compute 'Scaled Dot Product Attention'"
d_k = query.size(-1)
scores = relative_attention_logits(query, key, relation_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e9)
    p_attn_orig = F.softmax(scores, dim=-1)
    p_attn = p_attn_orig
    if dropout is not None:
        p_attn = dropout(p_attn_orig)
    return relative_attention_values(p_attn, value, relation_v), p_attn_orig
# Adapted from The Annotated Transformer
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.1):
"Take in model size and number of heads."
super(MultiHeadedAttention, self).__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_k = d_model // h
self.h = h
self.linears = clones(lambda: nn.Linear(d_model, d_model), 4)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None):
"Implements Figure 2"
if mask is not None:
# Same mask applied to all h heads.
mask = mask.unsqueeze(1)
nbatches = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = [
l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linears, (query, key, value))
]
# 2) Apply attention on all the projected vectors in batch.
x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)
if query.dim() == 3:
x = x.squeeze(1)
return self.linears[-1](x)
# Adapted from The Annotated Transformer
class MultiHeadedAttentionWithRelations(nn.Module):
def __init__(self, h, d_model, dropout=0.1):
"Take in model size and number of heads."
super(MultiHeadedAttentionWithRelations, self).__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_k = d_model // h
self.h = h
self.linears = clones(lambda: nn.Linear(d_model, d_model), 4)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, relation_k, relation_v, mask=None):
# query shape: [batch, num queries, d_model]
# key shape: [batch, num kv, d_model]
# value shape: [batch, num kv, d_model]
# relations_k shape: [batch, num queries, num kv, (d_model // h)]
# relations_v shape: [batch, num queries, num kv, (d_model // h)]
# mask shape: [batch, num queries, num kv]
if mask is not None:
# Same mask applied to all h heads.
# mask shape: [batch, 1, num queries, num kv]
mask = mask.unsqueeze(1)
nbatches = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = [
l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linears, (query, key, value))
]
# 2) Apply attention on all the projected vectors in batch.
# x shape: [batch, heads, num queries, depth]
x, self.attn = attention_with_relations(
query, key, value, relation_k, relation_v, mask=mask, dropout=self.dropout
)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)
return self.linears[-1](x)
# Adapted from The Annotated Transformer
class RATEncoder(nn.Module):
"Core encoder is a stack of N layers"
def __init__(self, layer, layer_size, N, tie_layers=False):
super(RATEncoder, self).__init__()
if tie_layers:
self.layer = layer()
self.layers = [self.layer for _ in range(N)]
else:
self.layers = clones(layer, N)
self.norm = nn.LayerNorm(layer_size)
# TODO initialize using xavier
def forward(self, x, relation, mask):
"Pass the input (and mask) through each layer in turn."
for layer in self.layers:
x = layer(x, relation, mask)
return self.norm(x)
# Adapted from The Annotated Transformer
class Encoder(nn.Module):
"Core encoder is a stack of N layers"
def __init__(self, layer, layer_size, N, tie_layers=False):
super(Encoder, self).__init__()
if tie_layers:
self.layer = layer()
self.layers = [self.layer for _ in range(N)]
else:
self.layers = clones(layer, N)
self.norm = nn.LayerNorm(layer_size)
def forward(self, x, mask):
"Pass the input (and mask) through each layer in turn."
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
# Adapted from The Annotated Transformer
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size, dropout):
super(SublayerConnection, self).__init__()
self.norm = nn.LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer):
"Apply residual connection to any sublayer with the same size."
return x + self.dropout(sublayer(self.norm(x)))
# Adapted from The Annotated Transformer
class EncoderLayer(nn.Module):
"Encoder is made up of self-attn and feed forward (defined below)"
def __init__(self, size, self_attn, feed_forward, dropout):
super(EncoderLayer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(lambda: SublayerConnection(size, dropout), 2)
self.size = size
def forward(self, x, mask):
"Follow Figure 1 (left) for connections."
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))
return self.sublayer[1](x, self.feed_forward)
# Adapted from The Annotated Transformer
class RATEncoderLayer(nn.Module):
"Encoder with RAT"
def __init__(self, size, self_attn, feed_forward, num_relation_kinds, dropout):
super(RATEncoderLayer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(lambda: SublayerConnection(size, dropout), 2)
self.size = size
self.relation_k_emb = nn.Embedding(num_relation_kinds, self.self_attn.d_k)
self.relation_v_emb = nn.Embedding(num_relation_kinds, self.self_attn.d_k)
def forward(self, x, relation, mask):
"Follow Figure 1 (left) for connections."
relation_k = self.relation_k_emb(relation)
relation_v = self.relation_v_emb(relation)
x = self.sublayer[0](
x, lambda x: self.self_attn(x, x, x, relation_k, relation_v, mask)
)
return self.sublayer[1](x, self.feed_forward)
# Adapted from The Annotated Transformer
class PositionwiseFeedForward(nn.Module):
"Implements FFN equation."
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.w_2(self.dropout(F.relu(self.w_1(x))))
# Adapted from The Annotated Transformer
class PositionalEncoding(nn.Module):
"Implement the PE function."
def __init__(self, d_model, dropout, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp(
torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model)
)
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer("pe", pe)
def forward(self, x):
x = x + self.pe[:, : x.size(1)]
return self.dropout(x)
# Adapted from The Annotated Transformer
class Decoder(nn.Module):
"Generic N layer decoder with masking."
def __init__(self, layer, N):
super(Decoder, self).__init__()
self.layers = clones(layer, N)
self.norm = nn.LayerNorm(self.layers[0].size)
def forward(self, x, memory, src_mask, tgt_mask):
for layer in self.layers:
x = layer(x, memory, src_mask, tgt_mask)
return self.norm(x)
# Adapted from The Annotated Transformer
class DecoderLayer(nn.Module):
"Decoder is made of self-attn, src-attn, and feed forward (defined below)"
def __init__(self, size, self_attn, src_attn, feed_forward, dropout):
super(DecoderLayer, self).__init__()
self.size = size
self.self_attn = self_attn
self.src_attn = src_attn
self.feed_forward = feed_forward
self.sublayer = clones(lambda: SublayerConnection(size, dropout), 3)
def forward(self, x, memory, src_mask, tgt_mask):
"Follow Figure 1 (right) for connections."
m = memory
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))
x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))
return self.sublayer[2](x, self.feed_forward)
class EncoderLayerWithLatentRelations(nn.Module):
def __init__(
self,
size,
self_attn,
feed_forward,
relations2id,
num_latent_relations=3,
dropout=0.1,
enable_latent_relations=False,
combine_latent_relations=False,
):
super().__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(lambda: SublayerConnection(size, dropout), 2)
self.size = size
self.relations2id = relations2id
num_relation_kinds = len(relations2id)
self.default_qq = "q:q-default"
self.default_qq_id = self.relations2id[self.default_qq]
self.relation_k_emb = nn.Embedding(num_relation_kinds, self.self_attn.d_k)
self.relation_v_emb = nn.Embedding(num_relation_kinds, self.self_attn.d_k)
self.enable_latent_relations = enable_latent_relations
self.combine_latent_relations = combine_latent_relations
if enable_latent_relations:
num_qc_relations = num_latent_relations
num_qt_relations = num_latent_relations
num_cq_relations = num_latent_relations
num_tq_relations = num_latent_relations
self.latent_qc_relation_k_emb = nn.Embedding(
num_qc_relations, self.self_attn.d_k
)
self.latent_qc_relation_v_emb = nn.Embedding(
num_qc_relations, self.self_attn.d_k
)
self.latent_cq_relation_k_emb = nn.Embedding(
num_cq_relations, self.self_attn.d_k
)
self.latent_cq_relation_v_emb = nn.Embedding(
num_cq_relations, self.self_attn.d_k
)
self.latent_qt_relation_k_emb = nn.Embedding(
num_qt_relations, self.self_attn.d_k
)
self.latent_qt_relation_v_emb = nn.Embedding(
num_qt_relations, self.self_attn.d_k
)
self.latent_tq_relation_k_emb = nn.Embedding(
num_tq_relations, self.self_attn.d_k
)
self.latent_tq_relation_v_emb = nn.Embedding(
num_tq_relations, self.self_attn.d_k
)
def encode_merge_relations(self, relations):
ct_relation, qc_relation, cq_relation, qt_relation, tq_relation = (
relations.ct_relation,
relations.qc_relation,
relations.cq_relation,
relations.qt_relation,
relations.tq_relation,
)
_device = ct_relation.device
# qq relation
q_len = qc_relation.size(0)
qq_relation_t = (
torch.LongTensor(q_len, q_len).fill_(self.default_qq_id).to(_device)
)
qq_relation_k = self.relation_k_emb(qq_relation_t)
qq_relation_v = self.relation_v_emb(qq_relation_t)
# ct relation
ct_relation_k = self.relation_k_emb(ct_relation)
ct_relation_v = self.relation_v_emb(ct_relation)
# qc relation
qc_relation_k = torch.einsum(
"qcf,fl->qcl", [qc_relation, self.latent_qc_relation_k_emb.weight]
)
qc_relation_v = torch.einsum(
"qcf,fl->qcl", [qc_relation, self.latent_qc_relation_v_emb.weight]
)
# cq relation
cq_relation_k = torch.einsum(
"cqf,fl->cql", [cq_relation, self.latent_cq_relation_k_emb.weight]
)
cq_relation_v = torch.einsum(
"cqf,fl->cql", [cq_relation, self.latent_cq_relation_v_emb.weight]
)
# qt relation
qt_relation_k = torch.einsum(
"qtf,fl->qtl", [qt_relation, self.latent_qt_relation_k_emb.weight]
)
qt_relation_v = torch.einsum(
"qtf,fl->qtl", [qt_relation, self.latent_qt_relation_v_emb.weight]
)
# cq relation
tq_relation_k = torch.einsum(
"tqf,fl->tql", [tq_relation, self.latent_tq_relation_k_emb.weight]
)
tq_relation_v = torch.einsum(
"tqf,fl->tql", [tq_relation, self.latent_tq_relation_v_emb.weight]
)
q_relation_k = torch.cat([qq_relation_k, qc_relation_k, qt_relation_k], 1)
q_relation_v = torch.cat([qq_relation_v, qc_relation_v, qt_relation_v], 1)
q_ct_relation_k = torch.cat([cq_relation_k, tq_relation_k], 0)
q_ct_relation_v = torch.cat([cq_relation_v, tq_relation_v], 0)
qct_relation_k = torch.cat([q_ct_relation_k, ct_relation_k], 1)
qct_relation_v = torch.cat([q_ct_relation_v, ct_relation_v], 1)
relation_k = torch.cat([q_relation_k, qct_relation_k], 0)
relation_v = torch.cat([q_relation_v, qct_relation_v], 0)
return relation_k, relation_v
def forward(self, x, relations, mask):
"""
x: 1 * len * feat_size
ct_relation: ct_len * ct_len
"""
if self.enable_latent_relations:
relation_k_latent, relation_v_latent = self.encode_merge_relations(
relations
)
if self.combine_latent_relations:
relation_k_fixed = self.relation_k_emb(relations.predefined_relation)
relation_v_fixed = self.relation_v_emb(relations.predefined_relation)
relation_k = relation_k_fixed + relation_k_latent
relation_v = relation_v_fixed + relation_v_latent
else:
relation_k = relation_k_latent
relation_v = relation_v_latent
else:
relation_k = self.relation_k_emb(relations.predefined_relation)
relation_v = self.relation_v_emb(relations.predefined_relation)
relation_k, relation_v = relation_k.unsqueeze(0), relation_v.unsqueeze(0)
x = self.sublayer[0](
x, lambda x: self.self_attn(x, x, x, relation_k, relation_v, mask)
)
return self.sublayer[1](x, self.feed_forward)
class PointerWithLatentRelations(nn.Module):
def __init__(
self,
hidden_size,
relations2id,
dropout=0.1,
enable_latent_relations=False,
num_latent_relations=None,
combine_latent_relations=False,
):
super().__init__()
self.hidden_size = hidden_size
self.relations2id = relations2id
num_relation_kinds = len(relations2id)
self.linears = clones(lambda: nn.Linear(hidden_size, hidden_size), 3)
self.dropout = nn.Dropout(p=dropout)
self.default_qq = "q:q-default"
self.default_qq_id = self.relations2id[self.default_qq]
self.relation_k_emb = nn.Embedding(num_relation_kinds, hidden_size)
self.relation_v_emb = nn.Embedding(num_relation_kinds, hidden_size)
self.enable_latent_relations = enable_latent_relations
self.combine_latent_relations = combine_latent_relations
if enable_latent_relations:
num_qc_relations = num_latent_relations
num_qt_relations = num_latent_relations
num_cq_relations = num_latent_relations
num_tq_relations = num_latent_relations
self.latent_qc_relation_k_emb = nn.Embedding(
num_qc_relations, self.hidden_size
)
self.latent_qc_relation_v_emb = nn.Embedding(
num_qc_relations, self.hidden_size
)
self.latent_cq_relation_k_emb = nn.Embedding(
num_cq_relations, self.hidden_size
)
self.latent_cq_relation_v_emb = nn.Embedding(
num_cq_relations, self.hidden_size
)
self.latent_qt_relation_k_emb = nn.Embedding(
num_qt_relations, self.hidden_size
)
self.latent_qt_relation_v_emb = nn.Embedding(
num_qt_relations, self.hidden_size
)
self.latent_tq_relation_k_emb = nn.Embedding(
num_tq_relations, self.hidden_size
)
self.latent_tq_relation_v_emb = nn.Embedding(
num_tq_relations, self.hidden_size
)
def encode_merge_column_relations(self, relations):
ct_relation, qc_relation = (relations.ct_relation, relations.qc_relation)
# sc relation
t_base = relations.c_len
sc_relation = ct_relation[:, :t_base]
sc_relation_k = self.relation_k_emb(sc_relation)
sc_relation_v = self.relation_v_emb(sc_relation)
# qc relation
qc_relation_k = torch.einsum(
"qcf,fl->qcl", [qc_relation, self.latent_qc_relation_k_emb.weight]
)
qc_relation_v = torch.einsum(
"qcf,fl->qcl", [qc_relation, self.latent_qc_relation_v_emb.weight]
)
mc_relation_k = torch.cat([qc_relation_k, sc_relation_k], 0)
mc_relation_v = torch.cat([qc_relation_v, sc_relation_v], 0)
return mc_relation_k, mc_relation_v
def encode_merge_table_relations(self, relations):
ct_relation, qt_relation = (relations.ct_relation, relations.qt_relation)
# st relation
t_base = relations.c_len
st_relation = ct_relation[:, t_base:]
st_relation_k = self.relation_k_emb(st_relation)
st_relation_v = self.relation_v_emb(st_relation)
# qt relation
qt_relation_k = torch.einsum(
"qtf,fl->qtl", [qt_relation, self.latent_qt_relation_k_emb.weight]
)
qt_relation_v = torch.einsum(
"qtf,fl->qtl", [qt_relation, self.latent_qt_relation_v_emb.weight]
)
mt_relation_k = torch.cat([qt_relation_k, st_relation_k], 0)
mt_relation_v = torch.cat([qt_relation_v, st_relation_v], 0)
return mt_relation_k, mt_relation_v
def get_fixed_column_relation(self, relations):
c_base = relations.q_len
        t_base = relations.q_len + relations.c_len
mc_relation = relations.predefined_relation[:, c_base:t_base] # 1 * len * len
relation_k = self.relation_k_emb(mc_relation)
relation_v = self.relation_v_emb(mc_relation)
return relation_k, relation_v
def get_fixed_table_relation(self, relations):
        t_base = relations.q_len + relations.c_len
mc_relation = relations.predefined_relation[:, t_base:]
relation_k = self.relation_k_emb(mc_relation)
relation_v = self.relation_v_emb(mc_relation)
return relation_k, relation_v
def get_latent_relation(self, relations, kind):
if kind == "column":
return self.encode_merge_column_relations(relations)
else:
return self.encode_merge_table_relations(relations)
def get_fixed_relation(self, relations, kind):
if kind == "column":
return self.get_fixed_column_relation(relations)
else:
return self.get_fixed_table_relation(relations)
def forward(self, query, key, value, relations, kind="column"):
if self.enable_latent_relations:
relation_k, relation_v = self.get_latent_relation(relations, kind)
if self.combine_latent_relations:
relation_k_fixed, relation_v_fixed = self.get_fixed_relation(
relations, kind
)
relation_k = relation_k + relation_k_fixed
relation_v = relation_v + relation_v_fixed
else:
relation_k, relation_v = self.get_fixed_relation(relations, kind)
nbatches = query.size(0)
query, key, value = [
l(x).view(nbatches, -1, 1, self.hidden_size).transpose(1, 2)
for l, x in zip(self.linears, (query, key, value))
]
assert nbatches == 1 # TODO, support batching
relation_k, relation_v = relation_k.unsqueeze(0), relation_v.unsqueeze(0)
_, self.attn = attention_with_relations(
query, key, value, relation_k, relation_v, mask=None, dropout=self.dropout
)
return self.attn[0, 0]
|
11578214
|
import transform as tr
import numpy as np
import matplotlib.pyplot as plt
def main():
n = 64
arguments = np.arange(0, n) * np.pi / 6
function_values = list(map(lambda x: np.sin(x) + np.cos(4 * x), arguments))
dwt_result = tr.dwt(function_values, 1)
dwht_result = tr.dwht(function_values, 1)
fwht_result = tr.fwht(function_values, 1)
reverse_dwt_result = tr.dwt(dwt_result, -1)
reverse_dwht_result = tr.dwht(dwht_result, -1)
reverse_fwht_result = tr.fwht(fwht_result, -1)
# plotting part
fig, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(3, 2)
ax1.plot(arguments, function_values)
ax1.set(title='Function plot')
ax1.grid()
ax3.plot(arguments, dwt_result)
    ax3.set(title='DWT coefficients to sequency indices')
ax3.grid()
ax4.plot(arguments, fwht_result)
    ax4.set(title='FWHT coefficients to sequency indices')
ax4.grid()
ax5.plot(arguments, reverse_dwt_result)
ax5.set(title='Reverse DWT plot')
ax5.grid()
ax6.plot(arguments, reverse_fwht_result)
ax6.set(title='Reverse FWHT plot')
ax6.grid()
fig.delaxes(ax2)
plt.show()
if __name__ == '__main__':
main()
|
11578231
|
import math
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn import init
class InvertibleNetwork(nn.Module):
"""Invertible neural network. Implements differentiable inverse for use in flows."""
def __init__(self, latent_size, negative_slope):
super().__init__()
self.f = nn.LeakyReLU(negative_slope=negative_slope, inplace=True)
self.f_inv = InverseLeakyReLU(
negative_slope=self.f.negative_slope, inplace=True)
self.log_grad_f_inv = LogGradInverseLeakyReLU()
self.weight = nn.Parameter(torch.Tensor(3, latent_size))
self.bias = nn.Parameter(torch.Tensor(3, latent_size))
self.reset_parameters()
def reset_parameters(self):
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
bound = 0.01
init.uniform_(self.bias, -bound, bound)
def forward(self, input):
"""Input is of shape (batch_size, latent_dim)."""
h1 = self.f(self.weight[0] * input + self.bias[0])
h2 = self.f(self.weight[1] * h1 + self.bias[1])
return self.weight[2] * h2 + self.bias[2]
def inverse(self, nu):
sum_log_weight_inv = self.weight.abs().log().sum(dim=0)
weight_inv = self.weight.reciprocal()
w_inv_nu_minus_b = weight_inv[2] * (nu - self.bias[2])
h2_inv = weight_inv[1] * self.f_inv(w_inv_nu_minus_b) - self.bias[1]
return weight_inv[0] * self.f_inv(h2_inv) - self.bias[0]
def log_det_grad_inverse(self, nu):
"""Log absolute value of determinant of Jacobian of inverse transform."""
sum_log_weight_inv = self.weight.abs().log().sum(dim=0)
weight_inv = self.weight.reciprocal()
w_inv_nu_minus_b = weight_inv[2] * (nu - self.bias[2])
#import ipdb; ipdb.set_trace()
return (sum_log_weight_inv +
self.log_grad_f_inv(w_inv_nu_minus_b) +
self.log_grad_f_inv(weight_inv[1] * self.f_inv(w_inv_nu_minus_b) - self.bias[1]))
class InverseLeakyReLU(nn.Module):
"""LeakyReLU^{-1}(y) = {y if y >= 0 else 1/negative_slope * y"""
def __init__(self, negative_slope=1e-2, inplace=False):
super().__init__()
self.negative_slope_inverse = 1 / negative_slope
self.inplace = inplace
def forward(self, input):
return F.leaky_relu(input, self.negative_slope_inverse, self.inplace)
class LogGradInverseLeakyReLU(nn.Module):
"""\partial_y LeakyReLU^{-1}(y) = {1 if y >= 0 else 1 / negative_slope."""
def __init__(self, negative_slope=1e-2, inplace=False):
super().__init__()
self.log_negative_slope_inverse = -math.log(negative_slope)
self.inplace = inplace
def forward(self, input):
"""Calculate log of derivative of inverse."""
mask = input >= 0
        return (~mask).float() * self.log_negative_slope_inverse
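
# Hypothetical usage sketch: exercises forward(), inverse() and log_det_grad_inverse()
# and only checks tensor shapes; it does not assert that inverse() exactly undoes forward().
if __name__ == "__main__":
    net = InvertibleNetwork(latent_size=8, negative_slope=0.1)
    z = torch.randn(4, 8)
    nu = net(z)
    z_back = net.inverse(nu)
    log_det = net.log_det_grad_inverse(nu)
    print(nu.shape, z_back.shape, log_det.shape)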
|