433228
|
from django.urls import include, path
from likes.api.views import (
LikeListAPIView,
LikedCountAPIView,
LikeToggleView,
LikedIDsAPIView
)
app_name = 'likes-api'
urlpatterns = [
path('likes/', include([
path('count/', LikedCountAPIView.as_view(), name='count'),
path('is/', LikedIDsAPIView.as_view(), name='is'),
path('toggle/', LikeToggleView.as_view(), name='toggle'),
path('list/', LikeListAPIView.as_view(), name='list'),
]))
]
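A minimal sketch (not from the source) of reversing these named routes; the 'api/' mount point in the comment is an assumption about the project urlconf:
from django.urls import reverse
# assumes the project urlconf mounts this module, e.g.
#   path('api/', include('likes.api.urls'))
reverse('likes-api:count')   # -> '/api/likes/count/'
reverse('likes-api:toggle')  # -> '/api/likes/toggle/'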
|
433240
|
import numpy as np
class GreedyKCenter(object):
def fit(self, points, k):
centers = []
centers_index = []
# Initialize distances
distances = [np.inf for u in points]
# Initialize cluster labels
labels = [np.inf for u in points]
for cluster in range(k):
# Let u be the point of P such that d[u] is maximum
u_index = distances.index(max(distances))
u = points[u_index]
# u is the next cluster center
centers.append(u)
centers_index.append(u_index)
# Update distance to nearest center
for i, v in enumerate(points):
distance_to_u = self.distance(u, v) # Calculate from v to u
if distance_to_u < distances[i]:
distances[i] = distance_to_u
labels[i] = cluster
# Update the bottleneck distance
max_distance = max(distances)
        # Store results as attributes: centers, their indices, the bottleneck distance and labels
self.centers = centers
self.centers_index = centers_index
self.max_distance = max_distance
self.labels = labels
@staticmethod
def distance(u, v):
displacement = u - v
return np.sqrt(displacement.dot(displacement))
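A small usage sketch (not from the source), fitting the greedy k-center heuristic on random 2-D points:
if __name__ == '__main__':
    # Illustrative only: 100 random points, 5 centers.
    rng_points = np.random.rand(100, 2)
    model = GreedyKCenter()
    model.fit(rng_points, k=5)
    print(model.centers_index)   # indices of the chosen centers
    print(model.max_distance)    # bottleneck (max point-to-center) distance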
|
433271
|
import numpy as np
import pandas as pd
from pandas.io.parsers import read_csv
from BOAmodel import *
from collections import defaultdict
from random import sample
""" parameters """
# The following parameters are recommended to change depending on the size and complexity of the data
N = 2000 # number of rules to be used in SA_patternbased and also the output of generate_rules
Niteration = 500 # number of iterations in each chain
Nchain = 2 # number of chains in the simulated annealing search algorithm
supp = 5 # 5% is a generally good number. The higher this supp, the 'larger' a pattern is
maxlen = 3 # maximum length of a pattern
# \rho = alpha/(alpha+beta). Make sure \rho is close to one when choosing alpha and beta.
alpha_1 = 500 # alpha_+
beta_1 = 1 # beta_+
alpha_2 = 500 # alpha_-
beta_2 = 1 # beta_-
""" input file """
# notice that in the example, X is already binary coded.
# Data has to be binary coded and the column names should have the form: attributename_attributevalue
filepathX = 'tictactoe_X.txt' # input file X
filepathY = 'tictactoe_Y.txt' # input file Y
df = read_csv(filepathX,header=0,sep=" ")
Y = np.loadtxt(open(filepathY,"rb"),delimiter=" ")
lenY = len(Y)
train_index = sample(range(lenY), int(0.70 * lenY))
test_index = [i for i in range(lenY) if i not in train_index]
model = BOA(df.iloc[train_index],Y[train_index])
model.generate_rules(supp,maxlen,N)
model.set_parameters(alpha_1,beta_1,alpha_2,beta_2,None,None)
rules = model.fit(Niteration,Nchain,print_message=True)
# test
Yhat = predict(rules,df.iloc[test_index])
TP,FP,TN,FN = getConfusion(Yhat,Y[test_index])
tpr = float(TP)/(TP+FN)
fpr = float(FP)/(FP+TN)
print('TP = {}, FP = {}, TN = {}, FN = {} \n accuracy = {}, tpr = {}, fpr = {}'.format(TP, FP, TN, FN, float(TP + TN) / (TP + TN + FP + FN), tpr, fpr))
|
433286
|
from doitpy.pyflakes import Pyflakes
from doitpy.coverage import Coverage, PythonPackage
DOIT_CONFIG = {
'default_tasks': ['pyflakes'],
'verbosity': 2,
}
def task_pyflakes():
flaker = Pyflakes()
yield flaker.tasks('**/*.py')
def task_coverage():
"""show coverage for all modules including tests"""
cov = Coverage(
[PythonPackage('sqla_yaml_fixtures', 'tests')],
config={'branch':True,},
)
yield cov.all() # create task `coverage`
yield cov.src() # create task `coverage_src`
|
433295
|
from .kor2vec import Kor2Vec
from .context_kor2vec import ContextKor2Vec
from .model.vocab import *
# from .pretrained import SejongVector
import warnings
warnings.filterwarnings("ignore")
|
433358
|
class Base:
"""The base object for all top-level client objects.
Parameters
----------
domain : one of rubicon.domain.*
The top-level object's domain instance.
config : rubicon.client.Config, optional
The config, which injects the repository to use.
"""
def __init__(self, domain, config=None):
self._config = config
self._domain = domain
def __str__(self):
return self._domain.__str__()
@property
def repository(self):
return self._config.repository
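A hypothetical sketch of a concrete client object built on Base; Experiment and its domain are illustrative names, not from the source:
class Experiment(Base):
    """Hypothetical top-level client object wrapping an experiment domain."""
    @property
    def name(self):
        return self._domain.name
# experiment = Experiment(domain=experiment_domain, config=config)
# experiment.repository  # repository injected via the config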
|
433366
|
import torch
import numpy as np
from torch.autograd import Variable
class loss_block:
def __init__(self):
super(loss_block, self).__init__()
    def loss(self, input_vals, labels):
        # NOTE: labels is accepted for API compatibility but unused here.
        return torch.mean(input_vals)
|
433395
|
import pytest
import numpy as np
from floodlight import XY
@pytest.fixture()
def example_sequence():
seq = np.array(
[np.NaN, np.NaN, -5.07, -2.7, np.NaN, np.NaN, 1.53, 27.13, None, 30.06]
)
return seq
@pytest.fixture()
def example_sequence_empty():
seq = np.empty(())
return seq
@pytest.fixture()
def example_sequence_two_dimensional():
seq = np.array([[0, 1, 2], [3, 4, 5]])
return seq
@pytest.fixture()
def example_sequence_full():
seq = np.array([-5.07, -2.7, 1.53, 27.13, 30.06])
return seq
@pytest.fixture()
def example_sequence_nan():
seq = np.array([np.NaN, np.NaN, np.NaN, np.NaN, np.NaN])
return seq
@pytest.fixture()
def example_xy_filter():
xy = XY(
np.array(
[
[np.NaN, -8.66, np.NaN, 1],
[np.NaN, -6.29, np.NaN, 2],
[-5.07, -4.31, np.NaN, 3],
[-2.7, -1.95, np.NaN, 4],
[np.NaN, -0.13, np.NaN, 5],
[np.NaN, 2.31, np.NaN, 6],
[1.53, 3.74, np.NaN, 7],
[5.13, 6.53, np.NaN, 8],
[7.02, 8.07, np.NaN, 9],
[9.48, 10.53, np.NaN, 8],
[10.09, np.NaN, np.NaN, 7],
[12.31, np.NaN, np.NaN, 6],
[13.22, np.NaN, np.NaN, 5],
[14.88, 14.88, np.NaN, 4],
[16.23, 17.05, np.NaN, 3],
[17.06, 18.37, np.NaN, 2],
[18.56, 19.27, np.NaN, 1],
[20.32, 20.46, np.NaN, 2],
[21.7, 22.61, np.NaN, 3],
[23.11, 23.54, np.NaN, 4],
[24.23, 25.25, np.NaN, 5],
[25.74, 25.95, np.NaN, 6],
[27.13, 28.06, np.NaN, 7],
[None, 29.55, np.NaN, 8],
[30.06, np.NaN, np.NaN, 9],
]
),
framerate=20,
)
return xy
@pytest.fixture()
def example_xy_filter_short():
xy = XY(np.array([[23.11, 23.54, np.NaN], [30.06, np.NaN, np.NaN]]), framerate=20)
return xy
@pytest.fixture()
def example_xy_filter_one_frame():
xy = XY(np.array((0, 1, np.NaN)), framerate=20)
return xy
@pytest.fixture()
def example_xy_filter_empty():
xy = XY(np.array(()), framerate=20)
return xy
|
433402
|
import os
import tensorflow as tf
from keras import backend as K  # used by get_session2 below (K.set_session)
num_threads = os.environ.get('OMP_NUM_THREADS')
if num_threads is not None:
    num_threads = int(num_threads)  # ConfigProto expects an integer thread count
def get_session(gpu_fraction=0.75):
"""Assume that you have 6GB of GPU memory and want to allocate ~2GB"""
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
if num_threads:
print("with nthreads=%s" % num_threads)
return tf.Session(config=tf.ConfigProto(
allow_soft_placement=True,
gpu_options=gpu_options, intra_op_parallelism_threads=num_threads))
else:
# return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, device_count={'GPU':1}))
return tf.Session(config=tf.ConfigProto(
allow_soft_placement=True,
gpu_options=gpu_options))
# KTF.set_session(get_session())
def get_session2(CPU, GPU):
    # Default to CPU-only so the counts are always defined
    num_CPU = 1
    num_GPU = 0
    if GPU:
        num_GPU = 1
        num_CPU = 1
    if CPU:
        num_CPU = 1
        num_GPU = 0
config = tf.ConfigProto(intra_op_parallelism_threads=num_threads,
inter_op_parallelism_threads=num_threads,
allow_soft_placement=True,
device_count={'CPU': num_CPU,
'GPU': num_GPU}
)
session = tf.Session(config=config)
K.set_session(session)
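A brief usage sketch, assuming TF 1.x with standalone Keras (per the KTF comment above):
if __name__ == '__main__':
    # Reserve ~50% of GPU memory for this process and register the session with Keras.
    K.set_session(get_session(gpu_fraction=0.5))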
|
433419
|
from helper_helper import *
from helper_helper import _time
@unittest.skipUnless( os.environ.get('TEST_API','true').lower()=='true', 'skipping api' )
class ApiHelper(Chai):
def setUp(self):
super(ApiHelper,self).setUp()
self.series = Timeseries(self.client, type='series', prefix='kairos',
read_func=int, #write_func=str,
intervals={
'minute' : {
'step' : 60,
'steps' : 5,
},
'hour' : {
'step' : 3600,
'resolution' : 60,
},
'bulk-hour' : {
'step' : 3600,
}
} )
self.series.delete_all()
def tearDown(self):
self.series.delete_all()
def test_list(self):
self.series.insert( 'test', 32, timestamp=_time(0) )
self.series.insert( 'test1', 32, timestamp=_time(0) )
self.series.insert( 'test2', 32, timestamp=_time(0) )
self.series.insert( 'test', 32, timestamp=_time(0) )
res = sorted(self.series.list())
assert_equals( ['test', 'test1', 'test2'], res )
self.series.delete('test')
self.series.delete('test1')
self.series.delete('test2')
def test_properties(self):
self.series.insert( 'test', 32, timestamp=_time(0) )
self.series.insert( 'test', 32, timestamp=_time(60) )
self.series.insert( 'test', 32, timestamp=_time(600) )
res = self.series.properties('test')
assert_equals( _time(0), res['minute']['first'] )
assert_equals( _time(600), res['minute']['last'] )
assert_equals( _time(0), res['hour']['first'] )
assert_equals( _time(0), res['hour']['last'] )
self.series.delete('test')
def test_iterate(self):
self.series.insert( 'test', 32, timestamp=_time(0) )
self.series.insert( 'test', 42, timestamp=_time(60) )
self.series.insert( 'test', 52, timestamp=_time(600) )
# There should be a result for every possible step between first and last
res = list(self.series.iterate('test','minute'))
assert_equals( 11, len(res) )
assert_equals( (_time(0),[32]), res[0] )
assert_equals( (_time(60),[42]), res[1] )
assert_equals( (_time(120),[]), res[2] )
assert_equals( (_time(600),[52]), res[-1] )
# With resolutions, there should be a result only for where there's data
res = list(self.series.iterate('test','hour'))
assert_equals( 3, len(res) )
assert_equals( (_time(0),[32]), res[0] )
assert_equals( (_time(60),[42]), res[1] )
assert_equals( (_time(600),[52]), res[2] )
# Without resolutions, there should be a single result
res = list(self.series.iterate('test','bulk-hour'))
assert_equals( 1, len(res) )
assert_equals( (_time(0),[32,42,52]), res[0] )
self.series.delete('test')
|
433449
|
from flask import request
from test import client
from app.utils import SERIES, SERIES_URL
from app.utils import get_by_id, finder
def test_home_page(client):
""" Testcase for home url """
response = client.get("/api")
assert SERIES_URL == response.get_json()
def test_series(client):
""" Testcase for all quotes in series """
for i in SERIES:
response = client.get(f"/{i}")
assert finder(i) == response.get_json()
def test_random_series(client):
""" Testcase for random quotes in series """
for i in SERIES:
response = client.get(f"/random/{i}")
assert response.get_json() in finder(i)
def test_series_by_id(client):
""" Testcase for get quotes by series id """
for i in SERIES:
response = client.get(f"/{i}/10")
assert response.get_json() == get_by_id(i, 10)
|
433501
|
import xml.etree.ElementTree as ET
import py7zr
import json
import os
import glob
import re
import argparse
from pathlib import Path
from bs4 import BeautifulSoup
from multiprocessing import Pool, cpu_count
from tqdm import tqdm
XML_PREFIX = '<?xml version="1.0" encoding="utf-8"?>\n<posts>\n'
XML_SUFFIX = '\n</posts>'
posts = {
'Id': 'INTEGER',
'PostTypeId': 'INTEGER', # 1: Question, 2: Answer
'ParentId': 'INTEGER', # (only present if PostTypeId is 2)
'AcceptedAnswerId': 'INTEGER', # (only present if PostTypeId is 1)
'CreationDate': 'DATETIME',
'Score': 'INTEGER',
'ViewCount': 'INTEGER',
'Body': 'TEXT',
'OwnerUserId': 'INTEGER', # (present only if user has not been deleted)
'OwnerDisplayName': 'TEXT',
'LastEditorUserId': 'INTEGER',
'LastEditorDisplayName': 'TEXT', # ="<NAME>"
'LastEditDate': 'DATETIME', # ="2009-03-05T22:28:34.823"
'LastActivityDate': 'DATETIME', # ="2009-03-11T12:51:01.480"
'CommunityOwnedDate': 'DATETIME', # (present only if post is community wikied)
'Title': 'TEXT',
'Tags': 'TEXT',
'AnswerCount': 'INTEGER',
'CommentCount': 'INTEGER',
'FavoriteCount': 'INTEGER',
'ClosedDate': 'DATETIME'
}
def extract_7zip(source_dir):
with py7zr.SevenZipFile(os.path.join(source_dir, 'stackoverflow.com-Posts.7z'), mode='r') as z:
z.extractall()
def process_xml_line(line):
line = line.strip()
    line = XML_PREFIX + line + XML_SUFFIX
try:
root = ET.fromstring(line.strip())
    except ET.ParseError:
return None
if root is None:
return None
assert len(root) == 1
element = root[0]
data = {}
for key in posts.keys():
data[key] = element.get(key)
return data
def txt_to_json(path, outpath, filesize=1800000, max_split=8):
Path(outpath).mkdir(parents=True, exist_ok=True)
file_index = 0
pool = Pool(cpu_count())
linecount = 0
fw = open('{}/split-{:03d}.json'.format(outpath, file_index), 'w', encoding='utf-8')
for file in glob.glob("{}/*.txt".format(path)):
lines = [line for line in open(file, 'r')]
print(file)
with tqdm(total=len(lines), desc='Processing') as pbar:
for data in pool.imap(process_xml_line, lines, 1000):
pbar.update()
if data is not None:
linecount += 1
fw.write(json.dumps(data) + '\n')
if linecount % filesize == 0 and file_index < max_split - 1:
file_index += 1
if not fw.closed:
fw.close()
fw = open('{}/split-{:03d}.json'.format(outpath, file_index), 'w', encoding='utf-8')
print(linecount)
if not fw.closed:
fw.close()
def process_chunk(ex):
description = ''
if ex['Title'] is not None:
description += ex['Title'].strip()
soup = BeautifulSoup(ex['Body'].strip(), features="lxml")
if soup.find('pre'):
soup.pre.decompose()
description += ' ' + soup.get_text()
return description
def parse_nl_data(path, outpath):
Path(outpath).mkdir(parents=True, exist_ok=True)
pool = Pool(cpu_count())
total_files = sum(1 for _ in glob.glob("{}/*.json".format(path)))
for part in range(total_files):
with open('{}/split-{:03d}.json'.format(path, part), 'r', encoding='utf-8') as f:
data = [json.loads(line.strip()) for line in f]
results = []
with tqdm(total=len(data), desc='Processing') as pbar:
for i, ex in enumerate(pool.imap(process_chunk, data, 1000)):
pbar.update()
tokens = ex.split()
if len(tokens) > 10:
results.append(' '.join(tokens))
if part == total_files - 1:
with open('{}/test.description.txt'.format(outpath), 'w', encoding='utf-8') as fw:
fw.write('\n'.join(results[:10000]))
with open('{}/valid.description.txt'.format(outpath), 'w', encoding='utf-8') as fw:
fw.write('\n'.join(results[10000:20000]))
with open('{}/train.{}.description.txt'.format(outpath, part), 'w', encoding='utf-8') as fw:
fw.write('\n'.join(results[20000:]))
else:
with open('{}/train.{}.description.txt'.format(outpath, part), 'w', encoding='utf-8') as fw:
fw.write('\n'.join(results))
def split_xml_java_python(inpath, outpath):
Path(outpath).mkdir(parents=True, exist_ok=True)
file_index = 0
linecount = 0
selected_ids = set()
with open(inpath, "r") as f:
next(f)
fw = open('{}/questions_{}.txt'.format(outpath, file_index), 'w', encoding='utf-8')
for line in f:
line = line.strip()
matches = re.findall(r'<row Id=\"(.+?)\"', line)
assert len(matches) <= 1
if len(matches) == 0:
continue
row_id = int(matches[0])
matches = re.findall(r'Tags=\"(.+?)\"', line)
assert len(matches) <= 1
if len(matches) == 0:
continue
matches = matches[0]
if not ('java' in matches or 'python' in matches):
continue
selected_ids.add(row_id)
fw.write(line + '\n')
linecount += 1
if linecount % 1000000 == 0:
file_index += 1
if not fw.closed:
fw.close()
fw = open('{}/questions_{}.txt'.format(outpath, file_index), 'w', encoding='utf-8')
print(linecount)
if not fw.closed:
fw.close()
linecount = 0
file_index = 0
with open(inpath, "r") as f:
next(f)
fw = open('{}/answers_{}.txt'.format(outpath, file_index), 'w', encoding='utf-8')
for line in f:
line = line.strip()
matches = re.findall(r'ParentId=\"(.+?)\"', line)
if len(matches) != 1:
continue
parent_id = int(matches[0])
if parent_id in selected_ids:
fw.write(line + '\n')
linecount += 1
if linecount % 1000000 == 0:
file_index += 1
if not fw.closed:
fw.close()
fw = open('{}/answers_{}.txt'.format(outpath, file_index), 'w', encoding='utf-8')
print(linecount)
if not fw.closed:
fw.close()
def split_xml(inpath, outpath):
Path(outpath).mkdir(parents=True, exist_ok=True)
file_index = 0
linecount = 0
with open(inpath, "r") as f:
next(f)
fw = open('{}/posts_{}.txt'.format(outpath, file_index), 'w', encoding='utf-8')
for line in f:
line = line.strip()
fw.write(line + '\n')
linecount += 1
if linecount % 5000000 == 0:
file_index += 1
if not fw.closed:
fw.close()
fw = open('{}/posts_{}.txt'.format(outpath, file_index), 'w', encoding='utf-8')
print(linecount)
if not fw.closed:
fw.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--src_dir", type=str, help='Source directory')
args = parser.parse_args()
# extract the 7zip file into xml (file size will be ~80GB)
extract_7zip(args.src_dir)
# to tackle a large file (of 80GB), we split and dump them into xml shards
split_xml(
'{}/Posts.xml'.format(args.src_dir), '{}/xml_shards'.format(args.src_dir)
)
# convert the xml-style lines into dictionary object
# ~50M posts are saved in 8 files, each with 6.25M
txt_to_json(
'{}/xml_shards'.format(args.src_dir),
'{}/json_shards'.format(args.src_dir),
filesize=6250000
)
# prepare the NL examples
parse_nl_data(
'{}/json_shards'.format(args.src_dir), '{}/desc_shards'.format(args.src_dir)
)
|
433512
|
class Kite(object):
def __init__(self):
self.code = "kite"
def foo(self):
print("bar!")
|
433615
|
from models.multiple_solution.swarm_based.EPO import BaseEPO
from utils.FunctionUtil import *
## Setting parameters
root_paras = {
"problem_size": 100,
"domain_range": [-100, 100],
"print_train": True,
"objective_func": C30
}
epo_paras = {
"epoch": 500,
"pop_size": 100
}
## Run model
md = BaseEPO(root_algo_paras=root_paras, epo_paras=epo_paras)
md._train__()
|
433652
|
import time
import boto3
import os
import sys
import logging
from data_mesh_util.lib.ApiAutomator import ApiAutomator
sys.path.append(os.path.join(os.path.dirname(__file__), "resource"))
sys.path.append(os.path.join(os.path.dirname(__file__), "lib"))
from data_mesh_util.lib.SubscriberTracker import *
class DataMeshProducer:
_data_mesh_account_id = None
_data_producer_account_id = None
_data_mesh_manager_role_arn = None
_session = None
_iam_client = None
_sts_client = None
_config = {}
_current_region = None
_log_level = None
_logger = logging.getLogger("DataMeshProducer")
stream_handler = logging.StreamHandler(sys.stdout)
_logger.addHandler(stream_handler)
_data_mesh_account_id = None
_data_producer_role_arn = None
_data_mesh_credentials = None
_data_mesh_arn = None
_data_mesh_boto_session = None
_subscription_tracker = None
_data_producer_identity = None
_producer_automator = None
_mesh_automator = None
def __init__(self, data_mesh_account_id: str, region_name: str = 'us-east-1', log_level: str = "INFO",
use_credentials=None):
self._data_mesh_account_id = data_mesh_account_id
if region_name is None:
raise Exception("Cannot initialize a Data Mesh Producer without an AWS Region")
else:
self._current_region = region_name
# Assume the producer account DataMeshProducer role, unless we have been supplied temporary credentials for that role
self._session, _producer_credentials, _producer_arn = utils.assume_iam_role(
role_name=DATA_MESH_PRODUCER_ROLENAME,
region_name=self._current_region,
use_credentials=use_credentials
)
self._iam_client = self._session.client('iam')
self._sts_client = self._session.client('sts')
self._log_level = log_level
self._logger.setLevel(log_level)
self._data_producer_identity = self._sts_client.get_caller_identity()
self._data_producer_account_id = self._data_producer_identity.get('Account')
self._producer_automator = ApiAutomator(target_account=self._data_producer_account_id,
session=self._session, log_level=self._log_level)
# now assume the DataMeshProducer-<account-id> Role in the Mesh Account
self._data_mesh_session, self._data_mesh_credentials, self._data_mesh_arn = utils.assume_iam_role(
role_name=utils.get_central_role_name(self._data_producer_account_id, PRODUCER),
region_name=self._current_region,
use_credentials=_producer_credentials,
target_account=self._data_mesh_account_id
)
# validate that we are running in the data mesh account
utils.validate_correct_account(self._data_mesh_credentials, self._data_mesh_account_id)
# generate an API Automator in the mesh
self._mesh_automator = ApiAutomator(target_account=self._data_mesh_account_id,
session=self._data_mesh_session, log_level=self._log_level)
self._logger.debug("Created new STS Session for Data Mesh Admin Producer")
self._logger.debug(self._data_mesh_credentials)
self._subscription_tracker = SubscriberTracker(credentials=self._data_mesh_credentials,
data_mesh_account_id=data_mesh_account_id,
region_name=self._current_region,
log_level=log_level)
def _create_mesh_table(self, table_def: dict, data_mesh_glue_client, source_database_name: str,
data_mesh_database_name: str,
producer_account_id: str,
data_mesh_account_id: str, create_public_metadata: bool = True,
expose_table_references_with_suffix: str = "_link", use_original_table_name: bool = False):
        '''
        API to create a table as a data product in the data mesh
        :param table_def:
        :param data_mesh_glue_client:
        :param source_database_name:
        :param data_mesh_database_name:
        :param producer_account_id:
        :param data_mesh_account_id:
        :param create_public_metadata:
        :param expose_table_references_with_suffix:
        :param use_original_table_name:
        :return:
        '''
# cleanup the TableInfo object to be usable as a TableInput
# remove properties from a TableInfo object returned from get_table to be compatible with put_table
keys = [
'DatabaseName', 'CreateTime', 'UpdateTime', 'CreatedBy', 'IsRegisteredWithLakeFormation', 'CatalogId',
'Tags'
]
t = utils.remove_dict_keys(input_dict=table_def, remove_keys=keys)
t['Owner'] = producer_account_id
self._logger.debug("Existing Table Definition")
self._logger.debug(t)
table_name = t.get('Name')
# create the glue catalog entry
try:
data_mesh_glue_client.create_table(
DatabaseName=data_mesh_database_name,
TableInput=t
)
self._logger.info(f"Created new Glue Table {table_name}")
except data_mesh_glue_client.exceptions.from_code('AlreadyExistsException'):
self._logger.info(f"Glue Table {table_name} Already Exists")
table_partitions = self._producer_automator.get_table_partitions(
database_name=source_database_name,
table_name=table_name
)
if table_partitions is not None and len(table_partitions) > 0:
self._mesh_automator.create_table_partition_metadata(
database_name=data_mesh_database_name,
table_name=table_name,
partition_input_list=table_partitions
)
# grant access to the producer account
perms = ['INSERT', 'SELECT', 'ALTER', 'DELETE', 'DESCRIBE']
permissions_granted = self._mesh_automator.lf_grant_permissions(
data_mesh_account_id=self._data_mesh_account_id,
principal=producer_account_id,
database_name=data_mesh_database_name,
table_name=table_name,
permissions=perms,
grantable_permissions=perms
)
# if create public metadata is True, then grant describe to the general data mesh consumer role
if create_public_metadata is True:
self._mesh_automator.lf_grant_permissions(
data_mesh_account_id=self._data_mesh_account_id,
principal=utils.get_role_arn(self._data_mesh_account_id, DATA_MESH_READONLY_ROLENAME),
database_name=data_mesh_database_name,
table_name=table_name,
permissions=['DESCRIBE'],
grantable_permissions=None
)
self._logger.info(f"Granted Describe on {table_name} to {DATA_MESH_READONLY_ROLENAME}")
# in the producer account, accept the RAM share after 1 second - seems to be an async delay
if permissions_granted > 0:
time.sleep(1)
self._producer_automator.accept_pending_lf_resource_shares(
sender_account=data_mesh_account_id
)
# create a resource link for the data mesh table in producer account
        if use_original_table_name is True:
            link_table_name = table_name
        elif expose_table_references_with_suffix is not None:
            link_table_name = f"{table_name}{expose_table_references_with_suffix}"
        else:
            link_table_name = f"{table_name}_link"
self._producer_automator.create_remote_table(
data_mesh_account_id=self._data_mesh_account_id,
database_name=data_mesh_database_name,
local_table_name=link_table_name,
remote_table_name=table_name
)
return table_name, link_table_name
def _make_database_name(self, database_name: str):
return "%s-%s" % (database_name, self._data_producer_identity.get('Account'))
def migrate_tables_to_mesh(self, source_database_name: str,
create_public_metadata: bool = True,
table_name_regex: str = None,
domain: str = None,
data_product_name: str = None,
sync_mesh_catalog_schedule: str = None,
sync_mesh_crawler_role_arn: str = None):
# create a data product in the mesh, and share back to the producer as the same object
created_products = self.create_data_products(
source_database_name=source_database_name,
create_public_metadata=create_public_metadata,
table_name_regex=table_name_regex,
domain=domain,
data_product_name=data_product_name,
sync_mesh_catalog_schedule=sync_mesh_catalog_schedule,
sync_mesh_crawler_role_arn=sync_mesh_crawler_role_arn,
expose_data_mesh_db_name=source_database_name,
expose_table_references_with_suffix=None,
use_original_table_name=True
)
table_list = [t.get('SourceTable') for t in created_products.get('Tables')]
# remove the existing objects from the producer account
self._producer_automator.safe_remove_glue_objects(
database_name=source_database_name,
table_list=table_list
)
def create_data_products(self, source_database_name: str,
create_public_metadata: bool = True,
table_name_regex: str = None,
domain: str = None,
data_product_name: str = None,
sync_mesh_catalog_schedule: str = None,
sync_mesh_crawler_role_arn: str = None,
expose_data_mesh_db_name: str = None,
expose_table_references_with_suffix: str = "_link",
use_original_table_name: bool = False):
if create_public_metadata is None:
create_public_metadata = True
# generate the target database name for the mesh
data_mesh_database_name = self._make_database_name(source_database_name)
if expose_data_mesh_db_name is not None:
data_mesh_database_name = expose_data_mesh_db_name
# create clients for the new credentials in the data mesh account
data_mesh_glue_client = utils.generate_client(service='glue', region=self._current_region,
credentials=self._data_mesh_credentials)
data_mesh_lf_client = utils.generate_client(service='lakeformation', region=self._current_region,
credentials=self._data_mesh_credentials)
# load the specified tables to be created as data products
all_tables = self._producer_automator.load_glue_tables(
catalog_id=self._data_producer_account_id,
source_db_name=source_database_name,
table_name_regex=table_name_regex
)
# get or create the target database exists in the mesh account
self._mesh_automator.get_or_create_database(
database_name=data_mesh_database_name,
database_desc="Database to contain objects from Source Database %s.%s" % (
self._data_producer_account_id, source_database_name)
)
self._logger.info("Validated Data Mesh Database %s" % data_mesh_database_name)
# set default permissions on db
self._mesh_automator.set_default_db_permissions(database_name=data_mesh_database_name)
# grant the producer permissions to create tables on this database
self._mesh_automator.lf_grant_permissions(
data_mesh_account_id=self._data_mesh_account_id,
principal=self._data_producer_account_id,
database_name=data_mesh_database_name,
permissions=['CREATE_TABLE', 'DESCRIBE'],
grantable_permissions=None
)
self._logger.info("Granted access on Database %s to Producer" % data_mesh_database_name)
# grant the mesh permissions to administer the database
self._mesh_automator.lf_grant_permissions(
data_mesh_account_id=self._data_mesh_account_id,
principal=self._data_mesh_arn,
database_name=data_mesh_database_name,
permissions=['ALL'],
grantable_permissions=None
)
self._logger.info(
f"Granted describe access on Database {data_mesh_database_name} to Data Mesh {self._data_mesh_account_id}")
# get or create a data mesh shared database in the producer account
self._producer_automator.get_or_create_database(
database_name=data_mesh_database_name,
database_desc="Database to contain objects objects shared with the Data Mesh Account"
)
self._logger.info("Validated Producer Account Database %s" % data_mesh_database_name)
shared_objects = {
DATABASE_NAME: source_database_name,
'Tables': []
}
for table in all_tables:
table_s3_path = table.get('StorageDescriptor').get('Location')
table_s3_arn = utils.convert_s3_path_to_arn(table_s3_path)
# create a data lake location for the s3 path
try:
data_mesh_lf_client.register_resource(
ResourceArn=table_s3_arn,
UseServiceLinkedRole=True
)
except data_mesh_lf_client.exceptions.AlreadyExistsException:
pass
# grant data lake location access
producer_central_role_arn = utils.get_role_arn(account_id=self._data_mesh_account_id,
role_name=utils.get_central_role_name(
account_id=self._data_producer_account_id,
type=PRODUCER))
data_mesh_lf_client.grant_permissions(
Principal={
'DataLakePrincipalIdentifier': producer_central_role_arn
},
Resource={
'DataLocation': {'ResourceArn': table_s3_arn}
},
Permissions=['DATA_LOCATION_ACCESS']
)
# create a mesh table for the data product table
created_table = self._create_mesh_table(
table_def=table,
data_mesh_glue_client=data_mesh_glue_client,
source_database_name=source_database_name,
data_mesh_database_name=data_mesh_database_name,
producer_account_id=self._data_producer_account_id,
data_mesh_account_id=self._data_mesh_account_id,
create_public_metadata=create_public_metadata,
expose_table_references_with_suffix=expose_table_references_with_suffix,
use_original_table_name=use_original_table_name
)
# grant the mesh permissions to describe and select from the table
self._mesh_automator.lf_grant_permissions(
data_mesh_account_id=self._data_mesh_account_id,
principal=self._data_mesh_arn,
database_name=data_mesh_database_name,
table_name=table.get('Name'),
permissions=['DESCRIBE', 'SELECT'],
grantable_permissions=None
)
self._logger.info(
f"Granted describe access on Table {table.get('Name')} to Data Mesh {self._data_mesh_account_id}")
shared_objects.get('Tables').append({
'SourceTable': created_table[0],
'LinkTable': created_table[1]
})
# propagate lakeformation tags and attach to table
if 'Tags' in table:
for tag in table.get('Tags').items():
self._mesh_automator.attach_tag(database=data_mesh_database_name, table=table.get('Name'), tag=tag)
# add the domain tag
if domain is not None:
self._mesh_automator.attach_tag(
database=data_mesh_database_name,
table=table.get('Name'),
tag=(DOMAIN_TAG_KEY, {'TagValues': [domain], 'ValidValues': [domain]})
)
# add the data product tag
if data_product_name is not None:
self._mesh_automator.attach_tag(
database=data_mesh_database_name,
table=table.get('Name'),
tag=(DATA_PRODUCT_TAG_KEY, {'TagValues': [data_product_name], 'ValidValues': [data_product_name]})
)
# add a bucket policy entry allowing the data mesh lakeformation service linked role to perform GetObject*
table_bucket = table_s3_path.split("/")[2]
self._producer_automator.add_bucket_policy_entry(
principal_account=self._data_mesh_account_id,
access_path=table_bucket
)
if sync_mesh_catalog_schedule is not None:
glue_crawler = self._producer_automator.create_crawler(
database_name=data_mesh_database_name,
                table_name=created_table[0],  # created_table is a (table_name, link_table_name) tuple
s3_location=table_s3_path,
crawler_role=sync_mesh_crawler_role_arn,
sync_schedule=sync_mesh_catalog_schedule
)
return shared_objects
def get_data_product(self, database_name: str, table_name_regex: str):
# generate a new glue client for the data mesh account
data_mesh_glue_client = utils.generate_client('glue', region=self._current_region,
credentials=self._data_mesh_credentials)
# grab the tables that match the regex
all_tables = self._load_glue_tables(
glue_client=data_mesh_glue_client,
catalog_id=self._data_mesh_account_id,
source_db_name=self._make_database_name(database_name),
table_name_regex=table_name_regex
)
response = []
for t in all_tables:
response.append({"Database": t.get('DatabaseName'), "TableName": t.get('Name'),
"Location": t.get('StorageDescriptor').get("Location")})
return response
def list_pending_access_requests(self):
'''
Lists all access requests that have been made by potential consumers. Pending requests can be approved or denied
with close_access_request()
:return:
'''
me = self._sts_client.get_caller_identity().get('Account')
return self._subscription_tracker.list_subscriptions(owner_id=me, request_status=STATUS_PENDING)
def approve_access_request(self, request_id: str,
grant_permissions: list = None,
grantable_permissions: list = None,
decision_notes: str = None):
'''
API to close an access request as approved. Approvals must be accompanied by the
permissions to grant to the specified principal.
:param request_id:
:param grant_permissions:
:param grantable_permissions
:param decision_notes:
:return:
'''
# load the subscription
subscription = self._subscription_tracker.get_subscription(subscription_id=request_id)
# validate types provided for grants and grantable
grant_perms = utils.ensure_list(grant_permissions)
grantable_perms = utils.ensure_list(grantable_permissions)
if subscription is None:
raise Exception(f"Unable to resolve Subscription {request_id}")
elif subscription.get(STATUS) == STATUS_ACTIVE:
raise Exception(f"Subscription is already Active")
# approver can override the requested grants
if grant_perms is None or grant_perms == []:
set_permissions = subscription.get(REQUESTED_GRANTS)
else:
set_permissions = grant_perms
# grant the approved permissions in lake formation
data_mesh_lf_client = utils.generate_client(service='lakeformation', region=self._current_region,
credentials=self._data_mesh_credentials)
tables = subscription.get(TABLE_NAME)
ram_shares = {}
table_arns = []
table_list = []
if tables is not None:
if isinstance(tables, list):
table_list = tables
else:
table_list = [tables]
# grant describe on the database
self._mesh_automator.lf_grant_permissions(
data_mesh_account_id=self._data_mesh_account_id,
principal=subscription.get(SUBSCRIBER_PRINCIPAL),
database_name=subscription.get(DATABASE_NAME),
permissions=['DESCRIBE'],
grantable_permissions=None
)
for t in table_list:
# confirm that the requested object exists
self._mesh_automator.describe_table(database_name=subscription.get(DATABASE_NAME), table_name=t)
# resolve the original database name
original_db = subscription.get(DATABASE_NAME).replace(f"-{self._data_producer_account_id}", "")
# get the catalog definition of this table including if its a regex subscription
all_tables = self._producer_automator.load_glue_tables(
catalog_id=self._data_producer_account_id,
source_db_name=original_db,
table_name_regex=t,
load_lf_tags=False
)
for resolved_table in all_tables:
table_name = resolved_table.get('Name')
# get the data location for the table
table_s3_path = resolved_table.get('StorageDescriptor').get('Location')
# add a bucket policy entry allowing the consumer lakeformation service linked role to perform GetObject*
table_bucket = table_s3_path.split("/")[2]
self._producer_automator.add_bucket_policy_entry(
principal_account=subscription.get(SUBSCRIBER_PRINCIPAL),
access_path=table_bucket
)
# grant validated permissions to object
self._mesh_automator.lf_grant_permissions(
data_mesh_account_id=self._data_mesh_account_id,
principal=subscription.get(SUBSCRIBER_PRINCIPAL),
database_name=subscription.get(DATABASE_NAME),
table_name=table_name,
permissions=set_permissions,
grantable_permissions=grantable_perms
)
rs = utils.load_ram_shares(lf_client=data_mesh_lf_client,
data_mesh_account_id=self._data_mesh_account_id,
database_name=subscription.get(DATABASE_NAME), table_name=t,
target_principal=subscription.get(SUBSCRIBER_PRINCIPAL))
ram_shares.update(rs)
# add the shared table arn to the list of ARNs
table_arns.append(utils.get_table_arn(region_name=self._current_region,
catalog_id=self._data_mesh_account_id,
database_name=subscription.get(DATABASE_NAME),
table_name=table_name))
self._logger.info("Subscription RAM Shares")
self._logger.info(ram_shares)
# apply a glue catalog resource policy allowing the consumer to access objects by tag
self._add_principal_to_glue_resource_policy(
database_name=subscription.get(DATABASE_NAME),
tables=subscription.get(TABLE_ARNS),
add_principal=subscription.get(SUBSCRIBER_PRINCIPAL)
)
# update the subscription to reflect the changes
self._subscription_tracker.update_status(
subscription_id=request_id, status=STATUS_ACTIVE,
permitted_grants=grant_perms, grantable_grants=grantable_perms, notes=decision_notes,
ram_shares=ram_shares, table_arns=table_arns
)
def _add_principal_to_glue_resource_policy(self, database_name: str, tables: list, add_principal: str):
self._mesh_automator.add_tbac_glue_catalog_resource_policy(
region=self._current_region,
database_name=database_name,
tables=tables,
producer_account_id=self._data_mesh_account_id,
consumer_account_id=add_principal
)
def deny_access_request(self, request_id: str,
decision_notes: str = None):
'''
API to close an access request as denied. The reason for the denial should be included in decision_notes.
:param request_id:
:param decision_notes:
:return:
'''
return self._subscription_tracker.update_status(
subscription_id=request_id, status=STATUS_DENIED,
notes=decision_notes
)
def update_subscription_permissions(self, subscription_id: str, grant_permissions: list, notes: str,
grantable_permissions: list = None):
'''
Update the permissions on a subscription
:param subscription_id:
:param grant_permissions:
:param notes:
:return:
'''
subscription = self._subscription_tracker.get_subscription(subscription_id)
# validate types provided for grants and grantable
grant_perms = utils.ensure_list(grant_permissions)
grantable_perms = utils.ensure_list(grantable_permissions)
current_permissions = subscription.get(PERMITTED_GRANTS)
current_grantable_permissions = subscription.get(GRANTABLE_GRANTS)
if current_grantable_permissions is None:
current_grantable_permissions = []
# calculate the permissions to be added
perms_to_add = list(set(grant_perms) - set(current_permissions))
grantable_perms_to_add = []
if len(grantable_perms or '') > 0:
grantable_perms_to_add = list(set(grantable_perms) - set(current_grantable_permissions))
# cant add grantable permissions without granting them first
if len(perms_to_add or '') == 0 and len(grantable_perms_to_add or '') > 0:
perms_to_add = grantable_perms_to_add
permissions_modified = 0
if len(perms_to_add or '') > 0:
permissions_modified += self._mesh_automator.lf_batch_grant_permissions(
data_mesh_account_id=self._data_mesh_account_id,
target_account_id=subscription.get(SUBSCRIBER_PRINCIPAL),
database_name=subscription.get(DATABASE_NAME),
table_list=subscription.get(TABLE_NAME),
permissions=perms_to_add,
grantable_permissions=grantable_perms_to_add
)
# modify the current permissions to reflect the state of the addition
current_permissions.extend(perms_to_add)
current_grantable_permissions.extend(grantable_perms_to_add)
# calculate the permissions to be removed
perms_to_remove = list(set(current_permissions) - set(grant_perms))
grantable_perms_to_remove = list(set(current_grantable_permissions) - set(grantable_perms))
# revoke permissions at the lakeformation level
if len(perms_to_remove or '') > 0:
permissions_modified += self._mesh_automator.lf_batch_revoke_permissions(
data_mesh_account_id=self._data_mesh_account_id,
consumer_account_id=subscription.get(SUBSCRIBER_PRINCIPAL),
database_name=subscription.get(DATABASE_NAME),
table_list=subscription.get(TABLE_NAME),
permissions=perms_to_remove,
grantable_permissions=grantable_perms_to_remove
)
self._subscription_tracker.update_grants(
subscription_id=subscription_id,
permitted_grants=grant_perms,
grantable_grants=grantable_perms,
notes=notes
)
self._logger.info(f"Modified {permissions_modified} Permissions")
def get_subscription(self, request_id: str) -> dict:
return self._subscription_tracker.get_subscription(subscription_id=request_id)
def delete_subscription(self, subscription_id: str, reason: str):
'''
Soft delete a subscription
:param subscription_id:
:param reason:
:return:
'''
subscription = self.get_subscription(request_id=subscription_id)
if subscription is None:
raise Exception("No Subscription Found")
else:
lf_client = self._data_mesh_session.client('lakeformation')
entries = []
# generate the list of permissions at table and column level
for t in subscription.get(TABLE_NAME):
entries.extend(self._mesh_automator.create_lf_permissions_entry(
data_mesh_account_id=self._data_mesh_account_id,
target_account_id=subscription.get(SUBSCRIBER_PRINCIPAL),
database_name=subscription.get(DATABASE_NAME),
table_name=t,
permissions=subscription.get(PERMITTED_GRANTS),
grantable_permissions=subscription.get(GRANTABLE_GRANTS),
target_batch=True
))
# add the database DESCRIBE grant
entries.extend(self._mesh_automator.create_lf_permissions_entry(
data_mesh_account_id=self._data_mesh_account_id,
target_account_id=subscription.get(SUBSCRIBER_PRINCIPAL),
database_name=subscription.get(DATABASE_NAME),
permissions=['DESCRIBE'],
grantable_permissions=['DESCRIBE'],
target_batch=True
))
lf_client.batch_revoke_permissions(
Entries=entries
)
return self._subscription_tracker.delete_subscription(subscription_id=subscription_id, reason=reason)
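A hypothetical end-to-end sketch; the account ID, database and domain names below are placeholders, not from the source:
if __name__ == '__main__':
    producer = DataMeshProducer(data_mesh_account_id='123456789012', region_name='us-east-1')
    shared = producer.create_data_products(
        source_database_name='my_source_db',
        create_public_metadata=True,
        domain='sales'
    )
    print(shared.get('Tables'))  # [{'SourceTable': ..., 'LinkTable': ...}, ...]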
|
433662
|
from enum import Enum
class PublicMethods(Enum):
"""
    Public methods that do not require request encryption
"""
KEY_INFO = 'key_info'
I_CLIENT_PUB_KEY = 'iclient_pub_key'
class AsymEncryptionHandShake(Enum):
ASYM_HAND_SHAKE = 'asym_hand_shake'
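A small sketch (not from the source) of how such an enum might be used to decide whether a request needs encryption:
def needs_encryption(method_name: str) -> bool:
    # Hypothetical helper: public methods are exempt from encryption.
    return method_name not in {m.value for m in PublicMethods}

assert needs_encryption('key_info') is False
assert needs_encryption('some_private_method') is True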
|
433694
|
from multiprocessing import Pool
import numpy as np
from mmcv.utils import print_log
from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
from mmdet.core.evaluation.mean_ap import get_cls_results
def calc_tpfpfn(det_bboxes, gt_bboxes, iou_thr=0.5):
"""Check if detected bboxes are true positive or false positive and if gt bboxes are false negative.
Args:
det_bboxes (ndarray): Detected bboxes of this image, of shape (m, 5).
gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).
iou_thr (float): IoU threshold to be considered as matched.
Default: 0.5.
Returns:
        tuple: (tp, fp, fn).
"""
num_dets = det_bboxes.shape[0]
num_gts = gt_bboxes.shape[0]
tp = 0
fp = 0
# if there is no gt bboxes in this image, then all det bboxes
# within area range are false positives
if num_gts == 0:
fp = num_dets
return tp, fp, 0
ious: np.ndarray = bbox_overlaps(det_bboxes, gt_bboxes)
# sort all dets in descending order by scores
sort_inds = np.argsort(-det_bboxes[:, -1])
gt_covered = np.zeros(num_gts, dtype=bool)
for i in sort_inds:
uncovered_ious = ious[i, gt_covered == 0]
if len(uncovered_ious):
iou_argmax = uncovered_ious.argmax()
iou_max = uncovered_ious[iou_argmax]
if iou_max >= iou_thr:
                gt_covered[np.where(gt_covered == 0)[0][iou_argmax]] = True
tp += 1
else:
fp += 1
else:
fp += 1
fn = (gt_covered == 0).sum()
return tp, fp, fn
def kaggle_map(
det_results, annotations, iou_thrs=(0.5, 0.55, 0.6, 0.65, 0.7, 0.75), logger=None, n_jobs=4, by_sample=False
):
"""Evaluate kaggle mAP of a dataset.
Args:
det_results (list[list]): [[cls1_det, cls2_det, ...], ...].
The outer list indicates images, and the inner list indicates
per-class detected bboxes.
annotations (list[dict]): Ground truth annotations where each item of
the list indicates an image. Keys of annotations are:
- `bboxes`: numpy array of shape (n, 4)
- `labels`: numpy array of shape (n, )
- `bboxes_ignore` (optional): numpy array of shape (k, 4)
- `labels_ignore` (optional): numpy array of shape (k, )
iou_thrs (list): IoU thresholds to be considered as matched.
Default: (0.5, 0.55, 0.6, 0.65, 0.7, 0.75).
logger (logging.Logger | str | None): The way to print the mAP
summary. See `mmdet.utils.print_log()` for details. Default: None.
n_jobs (int): Processes used for computing TP, FP and FN.
Default: 4.
by_sample (bool): Return AP by sample.
Returns:
tuple: (mAP, [dict, dict, ...])
"""
assert len(det_results) == len(annotations)
num_imgs = len(det_results)
num_classes = len(det_results[0]) # positive class num
pool = Pool(n_jobs)
eval_results = []
for i in range(num_classes):
# get gt and det bboxes of this class
cls_dets, cls_gts, _ = get_cls_results(det_results, annotations, i)
# compute tp and fp for each image with multiple processes
aps_by_thrs = []
aps_by_sample = np.zeros(num_imgs)
for iou_thr in iou_thrs:
tpfpfn = pool.starmap(calc_tpfpfn, zip(cls_dets, cls_gts, [iou_thr for _ in range(num_imgs)]))
iou_thr_aps = np.array([tp / (tp + fp + fn) for tp, fp, fn in tpfpfn])
if by_sample:
aps_by_sample += iou_thr_aps
aps_by_thrs.append(np.mean(iou_thr_aps))
eval_results.append(
{
"num_gts": len(cls_gts),
"num_dets": len(cls_dets),
"ap": np.mean(aps_by_thrs),
"ap_by_sample": None if not by_sample else aps_by_sample / len(iou_thrs),
}
)
pool.close()
aps = []
for cls_result in eval_results:
if cls_result["num_gts"] > 0:
aps.append(cls_result["ap"])
mean_ap = np.array(aps).mean().item() if aps else 0.0
print_log(f"\nKaggle mAP: {mean_ap}", logger=logger)
return mean_ap, eval_results
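A toy sanity check (assuming mmdet is installed): one detection matches the single GT box exactly, the other misses:
if __name__ == '__main__':
    dets = np.array([[0., 0., 10., 10., 0.9],      # x1, y1, x2, y2, score
                     [50., 50., 60., 60., 0.8]])
    gts = np.array([[0., 0., 10., 10.]])
    print(calc_tpfpfn(dets, gts, iou_thr=0.5))     # expected: (1, 1, 0)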
|
433719
|
class Database:
database = None
def __init__(self):
self.content = "User Data"
@staticmethod
def getInstance():
if not Database.database:
print("New database instance created")
Database.database = Database()
return Database.database
def describe(self):
print("The database contain:", self.content)
|
433811
|
from datetime import datetime
import pytz
from django.test.testcases import TestCase
from robber.expect import expect
from freezegun import freeze_time
from analytics import constants
from analytics.factories import AttachmentTrackingFactory
from data.cache_managers import allegation_cache_manager
from data.constants import MEDIA_TYPE_DOCUMENT, MEDIA_TYPE_AUDIO, MEDIA_TYPE_VIDEO
from data.factories import AllegationFactory, AttachmentFileFactory, OfficerAllegationFactory, AllegationCategoryFactory
from cr.queries import LatestDocumentsQuery
class LatestDocumentsQueryTestCase(TestCase):
@freeze_time('2019-04-03')
def test_execute(self):
allegation_1 = AllegationFactory(crid='123')
allegation_2 = AllegationFactory(crid='456')
allegation_3 = AllegationFactory(crid='789')
allegation_4 = AllegationFactory(crid='321')
allegation_5 = AllegationFactory(crid='987')
allegation_category_1 = AllegationCategoryFactory(id=1)
allegation_category_12 = AllegationCategoryFactory(id=2)
OfficerAllegationFactory(allegation=allegation_1, allegation_category=allegation_category_1)
OfficerAllegationFactory(allegation=allegation_1, allegation_category=allegation_category_1)
OfficerAllegationFactory(allegation=allegation_1, allegation_category=allegation_category_12)
attachment_file_1 = AttachmentFileFactory(
allegation=allegation_1,
title='CR document 1',
id=1,
tag='CR',
url='http://cr-document.com/1',
file_type=MEDIA_TYPE_DOCUMENT,
preview_image_url='http://preview.com/url1',
external_created_at=datetime(2019, 1, 19, 12, 1, 1, tzinfo=pytz.utc)
)
AttachmentFileFactory(
allegation=allegation_1,
title='CR document 2',
id=2,
tag='CR',
url='http://cr-document.com/2',
file_type=MEDIA_TYPE_DOCUMENT,
external_created_at=datetime(2019, 1, 14, 10, 12, 1, tzinfo=pytz.utc)
)
attachment_file_2 = AttachmentFileFactory(
allegation=allegation_2,
title='CR document 3',
id=3,
tag='CR',
url='http://cr-document.com/3',
file_type=MEDIA_TYPE_DOCUMENT,
preview_image_url='http://preview.com/url3',
external_created_at=datetime(2019, 1, 15, 9, 3, 1, tzinfo=pytz.utc)
)
AttachmentFileFactory(
allegation=allegation_2,
title='CR document 4',
id=4,
tag='OCIR',
url='http://cr-document.com/4',
file_type=MEDIA_TYPE_DOCUMENT,
preview_image_url='http://preview.com/url4',
external_created_at=datetime(2019, 1, 19, 17, 12, 5, tzinfo=pytz.utc)
)
with freeze_time(datetime(2019, 1, 20, 13, 2, 15, tzinfo=pytz.utc)):
AttachmentFileFactory(
allegation=allegation_2,
title='CR document 5',
id=5,
tag='AR',
url='http://cr-document.com/5',
file_type=MEDIA_TYPE_DOCUMENT,
preview_image_url='http://preview.com/url5',
external_created_at=None
)
AttachmentFileFactory(
allegation=allegation_3,
title='CR document 6',
id=6,
tag='CR',
url='http://cr-document.com/6',
file_type=MEDIA_TYPE_AUDIO,
preview_image_url='http://preview.com/url6',
external_created_at=datetime(2019, 1, 21, 6, 4, 12, tzinfo=pytz.utc)
)
AttachmentFileFactory(
allegation=allegation_3,
title='CR document 7',
id=7,
tag='CR',
url='http://cr-document.com/7',
file_type=MEDIA_TYPE_VIDEO,
preview_image_url='http://preview.com/url7',
external_created_at=datetime(2019, 1, 22, 4, 9, 12, tzinfo=pytz.utc)
)
attachment_file_3 = AttachmentFileFactory(
title='Tracking document 1',
id=8,
tag='CR',
url='http://cr-document.com/8',
file_type=MEDIA_TYPE_DOCUMENT,
preview_image_url='http://preview.com/url8',
allegation=allegation_4,
external_created_at=datetime(2014, 9, 14, 12, 0, 1, tzinfo=pytz.utc)
)
attachment_file_4 = AttachmentFileFactory(
title='Tracking document 2',
id=9,
tag='CR',
url='http://cr-document.com/9',
file_type=MEDIA_TYPE_DOCUMENT,
preview_image_url='http://preview.com/url9',
allegation=allegation_4,
external_created_at=datetime(2015, 9, 14, 12, 0, 1, tzinfo=pytz.utc)
)
AttachmentFileFactory(
title='Not appear attachment',
id=10,
tag='CR',
url='http://cr-document.com/10',
file_type=MEDIA_TYPE_DOCUMENT,
preview_image_url='http://preview.com/url10',
allegation=allegation_4,
external_created_at=datetime(2015, 6, 13, 12, 0, 1, tzinfo=pytz.utc)
)
attachment_file_5 = AttachmentFileFactory(
title='Tracking document 3',
id=11,
tag='CR',
url='http://cr-document.com/11',
file_type=MEDIA_TYPE_DOCUMENT,
preview_image_url='http://preview.com/url11',
allegation=allegation_5,
external_created_at=datetime(2015, 9, 14, 12, 0, 1, tzinfo=pytz.utc)
)
# Should not have this in result because show = False
AttachmentFileFactory(
allegation=allegation_1,
title='CR document 12',
id=12,
tag='CR',
url='http://cr-document.com/12',
file_type=MEDIA_TYPE_DOCUMENT,
preview_image_url='http://preview.com/url12',
external_created_at=datetime(2015, 9, 14, 12, 0, 1, tzinfo=pytz.utc),
show=False
)
        # Should still count, but not as the first row, because it is attached to a download event
attachment_file_6 = AttachmentFileFactory(
title='Attachment not appear because is download event',
id=13,
tag='CR',
url='http://cr-document.com/13',
file_type=MEDIA_TYPE_DOCUMENT,
preview_image_url='http://preview.com/url13',
allegation=allegation_4,
external_created_at=datetime(2015, 7, 13, 12, 0, 1, tzinfo=pytz.utc)
)
with freeze_time(datetime(2019, 1, 17, 12, 0, 1, tzinfo=pytz.utc)):
AttachmentTrackingFactory(attachment_file=attachment_file_3)
with freeze_time(datetime(2019, 1, 18, 12, 0, 1, tzinfo=pytz.utc)):
AttachmentTrackingFactory(attachment_file=attachment_file_4)
with freeze_time(datetime(2019, 1, 12, 12, 0, 1, tzinfo=pytz.utc)):
AttachmentTrackingFactory(attachment_file=attachment_file_5)
with freeze_time(datetime(2019, 10, 14, 12, 0, 1, tzinfo=pytz.utc)):
AttachmentTrackingFactory(
attachment_file=attachment_file_6,
kind=constants.DOWNLOAD_EVENT_TYPE)
allegation_cache_manager.cache_data()
results = LatestDocumentsQuery.execute(5)
expected_results = [
{
'id': attachment_file_1.id,
'allegation_id': '123',
'last_active_at': datetime(2019, 1, 19, 12, 1, 1, tzinfo=pytz.utc),
},
{
'id': attachment_file_4.id,
'allegation_id': '321',
'last_active_at': datetime(2019, 1, 18, 12, 0, 1, tzinfo=pytz.utc),
},
{
'id': attachment_file_2.id,
'allegation_id': '456',
'last_active_at': datetime(2019, 1, 15, 9, 3, 1, tzinfo=pytz.utc),
},
{
'id': attachment_file_5.id,
'allegation_id': '987',
'last_active_at': datetime(2019, 1, 12, 12, 0, 1, tzinfo=pytz.utc),
},
]
expect(len(results)).to.eq(len(expected_results))
for index, attachment_data in enumerate(results):
expected_attachment_data = expected_results[index]
expect(attachment_data.id).to.eq(expected_attachment_data['id'])
expect(attachment_data.allegation_id).to.eq(expected_attachment_data['allegation_id'])
expect(attachment_data.last_active_at).to.eq(expected_attachment_data['last_active_at'])
|
433821
|
from TeamPromoConnectionFactory import TeamPromoConnectionFactory
from TeamAssessmentConnectionFactory import TeamAssessmentConnectionFactory
# Testing factory for Promo Team
currentFactory = TeamPromoConnectionFactory()
RPCConnection = currentFactory.createRPCConnection()
HTTPConnection = currentFactory.createHTTPConnection()
RPCConnection.callProcedure()
HTTPConnection.callAPI()
# Testing factory for Assessment Team
currentFactory = TeamAssessmentConnectionFactory()
RPCConnection = currentFactory.createRPCConnection()
HTTPConnection = currentFactory.createHTTPConnection()
RPCConnection.callProcedure()
HTTPConnection.callAPI()
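For reference, a hypothetical sketch of the abstract-factory interface these tests assume; the real classes live in the imported modules:
class ConnectionFactory:
    """Illustrative base for the imported team factories (abstract factory pattern)."""
    def createRPCConnection(self):
        raise NotImplementedError
    def createHTTPConnection(self):
        raise NotImplementedError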
|
433859
|
from typing import Tuple
import gdsfactory as gf
from gdsfactory.geometry import check_width
def test_wmin_failing(layer: Tuple[int, int] = (1, 0)) -> None:
w = 50
min_width = 50 + 10 # component edges are smaller than min_width
c = gf.components.rectangle(size=(w, w), layer=layer)
gdspath = c.write_gds("wmin.gds")
# r = check_width(gdspath, min_width=min_width, layer=layer)
# print(check_width(gdspath, min_width=min_width, layer=layer))
assert check_width(gdspath, min_width=min_width, layer=layer) == 2
assert check_width(c, min_width=min_width, layer=layer) == 2
def test_wmin_passing(layer: Tuple[int, int] = (1, 0)) -> None:
w = 50
min_width = 50 - 10 # component edges are bigger than the min_width
c = gf.components.rectangle(size=(w, w), layer=layer)
gdspath = c.write_gds("wmin.gds")
# print(check_width(c, min_width=min_width, layer=layer))
# assert check_width(gdspath, min_width=min_width, layer=layer) is None
# assert check_width(c, min_width=min_width, layer=layer) is None
assert check_width(gdspath, min_width=min_width, layer=layer) == 0
assert check_width(c, min_width=min_width, layer=layer) == 0
if __name__ == "__main__":
# test_wmin_failing()
test_wmin_passing()
|
433860
|
from django.conf.urls import patterns, url
from contact_updater.views import prepopulate_agency, form_index
urlpatterns = patterns(
'',
url(r'^$', form_index, name='contact_updater_index'),
url(r'^(?P<slug>[-\w]+)/?$',
prepopulate_agency,
name='contact_updater_form'),
)
|
433889
|
import torch
from torch.testing._internal.common_utils import TestCase
from torch.testing._internal.common_utils import dtype2prec_DONTUSE
from depthwise_conv3d import DepthwiseConv3d
class TestConv(TestCase):
def test_Conv3d_depthwise_naive_groups_cuda(self, dtype=torch.float):
for depth_multiplier in [1, 2]:
m = DepthwiseConv3d(2, 2 * depth_multiplier, kernel_size=3, groups=2).to("cuda", dtype)
i = torch.randn(2, 2, 6, 6, 6, device="cuda", dtype=dtype).div_(2).requires_grad_()
output = m(i)
grad_output = torch.randn(2, 2 * depth_multiplier, 4, 4, 4, device="cuda", dtype=dtype) / 2
output.backward(grad_output)
offset = 1 * depth_multiplier
m1 = DepthwiseConv3d(1, 1 * depth_multiplier, kernel_size=3).to("cuda", dtype)
m1.weight.data = m.weight.data[:offset].clone()
m1.bias.data = m.bias.data[:offset].clone()
i1 = i.detach()[:, :1].clone().requires_grad_()
output1 = m1(i1)
output1.backward(grad_output[:, :offset].contiguous())
m2 = DepthwiseConv3d(1, 1 * depth_multiplier, kernel_size=3).to("cuda", dtype)
m2.weight.data.copy_(m.weight.data[offset:])
m2.bias.data.copy_(m.bias.data[offset:])
i2 = i.detach()[:, 1:].clone().requires_grad_()
output2 = m2(i2)
output2.backward(grad_output[:, offset:].contiguous())
self.assertEqual(output, torch.cat([output1, output2], 1),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
self.assertEqual(i.grad.data,
torch.cat([i1.grad.data, i2.grad.data], 1),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
self.assertEqual(m.bias.grad.data,
torch.cat([m1.bias.grad.data,
m2.bias.grad.data], 0),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
self.assertEqual(m.weight.grad.data,
torch.cat([m1.weight.grad.data,
m2.weight.grad.data], 0),
atol=dtype2prec_DONTUSE[dtype], rtol=0)
if __name__ == '__main__':
test = TestConv()
    test.test_Conv3d_depthwise_naive_groups_cuda()
|
433942
|
import pickle
import json
import sys
def v2d(names: list):
    """Map the given variable names to their values in the caller's scope."""
    # eval() here would resolve names in *this* module, not the caller's;
    # look them up in the calling frame instead.
    caller = sys._getframe(1)
    return {v: caller.f_locals.get(v, caller.f_globals.get(v)) for v in names}
def save_pickle(file_name: str, obj):
    with open(file_name, 'wb') as f:
        pickle.dump(obj, f)
def load_pickle(file_name: str):
with open(file_name, 'rb') as f:
return pickle.load(f)
def load_json(file: str):
with open(file) as json_file:
d = json.load(json_file)
return d
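A short usage sketch of these helpers; the file name is a placeholder:
if __name__ == '__main__':
    config = {'lr': 0.01, 'epochs': 10}
    save_pickle('config.pkl', config)
    assert load_pickle('config.pkl') == config
    epochs = 10
    print(v2d(['epochs']))  # {'epochs': 10}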
|
433950
|
def extractJunktranslatesWordpressCom(item):
'''
Parser for 'junktranslates.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('intl', 'It’s Not Too Late to Meet Again After Rebirth', 'translated'),
('smwom', 'Second Marriage of a Wealthy Old Man', 'translated'),
('rolm', 'Reborn Out of Love and Murder', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
433952
|
import os
from typing import List, Tuple, Optional
import json
import pathlib
import tarfile
from contextlib import closing
from requests import Response, get
from requests.exceptions import Timeout
from autocorrect import Speller
from sentry_sdk import capture_exception
from rozental_as_a_service.common_types import TypoInfo
from rozental_as_a_service.config import YA_SPELLER_REQUEST_TIMEOUTS, YA_SPELLER_RETRIES_COUNT
from rozental_as_a_service.db_utils import save_ya_speller_results_to_db, get_ya_speller_cache_from_db
PATH = os.path.abspath(os.path.dirname(__file__))
def process_with_vocabulary(
words: List[str],
vocabulary_path: Optional[str],
) -> Tuple[List[str], List[TypoInfo], List[str]]:
if vocabulary_path is None or not os.path.exists(vocabulary_path):
return [], [], words
with open(vocabulary_path, encoding='utf8') as file_handler:
raw_vocabulary = file_handler.readlines()
vocabulary = {w.strip().lower() for w in raw_vocabulary if not w.strip().startswith('#')}
correct_words = {w for w in words if w in vocabulary}
return list(correct_words), [], [w for w in words if w not in correct_words]
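# Illustrative behaviour sketch (hypothetical vocabulary file): if the file
# contains the single line "foo", then
# process_with_vocabulary(['foo', 'bar'], path) returns (['foo'], [], ['bar']).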
class YaSpellerBackend:
def __init__(self, db_path: Optional[str]):
self.db_path = db_path
def __call__(self, words: List[str]) -> Tuple[List[str], List[TypoInfo], List[str]]:
sure_correct_words, incorrect_typos_info, unknown = self._process_with_db_cache(
words,
)
(
sure_correct_words_from_ya,
incorrect_typos_info_from_ya,
unknown,
) = self._process_with_ya_speller(unknown)
return (
sure_correct_words + sure_correct_words_from_ya,
incorrect_typos_info + incorrect_typos_info_from_ya,
unknown,
)
def _process_with_db_cache(
self,
words: List[str],
) -> Tuple[List[str], List[TypoInfo], List[str]]:
if self.db_path is None:
return [], [], words
words_cache = get_ya_speller_cache_from_db(words, self.db_path)
sure_correct_words: List[str] = []
incorrect_typos_info: List[TypoInfo] = []
for word in words:
if word not in words_cache:
continue
cached_value = words_cache[word]
if cached_value is None:
sure_correct_words.append(word)
else:
incorrect_typos_info.append(
{
'original': word,
'possible_options': cached_value,
},
)
known_words = set(
sure_correct_words + [t['original'] for t in incorrect_typos_info],
)
return sure_correct_words, incorrect_typos_info, list(set(words) - known_words)
def _process_with_ya_speller(
self,
words: List[str],
) -> Tuple[List[str], List[TypoInfo], List[str]]:
        if not words:
            return [], [], words
        response = None
        for _ in range(YA_SPELLER_RETRIES_COUNT):
            try:
                response = get(
                    'https://speller.yandex.net/services/spellservice.json/checkTexts',
                    params={'text': words},
                    timeout=YA_SPELLER_REQUEST_TIMEOUTS,
                )
            except RequestException as e:  # requests raises its own timeout classes, not TimeoutError
                capture_exception(e)
            else:
                break
        if response is None:
            # Every retry timed out or failed: report all words as unknown
            # instead of raising NameError on an unbound `response`.
            return [], [], words
        return ([], *_process_ya_speller_response(response, words, self.db_path))
def _process_ya_speller_response(
response: Response,
words: List[str],
db_path: Optional[str],
) -> Tuple[List[TypoInfo], List[str]]:
typos_info: List[TypoInfo] = []
speller_result = response.json()
if speller_result:
for word_info in speller_result:
if word_info and word_info[0]['s']:
typos_info.append({
'original': word_info[0]['word'],
'possible_options': word_info[0]['s'],
})
if db_path is not None:
save_ya_speller_results_to_db(speller_result, words, db_path)
typo_words = {t['original'] for t in typos_info}
return typos_info, [w for w in words if w not in typo_words]
class AutocorrectCheckerBackend:
def __init__(self) -> None:
archive_path = pathlib.Path(PATH, 'data', 'ru.tar.gz')
with closing(tarfile.open(archive_path, 'r:gz')) as tarf, closing(tarf.extractfile('word_count.json')) as file:
nlp_data = json.load(file) # type: ignore
self.checker = Speller('ru', fast=True, nlp_data=nlp_data)
def __call__(self, words: List[str]) -> Tuple[List[str], List[TypoInfo], List[str]]:
incorrect_typos_info: List[TypoInfo] = []
known: List[str] = []
unknown: List[str] = []
for word in words:
if self.checker.existing([word]):
known.append(word)
continue
candidates = [
candidate[1] for candidate in sorted(
self.checker.get_candidates(word), key=lambda item: item[0],
)
]
            if not candidates:
                # The checker produced no suggestions at all; treat the word
                # as unknown rather than indexing into an empty list.
                unknown.append(word)
                continue
            if word == candidates[0]:
                known.append(word)
                continue
incorrect_typos_info.append(
{
'original': word,
'possible_options': candidates,
},
)
return known, incorrect_typos_info, unknown
|
433991
|
from amuse.support.interface import InCodeComponentImplementation
from amuse.test.amusetest import TestWithMPI
from amuse.test import compile_tools
import os
import time
import shlex
from amuse.units import nbody_system
from amuse.units import units
from amuse import datamodel
from amuse.rfi import channel
from amuse.rfi.core import *
from .test_fortran_implementation import codestring, ForTestingInterface
class ForTesting(InCodeComponentImplementation):
def __init__(self, exefile, **options):
InCodeComponentImplementation.__init__(self, ForTestingInterface(exefile, **options), **options)
class TestInterface(TestWithMPI):
@classmethod
def setup_class(cls):
print("building")
cls.check_can_compile_modules()
cls.check_fortran_version()
cls.check_not_in_mpiexec()
cls.exefile=compile_tools.build_fortran_worker(codestring, cls.get_path_to_results(), ForTestingInterface)
print("done")
@classmethod
    def check_fortran_version(cls):
pass
@classmethod
def check_not_in_mpiexec(cls):
"""
The tests will fork another process, if the test run
is itself an mpi process, the tests may fail.
For the hydra process manager the tests will fail.
So skip the tests if we detect hydra
"""
if 'HYDI_CONTROL_FD' in os.environ:
return # can run in modern mpiexec.hydra
if 'HYDRA_CONTROL_FD' in os.environ or 'PMI_FD' in os.environ:
cls.skip('cannot run the socket tests under hydra process manager')
def test1(self):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
int_out, error = instance.echo_int(10)
del instance
self.assertEqual(int_out, 10)
self.assertEqual(error, 0)
def test2(self):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
out, error = instance.echo_double(4.0)
del instance
self.assertEqual(out, 4.0)
self.assertEqual(error, 0)
def test3(self):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
input = [1,2,3,4]
output, errors = instance.echo_int(input)
del instance
self.assertEqual(len(errors),4)
for actual, expected in zip(output, input):
self.assertEqual(actual, expected)
def test4(self):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
input = [1.0,2.1,3.3,4.2]
output, errors = instance.echo_double(input)
del instance
self.assertEqual(len(errors),4)
for actual, expected in zip(output, input):
self.assertEqual(actual, expected)
def test5(self):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
out, error = instance.echo_float(4.0)
del instance
self.assertEqual(out, 4.0)
self.assertEqual(error, 0)
def test6(self):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
out, error = instance.echo_string("abc")
del instance
self.assertEqual(error, 0)
self.assertEqual(out, "abc")
def test7(self):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
out, error = instance.echo_string(["abc","def"])
del instance
self.assertEqual(error[0], 0)
self.assertEqual(error[1], 0)
self.assertEqual(out[0], "abc")
self.assertEqual(out[1], "def")
def test8(self):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
out1, out2, error = instance.echo_strings("abc","def")
del instance
self.assertEqual(error, 0)
self.assertEqual(out1, "Abc")
self.assertEqual(out2, "Bef")
def test9(self):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
str1_out, str2_out, error = instance.echo_strings(["abc", "def"], ["ghi", "jkl"])
del instance
self.assertEqual(error[0], 0)
self.assertEqual(error[1], 0)
self.assertEqual(str1_out[0], "Abc")
self.assertEqual(str1_out[1], "Aef")
self.assertEqual(str2_out[0], "Bhi")
self.assertEqual(str2_out[1], "Bkl")
def test10(self):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
out = instance.return_string("qwerty")
out = instance.return_string("abcdefghi")
instance.stop()
del instance
self.assertEqual(out, "abcdefghi")
def test11(self):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
out, error = instance.hello_string()
del instance
self.assertEqual(out, "hello")
def test12(self):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
out, error = instance.echo_string_fixed_len("abc")
del instance
self.assertEqual(error, 0)
self.assertEqual(out, "abc")
def test13(self):
instance = ForTestingInterface(self.exefile, debugger="none", channel_type="sockets")
(output_ints, error) = instance.echo_array_with_result([4,5,6])
instance.stop()
print(output_ints, error)
self.assertEqual(output_ints[0], 4)
self.assertEqual(output_ints[1], 5)
self.assertEqual(output_ints[2], 6)
self.assertEqual(error[0], -1)
self.assertEqual(error[1], -1)
self.assertEqual(error[2], -1)
def test14(self):
for x in range(4):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
int_out, error = instance.echo_int(10)
instance.stop()
self.assertEqual(int_out, 10)
self.assertEqual(error, 0)
def test15(self):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
(output_ints, error) = instance.echo_inout_array_with_result([4,5,6])
instance.stop()
self.assertEqual(output_ints[0], 14)
self.assertEqual(output_ints[1], 15)
self.assertEqual(output_ints[2], 16)
self.assertEqual(error[0], 11)
self.assertEqual(error[1], 11)
self.assertEqual(error[2], 11)
def test16(self):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
(output1, error1) = instance.echo_logical(True)
(output2, error2) = instance.echo_logical(False)
instance.stop()
self.assertEqual(error1, 0)
self.assertEqual(error2, 0)
self.assertTrue(output1)
self.assertFalse(output2)
def test16b(self):
instance = ForTesting(self.exefile, channel_type="sockets")
output = instance.echo_logical([True, True,False, True, False])
self.assertEqual(output, [True, True, False, True, False])
def test16c(self):
instance = ForTesting(self.exefile, redirection="none")
output = instance.echo_logical2([True, True,False, True, False]*1024)
self.assertEqual(output, [True, True, False, True, False]*1024)
def xtest20(self):
#
# TURNED OFF support for redirection,
# by default output is redirected to /dev/null
# if you need file, use the support from your mpi implementation
#
if os.path.exists("pout.000"):
os.remove("pout.000")
if os.path.exists("perr.000"):
os.remove("perr.000")
x = ForTestingInterface(self.exefile, redirect_stderr_file = 'perr', redirect_stdout_file = 'pout', redirection="file", channel_type="sockets")
x.print_string("abc")
x.print_error_string("exex")
x.stop()
time.sleep(0.2)
self.assertTrue(os.path.exists("pout.000"))
with open("pout.000","r") as f:
content = f.read()
self.assertEqual(content.strip(), "abc")
self.assertTrue(os.path.exists("perr.000"))
with open("perr.000","r") as f:
content = f.read()
self.assertEqual(content.strip(), "exex")
x = ForTestingInterface(self.exefile, redirect_stderr_file = 'perr', redirect_stdout_file = 'pout', redirection="file", channel_type="sockets")
x.print_string("def")
x.print_error_string("exex")
x.stop()
time.sleep(0.2)
self.assertTrue(os.path.exists("pout.000"))
with open("pout.000","r") as f:
content = f.read()
self.assertEqual(content.strip(), "abc\n def")
self.assertTrue(os.path.exists("perr.000"))
with open("perr.000","r") as f:
content = f.read()
self.assertEqual(content.strip(), "exex\n exex")
def test35(self):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
out, error = instance.echo_string(["abc","def"]*100000)
del instance
self.assertEqual(error[0], 0)
self.assertEqual(error[1], 0)
self.assertEqual(out[-2], "abc")
self.assertEqual(out[-1], "def")
def test36(self):
instance = ForTestingInterface(self.exefile, channel_type="sockets")
N=255
out, error = instance.echo_string("a"*N)
del instance
self.assertEqual(error, 0)
self.assertEqual(out, "a"*N)
|
433996
|
import torch.nn as nn
from rpn.utils.eval_utils import binary_accuracy
from rpn.utils.config import dict_to_namedtuple
def compute_bce_loss(logits, labels, weight=None):
loss_fn = nn.BCEWithLogitsLoss(weight=weight, reduction='mean')
loss = loss_fn(logits, labels)
return loss
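# Quick sanity sketch (assumes torch is importable at call time): zero logits
# against all-ones labels give -log(sigmoid(0)) = log(2) ~= 0.6931, e.g.
# compute_bce_loss(torch.zeros(4, 3), torch.ones(4, 3))  # tensor(0.6931)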
def log_binary_accuracy_fpfn(preds, labels, summarizer, global_step, prefix):
acc, fp, fn = binary_accuracy(preds, labels)
for i, (fp, fn) in enumerate(zip(fp, fn)):
summarizer.add_scalar(prefix + 'acc/%i/fp' % i, fp, global_step)
summarizer.add_scalar(prefix + 'acc/%i/fn' % i, fn, global_step)
summarizer.add_scalar(prefix + 'acc', acc, global_step)
def masked_symbolic_state_index(symbolic_state, mask):
masked_state = (symbolic_state > 0.5).long().detach()
masked_state.masked_fill_(mask < 0.5, 2)
return masked_state
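# Example of the ternary encoding (values are illustrative): with
# symbolic_state = [0.9, 0.2] and mask = [1.0, 0.0], the result is [1, 2]:
# entries above 0.5 map to 1, others to 0, and masked-out entries become 2.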
class Net(nn.Module):
def __init__(self, **kwargs):
super(Net, self).__init__()
c = dict_to_namedtuple(kwargs, name='config')
self._c = kwargs
self.c = c
self.policy_mode = False
self.env = None
self.verbose = kwargs.get('verbose', False)
def forward_batch(self, batch):
raise NotImplementedError
@property
def config(self):
return self._c
@staticmethod
def log_losses(losses, summarizer, global_step, prefix):
for name, loss in losses.items():
summarizer.add_scalar(prefix + name, loss, global_step=global_step)
@staticmethod
def log_outputs(outputs, batch, summarizer, global_step, prefix):
raise NotImplementedError
def inspect(self, batch, env=None):
return None
def policy(self):
self.policy_mode = True
self.eval()
def train(self, mode=True):
super(Net, self).train(mode)
if mode:
self.policy_mode = False
def main():
print()
if __name__ == '__main__':
main()
|
434006
|
from notion.store import RecordStore
def no_save_cache(self, attribute):
    return None
def no_load_cache(self, attributes=("_values", "_role", "_collection_row_ids")):
    return None
# Prevents the cache from being written and loaded
# Provides a massive speed boost for short lived commands
RecordStore._save_cache = no_save_cache
RecordStore._load_cache = no_load_cache
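# Usage sketch (assumes the notion-py client API): import this module before
# constructing a NotionClient so every new RecordStore picks up the no-op
# cache methods.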
|
434013
|
from armulator.armv6.opcodes.abstract_opcode import AbstractOpcode
from armulator.armv6.bits_ops import unsigned_sat_q, zero_extend
class Usat16(AbstractOpcode):
def __init__(self, saturate_to, d, n):
super(Usat16, self).__init__()
self.saturate_to = saturate_to
self.d = d
self.n = n
def execute(self, processor):
if processor.condition_passed():
result1, sat1 = unsigned_sat_q(processor.registers.get(self.n)[16:32].int, self.saturate_to)
result2, sat2 = unsigned_sat_q(processor.registers.get(self.n)[0:16].int, self.saturate_to)
processor.registers.set(self.d, zero_extend(result2, 16) + zero_extend(result1, 16))
if sat1 or sat2:
processor.registers.cpsr.set_q(True)
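# Semantics sketch: USAT16 clamps each signed 16-bit half of Rn to the
# unsigned range [0, 2**saturate_to - 1]; e.g. with saturate_to=8, the halves
# 0x0123 and 0xFFFF (-1 as signed) saturate to 0xFF and 0x00, and Q is set.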
|
434075
|
def test_passed():
assert True
def test_read_params(config, param):
assert config.get(param) == 2
def test_do_nothing():
pass
|
434098
|
import argparse
import torch
from torch import nn
import numpy as np
from tqdm import tqdm
import pdb
import os
import csv
from glob import glob
import math
from torchvision import transforms
from torch.nn import functional as F
from matplotlib import pyplot as plt
import sys
sys.path.append('../retrieval_model')
from utils_retrieval import compute_statistics
import train_retrieval
sys.path.append('../')
from common import requires_grad
if __name__ == '__main__':
from utils_metrics import load_args, normalize, resize
args = load_args()
# assertations
assert 'ckpt_dir' in args.__dict__
assert 'retrieval_model' in args.__dict__
assert 'device' in args.__dict__
assert 'batch_size' in args.__dict__
sys.path.append('../cookgan/')
from generate_batch import BatchGenerator
device = args.device
_, _, txt_encoder, img_encoder, _ = train_retrieval.load_model(args.retrieval_model, device)
requires_grad(txt_encoder, False)
requires_grad(img_encoder, False)
txt_encoder = txt_encoder.eval()
img_encoder = img_encoder.eval()
filename = os.path.join(args.ckpt_dir, 'medR.csv')
# load values that are already computed
computed = []
if os.path.exists(filename):
with open(filename, 'r') as f:
reader = csv.reader(f, delimiter=',')
for row in reader:
computed += [row[0]]
# prepare to write
f = open(filename, mode='a')
writer = csv.writer(f, delimiter=',')
# find checkpoints
ckpt_paths = glob(os.path.join(args.ckpt_dir, '*.ckpt')) + glob(os.path.join(args.ckpt_dir, '*.pt'))+glob(os.path.join(args.ckpt_dir, '*.pth'))
ckpt_paths = sorted(ckpt_paths)
print('records:', ckpt_paths)
print('computed:', computed)
for ckpt_path in ckpt_paths:
print()
print(f'working on {ckpt_path}')
iteration = os.path.basename(ckpt_path).split('.')[0]
if iteration in computed:
print('already computed')
continue
print('==> computing MedR')
args.ckpt_path = ckpt_path
batch_generator = BatchGenerator(args)
txt_outputs = []
img_outputs = []
with torch.no_grad():
for _ in tqdm(range(1000//args.batch_size+1)):
# generate
txt, fake_img = batch_generator.generate_MedR()
# fake_img: normalize
fake_img = normalize(fake_img)
# fake_img: resize
fake_img = resize(fake_img, size=224)
# retrieve
txt_output, _ = txt_encoder(*txt)
img_output = img_encoder(fake_img)
txt_outputs.append(txt_output.detach().cpu())
img_outputs.append(img_output.detach().cpu())
txt_outputs = torch.cat(txt_outputs, dim=0).numpy()
img_outputs = torch.cat(img_outputs, dim=0).numpy()
retrieved_range = min(txt_outputs.shape[0], 1000)
medR, recalls = compute_statistics(
txt_outputs, img_outputs, retrieved_type='image',
retrieved_range=retrieved_range, verbose=True)
print(f'{iteration}, MedR={medR.mean()}')
        writer.writerow([iteration, medR.mean()])
f.close()
medRs = []
with open(filename, 'r') as f:
reader = csv.reader(f, delimiter=',')
for row in reader:
medR = float(row[1])
medRs += [medR]
fig = plt.figure(figsize=(6,6))
plt.plot(medRs)
plt.savefig(os.path.join(args.ckpt_dir, 'medR.png'))
|
434115
|
from abc import ABCMeta, abstractmethod
import json
from typing import Any, IO, Optional, Sequence, cast
from pytest_wdl.core import DataDirs, DataManager, DataResolver
from pytest_wdl.utils import ensure_path
from py.path import local
import pytest
from _pytest.fixtures import FixtureRequest
try:
from ruamel import yaml
except ImportError:
yaml = None
def pytest_collection(session: pytest.Session):
"""
Prints an empty line to make the report look slightly better.
"""
print()
def pytest_collect_file(path: local, parent) -> Optional[pytest.File]:
if path.basename.startswith("test") and not path.basename.startswith("test_data."):
if path.ext == ".json":
return JsonWdlTestsModule(path, parent)
elif yaml and path.ext == ".yaml":
return YamlWdlTestsModule(path, parent)
# TODO: the Node API will be changing at some point
# https://docs.pytest.org/en/latest/example/nonpython.html#a-basic-example-for-specifying-tests-in-yaml-files
class WdlTestsModule(pytest.Module, metaclass=ABCMeta):
@abstractmethod
def _load(self, fp: IO) -> dict:
pass
def collect(self):
with self.fspath.open() as inp:
d = self._load(inp)
if "tests" not in d:
raise ValueError(f"Tests file {self.fspath} must contain a 'tests' key")
data = d.get("data")
for spec in d["tests"]:
if "name" not in spec:
raise ValueError("Test case missing 'name' key")
yield TestItem(self, data=data, **spec)
class YamlWdlTestsModule(WdlTestsModule):
def _load(self, fp: IO) -> dict:
yaml_loader = yaml.YAML(typ="safe")
yaml_loader.default_flow_style = False
return yaml_loader.load(fp)
class JsonWdlTestsModule(WdlTestsModule):
def _load(self, fp: IO) -> dict:
return json.load(fp)
class TestItem(pytest.Item):
def __init__(
self,
parent,
data: Optional[dict] = None,
name: Optional[str] = None,
wdl: Optional[str] = None,
inputs: Optional[dict] = None,
expected: Optional[dict] = None,
tags: Optional[Sequence] = None,
**kwargs
):
if not all((name, wdl)):
raise ValueError("Every test must have 'name' and 'wdl' keys")
super().__init__(name, parent)
self._wdl = wdl
self._inputs = inputs
self._expected = expected
self._tags = tags # TODO: add tags as marks
self._workflow_runner_kwargs = kwargs
self._data = data
self._fixture_request = None
def setup(self):
"""
This method is black magic - uses internal pytest APIs to create a
FixtureRequest that can be used to access fixtures in `runtest()`.
Copied from
https://github.com/pytest-dev/pytest/blob/master/src/_pytest/doctest.py.
"""
def func():
pass
self.funcargs = {}
fm = self.session._fixturemanager
self._fixtureinfo = fm.getfixtureinfo(
node=self, func=func, cls=None, funcargs=False
)
self._fixture_request = FixtureRequest(self)
self._fixture_request._fillfixtures()
def runtest(self):
# Get/create DataManager
if self._data:
config = self._fixture_request.getfixturevalue("user_config")
data_resolver = DataResolver(self._data, config)
data_dirs = DataDirs(
ensure_path(self._fixture_request.fspath.dirpath(), canonicalize=True),
function=self.name,
module=None, # TODO: support a top-level key for module name
cls=None, # TODO: support test groupings
)
workflow_data = DataManager(data_resolver, data_dirs)
else:
workflow_data = self._fixture_request.getfixturevalue("workflow_data")
# Build the arguments to workflow_runner
workflow_runner_kwargs = self._workflow_runner_kwargs
# Resolve test data requests in the inputs and outputs
if self._inputs:
workflow_runner_kwargs["inputs"] = _resolve_test_data(
self._inputs, workflow_data
)
if self._expected:
workflow_runner_kwargs["expected"] = _resolve_test_data(
self._expected, workflow_data
)
# Run the test
workflow_runner = self._fixture_request.getfixturevalue("workflow_runner")
return workflow_runner(self._wdl, **workflow_runner_kwargs)
def _resolve_test_data(d: dict, workflow_data: DataManager) -> dict:
def _resolve(val: Any):
if isinstance(val, str):
try:
# See if it's a test data entry
return workflow_data[cast(str, val)]
except FileNotFoundError:
# It's a string literal
return val
elif isinstance(val, dict):
            return dict((key, _resolve(value)) for key, value in cast(dict, val).items())
elif isinstance(val, Sequence):
return [_resolve(value) for value in cast(Sequence, val)]
else:
return val
return _resolve(d)
|
434174
|
import datetime
from sqlalchemy import Enum
from sqlalchemy import Column
from sqlalchemy import String
from sqlalchemy import Integer
from sqlalchemy import Boolean
from sqlalchemy import DateTime
from sqlalchemy import ForeignKey
from sqlalchemy.orm import backref
from sqlalchemy.orm import relationship
from sqlalchemy import sql
from .base import Model
__all__ = ["Team", "UserTeam"]
class Team(Model):
"""Team table.
Parameters
----------
name : str
The name of the team.
admin : :class:`ramp_database.model.User`
The admin user of the team.
is_individual : bool
This team is an individual team.
Attributes
----------
id : int
The ID of the table row.
name : str
The name of the team.
admin_id : int
The ID of the admin user.
admin : :class:`ramp_database.model.User`
The admin user instance.
team_events : :class:`ramp_database.model.EventTeam`
        A back-reference to the events to which the team is enrolled.
"""
__tablename__ = "teams"
id = Column(Integer, primary_key=True)
name = Column(String(20), nullable=False, unique=True)
admin_id = Column(Integer, ForeignKey("users.id"))
admin = relationship(
"User", backref=backref("admined_teams", cascade="all, delete")
)
is_individual = Column(Boolean, default=True, nullable=False)
creation_timestamp = Column(DateTime, nullable=False)
def __init__(self, name, admin, is_individual=True):
self.name = name
self.admin = admin
self.creation_timestamp = datetime.datetime.utcnow()
self.is_individual = is_individual
def __str__(self):
return f"Team({self.name})"
def __repr__(self):
return (
f"Team(name={self.name}, admin_name={self.admin.name}, "
f"is_individual={self.is_individual})"
)
class UserTeam(Model):
"""User to team many-to-many association table.
Parameters
----------
user_id : int
The ID of the user.
team_id : int
The ID of the team.
status: str
The relationship status. One of "asked", "accepted".
Attributes
----------
id : int
The ID of the table row.
update_timestamp : datetime
Last updated timestamp.
"""
__tablename__ = "user_teams"
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey("users.id"))
user = relationship(
"User", backref=backref("user_user_team", cascade="all, delete")
)
team_id = Column(Integer, ForeignKey("teams.id"))
team = relationship(
"Team", backref=backref("team_user_team", cascade="all, delete")
)
status = Column(Enum("asked", "accepted", name="status"), default="asked")
update_timestamp = Column(
DateTime, onupdate=sql.func.now(), server_default=sql.func.now()
)
def __init__(self, user_id, team_id, status="asked"):
self.user_id = user_id
self.team_id = team_id
self.status = status
def __repr__(self):
return (
f"UserTeam(user_id={self.user_id}, team_id={self.team_id}, "
f"status='{self.status}')"
)
|
434186
|
import torch
from moment_detr.model import build_transformer, build_position_encoding, MomentDETR
def build_inference_model(ckpt_path, **kwargs):
ckpt = torch.load(ckpt_path, map_location="cpu")
args = ckpt["opt"]
if len(kwargs) > 0: # used to overwrite default args
args.update(kwargs)
transformer = build_transformer(args)
position_embedding, txt_position_embedding = build_position_encoding(args)
model = MomentDETR(
transformer,
position_embedding,
txt_position_embedding,
txt_dim=args.t_feat_dim,
vid_dim=args.v_feat_dim,
num_queries=args.num_queries,
input_dropout=args.input_dropout,
aux_loss=args.aux_loss,
contrastive_align_loss=args.contrastive_align_loss,
contrastive_hdim=args.contrastive_hdim,
span_loss_type=args.span_loss_type,
use_txt_pos=args.use_txt_pos,
n_input_proj=args.n_input_proj,
)
model.load_state_dict(ckpt["model"])
return model
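# Hypothetical usage sketch ('moment_detr.ckpt' is a placeholder path):
# model = build_inference_model('moment_detr.ckpt')
# model.eval()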
|
434201
|
from collections.abc import Mapping, Iterable
from typing import Iterable as IterableType
from jsons._compatibility_impl import get_naked_class
from jsons.deserializers.default_list import default_list_deserializer
def default_iterable_deserializer(
obj: list,
cls: type,
**kwargs) -> Iterable:
"""
Deserialize a (JSON) list into an ``Iterable`` by deserializing all items
of that list. The given obj is assumed to be homogeneous; if the list has a
generic type (e.g. Set[datetime]) then it is assumed that all elements can
be deserialized to that type.
:param obj: The list that needs deserializing to an ``Iterable``.
:param cls: The type, optionally with a generic (e.g. Deque[str]).
:param kwargs: Any keyword arguments.
:return: A deserialized ``Iterable`` (e.g. ``set``) instance.
"""
cls_ = Mapping
if hasattr(cls, '__args__'):
cls_ = IterableType[cls.__args__]
list_ = default_list_deserializer(obj, cls_, **kwargs)
result = list_
naked_cls = get_naked_class(cls)
if not isinstance(result, naked_cls):
result = naked_cls(list_)
return result
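# Illustrative behaviour (hedged; exact dispatch depends on the registered
# deserializers): deserializing [1, 2, 2] with cls=Set[int] first builds the
# list [1, 2, 2], then wraps it in the naked class, giving {1, 2}.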
|
434247
|
import pandas as pd
import os
from .. import constants as c
def get_short_imagename(imagename):
"""Return image-specific suffix of imagename.
This excludes a possible experiment-specific prefix,
such as 0001_... for trial #1.
"""
splits = imagename.split("_")
if len(splits) > 1:
name = splits[-2:]
if name[0].startswith("n0"):
# ImageNet image: keep n0... prefix
name = name[0] + "_" + name[1]
else:
name = name[1]
else:
name = splits[0]
return name
def read_data(path):
"""Read experimental data from csv file."""
assert os.path.exists(path)
df = pd.read_csv(path)
return df
def read_all_csv_files_from_directory(dir_path):
assert os.path.exists(dir_path)
assert os.path.isdir(dir_path)
df = pd.DataFrame()
for f in sorted(os.listdir(dir_path)):
if f.endswith(".csv"):
df2 = read_data(os.path.join(dir_path, f))
df2.columns = [c.lower() for c in df2.columns]
df = pd.concat([df, df2])
return df
def get_experimental_data(dataset, print_name=False):
"""Read all available data for an experiment."""
if print_name:
print(dataset.name)
experiment_path = os.path.join(c.RAW_DATA_DIR, dataset.name)
assert os.path.exists(experiment_path), experiment_path + " does not exist."
df = read_all_csv_files_from_directory(experiment_path)
df.condition = df.condition.astype(str)
for experiment in dataset.experiments:
if not set(experiment.data_conditions).issubset(set(df.condition.unique())):
            print(set(experiment.data_conditions))
print(set(df.condition.unique()))
raise ValueError("Condition mismatch")
df = df.copy()
df["image_id"] = df["imagename"].apply(get_short_imagename)
return df
def crop_pdfs_in_directory(dir_path, suppress_output=True):
"""Crop all PDF plots in a directory (removing white borders),
Args:
dir_path: path to directory
"""
assert os.path.exists(dir_path)
assert os.path.isdir(dir_path)
x = ""
if suppress_output:
x = " > /dev/null"
for file in sorted(os.listdir(dir_path)):
if file.endswith(".pdf"):
fullpath = os.path.join(dir_path, file)
os.system("pdfcrop " + fullpath + " " + fullpath + x)
##################################################################
# QUICK TESTS
##################################################################
assert get_short_imagename("0000_cl_s01_bw_boat_40_n02951358_5952.JPEG") == "n02951358_5952.JPEG"
assert get_short_imagename("airplane1-bicycle2.png") == "airplane1-bicycle2.png"
|
434289
|
import pandas as pd
import os
from helpers import *
def main():
sim_threshold = 100
prefix = '/home/cragkhit/data/cloverflow/github_max_to_10/'
dir = '../results/results_for_thesis/'
clones = pd.read_csv(
'../results/results_for_thesis/github_license_qr_17-08-18_16-46-780_for_github_query.csv',
sep=',',
header=None)
print('line total:', len(clones))
count = 0
count_license_compatible = 0
compat_license_map = dict()
incompat_license_map = dict()
project_map = dict()
for index, row in clones.iterrows():
for i in range(1, len(row)):
if str(row[i]) != 'nan':
query = row[0].split('#')
result = row[i].split('#')
try:
writefile(dir + 'results_github_clones_' + str(sim_threshold) + '.csv',
str(row[0]).replace('#', ',') + ',' +
str(row[i]).replace('#', ',').replace(prefix, '') + '\n', 'a', False)
# count the number of clone pairs
count += 1
# create a frequency map of project names for compatible-license clones
# and incompatible-license clones
project_name = row[i].split("#")[0].replace(prefix, '').split('/')
pname = project_name[0].strip() + '/' + project_name[1].strip()
if pname not in project_map:
project_map[pname] = 1
else:
project_map[pname] += 1
if query[3] == result[3]:
count_license_compatible += 1
if query[3] not in compat_license_map:
compat_license_map[query[3]] = 1
else:
compat_license_map[query[3]] += 1
writefile(dir + 'results_github_clones_' + str(sim_threshold) + '_compatible.csv',
str(row[0]).replace('#', ',') + ',' +
str(row[i]).replace('#', ',') + '\n', 'a', False)
else:
license_concat = query[3] + '-' + result[3]
if license_concat not in incompat_license_map:
incompat_license_map[license_concat] = 1
else:
incompat_license_map[license_concat] += 1
writefile(dir + 'results_github_clones_' + str(sim_threshold) + '_incompatible.csv',
str(row[0]).replace('#', ',') + ',' +
str(row[i]).replace('#', ',') + '\n', 'a', False)
except IndexError:
out = ''
for s in row:
out += str(s) + ','
writefile(dir + 'results_github_skipped' + str(sim_threshold) + '.csv', out + '\n', 'a', False)
print('total clones with ' + str(sim_threshold) + '% similarity: ' + str(count))
print('found clones in ' + str(len(project_map.keys())) + ' GitHub projects:')
sorted_project_map = [(k, project_map[k]) for k in sorted(project_map, key=project_map.get, reverse=True)]
print('-' * 50)
print('top 10 projects:')
pcount = 0
for k, v in sorted_project_map:
if pcount < 10:
print(k, v)
pcount += 1
print('-' * 50)
print('clones with compatible license: ' + str(count_license_compatible))
print('clones with incompatible license: ' + str(count - count_license_compatible))
print('-' * 50)
print('compatible license:')
for k, v in compat_license_map.items():
print(k, v)
print('-' * 50)
print('incompatible license:')
for k, v in incompat_license_map.items():
print(k, v)
print('-' * 50)
if __name__ == '__main__':
    main()
|
434322
|
import argparse
import os, sys
import numpy as np
from scipy.ndimage import rotate
from tqdm import tqdm
def rotate_gt(args, categories_dict, scannet_shape_ids, angles):
for category in categories_dict:
cat_path = categories_dict[category] + '_geo'
cat_save_path = os.path.join(args.data_dir, category + '_geo_8rot')
os.makedirs(cat_save_path, exist_ok=True)
for file in tqdm(os.listdir(cat_path)):
partnet_id = file.split('.')[0]
if file.endswith('.npy') and partnet_id in scannet_shape_ids:
shape = np.load(os.path.join(cat_path, file))
num_parts = len(shape)
for k, angle in enumerate(angles):
rotated_parts = []
for i in range(num_parts):
part = shape[i, 0, ...]
rotated_part = rotate(part, angle, axes=[0, 2], reshape=False)
rotated_parts += [rotated_part[None, ...]]
rotated_parts = np.stack(rotated_parts)
np.save(os.path.join(cat_save_path, f'{partnet_id}_{k}.npy'), rotated_parts)
full_shape = np.load(os.path.join(cat_path, partnet_id + '_full.npy'))[0]
for k, angle in enumerate(angles):
rotated_shape = rotate(full_shape, angle, axes=[0, 2], reshape=False)[None, ...]
np.save(os.path.join(cat_save_path, f'{partnet_id}_full_{k}.npy'), rotated_shape)
def rotate_crops(args, categories_dict, scannet_shape_ids, angles, scannet_train, scannet_val):
suffix_data = '_scannet_geo'
for category in categories_dict:
cat_path = categories_dict[category] + suffix_data
cat_save_path = os.path.join(args.data_dir, category + suffix_data + '_8rot')
os.makedirs(cat_save_path, exist_ok=True)
for file in tqdm(os.listdir(cat_path)):
if file.endswith('scan.npy') or file.endswith('labels.npy') \
or file.endswith('min_1.npy') or file.endswith('max_1.npy') or file.endswith('max_2.npy') \
or file.endswith('iou.npy'):
continue
partnet_id = file.split('_')[0]
if file.endswith('.npy') and partnet_id in scannet_shape_ids:
scannet_id = file.split('_')[1] + '_' + file.split('_')[2]
filename = file.split('.')[0]
shape = np.load(os.path.join(cat_path, file))
for k, angle in enumerate(angles):
rotated_shape = rotate(shape, angle, axes=[0, 2], reshape=False)
np.save(os.path.join(cat_save_path, f'{filename}_{k}.npy'), rotated_shape)
for category in categories_dict:
train_ids = []
val_ids = []
cat_path = categories_dict[category] + suffix_data + '_8rot'
for file in tqdm(os.listdir(cat_path)):
partnet_id = file.split('_')[0]
if file.endswith('.npy') and partnet_id in scannet_shape_ids:
scannet_id = file.split('_')[1] + '_' + file.split('_')[2]
if scannet_id in scannet_train:
train_ids += [file.split('.')[0]]
if scannet_id in scannet_val:
val_ids += [file.split('.')[0]]
with open(os.path.join(cat_path, 'train.txt'), 'w') as f:
for item in train_ids:
f.write("%s\n" % item)
with open(os.path.join(cat_path, 'val.txt'), 'w') as f:
for item in val_ids:
f.write("%s\n" % item)
with open(os.path.join(cat_path, 'full.txt'), 'w') as f:
for item in train_ids + val_ids:
f.write("%s\n" % item)
def rotate_priors(args, angles):
priors_save_path = args.all_priors_dir + '_8rot'
os.makedirs(priors_save_path, exist_ok=True)
for prior_path in os.listdir(args.all_priors_dir):
prior_name = prior_path.split('.')[0]
priors = np.load(os.path.join(args.all_priors_dir, prior_path))
num_priors = len(priors)
for k, angle in enumerate(angles):
rotated_priors = []
for i in range(num_priors):
prior = priors[i]
rotated_prior = rotate(prior, angle, axes=[0, 2], reshape=False)
rotated_priors += [rotated_prior]
rotated_priors = np.stack(rotated_priors)
np.save(os.path.join(priors_save_path, f'{prior_name}_{k}.npy'), rotated_priors)
if __name__ == '__main__':
# params
parser = argparse.ArgumentParser()
# data params
parser.add_argument('--save_dir', required=True, help='path to store scan data specs')
parser.add_argument('--data_dir', required=True, help='path to directory with processed trees and scan crops')
parser.add_argument('--all_data_dir', required=True, help='path to directory with scan data specs')
parser.add_argument('--all_priors_dir', required=True, help='path to directory with gathered priors data')
parser.add_argument('--scannet_splits_dir', required=True, help='path to directory with ScanNet splits')
args = parser.parse_args()
categories_dict = {}
categories_dict['chair'] = os.path.join(args.data_dir, 'chair')
categories_dict['table'] = os.path.join(args.data_dir, 'table')
categories_dict['storagefurniture'] = os.path.join(args.data_dir, 'storagefurniture')
categories_dict['bed'] = os.path.join(args.data_dir, 'bed')
categories_dict['trashcan'] = os.path.join(args.data_dir, 'trashcan')
scannet_shape_ids = []
for split in ['train', 'val']:
with open(os.path.join(args.all_data_dir, split + '.txt'), 'r') as fin:
lines = fin.readlines()
lines = [x.split('_')[0] for x in lines]
scannet_shape_ids += lines
scannet_shape_ids = list(set(scannet_shape_ids))
angles = [45 * i for i in range(8)]
scannet_train = []
with open(os.path.join(args.scannet_splits_dir, 'scannetv2_train.txt'), 'r') as fin:
lines = fin.readlines()
scannet_train = [x[:-1] for x in lines]
scannet_val = []
with open(os.path.join(args.scannet_splits_dir, 'scannetv2_val.txt'), 'r') as fin:
lines = fin.readlines()
scannet_val = [x[:-1] for x in lines]
scannet_test = []
with open(os.path.join(args.scannet_splits_dir, 'scannetv2_test.txt'), 'r') as fin:
lines = fin.readlines()
scannet_test = [x[:-1] for x in lines]
# rotate voxelized GT trees
rotate_gt(args, categories_dict, scannet_shape_ids, angles)
# rotate ScanNet crops
rotate_crops(args, categories_dict, scannet_shape_ids, angles, scannet_train, scannet_val)
# rotate priors from args.all_priors_dir directory
    rotate_priors(args, angles)
|
434403
|
import io
import re
from itertools import chain
import fbuild
import fbuild.builders
import fbuild.builders.c
import fbuild.builders.platform
import fbuild.db
import fbuild.record
from fbuild.path import Path
from fbuild.temp import tempfile
# ------------------------------------------------------------------------------
class Ar(fbuild.db.PersistentObject):
def __init__(self, ctx, exe='ar', *,
platform=None,
prefix=None,
suffix=None,
flags=('-rcs',),
libpaths=(),
libs=(),
external_libs=(),
ranlib='ranlib',
ranlib_flags=()):
super().__init__(ctx)
self.exe = fbuild.builders.find_program(ctx, [exe])
try:
self.ranlib = fbuild.builders.find_program(ctx, [ranlib])
except fbuild.ConfigFailed:
self.ranlib = None
self.prefix = prefix or 'lib'
self.suffix = suffix or '.a'
self.libpaths = tuple(libpaths)
self.libs = tuple(libs)
self.external_libs = tuple(external_libs)
self.flags = tuple(flags)
self.ranlib_flags = tuple(ranlib_flags)
@fbuild.db.cachemethod
def __call__(self, dst, srcs:fbuild.db.SRCS, *,
libs:fbuild.db.SRCS=(),
ldlibs=(),
external_libs=(),
flags=(),
ranlib_flags=(),
prefix=None,
suffix=None,
buildroot=None,
**kwargs) -> fbuild.db.DST:
buildroot = buildroot or self.ctx.buildroot
#libs = set(libs)
#libs.update(self.libs)
#libs = sorted(libs)
#assert srcs or libs, 'no sources passed into ar'
assert srcs, 'no sources passed into ar'
prefix = prefix or self.prefix
suffix = suffix or self.suffix
dst = Path(dst).addroot(buildroot)
dst = dst.parent / prefix + dst.name + suffix
dst.parent.makedirs()
srcs = list(Path.globall(srcs))
cmd = [self.exe]
cmd.extend(self.flags)
cmd.extend(flags)
cmd.append(dst)
cmd.extend(srcs)
#cmd.extend(libs)
#cmd.extend(self.external_libs)
#cmd.extend(external_libs)
self.ctx.execute(cmd,
msg1=str(self),
msg2='%s -> %s' % (' '.join(srcs), dst),
color='link',
**kwargs)
if self.ranlib is not None:
cmd = [self.ranlib]
cmd.extend(self.ranlib_flags)
cmd.extend(ranlib_flags)
cmd.append(dst)
self.ctx.execute(cmd,
msg1=self.ranlib.name,
msg2=dst,
color='link',
**kwargs)
return dst
def __str__(self):
return str(self.exe.name)
# ------------------------------------------------------------------------------
class Gcc(fbuild.db.PersistentObject):
def __init__(self, ctx, exe, *,
src_suffix,
pre_flags=(),
flags=(),
includes=(),
macros=(),
warnings=(),
libpaths=(),
libs=(),
ldlibs=(),
external_libs=(),
debug=None,
profile=None,
optimize=None,
debug_flags=('-g',),
profile_flags=('-pg',),
optimize_flags=('-O2',),
arch=None,
machine_flags=(),
requires_version=None,
requires_at_least_version=None,
requires_at_most_version=None):
super().__init__(ctx)
self.exe = exe
self.src_suffix = src_suffix
self.pre_flags = tuple(pre_flags)
self.flags = tuple(flags)
self.includes = tuple(includes)
self.macros = tuple(macros)
self.warnings = tuple(warnings)
self.libpaths = tuple(libpaths)
self.libs = tuple(libs)
self.ldlibs = tuple(ldlibs)
self.external_libs = tuple(external_libs)
self.debug = debug
self.profile = profile
self.optimize = optimize
self.debug_flags = tuple(debug_flags)
self.profile_flags = tuple(profile_flags)
self.optimize_flags = tuple(optimize_flags)
self.arch = arch
self.machine_flags = tuple(machine_flags)
if not self.check_flags(flags):
raise fbuild.ConfigFailed('%s failed to compile an exe' % self)
if debug and debug_flags and not self.check_flags(debug_flags):
raise fbuild.ConfigFailed('%s failed to compile an exe' % self)
if profile and profile_flags and not self.check_flags(profile_flags):
raise fbuild.ConfigFailed('%s failed to compile an exe' % self)
if optimize and optimize_flags and not self.check_flags(optimize_flags):
raise fbuild.ConfigFailed('%s failed to compile an exe' % self)
# Make sure we've got a valid version.
fbuild.builders.check_version(ctx, self, self.version,
requires_version=requires_version,
requires_at_least_version=requires_at_least_version,
requires_at_most_version=requires_at_most_version)
def __call__(self, srcs, dst=None, *,
pre_flags=(),
flags=(),
includes=(),
macros=(),
warnings=(),
libpaths=(),
libs=(),
ldlibs=(),
external_libs=(),
debug=None,
profile=None,
optimize=None,
arch=None,
machine_flags=(),
include_source_dirs=True,
**kwargs):
srcs = [Path(src) for src in srcs]
# Make sure we don't repeat includes
new_includes = []
for include in chain(self.includes, includes,
(s.parent for s in srcs) if include_source_dirs\
else []):
if include not in new_includes:
new_includes.append(include)
includes = new_includes
# Make sure we don't repeat flags
new_flags = []
for flag in chain(self.flags, flags):
if flag not in new_flags:
new_flags.append(flag)
flags = new_flags
macros = set(macros)
macros.update(self.macros)
warnings = set(warnings)
warnings.update(self.warnings)
machine_flags = set(machine_flags)
machine_flags.update(self.machine_flags)
# Make sure we don't repeat library paths
new_libpaths = []
for libpath in chain(self.libpaths, libpaths):
if libpath not in new_libpaths:
new_libpaths.append(libpath)
libpaths = new_libpaths
# Make sure we don't repeat external library paths
new_external_libs = []
for lib in chain(self.external_libs, external_libs):
if lib not in new_external_libs:
new_external_libs.append(lib)
external_libs = new_external_libs
        # Since the libs could be derived from fbuild.builders.c.Library, we need
        # to extract the extra libs and flags that they need. Linux needs the
        # libraries listed in a particular order: a library must appear to the
        # left of the libraries it depends on so the linker can resolve symbols.
new_libs = []
def f(lib):
if lib in new_libs:
return
if isinstance(lib, fbuild.builders.c.Library):
for libpath in lib.libpaths:
if libpath not in libpaths:
libpaths.append(libpath)
for l in lib.external_libs:
if l not in external_libs:
external_libs.append(l)
# In order to make linux happy, we'll recursively walk the
# dependencies first, then add the library.
for l in lib.libs:
f(l)
parent, lib = Path(lib).split()
if parent not in libpaths:
libpaths.append(parent)
lib = lib.name[len('lib'):]
lib = lib.rsplit('.', 1)[0]
if lib not in new_libs:
new_libs.append(lib)
for lib in chain(self.libs, libs):
f(lib)
# Finally, we need to reverse the list so it's in the proper order.
new_libs.reverse()
libs = new_libs
# ----------------------------------------------------------------------
cmd = [self.exe]
cmd.extend(self.pre_flags)
cmd.extend(pre_flags)
if (debug is None and self.debug) or debug:
cmd.extend(self.debug_flags)
if (profile is None and self.profile) or profile:
cmd.extend(self.profile_flags)
if (optimize is None and self.optimize) or optimize:
cmd.extend(self.optimize_flags)
arch = (arch is None and self.arch) or arch
if arch:
cmd.extend(('-arch', arch))
# make sure that the path is converted into the native path format
cmd.extend('-I' + Path(i) for i in sorted(includes) if i)
cmd.extend('-D' + d for d in sorted(macros))
cmd.extend('-W' + w for w in sorted(warnings))
cmd.extend('-L' + Path(p) for p in sorted(libpaths) if p)
cmd.extend('-m' + m for m in sorted(machine_flags) if m)
if dst is not None:
cmd.extend(('-o', dst))
msg2 = '%s -> %s' % (' '.join(chain(srcs, libs)), dst)
else:
msg2 = ' '.join(srcs)
cmd.extend(flags)
cmd.extend(srcs)
# Avoid passing libraries if just compiling.
if '-c' not in cmd:
# Libraries must come last on linux in order to find symbols.
cmd.extend('-l' + l for l in libs)
cmd.extend('-l' + l for l in external_libs)
# Add ldlibs.
cmd.extend(self.ldlibs+tuple(ldlibs))
return self.ctx.execute(cmd, msg2=msg2, **kwargs)
def version(self):
"""Return the version of the gcc executable."""
stdout, stderr = self.ctx.execute((self.exe, '--version'), quieter=1)
return stdout.decode().split('\n')[0].split(' ')[2]
def check_flags(self, flags):
if flags:
self.ctx.logger.check('checking %s with %s' %
(self, ' '.join(flags)))
else:
self.ctx.logger.check('checking %s' % self)
code = 'int main(int argc, char** argv){return 0;}'
with tempfile(code, suffix=self.src_suffix) as src:
try:
self([src], flags=flags, quieter=1, cwd=src.parent)
except fbuild.ExecutionError:
self.ctx.logger.failed()
return False
self.ctx.logger.passed()
return True
def __str__(self):
return str(self.exe.name)
def make_cc(ctx, exe=None, default_exes=['gcc', 'cc'], **kwargs):
return Gcc(ctx,
fbuild.builders.find_program(ctx, [exe] if exe else default_exes),
**kwargs)
# ------------------------------------------------------------------------------
class Compiler(fbuild.db.PersistentObject):
def __init__(self, ctx, cc, flags, *, suffix):
super().__init__(ctx)
self.cc = cc
self.flags = tuple(flags)
self.suffix = suffix
if flags and not cc.check_flags(flags):
raise fbuild.ConfigFailed('%s does not support %s flags' %
(cc, flags))
def __call__(self, src, dst=None, *,
suffix=None,
buildroot=None,
**kwargs):
buildroot = buildroot or self.ctx.buildroot
src = Path(src)
suffix = suffix or self.suffix
dst = Path(dst or src).addroot(buildroot).replaceext(suffix)
dst.parent.makedirs()
stdout, stderr = self.cc([src], dst,
pre_flags=list(chain(('-c',), self.flags)),
msg1=str(self),
color='compile',
**kwargs)
return dst, stdout, stderr
def __str__(self):
return str(self.cc)
# ------------------------------------------------------------------------------
class Linker(fbuild.db.PersistentObject):
def __init__(self, ctx, cc, flags=(), *, prefix, suffix):
super().__init__(ctx)
self.cc = cc
self.flags = tuple(flags)
self.prefix = prefix
self.suffix = suffix
if flags and not cc.check_flags(flags):
raise fbuild.ConfigFailed('%s does not support %s' %
(cc, ' '.join(flags)))
def __call__(self, dst, srcs, *,
prefix=None,
suffix=None,
buildroot=None,
**kwargs):
prefix = prefix or self.prefix
suffix = suffix or self.suffix
buildroot = buildroot or self.ctx.buildroot
dst = Path(dst).addroot(buildroot)
dst = dst.parent / prefix + dst.name + suffix
dst.parent.makedirs()
self.cc(srcs, dst,
pre_flags=self.flags,
msg1=str(self),
color='link',
**kwargs)
return dst
def __str__(self):
return str(self.cc)
# ------------------------------------------------------------------------------
class Builder(fbuild.builders.c.Builder):
def __init__(self, *args,
compiler,
lib_linker,
exe_linker,
**kwargs):
self.compiler = compiler
self.lib_linker = lib_linker
self.exe_linker = exe_linker
# This needs to come last as the parent class tests the builder.
super().__init__(*args, **kwargs)
def __str__(self):
return str(self.compiler)
# --------------------------------------------------------------------------
@fbuild.db.cachemethod
def compile(self, src:fbuild.db.SRC, dst=None, *,
flags=[],
**kwargs) -> fbuild.db.DST:
"""Compile a c file and cache the results."""
# Generate the dependencies while we compile the file.
with tempfile() as dep:
obj = self.uncached_compile(src, dst,
flags=list(chain(('-MMD', '-MF', dep), flags)),
**kwargs)
with open(dep, 'rb') as f:
                # Join the depfile's continuation lines into one record.
                stdout = f.read().replace(b'\\\n', b'')
            # Parse the output and return the module dependencies.
            m = re.match(rb'\s*\S+:(?: (.*))?$', stdout)
if not m:
raise fbuild.ExecutionError('unable to understand %r' % stdout)
s = m.group(1)
if s is not None:
deps = s.decode().split()
self.ctx.db.add_external_dependencies_to_call(srcs=deps)
return obj
def uncached_compile(self, *args, **kwargs):
"""Compile a c file without caching the results. This is needed when
compiling temporary files."""
obj, stdout, stderr = self.compiler(*args, **kwargs)
return obj
def uncached_link_lib(self, *args, **kwargs):
"""Link compiled c files into a library without caching the results.
This is needed when linking temporary files."""
lib = self.lib_linker(*args, **kwargs)
return fbuild.builders.c.Library(lib,
libpaths=kwargs.get('libpaths', []),
libs=kwargs.get('libs', []),
external_libs=kwargs.get('external_libs', []))
def uncached_link_exe(self, *args, **kwargs):
"""Link compiled c files into am executable without caching the
results. This is needed when linking temporary files."""
exe = self.exe_linker(*args, **kwargs)
return fbuild.builders.c.Executable(exe,
libs=kwargs.get('libs', []))
# --------------------------------------------------------------------------
def __repr__(self):
return '%s(compiler=%r, lib_linker=%r, exe_linker=%r)' % (
self.__class__.__name__,
self.compiler,
self.lib_linker,
self.exe_linker)
# ------------------------------------------------------------------------------
def static(ctx, exe=None, *args,
make_cc=make_cc,
make_compiler=Compiler,
make_lib_linker=Ar,
make_exe_linker=Linker,
platform=None,
flags=(),
compile_flags=(),
ar=None,
libpaths=(),
libs=(),
link_flags=(),
exe_link_flags=(),
src_suffix='.c',
obj_suffix=None,
lib_prefix=None,
lib_suffix=None,
exe_suffix=None,
cross_compiler=False,
**kwargs):
cc = make_cc(ctx, exe, src_suffix=src_suffix, libpaths=libpaths, libs=libs,
**kwargs)
# Allow the user to overload the file extensions.
if obj_suffix is None:
obj_suffix = fbuild.builders.platform.static_obj_suffix(ctx, platform)
lib_prefix = lib_prefix or 'lib'
lib_suffix = lib_suffix or '.a'
if exe_suffix is None:
exe_suffix = fbuild.builders.platform.exe_suffix(ctx, platform)
return Builder(ctx,
compiler=make_compiler(ctx, cc,
flags=list(chain(flags, compile_flags)),
suffix=obj_suffix),
lib_linker=make_lib_linker(ctx,
libs=libs,
libpaths=libpaths,
prefix=lib_prefix,
suffix=lib_suffix),
exe_linker=make_exe_linker(ctx, cc,
flags=list(chain(flags, link_flags, exe_link_flags)),
prefix='',
suffix=exe_suffix),
src_suffix=src_suffix,
flags=flags,
cross_compiler=cross_compiler)
# ------------------------------------------------------------------------------
def shared(ctx, exe=None, *args,
make_cc=make_cc,
make_compiler=Compiler,
make_lib_linker=Linker,
make_exe_linker=Linker,
platform=None,
flags=(),
compile_flags=('-fPIC',),
libpaths=(),
libs=(),
link_flags=(),
lib_link_flags=('-fPIC', '-shared'),
exe_link_flags=(),
src_suffix='.c',
obj_suffix=None,
lib_prefix=None,
lib_suffix=None,
exe_suffix=None,
cross_compiler=False,
**kwargs):
cc = make_cc(ctx, exe, src_suffix=src_suffix, libpaths=libpaths, libs=libs,
**kwargs)
# Allow the user to overload the file extensions.
if obj_suffix is None:
obj_suffix = fbuild.builders.platform.shared_obj_suffix(ctx, platform)
if lib_prefix is None:
lib_prefix = fbuild.builders.platform.shared_lib_prefix(ctx, platform)
if lib_suffix is None:
lib_suffix = fbuild.builders.platform.shared_lib_suffix(ctx, platform)
if exe_suffix is None:
exe_suffix = fbuild.builders.platform.exe_suffix(ctx, platform)
return Builder(ctx,
compiler=make_compiler(ctx, cc,
flags=list(chain(flags, compile_flags)),
suffix=obj_suffix),
lib_linker=make_lib_linker(ctx, cc,
flags=list(chain(flags, link_flags, lib_link_flags)),
prefix=lib_prefix,
suffix=lib_suffix),
exe_linker=make_exe_linker(ctx, cc,
flags=list(chain(flags, link_flags, exe_link_flags)),
prefix='',
suffix=exe_suffix),
src_suffix=src_suffix,
flags=flags,
cross_compiler=cross_compiler)
|
434411
|
def do_stuff():
print('stuff')
def do_other_stuff():
print('other stuff')
def branching(some_condition):
if some_condition is True:
do_stuff()
do_other_stuff()
|
434441
|
from .Euler_Scheme import EulerScheme
import taichi as ti
import numpy as np
# ref. IVOCK 2014, Dr <NAME> et al.
# need help on stretch and stream function(velocity from vorticity)
@ti.data_oriented
class IVOCK_EulerScheme(EulerScheme):
def __init__(self, cfg):
super().__init__(cfg)
def advect(self, dt):
pass
def stretch(self, dt):
pass
def schemeStep(self, ext_input: np.array):
# get omega_n
self.grid.calCurl(self.grid.v_pair.cur, self.grid.curl_pair.cur)
# TODO vorticity enhancement on vorticity
if self.dim == 3:
self.stretch(self.cfg.dt)
# advect vorticity
self.advection_solver.advect(self.grid.v_pair.cur,
self.grid.curl_pair.cur,
self.grid.curl_pair.nxt,
self.cfg.dt)
# advect velocity
# after advection, u^tilde = v_pair.nxt
for v_pair in self.grid.advect_v_pairs:
self.advection_solver.advect(self.grid.v_pair.cur, v_pair.cur, v_pair.nxt,
self.cfg.dt)
self.grid.calCurl(self.grid.v_pair.nxt, self.grid.curl_pair.nxt)
pass
|
434460
|
import numpy as np
import matplotlib.pyplot as plt
logistic = False
samples = [[0.47,1],[0.24,1],[0.75,1],[0.00,1],[-0.80,1],[-0.59,1],[1.09,1],[1.34,1],
[1.01,1],[-1.02,1],[0.50,1],[0.64,1],[-1.15,1],[-1.68,1],[-2.21,1],[-0.52,1],
[3.93,1],[4.21,1],[5.18,1],[4.20,1],[4.57,1],[2.63,1],[4.52,1],[3.31,1],
[6.75,1],[3.47,1],[4.32,1],[3.08,1],[4.10,1],[4.00,1],[2.99,1],[3.83,1]]
n = len(samples)
m = len(samples[0])
labels = [0]*(n//2) + [1]*(n//2)
k = 100
X = []
Y = []
Z = []
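# Note: the "v ..." and "f ..." lines printed below look like Wavefront OBJ
# output (one vertex per grid point, then quad faces over the k x k grid),
# so redirecting stdout to a .obj file should give a viewable energy surface.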
a,b = -8.132648217846736, 4.07391337959378
for x in np.linspace(a-3,a+17,k):
for y in np.linspace(b-13,b+7,k):
E = 0
for i in range(n):
if logistic:
E -= labels[i]*(+samples[i][0]*y + x) - np.log(1+np.exp(+x + samples[i][0]*y)) - 0.001*(x**2 + y**2)
else:
# E += (labels[i] - 1/(1+np.exp(-x - samples[i][0]*y)))**2 + 0.001*(x**2 + y**2)
E += (labels[i] - 1/(1+np.exp(-x - samples[i][0]*y)))**2
print("v %3.3f %3.3f %3.3f" %(x,y,E))
X.append(x)
Y.append(y)
Z.append(E)
for i in range(k-1):
for j in range(k-1):
print("f %d %d %d %d" %(i+j*k+1, i+1+j*k+1, i+1+(j+1)*k+1, i+(j+1)*k+1) )
'''
X, Y = np.meshgrid(X, Y)
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(X,Y,Z)
plt.show()
'''
|
434500
|
def main():
info('Waiting for minibone access')
wait('JanMiniboneFlag', 0)
info('Minibone released')
wait('MinibonePumpTimeFlag', 0)
acquire('FelixMiniboneFlag', clear=True)
info('minibone acquired')
|
434539
|
from .mod import mod
from ..db import get_watches_bi_user_id as _get_watches_bi_user_id
from ..ut.bunch import Bunch
def get_watches_bi_user_id(conn, user_id, offset=None, limit=None):
return [transform(w) for w in _get_watches_bi_user_id(
conn,
user_id,
offset=offset,
limit=limit
)]
def transform(watch):
watch = Bunch(**watch)
watch.what = mod(watch.query_kind).format_query_text('web', watch.query_text)
return watch
|
434657
|
from math import *
from NodeGeneratorBase import *
from Spheral import Vector3d, Tensor3d, SymTensor3d, Plane3d, rotationMatrix
from SpheralTestUtilities import *
import mpi
procID = mpi.rank
nProcs = mpi.procs
#-------------------------------------------------------------------------------
# The sign function
#-------------------------------------------------------------------------------
def sgn(x):
if x >= 0.0:
return 1.0
else:
return -1.0
#-------------------------------------------------------------------------------
# Produce spherical shell sections, bounded by four planes.
#-------------------------------------------------------------------------------
class GenerateSphericalShellSection(NodeGeneratorBase):
#-------------------------------------------------------------------------------
# Constructor
#-------------------------------------------------------------------------------
def __init__(self,
nr,
nl,
rho0,
r0,
rthick,
openingAngle,
nNodePerh = 2.01,
SPH = False):
self.nr = nr
self.nl = nl
self.rho0 = rho0
self.r0 = r0
self.rthick = rthick
self.openingAngle = openingAngle
self.nNodePerh = nNodePerh
# Prepare our internal arrays.
self.x = []
self.y = []
self.z = []
self.m = []
self.H = []
# Useful stuff to precompute.
phi0 = -0.5*openingAngle
dphi = openingAngle/nl
dr = rthick/nr
halfpi = 0.5*pi
# We pretend that the subvolumes per point are pretty much the same,
# just rotated into a different orientation. Not a good approximation
# as we increase the opening angle, but OK for small angles.
dx = r0*dphi
V0 = dx*dx*dr
m0 = V0*rho0
H0 = SymTensor3d(1.0/dx, 0.0, 0.0,
0.0, 1.0/dx, 0.0,
0.0, 0.0, 1.0/dr)/nNodePerh
# Iterate over the angles.
for ip1 in xrange(nl):
phi1 = phi0 + (ip1 + 0.5)*dphi
plane1 = self.computePlane(0.0, phi1)
for ip2 in xrange(nl):
phi2 = phi0 + (ip2 + 0.5)*dphi
plane2 = self.computePlane(halfpi, phi2)
# Get the line that represents the intersection of the two planes.
P, N = self.computePlaneIntersection(plane1, plane2)
# Intersect this line with each radii we want, which gives us
# the desired positions.
for ir in xrange(nr):
r = r0 + (ir + 0.5)*dr
pos = self.intersectLineAndSphere(P, N, r)
assert fuzzyEqual(pos.magnitude(), r, 1.0e-10)
self.x.append(pos.x)
self.y.append(pos.y)
self.z.append(pos.z)
self.m.append(m0)
# Rotate H into the appropriate frame.
R = rotationMatrix(N)
Hi = SymTensor3d(H0)
Hi.rotationalTransform(R)
self.H.append(Hi)
# Check that the points are within bounds.
planes = self.boundaryPlanes()
for x, y, z in zip(self.x, self.y, self.z):
for plane in planes:
assert Vector3d(x, y, z) > plane
# Have the base class break up the serial node distribution
# for parallel cases.
NodeGeneratorBase.__init__(self, True,
self.x, self.y, self.z, self.m, self.H)
# If SPH has been specified, make sure the H tensors are round.
if SPH:
self.makeHround()
return
#-------------------------------------------------------------------------------
    # Return the plane corresponding to the given displacement in theta and phi.
#-------------------------------------------------------------------------------
def computePlane(self, theta, phi):
ct = cos(theta)
st = sin(theta)
cp = cos(phi)
sp = sin(phi)
P = Vector3d(ct*sp, st*sp, cp)
N = Vector3d(-ct*cp, -st*cp, sp)
assert fuzzyEqual(N.magnitude2(), 1.0)
return Plane3d(P, N)
#-------------------------------------------------------------------------------
# Return the line resulting from the intersection of two planes.
#-------------------------------------------------------------------------------
def computePlaneIntersection(self, plane1, plane2):
p1 = plane1.point()
n1 = plane1.normal()
p2 = plane2.point()
n2 = plane2.normal()
assert distinctlyLessThan(abs(n1.dot(n2)), 1.0)
d1 = -n1.dot(p1)
d2 = -n2.dot(p2)
n3 = (n1.cross(n2)).unitVector()
p3 = (d2*n1 - d1*n2).cross(n3) / (n1.cross(n2).dot(n3))
return p3, n3
#-------------------------------------------------------------------------------
# Return the positive intersection of a line with a sphere centered at the
# origin.
#-------------------------------------------------------------------------------
def intersectLineAndSphere(self, point, normal, r):
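        # Points on the line are p(t) = point + t*normal with |normal| = 1, so
        # |p(t)|^2 = r^2 reduces to t^2 + b*t + c = 0.  The roots are found via
        # q = -(b + sgn(b)*sqrt(b^2 - 4c))/2 with t1 = q and t2 = c/q, which
        # avoids the catastrophic cancellation of the naive quadratic formula.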
b = 2.0*normal.dot(point)
c = point.magnitude2() - r*r
q = -0.5*(b + sgn(b)*sqrt(b*b - 4.0*c))
assert distinctlyGreaterThan(abs(q), 0.0)
a1 = q
a2 = c/q
p1 = (point + a1*normal).unitVector() * r
p2 = (point + a2*normal).unitVector() * r
if p1 > p2:
return p1
else:
return p2
#-------------------------------------------------------------------------------
# Return the boundary planes.
#-------------------------------------------------------------------------------
def boundaryPlanes(self):
p = 0.5*self.openingAngle
halfpi = 0.5*pi
return (self.computePlane(0.0, p),
self.computePlane(halfpi, p),
self.computePlane(pi, p),
self.computePlane(-halfpi, p))
#-------------------------------------------------------------------------------
# Get the position for the given node index.
#-------------------------------------------------------------------------------
def localPosition(self, i):
assert i >= 0 and i < len(self.x)
        assert len(self.x) == len(self.y) == len(self.z)
return Vector3d(self.x[i], self.y[i], self.z[i])
#-------------------------------------------------------------------------------
# Get the mass for the given node index.
#-------------------------------------------------------------------------------
def localMass(self, i):
assert i >= 0 and i < len(self.m)
return self.m[i]
#-------------------------------------------------------------------------------
# Get the mass density for the given node index.
#-------------------------------------------------------------------------------
def localMassDensity(self, i):
assert i >= 0 and i < len(self.x)
return self.rho0
#-------------------------------------------------------------------------------
# Get the H tensor for the given node index.
#-------------------------------------------------------------------------------
def localHtensor(self, i):
assert i >= 0 and i < len(self.H)
return self.H[i]
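#-------------------------------------------------------------------------------
# Example (hypothetical parameters, not part of the original module):
#   gen = GenerateSphericalShellSection(nr=10, nl=20, rho0=1.0, r0=1.0,
#                                       rthick=0.1, openingAngle=pi/8)
# builds a shell section with 10 radial by 20x20 angular points at density 1.0.
#-------------------------------------------------------------------------------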
|
434665
|
import enum as _enum
import string as _string
from typing import Any
from . import auth as _auth
from . import helper as _helper
from . import url as _url
import requests
class Method(_enum.Enum):
GET = "GET"
POST = "POST"
PUT = "PUT"
DELETE = "DELETE"
def request(
request_method: Method,
uri: str,
*,
auth=None,
base_url=None,
**kwargs
) -> requests.PreparedRequest:
request_method = _helper.enumize(request_method, Method).value
req = requests.Request(
method = request_method,
url = _url.get(uri=uri, base_url=base_url),
auth = (auth or _auth.DefaultOAuth.get()),
**kwargs
)
return req.prepare()
def method(
request_method: Method,
uri: str,
**kwargs
) -> Any:
preq = request(request_method, uri, **kwargs)
with requests.Session() as s:
resp = s.send(preq)
return resp.json()
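# Example (hypothetical endpoint; assumes a default base URL and credentials
# are configured via the sibling `url` and `auth` modules):
#
#   settings = method(Method.GET, "account/settings.json")
#
# `method` prepares the request, sends it in a short-lived session, and
# returns the decoded JSON body.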
|
434695
|
import argparse
def parse(args):
parser = argparse.ArgumentParser()
parser.add_argument(
"--debug", action="store_true", default=False, help="Enable debug logging"
)
parser.add_argument("--xml")
parser.add_argument(
"--module", action="store_true", default=False, help="Trace a module"
)
parser.add_argument("progname", help="file to run as main program")
parser.add_argument(
"arguments", nargs=argparse.REMAINDER, help="arguments to the program"
)
return parser.parse_args(args)
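# Example (illustrative): parse(["--debug", "prog.py", "a", "b"]) yields a
# Namespace with debug=True, xml=None, module=False, progname="prog.py",
# and arguments=["a", "b"] (argparse.REMAINDER collects the trailing args).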
|
434746
|
from .svg_parser_utils import *
import OpenGL.GL as gl
import math
EPSILON = 0.001
class vec2(object):
def __init__(self, *args):
if isinstance(args[0], vec2):
self.x = args[0].x
self.y = args[0].y
elif isinstance(args[0], list):
self.x, self.y = args[0]
else:
self.x, self.y = args[0], args[1]
def tolist(self):
return [self.x, self.y]
def __repr__(self):
return '(' + str(self.x) + ',' + str(self.y) + ')'
def __neg__(self):
return vec2(-self.x, -self.y)
def __abs__(self):
return self.length()
def __add__(self, other):
return vec2(self.x + other.x, self.y + other.y)
def __sub__(self, other):
return vec2(self.x - other.x, self.y - other.y)
def __mul__(self, scale):
return vec2(self.x * scale, self.y * scale)
def __div__(self, scale):
return vec2(self.x / scale, self.y / scale)
def __truediv__(self, scale):
return vec2(self.x / scale, self.y / scale)
def __eq__(self, other):
if not other: return False
return abs(self.x - other.x) < EPSILON and abs(self.y - other.y) < EPSILON
def __ne__(self, other):
return not(self.__eq__(other))
def normalized(self):
l = self.length()
if l == 0:
return vec2(1, 0)
else:
return vec2(self.x, self.y) / self.length()
def angle(self):
return math.atan2(self.y, self.x)
def length(self):
return math.sqrt(self.x ** 2 + self.y ** 2)
def intersection(p1, p2, p3, p4):
"""
Returns whether two lines intersected, and the point at which
they intersected (or, "None" if the lines are parallel)
"""
A1 = p2.y - p1.y
B1 = p1.x - p2.x
C1 = A1 * p1.x + B1 * p1.y
A2 = p4.y - p3.y
B2 = p3.x - p4.x
C2 = A2 * p3.x + B2 * p3.y
det = A1 * B2 - A2 * B1
if abs(det) < EPSILON: # Lines are parallel
if (p1 == p3) or (p1 == p4):
return True, p1
elif (p2 == p3) or (p2 == p4):
return True, p2
return False, None
else:
result = vec2(
(B2 * C1 - B1 * C2) / det,
(A1 * C2 - A2 * C1) / det)
epsilon = .01
on_line_segment = True
on_line_segment &= result.x >= (min(p1.x, p2.x) - epsilon)
on_line_segment &= result.y >= (min(p1.y, p2.y) - epsilon)
on_line_segment &= result.x <= (max(p1.x, p2.x) + epsilon)
on_line_segment &= result.y <= (max(p1.y, p2.y) + epsilon)
return on_line_segment, result
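# Example (illustrative): the segments (0,0)-(2,2) and (0,2)-(2,0) cross at
# their midpoint:
#
#   hit, p = intersection(vec2(0, 0), vec2(2, 2), vec2(0, 2), vec2(2, 0))
#   # hit is True, p is (1.0, 1.0)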
def line_length(a, b):
return math.sqrt((b.x - a.x) ** 2 + (b.y - a.y) ** 2)
def radian(deg):
return deg * (math.pi / 180.0)
ninety_degrees = radian(90)
class Matrix(object):
def __init__(self, string=None):
self.values = [1, 0, 0, 1, 0, 0]
if isinstance(string, str):
string = string.strip()
if string.startswith('matrix('):
self.values = [float(x) for x in parse_list(string[7:-1])]
elif string.startswith('translate('):
args = [float(x) for x in parse_list(string[10:-1])]
                if len(args) == 2:
                    self.values = [1, 0, 0, 1, args[0], args[1]]
                else:
                    # SVG allows translate(tx) with an implicit ty of 0.
                    self.values = [1, 0, 0, 1, args[0], 0]
elif string.startswith('scale('):
inside = string[6:-1]
scale_vars = [float(x) for x in parse_float_list(inside)]
if len(scale_vars) == 1:
self.values = [scale_vars[0], 0, 0, scale_vars[0], 0, 0]
else:
self.values = [scale_vars[0], 0, 0, scale_vars[1], 0, 0]
elif string.startswith('rotate('):
angle = float(string[7:-1])
theta = radian(angle)
                # note: SVG rotate() can also take an optional centre (cx, cy),
                # which is not handled here; the angle enters via theta below.
self.values = [math.cos(theta), math.sin(theta), -math.sin(theta), math.cos(theta), 0, 0]
elif string is not None:
self.values = list(string)
def __enter__(self):
gl.glPushMatrix()
gl.glMultMatrixf(self.to_mat4())
return self
def __exit__(self, type, value, traceback):
gl.glPopMatrix()
def __call__(self, other):
return (self.values[0] * other[0] + self.values[2] * other[1] + self.values[4],
self.values[1] * other[0] + self.values[3] * other[1] + self.values[5])
def __str__(self):
return str(self.values)
def to_mat4(self):
v = self.values
return [v[0], v[1], 0.0, 0.0,
v[2], v[3], 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
v[4], v[5], 0.0, 1.0]
@classmethod
def identity(cls):
return Matrix([1, 0, 0, 1, 0, 0])
@classmethod
def translation(cls, x, y):
return Matrix([1, 0, 0, 1, x, y])
@classmethod
def scale(cls, sx, sy):
return Matrix([sx, 0, 0, sy, 0, 0])
@classmethod
def rotation(cls, theta):
return Matrix([math.cos(theta), math.sin(theta), -math.sin(theta), math.cos(theta), 0, 0])
@classmethod
def transform(cls, x, y, theta=0, sx=1, sy=1):
return Matrix([math.cos(theta) * sx, math.sin(theta),
-math.sin(theta), math.cos(theta) * sy,
x, y])
def inverse(self):
d = float(self.values[0] * self.values[3] - self.values[1] * self.values[2])
return Matrix([self.values[3] / d, -self.values[1] / d, -self.values[2] / d, self.values[0] / d,
(self.values[2] * self.values[5] - self.values[3] * self.values[4]) / d,
(self.values[1] * self.values[4] - self.values[0] * self.values[5]) / d])
def __mul__(self, other):
a, b, c, d, e, f = self.values
u, v, w, x, y, z = other.values
return Matrix([
a * u + c * v,
b * u + d * v,
a * w + c * x,
b * w + d * x,
a * y + c * z + e,
b * y + d * z + f])
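# Example (illustrative): composition applies the right-hand matrix first,
# matching SVG's left-to-right transform lists:
#
#   m = Matrix.translation(10, 0) * Matrix.scale(2, 2)
#   m((1, 1))  # -> (12, 2): scaled to (2, 2), then translated by (10, 0)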
def svg_matrix_to_gl_matrix(matrix):
v = matrix.values
return [v[0], v[1], 0.0, v[2], v[3], 0.0, v[4], v[5], 1.0]
class BoundingBox:
def __init__(self, point_cloud=None):
self.min_x = None
self.max_x = None
self.min_y = None
self.max_y = None
if point_cloud:
self.expand(point_cloud)
def expand(self, points):
for p in points:
x, y = p[0], p[1]
if self.min_x is None or x < self.min_x:
self.min_x = x
if self.min_y is None or y < self.min_y:
self.min_y = y
if self.max_x is None or x > self.max_x:
self.max_x = x
if self.max_y is None or y > self.max_y:
self.max_y = y
def extents(self):
return self.min_x, self.min_y, self.max_x, self.max_y
|
434764
|
import collections
class Solution(object):
def removeDuplicateLetters(self, s):
"""
:type s: str
:rtype: str
"""
        count_map = collections.Counter(s)
        stack = []
        in_stack = set()
        for c in s:
            count_map[c] -= 1
            if c in in_stack:
                continue
            # Pop larger characters that still occur later in the string; they
            # can be re-added at a lexicographically better position.
            while stack and c < stack[-1] and count_map[stack[-1]] > 0:
                in_stack.remove(stack.pop())
            stack.append(c)
            in_stack.add(c)
        return ''.join(stack)
if __name__ == '__main__':
solution = Solution()
print(solution.removeDuplicateLetters('cbacdcbc'))
|
434788
|
import elbus_async
import asyncio
async def main():
name = 'test.client.python.async_sender'
bus = elbus_async.client.Client('/tmp/elbus.sock', name)
await bus.connect()
# send a regular message
result = await bus.send('test.client.python.async',
elbus_async.client.Frame('hello'))
print(hex(await result.wait_completed()))
# send a broadcast message
result = await bus.send(
'test.*',
elbus_async.client.Frame('hello everyone',
tp=elbus_async.client.OP_BROADCAST))
print(hex(await result.wait_completed()))
# publish to a topic with zero QoS (no confirmation required)
await bus.send(
'test/topic',
elbus_async.client.Frame('something',
tp=elbus_async.client.OP_PUBLISH,
qos=0))
asyncio.run(main())
|
434798
|
from __future__ import division, print_function
import argparse
import pickle as pkl
import h5py
import glob
import os
from tqdm import tqdm
def main(args):
print(args)
compression_flags = dict(compression='gzip', compression_opts=9)
filenames = glob.glob(os.path.join(args.features_folder, '*.pkl'))
print(f'Number of pkl files: {len(filenames)}')
output = h5py.File(args.output_h5, 'w')
for f in tqdm(filenames):
video_name = os.path.basename(f).split('.pkl')[0]
with open(f, 'rb') as fobj:
data = pkl.load(fobj)
output.create_dataset(video_name, data=data, chunks=True, **compression_flags)
output.close()
print(f'The h5 feature file is saved to {args.output_h5}')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Merge the feature pkl files of different videos into one '
'h5 feature file mapping video name to feature tensor.')
parser.add_argument('--features-folder', required=True, type=str,
help='Path to the folder containing the pkl feature files')
parser.add_argument('--output-h5', required=True, type=str,
                        help='Where to save the combined h5 feature file')
args = parser.parse_args()
main(args)
|
434883
|
from About import *
from Args import *
from DemoSet import *
from DemoSplash import *
from geoparse import *
from MainDisplay import *
from MainMenu import *
|
434938
|
from getpass import getpass
import os
import click
from flask.cli import FlaskGroup
import lxml.etree
from passlib.apache import HtpasswdFile
from tqdm import tqdm
from .flask import app
from . import models, tasks
from . import views, twilio # noqa: F401
@click.group(cls=FlaskGroup, create_app=lambda *args, **kwargs: app)
def main():
"""Command line management console for the GROWTH ToO Marshal"""
@app.cli.command(context_settings=dict(allow_extra_args=True,
ignore_unknown_options=True))
@click.pass_context
def celery(ctx):
"""Manage Celery cluster."""
tasks.celery.start(['celery'] + ctx.args)
@app.cli.command()
def gcn():
"""Listen for GCN Notices."""
from .gcn import listen
listen()
@app.cli.command()
def iers():
"""Update IERS data for precise positional astronomy.
The IERS Bulletin A data set is used for precise time conversions and
positional astronomy. To initialize Astroplan, you need to download it.
According to https://astroplan.readthedocs.io/en/latest/faq/iers.html, you
need to run this command::
python -c 'from astroplan import download_IERS_A; download_IERS_A()'
Unfortunately, the USNO server that provides the data file is extremely
flaky. This tool attempts to work around that by retrying the download
several times.
"""
from retry.api import retry_call
from astroplan import download_IERS_A
from urllib.error import URLError
retry_call(
download_IERS_A, exceptions=(IndexError, URLError, ValueError),
tries=5, delay=1, backoff=2)
@app.cli.command()
@click.argument('username', required=False)
def passwd(username):
"""Set the password for a user."""
if username is None:
username = input('Username: ')
password = getpass()
path = os.path.join(app.instance_path, 'htpasswd')
os.makedirs(app.instance_path, exist_ok=True)
try:
htpasswd = HtpasswdFile(path)
except FileNotFoundError:
htpasswd = HtpasswdFile()
htpasswd.set_password(username, password)
htpasswd.save(path)
@app.cli.group()
def db():
"""Manage the PostgreSQL database."""
@db.command()
@click.option('--sample', is_flag=True, help="Populate with sample data.")
def create(sample):
"""Create all tables from SQLAlchemy models"""
models.create_all()
models.db.session.commit()
if sample:
from .gcn import handle
# Don't rely on Celery to be functional.
tasks.celery.conf['task_always_eager'] = True
models.db.session.merge(models.User(name='fritz'))
models.db.session.commit()
filenames = ['GRB180116A_Fermi_GBM_Alert.xml',
'GRB180116A_Fermi_GBM_Flt_Pos.xml',
'GRB180116A_Fermi_GBM_Gnd_Pos.xml',
'GRB180116A_Fermi_GBM_Fin_Pos.xml',
'MS181101ab-1-Preliminary.xml',
'MS181101ab-4-Retraction.xml',
'AMON_151115.xml']
with tqdm(filenames) as progress:
for filename in progress:
progress.set_description(
'processing GCN {}'.format(filename))
with app.open_resource(
os.path.join('tests/data', filename)) as f:
payload = f.read()
handle(payload, lxml.etree.fromstring(payload))
tasks.ztf_client.ztf_obs()
@db.command()
@click.option('--preserve', help='Preserve the named table.', multiple=True)
def drop(preserve):
"""Drop all tables from SQLAlchemy models"""
models.db.reflect(bind=None)
models.db.metadata.drop_all(
bind=models.db.get_engine(app, bind=None),
tables=[value for key, value in models.db.metadata.tables.items()
if key not in preserve])
models.db.session.commit()
@db.command()
@click.option('--sample', is_flag=True, help="Populate with sample data.")
@click.pass_context
def recreate(ctx, sample):
"""Drop and recreate all tables from SQLAlchemy models"""
ctx.invoke(drop)
ctx.forward(create)
|
434952
|
from __future__ import print_function
import argparse
import sys
import traceback
from botocore.exceptions import ClientError
from ridi.secret_keeper.connect import tell
def run(arguments):
description = """
Retrieve and print secrets from `secret-keeper`.
You need to configure AWS credentials by environment variables or files.
See https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#credentials for more detail.
"""
parser = argparse.ArgumentParser(description=description)
parser.add_argument("alias", help="Alias of the secret")
parser.add_argument("-o", "--outfile", help="Output file name. If not provided, secret is printed to stdout.")
parser.add_argument("-v", "--verbose", action="store_true", help="Gives detailed error message")
args = parser.parse_args(arguments)
alias = args.alias
outfile = args.outfile
verbose = args.verbose
try:
secret = tell(alias).strip()
if outfile:
with open(outfile, "w") as f:
print(secret, file=f)
else:
print(secret)
return 0
except Exception as e:
if verbose:
traceback.print_exc(file=sys.stderr)
if isinstance(e, ClientError):
print("Secret of alias '%s' is not found." % alias, file=sys.stderr)
return 1
def main():
retval = run(sys.argv[1:])
sys.exit(retval)
if __name__ == "__main__":
main()
|
434993
|
def dedupe_and_sort(sequence, first=None, last=None):
"""
De-dupe and partially sort a sequence.
The `first` argument should contain all the items that might appear in
`sequence` and for which the order (relative to each other) is important.
The `last` argument is the same, but matching items will be placed at the
end of the sequence.
For example, `INSTALLED_APPS` and `MIDDLEWARE_CLASSES` settings.
Items from `first` will only be included if they also appear in `sequence`.
Items from `sequence` that don't appear in `first` will come
after any that do, and retain their existing order.
Returns a sequence of the same type as given.
"""
first = first or []
last = last or []
# Add items that should be sorted first.
new_sequence = [i for i in first if i in sequence]
# Add remaining items in their current order, ignoring duplicates and items
# that should be sorted last.
for item in sequence:
if item not in new_sequence and item not in last:
new_sequence.append(item)
# Add items that should be sorted last.
new_sequence.extend([i for i in last if i in sequence])
# Return a sequence of the same type as given.
return type(sequence)(new_sequence)
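# Example (illustrative):
#
#   dedupe_and_sort(['c', 'a', 'b', 'a', 'z'], first=['a'], last=['z'])
#   # -> ['a', 'c', 'b', 'z']
#
# 'a' moves to the front, its duplicate is dropped, 'c' and 'b' keep their
# relative order, and 'z' is pushed to the end.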
def _apply_slice(sequence, start, end):
return sequence[start:end]
def slice_sequences(sequences, start, end, apply_slice=None):
"""
Performs a slice across multiple sequences.
Useful when paginating across chained collections.
:param sequences: an iterable of iterables, each nested iterable should contain
a sequence and its size
:param start: starting index to apply the slice from
:param end: index that the slice should end at
:param apply_slice: function that takes the sequence and start/end offsets, and
returns the sliced sequence
:return: a list of the items sliced from the sequences
"""
if start < 0 or end < 0 or end <= start:
raise ValueError('Start and/or End out of range. Start: %s. End: %s' % (start, end))
items_to_take = end - start
items_passed = 0
collected_items = []
if apply_slice is None:
apply_slice = _apply_slice
for sequence, count in sequences:
offset_start = start - items_passed
offset_end = end - items_passed
if items_passed == start:
items = apply_slice(sequence, 0, items_to_take)
elif 0 < offset_start < count:
items = apply_slice(sequence, offset_start, offset_end)
elif offset_start < 0:
items = apply_slice(sequence, 0, offset_end)
else:
items = []
items = list(items)
collected_items += items
items_to_take -= len(items)
items_passed += count
if items_passed > end or items_to_take == 0:
break
return collected_items
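# Example (illustrative): slicing items 2-3 across two chained lists.
#
#   sequences = [(['a', 'b', 'c'], 3), (['d', 'e'], 2)]
#   slice_sequences(sequences, 2, 4)
#   # -> ['c', 'd']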
|
435010
|
from starflyer import Handler, redirect, asjson
from camper import BaseForm, db, BaseHandler
from camper import logged_in, is_admin, ensure_barcamp
from .base import BarcampBaseHandler
import werkzeug.exceptions
ALLOWED_CHECKS = [
'has_event',
'has_sponsor',
'has_hashtag',
'has_twitter',
'has_facebook',
'has_seo',
'has_timetable',
'has_logo'
]
class BarcampWizard(BarcampBaseHandler):
"""the wizard is a page which shows what needs eventually be done for a barcamp to make it complete"""
template = "admin/wizard.html"
@ensure_barcamp()
@logged_in()
@is_admin()
def get(self, slug = None):
"""show the missing pieces"""
# check for post
if self.request.method == "POST":
for cancel in self.request.form:
cancel = str(cancel)
if cancel in ALLOWED_CHECKS and cancel not in self.barcamp.wizard_checked:
self.barcamp.wizard_checked.append(cancel)
self.barcamp.save()
# uses the computation in the base handler
return self.render(**self.compute_progress())
post = get
|
435076
|
from boto.swf.exceptions import SWFResponseError
from botocore.exceptions import ClientError
from freezegun import freeze_time
import sure # noqa # pylint: disable=unused-import
from unittest import SkipTest
import pytest
from moto import mock_swf, mock_swf_deprecated
from moto import settings
from moto.swf import swf_backend
from ..utils import setup_workflow, SCHEDULE_ACTIVITY_TASK_DECISION
from ..utils import setup_workflow_boto3
# PollForActivityTask endpoint
# Has boto3 equivalent
@mock_swf_deprecated
def test_poll_for_activity_task_when_one():
conn = setup_workflow()
decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"]
conn.respond_decision_task_completed(
decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
resp = conn.poll_for_activity_task(
"test-domain", "activity-task-list", identity="surprise"
)
resp["activityId"].should.equal("my-activity-001")
resp["taskToken"].should_not.be.none
resp = conn.get_workflow_execution_history(
"test-domain", conn.run_id, "uid-abcd1234"
)
resp["events"][-1]["eventType"].should.equal("ActivityTaskStarted")
resp["events"][-1]["activityTaskStartedEventAttributes"].should.equal(
{"identity": "surprise", "scheduledEventId": 5}
)
@mock_swf
def test_poll_for_activity_task_when_one_boto3():
client = setup_workflow_boto3()
decision_token = client.poll_for_decision_task(
domain="test-domain", taskList={"name": "queue"}
)["taskToken"]
client.respond_decision_task_completed(
taskToken=decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
resp = client.poll_for_activity_task(
domain="test-domain",
taskList={"name": "activity-task-list"},
identity="surprise",
)
resp["activityId"].should.equal("my-activity-001")
resp["taskToken"].should_not.be.none
resp = client.get_workflow_execution_history(
domain="test-domain",
execution={"runId": client.run_id, "workflowId": "uid-abcd1234"},
)
resp["events"][-1]["eventType"].should.equal("ActivityTaskStarted")
resp["events"][-1]["activityTaskStartedEventAttributes"].should.equal(
{"identity": "surprise", "scheduledEventId": 5}
)
# Has boto3 equivalent
@mock_swf_deprecated
def test_poll_for_activity_task_when_none():
conn = setup_workflow()
resp = conn.poll_for_activity_task("test-domain", "activity-task-list")
resp.should.equal({"startedEventId": 0})
# Has boto3 equivalent
@mock_swf_deprecated
def test_poll_for_activity_task_on_non_existent_queue():
conn = setup_workflow()
resp = conn.poll_for_activity_task("test-domain", "non-existent-queue")
resp.should.equal({"startedEventId": 0})
@pytest.mark.parametrize("task_name", ["activity-task-list", "non-existent-queue"])
@mock_swf
def test_poll_for_activity_task_when_none_boto3(task_name):
client = setup_workflow_boto3()
resp = client.poll_for_decision_task(
domain="test-domain", taskList={"name": task_name}
)
resp.shouldnt.have.key("taskToken")
resp.should.have.key("startedEventId").equal(0)
resp.should.have.key("previousStartedEventId").equal(0)
# CountPendingActivityTasks endpoint
# Has boto3 equivalent
@mock_swf_deprecated
def test_count_pending_activity_tasks():
conn = setup_workflow()
decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"]
conn.respond_decision_task_completed(
decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
resp = conn.count_pending_activity_tasks("test-domain", "activity-task-list")
resp.should.equal({"count": 1, "truncated": False})
# Has boto3 equivalent
@mock_swf_deprecated
def test_count_pending_decision_tasks_on_non_existent_task_list():
conn = setup_workflow()
resp = conn.count_pending_activity_tasks("test-domain", "non-existent")
resp.should.equal({"count": 0, "truncated": False})
@pytest.mark.parametrize(
"task_name,cnt", [("activity-task-list", 1), ("non-existent", 0)]
)
@mock_swf
def test_count_pending_activity_tasks_boto3(task_name, cnt):
client = setup_workflow_boto3()
decision_token = client.poll_for_decision_task(
domain="test-domain", taskList={"name": "queue"}
)["taskToken"]
client.respond_decision_task_completed(
taskToken=decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
resp = client.count_pending_activity_tasks(
domain="test-domain", taskList={"name": task_name}
)
resp.should.have.key("count").equal(cnt)
resp.should.have.key("truncated").equal(False)
# RespondActivityTaskCompleted endpoint
# Has boto3 equivalent
@mock_swf_deprecated
def test_respond_activity_task_completed():
conn = setup_workflow()
decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"]
conn.respond_decision_task_completed(
decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
activity_token = conn.poll_for_activity_task("test-domain", "activity-task-list")[
"taskToken"
]
resp = conn.respond_activity_task_completed(
activity_token, result="result of the task"
)
resp.should.be.none
resp = conn.get_workflow_execution_history(
"test-domain", conn.run_id, "uid-abcd1234"
)
resp["events"][-2]["eventType"].should.equal("ActivityTaskCompleted")
resp["events"][-2]["activityTaskCompletedEventAttributes"].should.equal(
{"result": "result of the task", "scheduledEventId": 5, "startedEventId": 6}
)
@mock_swf
def test_respond_activity_task_completed_boto3():
client = setup_workflow_boto3()
decision_token = client.poll_for_decision_task(
domain="test-domain", taskList={"name": "queue"}
)["taskToken"]
client.respond_decision_task_completed(
taskToken=decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
activity_token = client.poll_for_activity_task(
domain="test-domain", taskList={"name": "activity-task-list"}
)["taskToken"]
client.respond_activity_task_completed(
taskToken=activity_token, result="result of the task"
)
resp = client.get_workflow_execution_history(
domain="test-domain",
execution={"runId": client.run_id, "workflowId": "uid-abcd1234"},
)
resp["events"][-2]["eventType"].should.equal("ActivityTaskCompleted")
resp["events"][-2]["activityTaskCompletedEventAttributes"].should.equal(
{"result": "result of the task", "scheduledEventId": 5, "startedEventId": 6}
)
# Has boto3 equivalent
@mock_swf_deprecated
def test_respond_activity_task_completed_on_closed_workflow_execution():
conn = setup_workflow()
decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"]
conn.respond_decision_task_completed(
decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
activity_token = conn.poll_for_activity_task("test-domain", "activity-task-list")[
"taskToken"
]
    # bad: we're closing the workflow execution manually because the
    # endpoints are not implemented yet
wfe = swf_backend.domains[0].workflow_executions[-1]
wfe.execution_status = "CLOSED"
# /bad
conn.respond_activity_task_completed.when.called_with(activity_token).should.throw(
SWFResponseError, "WorkflowExecution="
)
@mock_swf
def test_respond_activity_task_completed_on_closed_workflow_execution_boto3():
client = setup_workflow_boto3()
decision_token = client.poll_for_decision_task(
domain="test-domain", taskList={"name": "queue"}
)["taskToken"]
client.respond_decision_task_completed(
taskToken=decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
activity_token = client.poll_for_activity_task(
domain="test-domain", taskList={"name": "activity-task-list"}
)["taskToken"]
client.terminate_workflow_execution(domain="test-domain", workflowId="uid-abcd1234")
with pytest.raises(ClientError) as ex:
client.respond_activity_task_completed(taskToken=activity_token)
ex.value.response["Error"]["Code"].should.equal("UnknownResourceFault")
ex.value.response["Error"]["Message"].should.equal(
"Unknown execution: WorkflowExecution=[workflowId=uid-abcd1234, runId={}]".format(
client.run_id
)
)
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
# Has boto3 equivalent
@mock_swf_deprecated
def test_respond_activity_task_completed_with_task_already_completed():
conn = setup_workflow()
decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"]
conn.respond_decision_task_completed(
decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
activity_token = conn.poll_for_activity_task("test-domain", "activity-task-list")[
"taskToken"
]
conn.respond_activity_task_completed(activity_token)
conn.respond_activity_task_completed.when.called_with(activity_token).should.throw(
SWFResponseError, "Unknown activity, scheduledEventId = 5"
)
@mock_swf
def test_respond_activity_task_completed_with_task_already_completed_boto3():
client = setup_workflow_boto3()
decision_token = client.poll_for_decision_task(
domain="test-domain", taskList={"name": "queue"}
)["taskToken"]
client.respond_decision_task_completed(
taskToken=decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
activity_token = client.poll_for_activity_task(
domain="test-domain", taskList={"name": "activity-task-list"}
)["taskToken"]
client.respond_activity_task_completed(taskToken=activity_token)
with pytest.raises(ClientError) as ex:
client.respond_activity_task_completed(taskToken=activity_token)
ex.value.response["Error"]["Code"].should.equal("UnknownResourceFault")
ex.value.response["Error"]["Message"].should.equal(
"Unknown activity, scheduledEventId = 5"
)
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
# RespondActivityTaskFailed endpoint
# Has boto3 equivalent
@mock_swf_deprecated
def test_respond_activity_task_failed():
conn = setup_workflow()
decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"]
conn.respond_decision_task_completed(
decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
activity_token = conn.poll_for_activity_task("test-domain", "activity-task-list")[
"taskToken"
]
resp = conn.respond_activity_task_failed(
activity_token, reason="short reason", details="long details"
)
resp.should.be.none
resp = conn.get_workflow_execution_history(
"test-domain", conn.run_id, "uid-abcd1234"
)
resp["events"][-2]["eventType"].should.equal("ActivityTaskFailed")
resp["events"][-2]["activityTaskFailedEventAttributes"].should.equal(
{
"reason": "short reason",
"details": "long details",
"scheduledEventId": 5,
"startedEventId": 6,
}
)
@mock_swf
def test_respond_activity_task_failed_boto3():
client = setup_workflow_boto3()
decision_token = client.poll_for_decision_task(
domain="test-domain", taskList={"name": "queue"}
)["taskToken"]
client.respond_decision_task_completed(
taskToken=decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
activity_token = client.poll_for_activity_task(
domain="test-domain", taskList={"name": "activity-task-list"}
)["taskToken"]
client.respond_activity_task_failed(
taskToken=activity_token, reason="short reason", details="long details"
)
resp = client.get_workflow_execution_history(
domain="test-domain",
execution={"runId": client.run_id, "workflowId": "uid-abcd1234"},
)
resp["events"][-2]["eventType"].should.equal("ActivityTaskFailed")
resp["events"][-2]["activityTaskFailedEventAttributes"].should.equal(
{
"reason": "short reason",
"details": "long details",
"scheduledEventId": 5,
"startedEventId": 6,
}
)
# Has boto3 equivalent
@mock_swf_deprecated
def test_respond_activity_task_completed_with_wrong_token():
# NB: we just test ONE failure case for RespondActivityTaskFailed
# because the safeguards are shared with RespondActivityTaskCompleted, so
# no need to retest everything end-to-end.
conn = setup_workflow()
decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"]
conn.respond_decision_task_completed(
decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
conn.poll_for_activity_task("test-domain", "activity-task-list")
conn.respond_activity_task_failed.when.called_with(
"not-a-correct-token"
).should.throw(SWFResponseError, "Invalid token")
@mock_swf
def test_respond_activity_task_completed_with_wrong_token_boto3():
# NB: we just test ONE failure case for RespondActivityTaskFailed
# because the safeguards are shared with RespondActivityTaskCompleted, so
# no need to retest everything end-to-end.
client = setup_workflow_boto3()
decision_token = client.poll_for_decision_task(
domain="test-domain", taskList={"name": "queue"}
)["taskToken"]
client.respond_decision_task_completed(
taskToken=decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
client.poll_for_activity_task(
domain="test-domain", taskList={"name": "activity-task-list"}
)["taskToken"]
with pytest.raises(ClientError) as ex:
client.respond_activity_task_failed(taskToken="not-a-correct-token")
ex.value.response["Error"]["Code"].should.equal("ValidationException")
ex.value.response["Error"]["Message"].should.equal("Invalid token")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
# RecordActivityTaskHeartbeat endpoint
# Has boto3 equivalent
@mock_swf_deprecated
def test_record_activity_task_heartbeat():
conn = setup_workflow()
decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"]
conn.respond_decision_task_completed(
decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
activity_token = conn.poll_for_activity_task("test-domain", "activity-task-list")[
"taskToken"
]
resp = conn.record_activity_task_heartbeat(activity_token)
resp.should.equal({"cancelRequested": False})
@mock_swf
def test_record_activity_task_heartbeat_boto3():
client = setup_workflow_boto3()
decision_token = client.poll_for_decision_task(
domain="test-domain", taskList={"name": "queue"}
)["taskToken"]
client.respond_decision_task_completed(
taskToken=decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
activity_token = client.poll_for_activity_task(
domain="test-domain", taskList={"name": "activity-task-list"}
)["taskToken"]
resp = client.record_activity_task_heartbeat(taskToken=activity_token)
resp.should.have.key("cancelRequested").equal(False)
# Has boto3 equivalent
@mock_swf_deprecated
def test_record_activity_task_heartbeat_with_wrong_token():
conn = setup_workflow()
decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"]
conn.respond_decision_task_completed(
decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
conn.poll_for_activity_task("test-domain", "activity-task-list")["taskToken"]
conn.record_activity_task_heartbeat.when.called_with(
"bad-token", details="some progress details"
).should.throw(SWFResponseError)
@mock_swf
def test_record_activity_task_heartbeat_with_wrong_token_boto3():
client = setup_workflow_boto3()
decision_token = client.poll_for_decision_task(
domain="test-domain", taskList={"name": "queue"}
)["taskToken"]
client.respond_decision_task_completed(
taskToken=decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
client.poll_for_activity_task(
domain="test-domain", taskList={"name": "activity-task-list"}
)["taskToken"]
with pytest.raises(ClientError) as ex:
client.record_activity_task_heartbeat(taskToken="bad-token")
ex.value.response["Error"]["Code"].should.equal("ValidationException")
ex.value.response["Error"]["Message"].should.equal("Invalid token")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
# Has boto3 equivalent
@mock_swf_deprecated
def test_record_activity_task_heartbeat_sets_details_in_case_of_timeout():
conn = setup_workflow()
decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"]
conn.respond_decision_task_completed(
decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
with freeze_time("2015-01-01 12:00:00"):
activity_token = conn.poll_for_activity_task(
"test-domain", "activity-task-list"
)["taskToken"]
conn.record_activity_task_heartbeat(
activity_token, details="some progress details"
)
with freeze_time("2015-01-01 12:05:30"):
# => Activity Task Heartbeat timeout reached!!
resp = conn.get_workflow_execution_history(
"test-domain", conn.run_id, "uid-abcd1234"
)
resp["events"][-2]["eventType"].should.equal("ActivityTaskTimedOut")
attrs = resp["events"][-2]["activityTaskTimedOutEventAttributes"]
attrs["details"].should.equal("some progress details")
@mock_swf
def test_record_activity_task_heartbeat_sets_details_in_case_of_timeout_boto3():
if settings.TEST_SERVER_MODE:
raise SkipTest("Unable to manipulate time in ServerMode")
client = setup_workflow_boto3()
decision_token = client.poll_for_decision_task(
domain="test-domain", taskList={"name": "queue"}
)["taskToken"]
client.respond_decision_task_completed(
taskToken=decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
with freeze_time("2015-01-01 12:00:00"):
activity_token = client.poll_for_activity_task(
domain="test-domain", taskList={"name": "activity-task-list"}
)["taskToken"]
client.record_activity_task_heartbeat(
taskToken=activity_token, details="some progress details"
)
with freeze_time("2015-01-01 12:05:30"):
# => Activity Task Heartbeat timeout reached!!
resp = client.get_workflow_execution_history(
domain="test-domain",
execution={"runId": client.run_id, "workflowId": "uid-abcd1234"},
)
resp["events"][-2]["eventType"].should.equal("ActivityTaskTimedOut")
attrs = resp["events"][-2]["activityTaskTimedOutEventAttributes"]
attrs["details"].should.equal("some progress details")
|
435083
|
import sys
sys.path.append("./")
import examples as ex
import pytest
import pyinspect as pi
def test_examples():
pi.search(ex) # runs all of them
|
435098
|
def decimal_to_binary(n):
if n > 1: decimal_to_binary(n//2)
print(n % 2, end = '')
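# Example (illustrative): decimal_to_binary(10) prints "1010".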
try: decimal_to_binary(int(input("Enter an Integer to Convert into Binary: ")))
except ValueError: print("Input is not an integer.")
print()
|
435119
|
from __future__ import print_function
from coverage import coverage
cov = coverage(source=('doubles',))
cov.start()
pytest_plugins = ['doubles.pytest_plugin']
def pytest_sessionfinish(session, exitstatus):
cov.stop()
cov.save()
def pytest_terminal_summary(terminalreporter):
print("\nCoverage report:\n")
cov.report(show_missing=True, ignore_errors=True, file=terminalreporter._tw)
cov.html_report()
|
435133
|
from database import db
from flask import Flask
from yelp_beans.logic.config import get_config
def create_app():
app = Flask(__name__, template_folder='yelp_beans/templates')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = get_config().get('DATABASE_URL_PROD', "sqlite://")
db.init_app(app)
from yelp_beans.routes.api.v1.meeting_requests import meeting_requests
from yelp_beans.routes.api.v1.metrics import metrics_blueprint
from yelp_beans.routes.api.v1.preferences import preferences_blueprint
from yelp_beans.routes.api.v1.user import user_blueprint
from yelp_beans.routes.tasks import tasks
# Cron Endpoint
app.register_blueprint(tasks, url_prefix='/tasks')
# Api Endpoints
app.register_blueprint(meeting_requests, url_prefix='/v1/meeting_request')
app.register_blueprint(metrics_blueprint, url_prefix='/v1/metrics')
app.register_blueprint(preferences_blueprint,
url_prefix='/v1/user/preferences')
app.register_blueprint(user_blueprint, url_prefix='/v1/user')
with app.app_context():
db.create_all()
return app
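# Example (hypothetical): create the app and run the development server.
#
#   app = create_app()
#   app.run(debug=True)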
|
435136
|
from modules import DNSListener
from modules import AgentControllerCLI
import threading
banner = """
____ _ _______ ____ _ __
/ __ \/ | / / ___/ / __ \___ __________(_)____/ /_
/ / / / |/ /\__ \______/ /_/ / _ \/ ___/ ___/ / ___/ __/
/ /_/ / /| /___/ /_____/ ____/ __/ / (__ ) (__ ) /_
/_____/_/ |_//____/ /_/ \___/_/ /____/_/____/\__/
"""
host = "0.0.0.0"
port = 53
print banner
DNSObject = DNSListener.DNSListener(host,port)
commandInputs = DNSListener.Input().cmdloop()
|
435154
|
from abc import abstractmethod
import math
import generator
import magma
import mantle
from from_magma import FromMagma
from configurable import Configurable, ConfigurationType
from const import Const
class FeatureGenerator(Configurable):
def __init__(self):
super().__init__()
class CoreGenerator(FeatureGenerator):
@abstractmethod
def inputs(self):
pass
@abstractmethod
def outputs(self):
pass
class SBGenerator(FeatureGenerator):
def __init__(self, width, num_tracks, core_inputs):
super().__init__()
self.width = width
self.num_tracks = num_tracks
assert core_inputs == 1
self.core_inputs = core_inputs
MuxCls = mantle.DefineMux(self.num_tracks, self.width)
self.muxs = [FromMagma(MuxCls) for _ in range(self.num_tracks)]
T = magma.Array(self.num_tracks, magma.Bits(self.width))
bits_per_sel = math.ceil(math.log(self.num_tracks, 2))
self.add_ports(
I=magma.In(T),
core_in=magma.In(magma.Array(self.core_inputs, T.T)),
O=magma.Out(T),
)
for i in range(self.num_tracks):
self.add_config(f"sel_{i}", bits_per_sel)
self.selects = [getattr(self, f"sel_{i}") \
for i in range(self.num_tracks)]
for i in range(self.num_tracks):
mux = self.muxs[i]
for j in range(self.num_tracks):
mux_in = self.I[j] if i != j else self.core_in[0]
self.wire(mux_in, getattr(mux, f"I{j}"))
self.wire(self.selects[i], mux.S)
self.wire(mux.O, self.O[i])
def name(self):
return f"SB_{self.width}_{self.num_tracks}_{self.core_inputs}"
class CBGenerator(FeatureGenerator):
def __init__(self, width, num_tracks):
super().__init__()
self.width = width
self.num_tracks = num_tracks
is_power_of_two = lambda x: x != 0 and ((x & (x - 1)) == 0)
assert is_power_of_two(self.num_tracks)
self.mux = FromMagma(mantle.DefineMux(self.num_tracks, self.width))
T = magma.Bits(self.width)
sel_bits = math.ceil(math.log(self.num_tracks, 2))
self.add_ports(
I=magma.In(magma.Array(self.num_tracks, T)),
O=magma.Out(T),
)
self.add_configs(
sel=sel_bits,
)
for i in range(self.num_tracks):
self.wire(self.I[i], getattr(self.mux, f"I{i}"))
self.wire(self.sel, self.mux.S)
self.wire(self.mux.O, self.O)
def name(self):
return f"CB_{self.width}_{self.num_tracks}"
class PECoreGenerator(CoreGenerator):
def __init__(self, width):
super().__init__()
self.width = width
T = magma.Bits(self.width)
self.add_ports(
I0=magma.In(T),
I1=magma.In(T),
O=magma.Out(T),
)
self.add_configs(
op=5,
)
import const
zero = const.Const(magma.bits(0, self.width))
self.wire(zero, self.O)
del const
def inputs(self):
return [self.I0, self.I1]
def outputs(self):
return [self.O]
def name(self):
return f"PECore_{self.width}"
class TileGeneratorBase(Configurable):
class _FeatureContainer(dict):
def __getattr__(self, name):
return self[name]
def __init__(self):
super().__init__()
self.features = TileGeneratorBase._FeatureContainer()
def add_feature(self, name, feature):
assert name not in self.features
self.features[name] = feature
def add_features(self, **kwargs):
for name, feature in kwargs.items():
self.add_feature(name, feature)
class TileGenerator(TileGeneratorBase):
def __init__(self, width, num_tracks):
super().__init__()
self.width = width
self.num_tracks = num_tracks
core = PECoreGenerator(self.width)
sb = SBGenerator(self.width, self.num_tracks, len(core.outputs()))
self.add_features(
core=core,
sb=sb,
)
for i in range(len(self.features.core.inputs())):
cb = CBGenerator(self.width, self.num_tracks)
self.add_feature(f"cb{i}", cb)
T = magma.Array(self.num_tracks, magma.Bits(self.width))
self.add_ports(
I=magma.In(T),
O=magma.Out(T),
)
self.wire(self.I, self.features.sb.I)
for i in range(len(self.features.core.inputs())):
cb = getattr(self.features, f"cb{i}")
self.wire(self.I, cb.I)
for i, core_in in enumerate(self.features.core.inputs()):
cb = getattr(self.features, f"cb{i}")
self.wire(cb.O, core_in)
for i, core_out in enumerate(self.features.core.outputs()):
self.wire(core_out, self.features.sb.core_in[i])
self.wire(self.features.sb.O, self.O)
def name(self):
return f"Tile_{self.width}_{self.num_tracks}"
class TopGenerator(Configurable):
def __init__(self):
super().__init__()
width = 16
num_tracks = 4
num_tiles = 10
T = magma.Array(num_tracks, magma.Bits(width))
self.tiles = [TileGenerator(width, num_tracks) \
for _ in range(num_tiles)]
self.add_ports(
I=magma.In(T),
O=magma.Out(T),
)
# for tile in self.tiles:
# self.wire(self.config_addr, tile.config_addr)
# self.wire(self.config_data, tile.config_data)
self.wire(self.I, self.tiles[0].I)
self.wire(self.tiles[-1].O, self.O)
for i in range(1, len(self.tiles)):
t0 = self.tiles[i - 1]
t1 = self.tiles[i]
self.wire(t0.O, t1.I)
def name(self):
return "Top"
if __name__ == "__main__":
top_gen = TopGenerator()
from bit_vector import BitVector as BV
addr_map = {}
for tile_idx, tile in enumerate(top_gen.tiles):
addr_map[tile] = BV(tile_idx, 16)
for feature_idx, (feature_name, feature) in enumerate(tile.features.items()):
if feature_name in addr_map:
continue
addr_map[feature_name] = BV(feature_idx, 8)
for reg_idx, (reg_name, reg) in enumerate(feature.registers.items()):
qualified_name = ".".join((feature_name, reg_name))
if qualified_name in addr_map:
continue
addr_map[qualified_name] = BV(reg_idx, 8)
print (addr_map)
exit()
def top_to_tile(top, tile, tile_idx):
tile.add_ports(
config=magma.In(ConfigurationType(32, 32)),
tile_id=magma.In(magma.Bits(16)),
)
top.wire(top.config, tile.config)
top.wire(Const(magma.bits(tile_idx, 16)), tile.tile_id)
tile_eq = FromMagma(mantle.DefineEQ(16))
tile.wire(tile.tile_id, tile_eq.I0)
tile.wire(tile.config.config_addr[0:16], tile_eq.I1)
return tile_eq
def tile_to_feature(tile, tile_eq, feature, feature_idx):
feature.add_ports(
config=magma.In(ConfigurationType(8, 32)),
config_en=magma.In(magma.Bit),
)
tile.wire(tile.config.config_addr[24:], feature.config.config_addr)
tile.wire(tile.config.config_data, feature.config.config_data)
feature_eq = FromMagma(mantle.DefineEQ(8))
tile.wire(tile.config.config_addr[16:24], feature_eq.I0)
tile.wire(Const(magma.bits(feature_idx, 8)), feature_eq.I1)
feature_en = FromMagma(mantle.DefineAnd())
tile.wire(feature_eq.O, feature_en.I0)
tile.wire(tile_eq.O, feature_en.I1)
tile.wire(feature_en.O, feature.config_en)
def feature_to_reg(feature, reg, idx):
reg.finalize(idx, idx, 8, 32)
feature.wire(feature.config.config_addr, reg._register.addr_in)
feature.wire(feature.config.config_data, reg._register.data_in)
return idx + 1
top_gen.add_ports(config=magma.In(ConfigurationType(32, 32)))
idx = 0
for tile_idx, tile in enumerate(top_gen.tiles):
tile_eq = top_to_tile(top_gen, tile, tile_idx)
features = (tile.sb, tile.core, *(cb for cb in tile.cbs))
for feature_idx, feature in enumerate(features):
tile_to_feature(tile, tile_eq, feature, feature_idx)
for name, reg in feature.registers.items():
idx = feature_to_reg(feature, reg, idx)
# def _fn(gen):
# if isinstance(gen, Configurable):
# for name, reg in gen.registers.items():
# print (name, reg._global_addr)
# for child in gen.children():
# _fn(child)
# _fn(top_gen)
# exit()
top_circ = top_gen.circuit()
magma.compile("top", top_circ, output="coreir")
print(open("top.json").read())
|
435156
|
from networkx import DiGraph, disjoint_union # type: ignore
from veniq.ast_framework import AST, ASTNodeType
NODE_TYPES = [
ASTNodeType.ASSIGNMENT,
ASTNodeType.RETURN_STATEMENT
]
def build_cfg(tree: AST) -> DiGraph:
'''Create Control Flow Graph'''
g = DiGraph()
g.add_node(0)
for node in tree:
if node.node_type not in NODE_TYPES:
continue
_g = _mk_cfg_graph(node.node_type)
g = _compose_two_graphs(g, _g)
return g
def _mk_cfg_graph(node: ASTNodeType) -> DiGraph:
    '''Takes in a Javalang statement type and returns the corresponding CFG'''
g = DiGraph()
g.add_node(0)
return g
def _compose_two_graphs(g1: DiGraph, g2: DiGraph) -> DiGraph:
    '''Compose two graphs by creating an edge between the last node of the first graph and the first node of the second.
    We assume that the nodes in each graph G are numbered from 0 to len(G)-1
'''
g = disjoint_union(g1, g2)
g.add_edge(len(g1) - 1, len(g1))
return g
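# Example (illustrative):
#
#   g1, g2 = DiGraph(), DiGraph()
#   g1.add_node(0)
#   g2.add_node(0)
#   g = _compose_two_graphs(g1, g2)
#   # g has nodes {0, 1} and the single edge (0, 1): disjoint_union relabels
#   # g2's node 0 to 1, and the edge links the end of g1 to the start of g2.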
|
435163
|
import _kratos
import sqlite3
import tempfile
import os
import json
from kratos import Generator, Event, always_comb, always_ff, posedge, Transaction, verilog
def test_event_extraction():
mod = Generator("mod")
event = Event("test1/event1")
event_ff = Event("test1/event2")
t = Transaction("test")
a = mod.var("a", 1)
b = mod.var("b", 1)
c = mod.var("c", 2)
d = mod.var("d", 2)
clk = mod.clock("clk")
@always_comb
def if_stmt():
if a:
t @ event({"a": a})
else:
if b:
t @ event(b=b)
else:
t @ event(c=c)
@always_comb
def switch_if():
# this will be turn into a switch statement
if d == 0:
t @ event(a1=a)
elif d == 1:
t @ event(a2=a)
elif d == 2:
t @ event(a3=a)
else:
t @ event(a4=a)
@always_ff((posedge, clk))
def if_seq():
if a:
t @ event_ff(a5=a)
mod.add_always(if_stmt)
mod.add_always(switch_if)
mod.add_always(if_seq)
# convert to if to switch
_kratos.passes.transform_if_to_case(mod.internal_generator)
info = _kratos.extract_event_info(mod.internal_generator)
assert len(info) == 8
# check seq
ffs = [i for i in info if not i.combinational]
assert len(ffs) == 1
assert str(ffs[0].fields["a5"]) == "a"
# check switch
ffs = [i for i in info if "a4" in i.fields]
assert len(ffs) == 1
assert str(ffs[0].fields["a4"]) == "a"
ffs = [i for i in info if "a3" in i.fields]
assert len(ffs) == 1
# check out if statements
ffs = [i for i in info if "c" in i.fields]
assert len(ffs) == 1
def test_event_actions():
mod = Generator("mod")
event = Event("test1/event1")
t = Transaction("transaction1")
a = mod.var("a", 1)
b = mod.var("b", 1)
@always_comb
def code():
t @ event(a=a).matches(a=a).starts()
t @ event(b=b).matches(a=b).terminates()
mod.add_always(code)
info = _kratos.extract_event_info(mod.internal_generator)
assert len(info) == 2
# check actions
event1 = info[0]
assert "a" in event1.fields
stmt = event1.stmt
assert str(stmt.match_values["a"]) == "a"
assert event1.transaction == "transaction1"
assert event1.type == _kratos.EventActionType.Start
event2 = info[1]
assert "b" in event2.fields
stmt = event2.stmt
assert str(stmt.match_values["a"]) == "b"
assert event2.type == _kratos.EventActionType.End
def test_event_debug_fn_ln():
mod = Generator("mod", debug=True)
event = Event("event")
t = Transaction("transaction")
sig = mod.var("sig", 1)
@always_comb
def code():
t @ event(sig=sig)
mod.add_always(code)
info = _kratos.extract_event_info(mod.internal_generator)
stmt = info[0].stmt
fn_lns = stmt.fn_name_ln
assert len(fn_lns) == 1
with open(__file__) as f:
lines = f.readlines()
idx = lines.index(" t @ event(sig=sig)\n")
assert (idx + 1) == fn_lns[0][1]
def test_event_serialization():
mod = Generator("mod", debug=True)
event = Event("event")
t = Transaction("transaction")
a = mod.var("a", 8)
b = mod.var("b", 1)
in_ = mod.input("in", 4)
out = mod.output("out", 4)
clk = mod.clock("clk")
# notice that in kratos we only limit
@always_ff((posedge, clk))
def code():
if a == 0:
out = in_ + 1
t @ event(value1=a, value2=b).matches(value2=b).starts()
elif a == 1:
out = in_ + 2
t @ event(value3=a, value4=b).matches(value2=b)
else:
out = in_
t @ event(value1=a, value4=b).matches(value2=b).terminates()
mod.add_always(code)
with tempfile.TemporaryDirectory() as temp:
db_filename = os.path.join(temp, "debug.db")
verilog(mod, insert_debug_info=True, debug_db_filename=db_filename, contains_event=True)
conn = sqlite3.connect(db_filename)
c = conn.cursor()
c.execute("SELECT * from breakpoint")
result = c.fetchall()
# we have 8 lines
assert len(result) == 8
event_last = result[-1]
assert event_last[-2] == "(!(a == 8'h1)) && (!(a == 8'h0))"
# test out the event table
c.execute("SELECT * from event")
result = c.fetchall()
# 3 events
assert len(result) == 3
assert result[0][0] == "event"
# fields
fields = json.loads(result[0][3])
assert fields["value1"] == "mod.a"
matches = json.loads(result[0][4])
assert matches["value2"] == "mod.b"
conn.close()
if __name__ == "__main__":
test_event_serialization()
|
435180
|
from ckan_cloud_operator.providers import manager as providers_manager
from .constants import PROVIDER_SUBMODULE
from .adminer.constants import PROVIDER_ID as adminer_provider_id
def initialize():
get_provider(default=adminer_provider_id).initialize()
def start():
"""Start a web-UI for db management"""
get_provider().start()
def get_provider(default=None):
return providers_manager.get_provider(PROVIDER_SUBMODULE, default=default)
|
435181
|
from opytimizer.optimizers.population import OSA
# One should declare a hyperparameters object based
# on the desired algorithm that will be used
params = {
'beta': 1.9
}
# Creates an OSA optimizer
o = OSA(params=params)
|
435204
|
import os
from spira.technologies.mit.process import RDD
import spira.all as spira
import spira.yevon.io.input_gdsii as io
from copy import copy, deepcopy
def wrap_references(cell, c2dmap, devices):
""" """
for e in cell.elements.sref:
if e.reference in c2dmap.keys():
e.reference = c2dmap[e.reference]
return cell
def device_detector(cell):
"""
We are working with the presupposition that JJ cells
are flattend. Future versions can automate this process.
"""
c2dmap, devices = {}, {}
for c in cell.dependencies():
c2dmap.update({c: c})
for c in cell.dependencies():
cc = deepcopy(c)
# FIXME: Seems like there is a lack of
# transformations in the parsed cells.
# D = spira.Cell(elements=cc.elements).flat_copy()
# elems = RDD.FILTERS.PCELL.DEVICE(D).elements
# c.elements = elems
# c.elements = D.elements
cell_types = {0: 'JUNCTION', 1: 'VIA'}
_type = None
for p in cc.elements:
if p.alias == 'J5':
_type = cell_types[0]
for key in RDD.VIAS.keys:
# via_layer = RDD.VIAS[key].LAYER_STACK['VIA_LAYER']
for p in cc.elements:
if p.alias == key:
pcell = RDD.VIAS[key].PCELLS.DEFAULT
D = spira.Cell(elements=deepcopy(cc).elements.flat_copy())
elems = RDD.FILTERS.PCELL.DEVICE(D).elements
D = pcell(elements=elems)
c2dmap[c] = D
for key in RDD.DEVICES.keys:
if _type == key:
pcell = RDD.DEVICES[key].PCELLS.DEFAULT
D = spira.Cell(elements=deepcopy(cc).elements.flat_copy())
elems = RDD.FILTERS.PCELL.DEVICE(D).elements
D = pcell(elements=elems)
c2dmap[c] = D
for c in cell.dependencies():
wrap_references(c, c2dmap, devices)
return cell
if __name__ == '__main__':
# file_name = '/home/therealtyler/code/phd/spira/tests/8-parse/jj_mitll.gds'
# file_name = '/home/therealtyler/code/phd/spira/tests/8-parse/mit/ruben/jtl_mitll_diff.gds'
# file_name = '/home/therealtyler/code/phd/spira/tests/8-parse/mit/lieze/jtl.gds'
file_name = '/home/therealtyler/code/phd/spira/tests/8-parse/mit/lieze/ptlrx.gds'
# file_name = '/home/therealtyler/code/phd/spira/tests/8-parse/mit/lieze/dfft.gds'
# file_name = '/home/therealtyler/code/phd/spira/tests/8-parse/mit/lieze/circuit.gds'
# file_name = '/home/therealtyler/code/phd/spira/tests/8-parse/mit/lieze/jtlt.gds'
# input_gdsii = spira.InputGdsii(file_name=file_name)
# input_gdsii.layer_map = RDD.GDSII.IMPORT_LAYER_MAP
input_gdsii = spira.InputGdsii(file_name=file_name, layer_map=RDD.GDSII.IMPORT_LAYER_MAP)
D = input_gdsii.parse()
# NOTE: Parse the input layout to SPiRA elements.
D = device_detector(cell=D)
D = RDD.FILTERS.PCELL.CIRCUIT(D)
C = spira.Circuit(elements=D.elements)
D = RDD.FILTERS.PCELL.MASK(C)
# from spira.yevon.vmodel.virtual import virtual_connect
# v_model = virtual_connect(device=D)
# v_model.view_virtual_connect(show_layers=True, write=True)
D.gdsii_view()
net = D.extract_netlist
D.netlist_view(net=net)
# FIXME: Add ports to layouts for i/o.
|
435215
|
from distutils.core import setup
setup(
name='influx-nagios-plugin',
version="1.1.0",
packages=['src'],
install_requires=['influxdb', 'NagAconda'],
license='MIT',
url="https://github.com/shaharke/nagios-influx-plugin.git",
description='Nagios plugin for querying stats from InfluxDB',
author='<NAME>',
author_email='<EMAIL>',
scripts=['src/check_influx']
)
|
435264
|
import math
import pytest
from skspatial._functions import _allclose
from skspatial.objects import Line
from skspatial.objects import Plane
from skspatial.objects import Points
@pytest.mark.parametrize(
("array_point", "array_a", "array_b", "plane_expected"),
[
([0, 0], [1, 0], [0, 1], Plane([0, 0, 0], [0, 0, 1])),
([1, 2], [1, 0], [0, 1], Plane([1, 2, 0], [0, 0, 1])),
([0, 0], [0, 1], [1, 0], Plane([0, 0, 0], [0, 0, -1])),
([0, 0], [2, 0], [0, 1], Plane([0, 0, 0], [0, 0, 2])),
([0, 0], [2, 0], [0, 2], Plane([0, 0, 0], [0, 0, 4])),
([1, 2, 3], [2, 0], [0, 2], Plane([1, 2, 3], [0, 0, 4])),
([-3, 2, 6], [1, 4, 6], [-1, 5, 8], Plane([-3, 2, 6], [2, -14, 9])),
],
)
def test_from_vectors(array_point, array_a, array_b, plane_expected):
plane = Plane.from_vectors(array_point, array_a, array_b)
assert plane.is_close(plane_expected)
# Also ensure that the vector is exactly as expected.
assert plane.vector.is_close(plane_expected.vector)
@pytest.mark.parametrize(
("array_point", "array_a", "array_b"),
[
([0, 0], [1, 0], [1, 0]),
([2, 3], [1, 0], [1, 0]),
([0, 0], [0, 1], [0, 1]),
([0, 0], [1, 1], [-1, -1]),
([0, 0], [5, 3], [-5, -3]),
([0, 0, 0], [1, 0, 0], [1, 0, 0]),
],
)
def test_from_vectors_failure(array_point, array_a, array_b):
message_expected = "The vectors must not be parallel."
with pytest.raises(ValueError, match=message_expected):
Plane.from_vectors(array_point, array_a, array_b)
@pytest.mark.parametrize(
("point_a", "point_b", "point_c", "plane_expected"),
[
([0, 0], [1, 0], [0, 1], Plane([0, 0, 0], [0, 0, 1])),
# The spacing between the points is irrelevant.
([0, 0], [9, 0], [0, 9], Plane([0, 0, 0], [0, 0, 1])),
# The first point is used as the plane point.
([0, 0.1], [1, 0], [0, 1], Plane([0, 0.1, 0], [0, 0, 1])),
# The order of points is relevant.
([0, 0], [0, 1], [1, 0], Plane([0, 0, 0], [0, 0, -1])),
],
)
def test_from_points(point_a, point_b, point_c, plane_expected):
plane = Plane.from_points(point_a, point_b, point_c)
assert plane.point.is_close(plane_expected.point)
assert plane.is_close(plane_expected)
@pytest.mark.parametrize(
("point_a", "point_b", "point_c"),
[
([0, 0], [0, 0], [0, 0]),
([0, 0], [0, 1], [0, 2]),
([-2, 1], [0, 2], [2, 3]),
([0, 0, 0], [1, 1, 1], [-2, -2, -2]),
([0, 1, 2], [1, 2, 3], [4, 5, 6]),
],
)
def test_from_points_failure(point_a, point_b, point_c):
message_expected = "The points must not be collinear."
with pytest.raises(ValueError, match=message_expected):
Plane.from_points(point_a, point_b, point_c)
@pytest.mark.parametrize(
("plane", "coeffs_expected"),
[
(Plane([-1, 2], [22, -3]), [22, -3, 0, 28]),
(Plane([0, 0, 0], [0, 0, 1]), [0, 0, 1, 0]),
(Plane([0, 0, 0], [0, 0, 25]), [0, 0, 25, 0]),
(Plane([1, 2, 0], [5, 4, 6]), [5, 4, 6, -13]),
(Plane([-4, 5, 8], [22, -3, 6]), [22, -3, 6, 55]),
(Plane([0, 0, 0, 0], [1, 2, 3, 4]), None),
],
)
def test_cartesian(plane, coeffs_expected):
"""Test the coefficients of the Cartesian plane equation."""
if coeffs_expected is None:
with pytest.raises(ValueError, match="The plane dimension must be <= 3."):
plane.cartesian()
else:
assert _allclose(plane.cartesian(), coeffs_expected).all()
@pytest.mark.parametrize(
("point", "point_plane", "normal_plane", "point_expected", "dist_expected"),
[
([0, 0, 0], [0, 0, 0], [0, 0, 1], [0, 0, 0], 0),
([0, 0, 0], [0, 0, 0], [0, 0, -1], [0, 0, 0], 0),
([0, 0, 1], [0, 0, 0], [0, 0, 1], [0, 0, 0], 1),
([0, 0, 1], [0, 0, 0], [0, 0, -1], [0, 0, 0], -1),
([0, 0, 1], [0, 0, 0], [0, 0, 50], [0, 0, 0], 1),
([0, 0, 1], [0, 0, 0], [0, 0, -50], [0, 0, 0], -1),
([0, 0, 5], [0, 0, 0], [0, 0, 50], [0, 0, 0], 5),
([0, 0, 5], [0, 0, 0], [0, 0, -50], [0, 0, 0], -5),
([5, -4, 1], [0, 0, 0], [0, 0, 1], [5, -4, 0], 1),
],
)
def test_project_point(point, point_plane, normal_plane, point_expected, dist_expected):
plane = Plane(point_plane, normal_plane)
point_projected = plane.project_point(point)
distance_signed = plane.distance_point_signed(point)
assert point_projected.is_close(point_expected)
assert math.isclose(distance_signed, dist_expected)
@pytest.mark.parametrize(
("plane", "vector", "vector_expected"),
[
(Plane([0, 0, 0], [0, 0, 1]), [1, 1, 0], [1, 1, 0]),
(Plane([0, 0, 0], [0, 0, 1]), [1, 1, 1], [1, 1, 0]),
(Plane([0, 0, 0], [0, 0, 1]), [7, -5, 20], [7, -5, 0]),
(Plane([0, 0, 0], [0, 0, -10]), [7, -5, 20], [7, -5, 0]),
],
)
def test_project_vector(plane, vector, vector_expected):
vector_projected = plane.project_vector(vector)
assert vector_projected.is_close(vector_expected)
@pytest.mark.parametrize(
("point", "plane", "dist_signed_expected"),
[
([0, 0, 0], Plane([0, 0, 0], [0, 0, 1]), 0),
([50, -67, 0], Plane([0, 0, 0], [0, 0, 1]), 0),
([50, -67, 0], Plane([0, 0, 1], [0, 0, 1]), -1),
([5, 3, 8], Plane([0, 0, 0], [0, 0, 1]), 8),
([5, 3, 7], Plane([0, 0, 0], [0, 0, -50]), -7),
([5, 3, -8], Plane([0, 0, 0], [0, 0, 1]), -8),
],
)
def test_distance_point(point, plane, dist_signed_expected):
assert math.isclose(plane.distance_point_signed(point), dist_signed_expected)
assert math.isclose(plane.distance_point(point), abs(dist_signed_expected))
@pytest.mark.parametrize(
("plane", "point", "value_expected"),
[
(Plane([0, 0], [1, 1]), [2, 2], 1),
(Plane([0, 0], [1, 1]), [0, 0], 0),
(Plane([0, 1], [1, 1]), [0, 0], -1),
(Plane([0, 0, 0], [1, 0, 0]), [0, 0, 0], 0),
(Plane([0, 0, 0], [1, 0, 0]), [1, 0, 0], 1),
(Plane([0, 0, 0], [1, 0, 0]), [-1, 0, 0], -1),
(Plane([0, 0, 0], [1, 0, 0]), [25, 53, -105], 1),
(Plane([0, 0, 0], [1, 0, 0]), [-2, 53, -105], -1),
(Plane([0, 0, 0], [1, 0, 0]), [0, 38, 19], 0),
(Plane([0, 0, 0], [1, 0, 0]), [0, 101, -45], 0),
(Plane([0, 0, 0], [-1, 0, 0]), [1, 0, 0], -1),
(Plane([5, 0, 0], [1, 0, 0]), [1, 0, 0], -1),
],
)
def test_side_point(plane, point, value_expected):
assert plane.side_point(point) == value_expected
@pytest.mark.parametrize(
("line", "plane", "array_expected"),
[
(Line([0, 0, 0], [1, 0, 0]), Plane([0, 0, 0], [1, 0, 0]), [0, 0, 0]),
(Line([0, 0, 0], [0, 0, 1]), Plane([0, 0, 0], [0, 0, 1]), [0, 0, 0]),
(Line([5, -3, 0], [0, 0, 1]), Plane([0, 0, 0], [0, 0, 1]), [5, -3, 0]),
],
)
def test_intersect_line(line, plane, array_expected):
point_intersection = plane.intersect_line(line)
assert point_intersection.is_close(array_expected)
@pytest.mark.parametrize(
("line", "plane"),
[
(Line([0, 0, 0], [1, 0, 0]), Plane([0, 0, 0], [0, 0, 1])),
(Line([0, 0, 0], [0, 0, 1]), Plane([0, 0, 0], [1, 0, 0])),
(Line([0, 0, 0], [0, 0, 1]), Plane([0, 0, 0], [0, 1, 0])),
],
)
def test_intersect_line_failure(line, plane):
message_expected = "The line and plane must not be parallel."
with pytest.raises(ValueError, match=message_expected):
plane.intersect_line(line)
@pytest.mark.parametrize(
("plane_a", "plane_b", "line_expected"),
[
(
Plane([0, 0, 0], [0, 0, 1]),
Plane([0, 0, 0], [1, 0, 0]),
Line([0, 0, 0], [0, 1, 0]),
),
(
Plane([0, 0, 0], [0, 0, 1]),
Plane([0, 0, 1], [1, 0, 1]),
Line([1, 0, 0], [0, 1, 0]),
),
(
Plane([0, 0, 0], [-1, 1, 0]),
Plane([8, 0, 0], [1, 1, 0]),
Line([4, 4, 0], [0, 0, -1]),
),
],
)
def test_intersect_plane(plane_a, plane_b, line_expected):
line_intersection = plane_a.intersect_plane(plane_b)
assert line_intersection.is_close(line_expected)
@pytest.mark.parametrize(
("plane_a", "plane_b"),
[
(Plane([0, 0, 0], [1, 0, 0]), Plane([0, 0, 0], [1, 0, 0])),
(Plane([1, 0, 0], [1, 0, 0]), Plane([0, 0, 0], [1, 0, 0])),
(Plane([0, 0, 5], [0, 0, 1]), Plane([4, 2, 4], [0, 0, 3])),
(Plane([0, 0, -5], [0, 0, 1]), Plane([4, 2, 4], [0, 0, 3])),
],
)
def test_intersect_plane_failure(plane_a, plane_b):
message_expected = "The planes must not be parallel."
with pytest.raises(Exception, match=message_expected):
plane_a.intersect_plane(plane_b)
@pytest.mark.parametrize(
("plane", "points", "error_expected"),
[
(Plane([0, 0, 0], [0, 0, 1]), [[25, 3, 0], [-6, 5, 0]], 0),
(Plane([25, 9, 0], [0, 0, 1]), [[25, 3, 0], [-6, 5, 0]], 0),
(Plane([25, 9, -2], [0, 0, 1]), [[25, 3, 0], [-6, 5, 0]], 8),
(Plane([0, 0, 0], [0, 0, 1]), [[25, 3, 2], [-6, 5, 0]], 4),
(Plane([0, 0, 0], [0, 0, 5]), [[25, 3, 2], [-6, 5, 0]], 4),
(Plane([0, 0, 0], [0, 0, -5]), [[25, 3, 2], [-6, 5, 0]], 4),
],
)
def test_sum_squares_plane(plane, points, error_expected):
error = plane.sum_squares(points)
assert math.isclose(error, error_expected)
@pytest.mark.parametrize(
("points", "plane_expected"),
[
# The points are coplanar.
([[0, 0], [1, 1], [0, 2]], Plane([1 / 3, 1, 0], [0, 0, 1])),
([[0, 0], [0, 1], [1, 0], [1, 1]], Plane([0.5, 0.5, 0], [0, 0, 1])),
([[0, 0, 0], [1, 0, 0], [0, 0, 1]], Plane([1 / 3, 0, 1 / 3], [0, 1, 0])),
(
[[1, 0, 0], [-1, 0, 0], [1, 1, 1], [-1, 1, 1]],
Plane([0, 0.5, 0.5], [0, 1, -1]),
),
(
[[1, 0, 1], [1, 1, 1], [-1, 0, -1], [-1, 1, -1]],
Plane([0, 0.5, 0], [1, 0, -1]),
),
(
[[1, 0, 1], [1, 1, 1], [-1, 0, -1], [-1, 1, -1], [0, 0, 0]],
Plane([0, 0.4, 0], [1, 0, -1]),
),
# The points are not coplanar.
(
[[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]],
Plane([0.25, 0.25, 0.25], [1, 1, 1]),
),
(
[
[0, 0, 0],
[1, 0, 0],
[2, 0, 0],
[0, 1, 0],
[1, 1, 0],
[2, 1, 0],
[0, 2, 0],
[1, 2, 0],
[2, 2, 0],
[0, 0, 1],
[1, 0, 1],
[2, 0, 1],
[0, 1, 1],
[1, 1, 1],
[2, 1, 1],
[0, 2, 1],
[1, 2, 1],
[2, 2, 1],
],
Plane([1, 1, 0.5], [0, 0, 1]),
),
],
)
def test_best_fit(points, plane_expected):
points = Points(points).set_dimension(3)
plane_fit = Plane.best_fit(points)
assert plane_fit.is_close(plane_expected)
assert plane_fit.point.is_close(plane_expected.point)
@pytest.mark.parametrize(
("points", "message_expected"),
[
([[0, 0], [1, 0]], "The points must be 3D."),
([[0, 0], [2, 5]], "The points must be 3D."),
(
[[0, 0, 0], [1, 1, 1], [2, 2, 2]],
"The points must not be collinear.",
),
(
[[0, 0, 0], [1, 1, 1], [-10, -10, -10]],
"The points must not be collinear.",
),
],
)
def test_best_fit_failure(points, message_expected):
with pytest.raises(ValueError, match=message_expected):
Plane.best_fit(points)
@pytest.mark.parametrize(
("plane", "points_expected"),
[
(Plane([0, 0, 0], [0, 0, 1]), [[-1, -1, 0], [1, -1, 0], [-1, 1, 0], [1, 1, 0]]),
(Plane([1, 0, 0], [0, 0, 1]), [[0, -1, 0], [2, -1, 0], [0, 1, 0], [2, 1, 0]]),
(
Plane([0, 0, 0], [0, 0, -1]),
[[-1, -1, 0], [1, -1, 0], [-1, 1, 0], [1, 1, 0]],
),
(Plane([0, 0, 0], [0, 0, 5]), [[-1, -1, 0], [1, -1, 0], [-1, 1, 0], [1, 1, 0]]),
(Plane([0, 0, 0], [0, 1, 0]), [[-1, 0, -1], [1, 0, -1], [-1, 0, 1], [1, 0, 1]]),
(Plane([0, 0, 0], [1, 0, 0]), [[0, -1, -1], [0, 1, -1], [0, -1, 1], [0, 1, 1]]),
(
Plane([0, 0, 0], [1, 1, 0]),
[[-1, 1, -1], [1, -1, -1], [-1, 1, 1], [1, -1, 1]],
),
],
)
def test_to_points(plane, points_expected):
points = plane.to_points()
assert points.is_close(points_expected)
|
435299
|
import pytest
from impl import type_of_triangle
@pytest.mark.parametrize("x", range(1, 10))
def test_equilateral(x):
ret = type_of_triangle(x, x, x)
assert ret == "equilateral"
@pytest.mark.parametrize("x", range(1, 10))
@pytest.mark.parametrize("y", range(1, 10))
def test_isoceles(x, y):
if y >= x + x or y == x:
pytest.skip("Invalid test input")
assert type_of_triangle(x, x, y) == "isoceles"
assert type_of_triangle(x, y, x) == "isoceles"
assert type_of_triangle(y, x, x) == "isoceles"
@pytest.mark.parametrize("x", range(1, 10))
@pytest.mark.parametrize("y", range(1, 10))
@pytest.mark.parametrize("z", range(1, 10))
def test_scalene(x, y, z):
if x + y <= z or x + z <= y or y + z <= x or \
x == y or y == z or x == z:
pytest.skip("Invalid test input")
ret = type_of_triangle(x, y, z)
assert ret == "scalene"
@pytest.mark.parametrize("x", range(1, 10))
@pytest.mark.parametrize("y", range(1, 10))
@pytest.mark.parametrize("z", range(1, 10))
def test_not_a_triangle(x, y, z):
if not (x + y <= z or x + z <= y or y + z <= x):
pytest.skip("Invalid test input")
ret = type_of_triangle(x, y, z)
assert ret == "Not a triangle"
# parameterize this test as practice
def test_sides_greater_than_zero():
with pytest.raises(RuntimeError):
type_of_triangle(0, 1, 1)
# parameterize this test as practice
def test_sum_of_sides():
ret = type_of_triangle(1, 1, 2)
assert ret == "Not a triangle"
|
435311
|
from ...utils import needs_py310
@needs_py310
def test_app():
from docs_src.app_testing.app_b_py310 import test_main
test_main.test_create_existing_item()
test_main.test_create_item()
test_main.test_create_item_bad_token()
test_main.test_read_inexistent_item()
test_main.test_read_item()
test_main.test_read_item_bad_token()
|
435334
|
from __future__ import print_function, division
from collections import OrderedDict
from abc import ABCMeta
from six import text_type
from six import binary_type
from torch import nn
from torch.nn import init
CONTAINER_NAMES = (
'module.',
'segnet.',
'resnet.',
'encoder.',
'decoder.',
'classifier.',
)
class TurboModule(nn.Module):
__metaclass__ = ABCMeta
def init_xavier(self):
for m in self.children():
if isinstance(m, (nn.Conv2d, nn.Linear)):
init.xavier_uniform(m.weight)
init.constant(m.bias, 0)
def strip_modules(s, *modules):
    """Strip each container prefix from key s in turn; return the stripped key
    and the list of prefixes found (empty string where a prefix was absent)."""
stripped = []
for m in modules:
_, m, s = s.rpartition(m)
stripped.append(m)
return s, stripped
def get_params(state):
return state.get('model_state', state)
def get_tensors(params):
return OrderedDict(
(k, (v.data if isinstance(v, nn.Parameter) else v).cpu())
for k, v in params.items()
)
def strip_params(params, modules=CONTAINER_NAMES):
return OrderedDict(
(strip_modules(k, *modules)[0], p)
for k, p in params.items()
)
def other_type(s):
if isinstance(s, text_type):
return s.encode('utf-8')
elif isinstance(s, binary_type):
return s.decode('utf-8')
def try_dicts(k, *ds):
for d in ds:
v = d.get(k)
if v is not None:
return v
raise KeyError(k)
def try_types(k, *ds):
try:
return try_dicts(k, *ds)
except KeyError:
return try_dicts(other_type(k), *ds)
def filter_state(own_state, state_dict):
return OrderedDict((k, try_types(k, state_dict, own_state))
for k in own_state)
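# A minimal usage sketch (not part of the original file): strip_modules peels
# known container prefixes off a checkpoint key, which strip_params then
# applies to every key in a state dict.
if __name__ == '__main__':
    key = 'module.encoder.conv1.weight'
    stripped, found = strip_modules(key, *CONTAINER_NAMES)
    assert stripped == 'conv1.weight'
    assert found == ['module.', '', '', 'encoder.', '', '']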
|
435402
|
import sys, cv2, time, os
from UI import Ui_TabWidget
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QFileDialog,QTabWidget
from PyQt5.QtCore import QTimer, QThread, pyqtSignal, Qt
from PyQt5.QtGui import QPixmap, QImage
from PyQt5.QtWidgets import QLabel,QWidget, QProgressBar
from py_util import read_show
from center_main import main
from opts2 import opts
import glob
from py_util import product_show
# from win32process import SuspendThread, ResumeThread
from opts2 import opts
from detectors.detector_factory import detector_factory
model_path = '/home/yangna/deepblue/2_MOT/CenterNet/exp/ctdet/dla/model_best.pth'
arch = 'dla_34'
task = 'ctdet'
opt = opts().init('--task {} --load_model {} --arch {}'.format(task, model_path, arch).split(' '))
class mywindow(QTabWidget,Ui_TabWidget):  # this window inherits from the window drawn with Qt Designer
def __init__(self):
super(mywindow,self).__init__()
self.setupUi(self)
self.thread = train_thred()
self.thread.my_signal.connect(self.set_step) # 3
global imgnums
path = r'/home/yangna/deepblue/2_MOT/CenterNet/data/pig/image/*.png'
self.datas = glob.glob(path)
imgnums = len(self.datas)
        self.save_nums = 0  # number of images collected
def collect_image(self):
        '''Automated image collection.
        The camera feed can only be displayed via a worker thread.
        '''
self.collect_image_thread = collect_image_thread()
self.collect_image_thread.signal.connect(self.set_label)
self.collect_image_thread.start()
def collect_save_image(self):
folder = f'./data/{self.line51.text()}/image'
if not os.path.exists(folder):
os.makedirs(folder)
self.label53.pixmap().save(f'{folder}/{self.save_nums}.jpg')
# cv2.imwrite(f'{folder}/{self.save_nums}.jpg', img)
self.save_nums += 1
self.label52.setText('已采集图片: ' + str(self.save_nums))
def set_label(self, image):
        '''Show how many images have been collected'''
# self.label52.setText(text)
self.label53.setPixmap(QPixmap.fromImage(image))
def choose_train(self):
global train_json
train_json, file_type = QFileDialog.getOpenFileName(self,
'选择训练数据集',
"",
'All Files (*)')
self.label11.setText(train_json)
def choose_val(self):
global val_json
val_json, file_type = QFileDialog.getOpenFileName(self,
'选择验证数据集',
"",
'All Files (*)')
self.label12.setText(val_json)
def count_func(self):
self.thread.start()
def set_step(self, num):
self.bar.setValue(num)
def load_model(self):
        opt.debug = min(opt.debug, 0)  # detection results are returned in cv2 format
self.detector = detector_factory[opt.task](opt)
def load_picture(self):
'''
        Image selection step of the validation workflow
'''
global imgname
        if self.pushbutton_22.text() == '选择图片':  # button label: "Select image" (set in the Qt Designer UI)
imgname, file_type = QFileDialog.getOpenFileName(self,
'选择图片',
"",
'All Files (*)')
read_show(imgname, self.label_21,
                      choose_id=self.combobox21.currentIndex() + 1)  # display the image
def test(self):
        '''Test step of the validation workflow'''
read_show(imgname, self.label_21, self.detector,
choose_id=self.combobox21.currentIndex() + 1)
def product_start(self):
        '''Start the production line'''
        if not hasattr(self, 'detector'):  # model not loaded yet
opt.debug = min(opt.debug, 0)
self.detector = detector_factory[opt.task](opt)
        if not hasattr(self, 'product_thread'):  # create the worker thread
# video_path = 'rtsp://admin:Shenlan2018@172.16.17.32:1554/h264/ch1/main/av_stream'
video_path = 0
self.product_thread = product_thread(self.detector, video_path, self.combobox41)
self.product_thread.mysignal.connect(self.product_cess)
self.product_thread.start()
def product_stop(self):
        '''Pause the production line'''
self.product_thread.stop()
self.product_thread.quit()
self.product_thread.wait()
def exit(self):
sys.exit()
def product_cess(self, image):
self.label41.setPixmap(QPixmap.fromImage(image))
class collect_image_thread(QThread):
'''
    Data-collection tab:
    read the video stream, save frames to the target folder, and show the number of saved images in real time.
    The stream is read in this thread and then pushed to the UI process.
'''
signal = pyqtSignal(QImage)
def __init__(self):
super(collect_image_thread, self).__init__()
# self.cap = cv2.VideoCapture('rtsp://admin:Shenlan2018@172.16.17.32:1554/h264/ch1/main/av_stream')
self.cap = cv2.VideoCapture(0)
def run(self):
while self.cap.isOpened():
try:
ret, frame = self.cap.read()
if ret:
img = cv2.resize(frame, (1000,600))
h, w, c = img.shape
byteperlin = c * w
cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)
image = QImage(img.data, w, h, byteperlin, QImage.Format_RGB888)
self.signal.emit(image)
            except Exception as e:
                # the signal is typed QImage, so report read errors on stdout instead
                print('something wrong with the input video source:', e)
class product_thread(QThread):
'''
    Shape this like an API: the model stays loaded in the thread,
    while the video stream can be released and restarted.
'''
mysignal = pyqtSignal(QImage)
def __init__(self, detector, video_path, combobox):
super(product_thread, self).__init__()
        self.flag = 1  # start/pause flag
self.video_path = video_path
self.cap = cv2.VideoCapture(video_path)
self.detector = detector
self.combobox = combobox
self.index = 0
def run(self):
        '''Process one frame out of every four'''
self.flag = 1
if not self.cap.isOpened():
self.cap = cv2.VideoCapture(self.video_path)
while self.cap.isOpened() and self.flag:
if self.index > 1000000000:
self.index = 0
self.index += 1
try:
# ret, frame = self.cap.read()
ret = self.cap.grab()
if ret and self.index % 4 == 0:
tret, frame = self.cap.retrieve()
image = product_show(frame, self.detector,
choose_id=self.combobox.currentIndex() + 1)
self.mysignal.emit(image)
            except Exception as e:
                print('something wrong with the product_thread:', e)
def stop(self):
self.flag = 0
        self.cap.release()  # release the camera
class train_thred(QThread):
my_signal = pyqtSignal(int) # 1
def __init__(self):
super(train_thred, self).__init__()
        self.max_iter = 50  # train for 50 epochs in total
def run(self):
        opt = opts(train_json, val_json).parse()  # this runs serially
center_train = main(opt)
for i in range(self.max_iter):
self.my_signal.emit(i) # 2
center_train.train(i)
        center_train.logger.close()  # close the log file
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
window = mywindow()
window.show()
sys.exit(app.exec_())
|
435405
|
def test_augassign():
r: i32
s: i32
r = 0
r += 4
s = 5
r *= s
r -= 2
s = 10
r /= s
a: str
a = ""
a += "test"
|
435533
|
import typing
import pytest
from nonebug.app import App
@pytest.mark.asyncio
@pytest.mark.render
async def test_render(app: App):
from nonebot_bison.plugin_config import plugin_config
from nonebot_bison.utils import parse_text
plugin_config.bison_use_pic = True
res = await parse_text(
"""a\nbbbbbbbbbbbbbbbbbbbbbb\ncd
<h1>中文</h1>
VuePress 由两部分组成:第一部分是一个极简静态网站生成器
(opens new window),它包含由 Vue 驱动的主题系统和插件 API,另一个部分是为书写技术文档而优化的默认主题,它的诞生初衷是为了支持 Vue 及其子项目的文档需求。
每一个由 VuePress 生成的页面都带有预渲染好的 HTML,也因此具有非常好的加载性能和搜索引擎优化(SEO)。同时,一旦页面被加载,Vue 将接管这些静态内容,并将其转换成一个完整的单页应用(SPA),其他的页面则会只在用户浏览到的时候才按需加载。
"""
)
@pytest.mark.asyncio
@pytest.mark.render
async def test_arknights(app: App):
from nonebot_bison.platform.arknights import Arknights
ak = Arknights()
res = await ak.parse(
{"webUrl": "https://ak.hycdn.cn/announce/IOS/announcement/854_1644580545.html"}
)
|
435694
|
import os
import sys
from argparse import ArgumentParser
from windowsprefetch import Prefetch
# Note: This wrapper script is a work in progress, and needs to be refined.
# todo: robust error handling and notifications, built into result output.
# todo: error-handling based on file content, not just file extension
def main():
p = ArgumentParser()
p.add_argument("-c", "--csv", help="Present results in CSV format", action="store_true")
p.add_argument("-f", "--file", help="Parse a given Prefetch file", required=True)
args = p.parse_args()
file_paths = []
if os.path.isdir(args.file):
for filename in os.listdir(args.file):
file_paths.append(os.path.join(args.file, filename))
else:
file_paths.append(args.file)
parsed_files = []
for filepath in file_paths:
if filepath.endswith(".pf"):
if os.path.getsize(filepath) > 0:
p = Prefetch(filepath)
parsed_files.append(p)
if args.csv:
print("Timestamp,Executable Name,MFT Seq Number,MFT Entry Number,Prefetch Hash,Run Count")
for p in parsed_files:
if args.csv:
if p.version > 17:
for timestamp in p.timestamps:
print("{},{},{},{},{},{}".format(
timestamp,
p.executableName,
p.hash,
p.mftSeqNumber,
p.mftEntryNumber,
p.runCount
))
else:
for timestamp in p.timestamps:
print("{},{},{},{},{},{}".format(
timestamp,
p.executableName,
p.hash,
"N/A",
"N/A",
p.runCount
))
else:
p.prettyPrint()
if __name__ == '__main__':
main()
|
435698
|
import os
import sys
import time
import signal
import click
import json
@click.group()
def cli():
pass
@cli.command()
@click.option('-m', type=str)
def stdio(m):
print(m)
@cli.command()
@click.option('-m', type=str)
@click.option('-p', default='/mnt/girder_worker/data/output_pipe', type=click.File('w'))
def write(m, p):
p.write(m)
@cli.command()
@click.option('-p', default='/mnt/girder_worker/data/input_pipe', type=click.File('r'))
def read(p):
print(p.read())
@cli.command()
def sigkill():
time.sleep(30)
@cli.command()
def sigterm():
def _signal_handler(signal, frame):
sys.exit(0)
# Install signal handler
signal.signal(signal.SIGTERM, _signal_handler)
time.sleep(30)
@cli.command()
def stdout_stderr():
sys.stdout.write('this is stdout data\n')
sys.stderr.write('this is stderr data\n')
@cli.command()
@click.option('-p', type=click.File('w'))
@click.option('--progressions', type=str)
def progress(p, progressions):
progressions = json.loads(progressions)
for msg in progressions:
p.write('%s\n' % json.dumps(msg))
p.flush()
@cli.command()
@click.option('-i', type=click.File('r'))
@click.option('-o', type=click.File('w'))
def read_write(i, o):
while True:
data = i.read(1024)
if not data:
break
o.write(data)
@cli.command()
@click.option('-p', type=str)
def print_path(p):
print(p)
@cli.command()
def raise_exception():
raise Exception('girder docker exception')
@cli.command()
@click.option('-d', type=click.Path(dir_okay=True, file_okay=False))
def listdir(d):
for path in os.listdir(d):
print(path)
if __name__ == '__main__':
cli(obj={})
|
435702
|
import sys
sys.path = ["."] + sys.path
from petlib.ec import EcGroup
from petlib.bn import Bn
from hashlib import sha256
import math
## ######################################################
## An implementation of the ring signature scheme in
##
## Jens Groth and Markus Kohlweiss. "One-out-of-Many Proofs:
## Or How to Leak a Secret and Spend a Coin"
## Cryptology ePrint Archive: Report 2014/764
##
## https://eprint.iacr.org/2014/764
## ######################################################
def challenge(elements):
"""Packages a challenge in a bijective way"""
elem = [len(elements)] + elements
elem_str = list(map(str, elem))
elem_len = list(map(lambda x: "%s||%s" % (len(x) , x), elem_str))
state = "|".join(elem_len)
H = sha256()
H.update(state.encode("utf8"))
return Bn.from_binary(H.digest())
def setup():
""" Generates parameters for Commitments """
G = EcGroup()
g = G.hash_to_point(b'g')
h = G.hash_to_point(b'h')
o = G.order()
return (G, g, h, o)
def Com(ck, m, k):
""" Pedersen Commitment. """
(G, g, h, o) = ck
return m * g + k * h
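# A small usage sketch (not part of the original): Pedersen commitments are
# additively homomorphic, a property the proofs below rely on, e.g.
#   ck = setup(); (G, g, h, o) = ck
#   m1, r1, m2, r2 = Bn(3), o.random(), Bn(4), o.random()
#   assert Com(ck, m1, r1) + Com(ck, m2, r2) == Com(ck, m1 + m2, r1 + r2)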
def ProveZeroOne(ck, c, m, r):
""" Simple proof that a Commitment c = Com(m,r) is either 0 or 1 """
assert Com(ck, m, r) == c
(G, g, h, o) = ck
a, s, t = o.random(), o.random(), o.random()
ca = Com(ck, a, s)
cb = Com(ck, a*m, t)
x = challenge([g, h, ca, cb]) % o
f = (x * m + a) % o
za = (r * x + s) % o
zb = (r * (x - f) + t) % o
return (x, f, za, zb)
def VerifyZeroOne(ck, c, proof):
""" Verify that a Commitment c = Com(m,r) is either 0 or 1 """
(G, g, h, o) = ck
(x, f, za, zb) = proof
assert 0 < x < o
assert 0 < f < o
assert 0 < za < o
assert 0 < zb < o
ca = Com(ck,f,za) - x * c
cb = Com(ck, 0, zb) - (x-f) * c
xp = challenge([g, h, ca, cb]) % o
return xp == x
def ProveOneOfN(ck, cis, el, r, message = ""):
""" NIZK Proof that Com(0; r) is within Cis.
    The fact that it is the el'th commitment is not revealed.
+ Ring signature on "message". """
n = int(math.ceil(math.log(len(cis)) / math.log(2)))
assert Com(ck, 0, r) == cis[el]
(G, g, h, o) = ck
## Commit to the bits of the index
el = Bn(el)
eli = [Bn(int(el.is_bit_set(i))) for i in range(n)]
ri = [o.random() for i in range(n)]
ai = [o.random() for i in range(n)]
si = [o.random() for i in range(n)]
ti = [o.random() for i in range(n)]
Celi = [Com(ck, elix, rix) for elix, rix in zip(eli, ri)]
Cai = [Com(ck, a, s) for a, s in zip(ai, si)]
Cbi = [Com(ck, l * a , s) for l, a, s in zip(eli, ai, ti)]
# Compute p_idxi(x)
p_idx_i = []
for idx in range(len(cis)):
idx = Bn(idx)
idxi = [Bn(int(idx.is_bit_set(i))) for i in range(n)]
p = [Bn(1)]
for j, idxi_j in enumerate(idxi):
if idxi_j == 0:
p = poly_mul(o, p, [ -ai[j] , - eli[j] + 1] )
else:
p = poly_mul(o, p, [ ai[j] , eli[j] ])
p_idx_i += [p]
# Compute all Cdi's
roi = []
cdi = []
for i in range(n):
roi_i = o.random()
roi += [ roi_i ]
# cdi_i = Com(ck, 0, roi_i)
wis = []
for idx, cidx in enumerate(cis):
wis += [ p_idx_i[idx][i] ]
# cdi_i += p_idx_i[idx][i] * cidx
# assert G.wsum(wis, cis) + Com(ck, 0, roi_i) == cdi_i
cdi_i = G.wsum(wis, cis) + Com(ck, 0, roi_i)
cdi += [ cdi_i ]
## The challenge
x = challenge(list(ck) + cis + Celi + Cai + Cbi + cdi + [ message ])
## The responses
fi = [(elj * x + aj) % o for elj, aj in zip(eli, ai)]
zai = [(rj * x + sj) % o for rj, sj in zip(ri, si)]
zbi = [(rj * (x - fj) + tj) % o for rj, fj, tj in zip(ri, fi, ti)]
zd = r * pow(x, n, o) % o
for k in range(n):
zd = (zd - roi[k] * pow(x, k, o)) % o
proof = (Celi, Cai, Cbi, cdi, fi, zai, zbi, zd)
return proof
def VerifyOneOfN(ck, cis, proof, message = ""):
""" Verify the ring signature on message """
n = int(math.ceil(math.log(len(cis)) / math.log(2)))
(G, g, h, o) = ck
(Celi, Cai, Cbi, cdi, fi, zai, zbi, zd) = proof
## Check all parts of the proof are in the right groups
assert 0 <= zd < o
for k in range(n):
assert 0 <= fi[k] < o
assert 0 <= zai[k] < o
assert 0 <= zbi[k] < o
assert G.check_point(Celi[k])
assert G.check_point(Cai[k])
assert G.check_point(Cbi[k])
assert G.check_point(cdi[k])
# Recompute the challenge
x = challenge(list(ck) + cis + Celi + Cai + Cbi + cdi + [ message ])
ret = True
for i in range(n):
ret &= x * Celi[i] + Cai[i] == Com(ck, fi[i], zai[i])
ret &= (x - fi[i]) * Celi[i] + Cbi[i] == Com(ck, Bn(0), zbi[i])
# acc = G.infinite()
bases = []
expons = []
for idx, ci in enumerate(cis):
idx = Bn(idx)
idxi = [Bn(int(idx.is_bit_set(i))) for i in range(n)]
acc_exp = Bn(1)
for k, ij in enumerate(idxi):
if ij == 0:
acc_exp = acc_exp.mod_mul(x - fi[k], o)
else:
acc_exp = acc_exp.mod_mul(fi[k], o)
bases += [ ci ]
expons += [ acc_exp ]
# acc = acc + acc_exp * ci
for k in range(n):
expi = (- pow(x,k,o))
# acc = acc + expi * cdi[k]
bases += [ cdi[k] ]
expons += [ expi ]
# assert G.wsum(expons, bases) == acc
acc = G.wsum(expons, bases)
ret &= acc == Com(ck, 0, zd)
return ret
## ######################################
## Naive polynomial arithmetic
zero = Bn(0)
def poly_expand(o, poly, size):
global zero
assert len(poly) <= size
# zero = Bn(0)
new_poly = [zero for _ in range(size)]
for i in range(len(poly)):
new_poly[i] = poly[i]
return new_poly
def poly_add(o, poly1, poly2):
size = max(len(poly1), len(poly2))
p1 = poly_expand(o, poly1, size)
p2 = poly_expand(o, poly2, size)
pout = poly_expand(o, [], size)
for i, (c1, c2) in enumerate(zip(p1, p2)):
pout[i] = c1.mod_add( c2, o)
return pout
def poly_mul(o, poly1, poly2):
global zero
p = [ zero ]
for i, c1 in enumerate(poly1):
p2 = ([ zero ] * i) + [(c1.mod_mul(c2, o)) for c2 in poly2]
p = poly_add(o, p2, p)
return p
###################################################
# ---------------- TESTS ----------------------- #
###################################################
import pytest
def test_poly_expand():
ck = setup()
(G, g, h, o) = ck
p1 = [Bn(1), Bn(2)]
p2 = poly_expand(o, p1, 10)
assert len(p2) == 10
assert p2[:2] == p1
def test_poly_add():
ck = setup()
(G, g, h, o) = ck
p1 = [Bn(1), Bn(2)]
p2 = poly_add(o, p1, p1)
assert len(p2) == len(p1)
assert p2 == [2, 4]
def test_poly_mul():
ck = setup()
(G, g, h, o) = ck
p1 = [Bn(1), Bn(2)]
p2 = poly_mul(o, p1, p1)
assert p2 == [1, 4, 4]
def test_setup():
ck = setup()
def test_proof():
ck = setup()
(G, g, h, o) = ck
m, r = 1, o.random()
c = Com(ck, m, r)
ProveZeroOne(ck, c, m, r)
@pytest.mark.parametrize("input,expected", [
(1, True),
(0, True),
(2, False),
])
def test_verify(input,expected):
ck = setup()
(G, g, h, o) = ck
m, r = input, o.random()
c = Com(ck, m, r)
proof = ProveZeroOne(ck, c, m, r)
assert VerifyZeroOne(ck, c, proof) == expected
def test_prove_n():
ck = setup()
(G, g, h, o) = ck
c0 = Com(ck, 1, o.random())
c1 = Com(ck, 1, o.random())
c2 = Com(ck, 1, o.random())
c3 = Com(ck, 1, o.random())
r = o.random()
cr = Com(ck,0, r)
cis = [c0, c1, c2, c3, cr]
proof = ProveOneOfN(ck, cis, 4, r, message="Hello World!")
ret = VerifyOneOfN(ck, cis, proof, message="Hello World!")
assert ret
def notest_timing(upper=101):
ck = setup()
(G, g, h, o) = ck
c0 = Com(ck, 1, o.random())
r = o.random()
cr = Com(ck,0, r)
import time
repeats = 10
all_sizes = range(10, upper, 10)
prove_time = []
verify_time = []
for size in all_sizes:
cis = [c0] * (size + 1) + [cr]
        t0 = time.perf_counter()  # time.clock() was removed in Python 3.8
for _ in range(repeats):
proof = ProveOneOfN(ck, cis, len(cis)-1, r, message="Hello World!")
        t1 = time.perf_counter()
dt = (t1-t0) / repeats
prove_time += [ dt ]
print( "Proof time: %s - %2.4f" % (size, dt) )
        t0 = time.perf_counter()
for _ in range(repeats):
ret = VerifyOneOfN(ck, cis, proof, message="Hello World!")
assert ret
        t1 = time.perf_counter()
dt = (t1-t0) / repeats
verify_time += [ dt ]
print( "Verify time: %s - %2.4f" % (size, dt) )
return all_sizes, prove_time, verify_time
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Test and time the Tor median statistics.')
parser.add_argument('--time', action='store_true', help='Run timing tests')
parser.add_argument('--lprof', action='store_true', help='Run the line profiler')
parser.add_argument('--cprof', action='store_true', help='Run the c profiler')
parser.add_argument('--plot', action='store_true', help='Upload time plot to plotly')
args = parser.parse_args()
if args.time:
notest_timing(31)
if args.cprof:
import cProfile
cProfile.run("notest_timing(51)", sort="tottime")
if args.lprof:
from line_profiler import LineProfiler
profile = LineProfiler(VerifyOneOfN, ProveOneOfN, Bn.__init__, Bn.__del__)
profile.run("notest_timing(31)")
profile.print_stats()
if args.plot:
all_sizes, prove_time, verify_time = notest_timing()
import plotly.plotly as py
from plotly.graph_objs import *
trace0 = Scatter(
x=all_sizes,
y=prove_time,
name='Proving',
)
trace1 = Scatter(
x=all_sizes,
y=verify_time,
name='Verification',
)
data = Data([trace0, trace1])
layout = Layout(
title='Timing for GK15 Proof and Verification using petlib',
xaxis=XAxis(
title='Size of ring (no. commits)',
showgrid=False,
zeroline=False
),
yaxis=YAxis(
title='time (sec)',
showline=False
)
)
fig = Figure(data=data, layout=layout)
unique_url = py.plot(fig, filename = 'GK15-petlib-timing')
|
435712
|
from ..core import ProteinLigandDatasetProvider
class KinomeScanDatasetProvider(ProteinLigandDatasetProvider):
pass
|
435734
|
def parse(a):
    """Group newline-separated items six per line, each followed by ", "."""
a = a.split("\n")
a = [x+", " for x in a]
a = [''.join(a[i:i+6])+"\n" for i in range(0, len(a), 6)]
print(''.join(a))
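# A minimal usage sketch (not part of the original): parse regroups
# newline-separated items six to a line.
if __name__ == '__main__':
    parse("a\nb\nc\nd\ne\nf\ng")
    # prints "a, b, c, d, e, f, " on one line and "g, " on the next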
|
435763
|
from fastapi import Depends
from sqlalchemy.orm import Session
from utils.logger import Logger
from utils.db_connection import get_db_session
from schema.sample_object import SampleObject as SampleObjectSchema
from model.sample_object import SampleObject as SampleObjectModel
logger = Logger.get_logger(__name__)
class SampleRepository():
def __init__(self, db_session: Session = Depends(get_db_session)) -> None:
self.db = db_session
def get_by_id(self, id):
logger.debug('Getting item {} from db'.format(id))
return self.db.query(SampleObjectModel).get({'id': id})
def insert(self, item: SampleObjectSchema):
logger.debug('Insert item {}'.format(str(item)))
db_item = SampleObjectModel(id=item.id)
self.db.add(db_item)
self.db.commit()
self.db.refresh(db_item)
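# A minimal wiring sketch (assumptions, not part of the original file): the
# app and route below are hypothetical. FastAPI resolves the nested db_session
# dependency automatically when the repository is injected with Depends().
from fastapi import FastAPI

app = FastAPI()  # hypothetical app for illustration

@app.get('/samples/{item_id}')
def read_sample(item_id: int, repo: SampleRepository = Depends()):
    return repo.get_by_id(item_id)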
|
435772
|
import sys
import random
import copy
#reload(sys)
#sys.setdefaultencoding="utf-8"
if (len(sys.argv)<3):
print("no enough parameter")
exit()
hownet_filename = sys.argv[1]
embedding_filename = sys.argv[2]
answer_filename = hownet_filename+"_answer"
test_filename = hownet_filename+"_test"
with open(hownet_filename,'r',encoding='utf-8') as hownet:
with open(test_filename,'w',encoding='utf-8') as test:
with open(answer_filename,'w',encoding='utf-8') as answer:
with open(embedding_filename,'r',encoding='utf-8') as embedding:
data = hownet.readlines()
dataBuf = []
for line in data:
dataBuf.append(line.strip())
data = dataBuf
wordsBuf = embedding.readlines()
sourcewords = []
length = len(wordsBuf)
for i in range(1,length):
line = wordsBuf[i].strip()
arr = line.split()
sourcewords.append(arr[0])
words = data[0::2]
sememes = data[1::2]
data = list(zip(words,sememes))
samples = random.sample(sourcewords,int(len(sourcewords)*0.1))
#samples = random.sample(samples_test_and_valid,int(len(samples_test_and_valid)*0.5))
samplesBuf = []
for word in samples:
try:
position = words.index(word.strip())
sememe = sememes[position]
samplesBuf.append((word,sememe))
except:
print(samples.index(word))
sample_words = set(copy.copy(samples))
samples = samplesBuf
for word,sememe in samples:
test.write(word+'\n')
answer.write(word+'\n'+sememe+'\n')
with open('train_hownet','w',encoding='utf-8') as train:
for word,sememe in zip(words,sememes):
if word not in sample_words:
train.write(word+'\n'+sememe+'\n')
|
435871
|
import collections
class netlistspec(collections.namedtuple(
"netlistspec", "vertices, load_function, before_simulation_function, "
"after_simulation_function, constraints")):
"""Specification of how an operator should be added to a netlist."""
def __new__(cls, vertices, load_function=None,
before_simulation_function=None,
after_simulation_function=None, constraints=None):
return super(netlistspec, cls).__new__(
cls, vertices, load_function, before_simulation_function,
after_simulation_function, constraints
)
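# A minimal usage sketch (not part of the original file): only `vertices` is
# required; the remaining fields default to None. The load callback below is a
# hypothetical placeholder.
if __name__ == '__main__':
    def load(*args, **kwargs):  # hypothetical load callback
        pass

    spec = netlistspec(vertices=['v0', 'v1'], load_function=load)
    assert spec.vertices == ['v0', 'v1']
    assert spec.constraints is None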
|
435873
|
import torch
import torch.nn.functional as F
from ..base_loss import BaseLoss
class CrossEntropyLoss(BaseLoss):
"""
paper: `Momentum Contrast for Unsupervised Visual Representation Learning <http://openaccess.thecvf.com/content_CVPR_2020/html/He_Momentum_Contrast_for_Unsupervised_Visual_Representation_Learning_CVPR_2020_paper.html>`_
Cross-entropy loss designed for MoCo.
"""
def __init__(
self,
**kwargs
):
super().__init__(**kwargs)
def required_metric(self):
return ["moco"]
def compute_loss(
self,
metric_mat,
row_labels,
col_labels,
is_same_source=False,
*args,
**kwargs
) -> torch.Tensor:
dtype, device = metric_mat.dtype, metric_mat.device
pos_mask = (row_labels == col_labels).type(dtype).to(device)
pos_index = torch.where(pos_mask)[1]
loss = F.cross_entropy(metric_mat, pos_index)
return loss
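# A minimal sketch (not part of the original file) of the expected label
# layout: broadcasting row_labels against col_labels yields one positive
# column per row, whose index becomes the cross-entropy target. Assumes the
# loss needs no extra BaseLoss arguments; runnable only within the package
# because of the relative import above.
if __name__ == '__main__':
    loss_fn = CrossEntropyLoss()
    logits = torch.randn(4, 8)                 # [batch, 1 + queue] similarities
    row_labels = torch.arange(4).unsqueeze(1)  # [4, 1]
    col_labels = torch.arange(8).unsqueeze(0)  # [1, 8]; columns 0..3 are positives
    print(loss_fn.compute_loss(logits, row_labels, col_labels))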
|
435891
|
def convert_bytes(byte):
    units = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB',
             'YiB', 'BiB', 'NiB', 'DiB', 'CiB']
for x in units:
if divmod(byte, 1024)[0] == 0:
break
else:
byte /= 1024
return ('%.2lf%s' % (byte, x))
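# A minimal usage sketch (not part of the original): the loop divides by 1024
# until the quotient reaches zero, then formats with the matching unit.
if __name__ == '__main__':
    assert convert_bytes(512) == '512.00B'
    assert convert_bytes(2048) == '2.00KiB'
    assert convert_bytes(5 * 1024 ** 3) == '5.00GiB'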
|
435901
|
import django
import logging
# from nltk import word_tokenize
import os
# import re
from slacker import Slacker
from sutime import SUTime
import tweepy
from tweepy.api import API
from slack_msg import send_slack_message
# need to point Django at the right settings to access pieces of app
os.environ["DJANGO_SETTINGS_MODULE"] = "openchat.settings" # noqa
django.setup() # noqa
from openspaces.bot_utils import db_utils, tweet_utils, time_utils
import openspaces.secrets as s
from openchat.settings import BASE_DIR
class StreamListener(tweepy.StreamListener):
"""Object that defines the callback actions passed to tweepy.Stream"""
def __init__(self, streambot, api=None):
self.api = api or API()
# needed ref to streambot so method can be called
self.streambot = streambot
self.tw_bot_id = 964215184951713792
self.ignored_users = []
def update_ignore_users(self):
"""Check app config table to get list of ignored twitter ids"""
ignore_list = db_utils.get_ignored_users()
ignore_list.append(self.tw_bot_id)
self.ignored_users = ignore_list
def on_status(self, status):
"""Take a tweet with matching keyword save and trigger retweet_logic"""
self.update_ignore_users()
if status.user.id in self.ignored_users:
return
# create or update user and tweet records in Django models
db_utils.get_or_create_user_and_tweet(status)
# trigger logic to handle tweet and decide on response in Streambot
self.streambot.retweet_logic(status.text, status.id_str,
status.user.screen_name, status.user.id)
def on_error(self, status_code):
if status_code == 420:
return False
class Streambot:
""" Stream Twitter and look for tweets that contain targeted words,
when tweets found look for datetime and room, if present save tweet
to OutgoingTweet model.
Ex.
bot = Streambot()
# to run a stream looking for tweets about PyCon
bot.run_stream(["PyCon"])
"""
def __init__(self):
db_utils.setup_outgoing_config() # needs an outgoing config obj to check against
self.api = self.setup_auth()
self.stream_listener = StreamListener(self)
jar_files = os.path.join(BASE_DIR, "python-sutime", "jars")
self.sutime = SUTime(jars=jar_files, mark_time_ranges=True)
self.slacker = Slacker(s.SLACK_TOKEN)
def setup_auth(self):
"""Set up auth stuff for api and return tweepy api object"""
auth = tweepy.OAuthHandler(s.openspacesbot["CONSUMER_KEY"],
s.openspacesbot["CONSUMER_SECRET"])
auth.set_access_token(s.openspacesbot["ACCESS_TOKEN"],
s.openspacesbot["ACCESS_TOKEN_SECRET"])
api = tweepy.API(auth)
return api
def run_stream(self, search_list=None):
""" Start stream, when matching tweet found on_status method called.
search_list arg is a list of terms that will be looked for in tweets
"""
if search_list == None:
raise ValueError("Need a list of search terms as arg to run_stream")
stream = tweepy.Stream(auth=self.api.auth, listener=self.stream_listener)
stream.filter(track=search_list)
def send_mention_tweet(self, screen_name):
"""Mention a user in a tweet from bot letting them know that
        their tweet has been received and that we will send out reminders
about their event.
"""
hours_mins = time_utils.get_local_clock_time()
mention = "@{} just saw your Open Spaces tweet!"
mention += " I'll retweet a reminder 15 minutes before your event!"
mention = mention.format(screen_name, hours_mins)
try:
self.api.update_status(status=mention)
except:
# if same user tweets valid openspaces tweet at exact same clock time
# it causes a duplicate tweet which bot can't send
pass
def parse_time_room(self, tweet):
"""Get time and room number from a tweet using SUTime and tweet_utils"""
extracted_time = self.sutime.parse(tweet)
time_and_room = tweet_utils.get_time_and_room(tweet, extracted_time)
return time_and_room
def value_check(self, time_room_obj):
"""Returns a tuple with the counts of values extracted from a tweet
in the parse_time_room method. This tuple is used to decide how bot
will respond to tweet.
"""
num_room_values = len(time_room_obj["room"])
num_time_values = len(time_room_obj["date"])
return (num_room_values, num_time_values)
def retweet_logic(self, tweet, tweet_id, screen_name, user_id):
"""Use SUTime to try to parse a datetime out of a tweet, if successful
save tweet to OutgoingTweet to be retweeted
"""
# use SUTime to parse a datetime out of tweet
time_room = self.parse_time_room(tweet)
# make sure both time and room extracted and only one val each
val_check = self.value_check(time_room)
if val_check == (1, 1):
room = time_room["room"][0]
date_mention = tweet_utils.check_date_mention(tweet)
converted_time = time_utils.convert_to_utc(time_room["date"][0], date_mention)
# check for a time and room conflict, only 1 set of retweets per event
        # default time range that a room is reserved for is -15 +30 mins
conflict = db_utils.check_time_room_conflict(converted_time, room)
if not conflict:
# This record lets us check that retweets not for same event
event_obj = db_utils.create_event(description=tweet,
start=converted_time,
location=room,
creator=screen_name)
tweet_utils.schedule_tweets(screen_name, tweet, tweet_id, converted_time, event_obj)
slack_msg = "{} \n Tweet from: {}, \n id: {}".format(tweet, screen_name, user_id)
send_slack_message(user_id=user_id,
tweet_id=tweet_id,
screen_name=screen_name,
tweet_created=True,
tweet=tweet,
slack_msg=slack_msg)
self.send_mention_tweet(screen_name)
else:
message = f"Tweet found for an already scheduled event: \n {tweet}"
send_slack_message(user_id=user_id,
tweet_id=tweet_id,
screen_name=screen_name,
tweet_created=False,
tweet=tweet,
slack_msg=message,
channel="conflict")
elif val_check == (0, 0):
# tweet found but without valid time or room extracted, ignore
pass
else:
# tweet with relevant information but not exactly 1 time & 1 room
slack_msg = "Tweet found that needs review: {} \n tweet_id: {} \n screen_name: {} \n user_id: {}"
slack_msg = slack_msg.format(tweet, tweet_id, screen_name, user_id)
send_slack_message(user_id=user_id,
tweet_id=tweet_id,
screen_name=screen_name,
tweet_created=False,
tweet=tweet,
slack_msg=slack_msg)
if __name__ == '__main__':
bot = Streambot()
bot.run_stream(["pyconopenspaces", "pyconopenspace"])
|
435907
|
from terra_sdk.core.coins import Coins
from ._base import BaseAsyncAPI, sync_bind
__all__ = ["AsyncSupplyAPI", "SupplyAPI"]
class AsyncSupplyAPI(BaseAsyncAPI):
async def total(self) -> Coins:
"""Fetches the current total supply of all tokens.
Returns:
Coins: total supply
"""
res = await self._c._get("/supply/total")
return Coins.from_data(res)
class SupplyAPI(AsyncSupplyAPI):
@sync_bind(AsyncSupplyAPI.total)
def total(self) -> Coins:
pass
total.__doc__ = AsyncSupplyAPI.total.__doc__
|
435916
|
import asyncio
import time
import asynctnt
import logging
logging.basicConfig(level=logging.DEBUG)
async def run():
conn = asynctnt.Connection(host='127.0.0.1', port=3305)
begin = time.time()
n = 1000
for i in range(n):
await conn.connect()
await conn.disconnect()
dt = time.time() - begin
print('Total: {}'.format(dt))
print('1 connect+disconnect: {}'.format(dt / n))
async def run2():
conn = asynctnt.Connection(host='127.0.0.1', port=3305)
await conn.connect()
begin = time.time()
n = 10000
for i in range(n):
await conn.eval('return box.info')
dt = time.time() - begin
print('Total: {}'.format(dt))
print('1 ping: {}'.format(dt / n))
async def run3():
conn = asynctnt.Connection(host='127.0.0.1', port=3301)
await conn.connect()
begin = time.time()
n = 10000
for i in range(n):
await conn.select('S')
dt = time.time() - begin
print('Total: {}'.format(dt))
print('1 select: {}'.format(dt / n))
async def run4():
conn = asynctnt.Connection(host='127.0.0.1', port=3301)
await conn.connect()
begin = time.time()
n = 10000
for i in range(n):
resp = await conn.sql('select * from s')
dt = time.time() - begin
print('Total: {}'.format(dt))
print('1 sql: {}'.format(dt / n))
print(resp.encoding)
print(resp)
print(list(resp))
loop = asyncio.get_event_loop()
loop.run_until_complete(run3())
loop.run_until_complete(run4())
|
435936
|
import pytest
from ocs_ci.framework.pytest_customization.marks import tier1
@tier1
@pytest.mark.last
class TestFailurePropagator:
"""
    Test class for the failure propagator test case. The test is intended to run last and to propagate
    teardown failures caught during test execution, so that regular test cases are not falsely marked as failed
"""
def test_failure_propagator(self):
pass
|
435938
|
import os
import ssl
import unittest
from mock import MagicMock, patch, call
from kafka.tools.configuration import ClientConfiguration, eval_boolean, check_file_access
from kafka.tools.exceptions import ConfigurationError
class ConfigurationTests(unittest.TestCase):
def test_eval_boolean(self):
assert eval_boolean(True)
assert not eval_boolean(False)
assert eval_boolean(1)
assert not eval_boolean(0)
assert eval_boolean('True')
assert not eval_boolean('False')
@patch('kafka.tools.configuration.os.access')
def test_check_file_access(self, mock_access):
mock_access.side_effect = [True, False]
check_file_access('file1')
self.assertRaises(ConfigurationError, check_file_access, 'file2')
mock_access.assert_has_calls([call('file1', os.R_OK), call('file2', os.R_OK)])
def test_create(self):
config = ClientConfiguration()
assert config.ssl_context is None
def test_create_both_zk_and_hosts(self):
self.assertRaises(ConfigurationError, ClientConfiguration, zkconnect='foo', broker_list='bar')
def test_create_invalid_name(self):
self.assertRaises(ConfigurationError, ClientConfiguration, invalidconfig='foo')
def test_client_id(self):
config = ClientConfiguration(client_id="testid")
assert config.client_id == "testid"
self.assertRaises(TypeError, ClientConfiguration, client_id=1)
self.assertRaises(TypeError, ClientConfiguration, client_id=None)
def test_metadata_refresh(self):
config = ClientConfiguration(metadata_refresh=2345)
assert config.metadata_refresh == 2345
self.assertRaises(TypeError, ClientConfiguration, metadata_refresh='foo')
self.assertRaises(TypeError, ClientConfiguration, metadata_refresh=-1)
def test_max_request_size(self):
config = ClientConfiguration(max_request_size=2345)
assert config.max_request_size == 2345
self.assertRaises(TypeError, ClientConfiguration, max_request_size='foo')
self.assertRaises(TypeError, ClientConfiguration, max_request_size=-1)
def test_num_retries(self):
config = ClientConfiguration(num_retries=5)
assert config.num_retries == 5
self.assertRaises(TypeError, ClientConfiguration, num_retries='foo')
self.assertRaises(TypeError, ClientConfiguration, num_retries=-1)
def test_retry_backoff(self):
config = ClientConfiguration(retry_backoff=5.4)
assert config.retry_backoff == 5.4
self.assertRaises(TypeError, ClientConfiguration, retry_backoff='foo')
self.assertRaises(TypeError, ClientConfiguration, retry_backoff=-1)
def test_broker_threads(self):
config = ClientConfiguration(broker_threads=31)
assert config.broker_threads == 31
self.assertRaises(TypeError, ClientConfiguration, broker_threads='foo')
self.assertRaises(TypeError, ClientConfiguration, broker_threads=-1)
def test_broker_list(self):
config = ClientConfiguration(broker_list='broker1.example.com:9091,broker2.example.com:9092')
assert config.broker_list == [('broker1.example.com', 9091), ('broker2.example.com', 9092)]
self.assertRaises(TypeError, ClientConfiguration, broker_list=1)
def test_zkconnect(self):
config = ClientConfiguration(zkconnect='zk.example.com:2181/kafka-cluster')
assert config.zkconnect == 'zk.example.com:2181/kafka-cluster'
self.assertRaises(TypeError, ClientConfiguration, zkconnect=1)
def test_verify_certificates(self):
config = ClientConfiguration(tls_verify_certificates=True)
assert config.tls_verify_certificates
config = ClientConfiguration(tls_verify_certificates=False)
assert not config.tls_verify_certificates
def test_verify_hostnames(self):
config = ClientConfiguration(tls_verify_hostnames=True)
assert config.tls_verify_hostnames
config = ClientConfiguration(tls_verify_hostnames=False)
assert not config.tls_verify_hostnames
@patch('kafka.tools.configuration.check_file_access')
def test_root_certificates(self, mock_access):
mock_access.return_value = True
config = ClientConfiguration(tls_root_certificates='filename')
assert config.tls_root_certificates == 'filename'
mock_access.assert_called_once_with('filename')
@patch('kafka.tools.configuration.check_file_access')
def test_client_certificate(self, mock_access):
mock_access.return_value = True
config = ClientConfiguration(tls_client_certificate='filename')
assert config.tls_client_certificate == 'filename'
mock_access.assert_called_once_with('filename')
@patch('kafka.tools.configuration.check_file_access')
def test_client_keyfile(self, mock_access):
mock_access.return_value = True
config = ClientConfiguration(tls_client_keyfile='filename')
assert config.tls_client_keyfile == 'filename'
mock_access.assert_called_once_with('filename')
def test_client_key_password(self):
def testfunc():
return 'foo'
config = ClientConfiguration(tls_client_key_password_callback=testfunc)
assert config.tls_client_key_password_callback == testfunc
self.assertRaises(TypeError, ClientConfiguration, tls_client_key_password_callback='notcallable')
def test_verify_ssl_configuration(self):
config = ClientConfiguration(tls_verify_certificates=False, tls_verify_hostnames=True)
self.assertRaises(ConfigurationError, config._verify_ssl_configuration)
config.tls_verify_certificates = True
config._verify_ssl_configuration()
def test_enable_tls_default(self):
config = ClientConfiguration(enable_tls=True)
assert isinstance(config.ssl_context, ssl.SSLContext)
assert config.ssl_context.protocol == ssl.PROTOCOL_SSLv23
assert config.ssl_context.verify_mode == ssl.CERT_REQUIRED
assert config.ssl_context.check_hostname is True
@patch('kafka.tools.configuration.ssl.SSLContext')
@patch('kafka.tools.configuration.check_file_access')
def test_enable_tls_custom_certs(self, mock_access, mock_ssl):
def testfunc():
return 'foo'
config = ClientConfiguration(enable_tls=True,
tls_verify_certificates=False,
tls_verify_hostnames=False,
tls_root_certificates='example_root_cert_file',
tls_client_certificate='example_client_cert_file',
tls_client_keyfile='example_client_key_file',
tls_client_key_password_callback=testfunc)
mock_ssl.assert_called_once_with(ssl.PROTOCOL_SSLv23)
assert config.ssl_context.verify_mode == ssl.CERT_NONE
assert config.ssl_context.check_hostname is False
config.ssl_context.load_verify_locations.assert_called_once_with(cafile='example_root_cert_file')
config.ssl_context.load_cert_chain.assert_called_once_with('example_client_cert_file',
keyfile='example_client_key_file',
password=<PASSWORD>)
@patch('kafka.tools.configuration.ssl.SSLContext')
@patch('kafka.tools.configuration.check_file_access')
def test_enable_tls_error(self, mock_access, mock_ssl):
def testfunc():
return 'foo'
ssl_instance = MagicMock()
ssl_instance.load_cert_chain.side_effect = ssl.SSLError
mock_ssl.return_value = ssl_instance
self.assertRaises(ConfigurationError, ClientConfiguration, enable_tls=True, tls_verify_certificates=False,
tls_verify_hostnames=False, tls_root_certificates='example_root_cert_file',
tls_client_certificate='example_client_cert_file',
tls_client_keyfile='example_client_key_file', tls_client_key_password_callback=testfunc)
|
435947
|
import fields
import debug
import csv
import os
import sys
import datetime
def writer(vulns, **params):
csvfile = params['fobj']
debug.write('.')
for vuln in vulns:
csvfile.writerow([vuln['name'], vuln['count']])
def gen_csv(sc, filename):
    '''csv SecurityCenterObj, OutputFilename
'''
# First thing we need to do is initialize the csvfile and build the header
# for the file.
datafile = open(filename, 'wb')
csvfile = csv.writer(datafile)
csvfile.writerow(['Software Package Name', 'Count'])
debug.write('Generating %s: ' % filename)
    # Next we run the Security Center query. Because this could be a
    # potentially very large dataset, we don't want to run out of memory.
    # To get around this, we pass the query function the writer function
    # with the appropriate fields so that results are parsed inline.
fparams = {'fobj': csvfile}
sc.query('listsoftware', func=writer, func_params=fparams)
debug.write('\n')
# Lastly we need to close the datafile.
datafile.close()
|
435948
|
from torch import nn
import torch.nn.init as init
import numpy as np
def gaussian_weights_init(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
def xavier_weights_init(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
init.xavier_uniform(m.weight, gain=np.sqrt(2))
init.constant(m.bias, 0.1)
|
435958
|
from django.conf.urls import url
import twofa.views
app_name = "twofa"
urlpatterns = [
url(r"^verify/$", twofa.views.verify, name="verify"),
url(r"^verify/(?P<device_id>[0-9]+)/$", twofa.views.verify, name="verify"),
url(r"^setup/totp/$", twofa.views.setup_totp, name="setup-totp"),
url(r"^setup/backup/(?P<device_id>[0-9]+)/$", twofa.views.setup_backup, name="paper-code"),
url(r"^remove/(?P<device_id>[0-9]+)/$", twofa.views.remove, name="remove"),
url(r"^regenerate/(?P<device_id>[0-9]+)/$", twofa.views.regenerate, name="regenerate"),
url(r"^$", twofa.views.list, name="list"),
]
|
435994
|
from typing import Callable, Tuple
import flax.linen as nn
import jax.numpy as jnp
from .weightnorm import constant
class PosDense(nn.Module):
"""Dense-layer with positive weights
"""
channels: int
use_bias: bool = True
kernel_init: Callable = nn.initializers.lecun_normal()
bias_init: Callable = nn.initializers.zeros
@nn.compact
def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:
"""Project inputs with positive weights.
Args:
inputs: [float32; [..., C]], input tensor.
Returns:
[float32; [..., C']], projected.
"""
# C
in_channels = inputs.shape[-1]
# [C, C']
kernel = self.param('kernel', self.kernel_init,
[in_channels, self.channels])
# [..., C']
x = inputs @ nn.softplus(kernel)
if self.use_bias:
# [C']
bias = self.param('bias', self.bias_init, [self.channels])
# [..., C']
x = x + nn.softplus(bias)
return x
class LogSNR(nn.Module):
"""Learnable noise scheduler: logSNR.
"""
internal: int
# initialize in range [-10, 10]
# reference from Variational Diffusion Models, range of learned log-SNR.
initial_gamma_min: float = -10.
initial_gamma_gap: float = 20.
@nn.compact
def __call__(self, inputs: jnp.ndarray) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Compute logSNR from continuous timesteps.
Args:
inputs: [float32; [B]], timesteps in [0, 1].
Returns:
[float32; [B]], logSNR and normalized -logSNR.
"""
# [B + 2], add terminal point
x = jnp.concatenate([jnp.array([0., 1.]), inputs], axis=0)
# [B + 2, 1]
l1 = PosDense(channels=1)(x[:, None])
# [B + 2, C]
l2 = nn.sigmoid(PosDense(channels=self.internal)(l1))
# [B + 2], learned scheduler
sched = jnp.squeeze(l1 + PosDense(channels=1)(l2), axis=-1)
# [], [], [B]
s0, s1, sched = sched[0], sched[1], sched[2:]
# [B], normalized -logSNR
norm_nlogsnr = (sched - s0) / (s1 - s0)
# [B], boundary matching
gamma_min = self.param(
'gamma_min', constant(self.initial_gamma_min), [])
gamma_gap = self.param(
'gamma_gap', constant(self.initial_gamma_gap), [])
# force gamma-gap positive
nlogsnr = gamma_min + nn.softplus(gamma_gap) * norm_nlogsnr
return -nlogsnr, norm_nlogsnr
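# A minimal usage sketch (not part of the original file): initialize and apply
# the learnable scheduler on a batch of timesteps. The internal width below is
# an arbitrary choice, and this runs only inside the package because of the
# relative `.weightnorm` import above.
if __name__ == '__main__':
    import jax

    model = LogSNR(internal=1024)
    t = jnp.linspace(0., 1., 8)                    # [B], timesteps in [0, 1]
    params = model.init(jax.random.PRNGKey(0), t)
    logsnr, norm_nlogsnr = model.apply(params, t)  # [B], [B]
    print(logsnr.shape, norm_nlogsnr.shape)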
|
436014
|
from django.contrib.auth import get_user_model
from django.contrib.auth.password_validation import password_validators_help_texts
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers
from bananas.admin.api.schemas import schema_serializer_method
class UserSerializer(serializers.ModelSerializer):
username = serializers.CharField(source="get_username", read_only=True)
full_name = serializers.SerializerMethodField()
email = serializers.SerializerMethodField()
is_superuser = serializers.BooleanField(read_only=True)
permissions = serializers.SerializerMethodField()
groups = serializers.SerializerMethodField()
class Meta:
model = get_user_model()
ref_name = "Me"
fields = (
"id",
"username",
"full_name",
"email",
"is_superuser",
"permissions",
"groups",
)
read_only_fields = fields
@schema_serializer_method(
serializer_or_field=serializers.CharField(
help_text=_("Falls back to username, if not implemented or empty")
)
)
def get_full_name(self, obj):
full_name = getattr(obj, "get_full_name", None)
if full_name is not None:
full_name = full_name()
if not full_name:
full_name = obj.get_username()
return full_name
@schema_serializer_method(serializer_or_field=serializers.CharField())
def get_email(self, obj):
return getattr(obj, obj.get_email_field_name(), None)
@schema_serializer_method(
serializer_or_field=serializers.ListField(
help_text=_(
"Permissions that the user has, both through group and user permissions."
)
)
)
def get_permissions(self, obj):
return sorted(obj.get_all_permissions())
@schema_serializer_method(serializer_or_field=serializers.ListField)
def get_groups(self, obj):
return obj.groups.order_by("name").values_list("name", flat=True)
class AuthenticationSerializer(serializers.Serializer):
username = serializers.CharField(label=_("Username"), write_only=True)
password = serializers.CharField(label=_("Password"), write_only=True)
class PasswordChangeSerializer(serializers.Serializer):
old_password = serializers.CharField(label=_("Old password"), write_only=True)
new_password1 = serializers.CharField(
label=_("New password"),
help_text=password_validators_help_texts(),
write_only=True,
)
new_password2 = serializers.CharField(
label=_("New password confirmation"), write_only=True
)
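# Usage sketch (added; illustrative only, not part of the original module):
# in a DRF view, the password-change payload would be validated roughly as
#
#     serializer = PasswordChangeSerializer(data=request.data)
#     serializer.is_valid(raise_exception=True)
#
# Note that this serializer only declares fields; verifying the old password
# and that new_password1 == new_password2 is left to the consuming view.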
|
436035
|
import ray
from tqdm import tqdm
class Pool:
def __init__(self, actors):
"""
actors: list of ray actor handles
"""
self.actors = actors
assert len(self.actors) > 0
def map(self, exec_fn, iterable,
callback_fn=None,
desc=None,
pbar_update=None,
use_tqdm: bool = True):
"""
exec_fn: function to execute actor on each item of iterable
callback_fn: function to process each result
"""
        arg_it = iter(iterable)
        actor_it = iter(self.actors)
        pending_tasks = []
        results = []
        # a dedicated sentinel lets the iterable legitimately contain None
        _sentinel = object()
        while True:
            arg = next(arg_it, _sentinel)
            if arg is _sentinel:
                break
actor = next(actor_it, None)
if actor is None:
actor_it = iter(self.actors)
actor = next(actor_it, None)
pending_tasks.append(exec_fn(actor, arg))
        pbar = None
        if use_tqdm:
            pbar = tqdm(total=len(pending_tasks), desc=desc,
                        dynamic_ncols=True, smoothing=0.01)
        # drain tasks as they finish; results accumulate in completion order
        while len(pending_tasks) > 0:
            finished_tasks, pending_tasks = ray.wait(pending_tasks)
            for finished_task in finished_tasks:
                results.append(ray.get(finished_task))
                if callback_fn is not None:
                    callback_fn(results[-1])
                if pbar is not None:
                    pbar.update()
                    if pbar_update is not None:
                        pbar_update(pbar)
        if pbar is not None:
            pbar.close()
        return results
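# Usage sketch (added, not part of the original module): a minimal ray actor
# mapped over a range; the actor and method names here are illustrative.
if __name__ == "__main__":
    @ray.remote
    class Worker:
        def square(self, x):
            return x * x
    ray.init()
    pool = Pool([Worker.remote() for _ in range(4)])
    out = pool.map(lambda actor, x: actor.square.remote(x), range(10))
    print(sorted(out))  # results arrive in completion order, so sort for display
    ray.shutdown()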
|
436041
|
import pytest
from fastapi import Depends
from fastapi_jsonrpc import get_jsonrpc_request_id
@pytest.fixture
def probe(ep):
@ep.method()
def probe(
jsonrpc_request_id: int = Depends(get_jsonrpc_request_id),
) -> int:
return jsonrpc_request_id
return ep
def test_basic(probe, json_request):
resp = json_request({
'id': 123,
'jsonrpc': '2.0',
'method': 'probe',
'params': {},
})
assert resp == {'id': 123, 'jsonrpc': '2.0', 'result': 123}
def test_batch(probe, json_request):
resp = json_request([
{
'id': 1,
'jsonrpc': '2.0',
'method': 'probe',
'params': {},
},
{
'id': 2,
'jsonrpc': '2.0',
'method': 'probe',
'params': {},
},
])
assert resp == [
{'id': 1, 'jsonrpc': '2.0', 'result': 1},
{'id': 2, 'jsonrpc': '2.0', 'result': 2},
]
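# Note (added): the exact-order assertion above assumes fastapi_jsonrpc echoes
# batch responses in request order; JSON-RPC 2.0 itself permits any order, with
# clients matching responses to requests by id.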
|
436049
|
import torch
import text2text as t2t
from transformers import AutoModelForQuestionAnswering, AutoTokenizer
class Answerer(t2t.Transformer):
pretrained_answerer = "valhalla/longformer-base-4096-finetuned-squadv1"
def __init__(self, **kwargs):
pretrained_answerer = kwargs.get('pretrained_answerer')
if not pretrained_answerer:
pretrained_answerer = self.__class__.pretrained_answerer
self.__class__.tokenizer = AutoTokenizer.from_pretrained(pretrained_answerer)
        self.__class__.model = AutoModelForQuestionAnswering.from_pretrained(pretrained_answerer).eval()  # inference mode
def _translate_lines(self, input_lines, src_lang, tgt_lang):
translator = getattr(self.__class__, "translator", t2t.Translator())
self.__class__.translator = translator
return translator.transform(input_lines, src_lang=src_lang, tgt_lang=tgt_lang)
def _get_answers(self, input_lines):
tokenizer = self.__class__.tokenizer
model = self.__class__.model
num_examples = len(input_lines)
encoded_inputs = tokenizer.batch_encode_plus(input_lines, padding=True, return_tensors="pt")
input_ids = encoded_inputs["input_ids"]
attention_mask = encoded_inputs["attention_mask"]
        with torch.no_grad():
            # no gradients are needed for inference
            results = model(input_ids, attention_mask=attention_mask)
ans_ids = [None] * num_examples
        for i in range(num_examples):
            # indices of the highest-scoring start and end positions
            start_index = torch.argmax(results["start_logits"][i])
            end_index = torch.argmax(results["end_logits"][i])
            ans_ids[i] = input_ids[i][start_index:end_index + 1]
answers = tokenizer.batch_decode(ans_ids, skip_special_tokens=True)
answers = [a.strip() for a in answers]
return answers
def transform(self, input_lines, src_lang='en', **kwargs):
input_lines = t2t.Transformer.transform(self, input_lines, src_lang, **kwargs)
if src_lang != 'en':
input_lines = self._translate_lines(input_lines, src_lang, 'en')
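        # "context [SEP] question" lines become (question, context) pairs,
        # the order the QA tokenizer expects (assumed input convention)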
input_lines = [line.split(" [SEP] ")[::-1] for line in input_lines]
output_lines = self._get_answers(input_lines)
if src_lang != 'en':
output_lines = self._translate_lines(output_lines, src_lang='en', tgt_lang=src_lang)
return output_lines
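# Usage sketch (added, not part of the original module). The
# "context [SEP] question" line format is inferred from transform() above,
# and the first run downloads the pretrained longformer checkpoint.
if __name__ == "__main__":
    answerer = Answerer()
    lines = ["Saint George slayed the dragon. [SEP] Who slayed the dragon?"]
    print(answerer.transform(lines))  # expected: something like ["Saint George"]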
|