| id | content |
|---|---|
11491504
|
import request_generator
class MouseRequest(request_generator.RequestGenerator):
def __init__(self, client):
super(MouseRequest, self).__init__(client)
self.type = 'action'
self.device = 'mouse'
def press(self, mask):
self.action = 'press'
self.params = [mask]
self.send_request()
def release(self, mask):
self.action = 'release'
self.params = [mask]
self.send_request()
def click(self, mask):
self.press(mask)
self.release(mask)
def left_click(self, x = None, y = None):
self.action = 'left_click'
if x is None or y is None:
self.params = []
else:
self.params = [x, y]
return self.send_request()
def right_click(self, x = None, y = None):
self.action = 'right_click'
if x is None or y is None:
self.params = []
else:
self.params = [x, y]
return self.send_request()
def move(self, x, y):
self.action = 'move'
self.params = [x,y]
return self.send_request()
def move_by(self, x, y):
self.action = 'move_by'
self.params = [x,y]
return self.send_request()
def drag(self, x1, y1, x2, y2):
self.action = 'drag'
self.params = [x1, y1, x2, y2]
return self.send_request()
def drag_by(self, x, y):
self.action = 'drag_by'
self.params = [x,y]
return self.send_request()
def get_position(self):
self.action = 'get_position'
self.params = []
return self.send_request()
def get_color(self, x = None, y = None):
self.action = 'get_color'
if x is None or y is None:
self.params = []
else:
self.params = [x, y]
return self.send_request()
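# Hedged usage sketch (not part of the original module): `client` below is an
# assumption; it must be whatever `request_generator.RequestGenerator` expects.
#   mouse = MouseRequest(client)
#   mouse.move(100, 200)        # absolute move to (100, 200)
#   mouse.left_click()          # click at the current cursor position
#   mouse.drag(0, 0, 50, 50)    # drag from (0, 0) to (50, 50)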
|
11491527
|
import aiohttp
import asyncio
from typing import Awaitable, Callable
from tests.benchmark.driver_base import *
TestTask = Callable[[aiohttp.ClientSession], Awaitable[int]]
async def run_notify_test(session: aiohttp.ClientSession) -> int:
async with session.post(notify_url(), json=notify_entity()) as response:
return response.status
async def run_version_test(session: aiohttp.ClientSession) -> int:
async with session.get(version_url()) as response:
return response.status
# NOTE. Timing coroutines.
# Starting a timer before `async with` and then stopping it in the block
# won't measure what you think. E.g.
#
# sample_id = monitor.start_duration_sample()
# async with session.get(version_url()) as response:
# label = f"client:version:{response.status}"
# monitor.stop_duration_sample(label, sample_id)
#
# won't actually time just how long the HTTP request took from start to
# finish, but will also include the time the various coroutines sat waiting
# in the event loop. While there's no accurate way of timing coroutines that
# I know, in the specific case of aiohttp, we could provide some half-meaningful
# measurements:
# - https://stackoverflow.com/questions/46004745
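# Hedged sketch (not part of the original driver): aiohttp's request-tracing
# hooks time each request itself rather than the event-loop wait time, which is
# the "half-meaningful" measurement hinted at above. The label format is illustrative.
def make_timing_trace_config() -> aiohttp.TraceConfig:
    import time
    trace_config = aiohttp.TraceConfig()

    async def on_request_start(session, ctx, params):
        # store the start time on the per-request context object
        ctx.start = time.monotonic()

    async def on_request_end(session, ctx, params):
        elapsed = time.monotonic() - ctx.start
        print(f"client:{params.url.path}:{params.response.status}: {elapsed:.4f}s")

    trace_config.on_request_start.append(on_request_start)
    trace_config.on_request_end.append(on_request_end)
    return trace_config
# Usage: aiohttp.ClientSession(trace_configs=[make_timing_trace_config()])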
def lookup_test_task(test_id: str) -> TestTask:
tasks = {
VERSION_TEST: run_version_test,
NOTIFY_TEST: run_notify_test
}
return tasks[test_id]
async def do_many(task: TestTask, how_many: int) -> TestRunResults:
async with aiohttp.ClientSession() as session:
tasks = [task(session) for _ in range(how_many)]
return await asyncio.gather(*tasks, return_exceptions=True)
class AsyncioDriver(Driver):
def _do_run(self, test_id: str) -> TestRunResults:
test_task = lookup_test_task(test_id)
return asyncio.run(do_many(test_task, REQUESTS_N))
if __name__ == "__main__":
AsyncioDriver().main()
|
11491551
|
from amazon.api import AmazonAPI
from ebaysdk.finding import Connection as finding
from ebaysdk.exception import ConnectionError
import csv
from fuzzywuzzy import fuzz
import argparse
parser = argparse.ArgumentParser(description='Search eBay listings and compare their prices against Amazon')
parser.add_argument('-S','--SearchTerm', help='what you would like to search for', required=True)
args = vars(parser.parse_args())
#keys
AWSkey = 'your AWS key here'
AWSSecretKey = 'your AWS secret key here'
amazonProductAdvName = 'your amazon product advertising name here'
EbayKey = 'your ebay developer key here'
search = u'%s'%args['SearchTerm']
amazon = AmazonAPI(AWSkey,AWSSecretKey,amazonProductAdvName,MaxQPS=.9)
ebayApi = finding(appid=EbayKey, config_file=None)
path = open("AmazonEbayData.csv", "wt")
filewriter = csv.writer(path)
allData = []
filewriter.writerow(['Ebay Item Title', 'Ebay Item ID','Ebay Price','ebay Item URL', 'Amazon Item Title','Amazon ISBN', 'Amazon Price', 'Amazon Item URL', 'Price Difference'])
def findOnAmazon(ebayItem,ebayPrice,ebayItemID,ebayItemURL):
response = amazon.search_n(1,Keywords=ebayItem, SearchIndex='All')
amazonPrice = response[0].price_and_currency[0]
amazonTitle = response[0].title
amazonISBN = response[0].isbn
amazonOfferURL = response[0].offer_url
#check how close the titles are so we can better understand if they are the same product
r = fuzz.partial_ratio(ebayItem,amazonTitle)
if r > 75:
# we can assume that they are the same thing
if float(amazonPrice) > float(ebayPrice)*1.5:
# check if the item is at least 50% more expensive on amazon so we have room to make money
possibleReturn = float(amazonPrice) - float(ebayPrice)
possret = float("{0:.2f}".format(possibleReturn))
print(possret)
data = [ebayItem,ebayItemID,ebayPrice,ebayItemURL,amazonTitle,amazonISBN,amazonPrice,amazonOfferURL,possret]
allData.append(data)
def run():
try:
api_request = {
'keywords': search,
'itemFilter': [
{'name': 'LocatedIn',
'value': 'US'},
{'name': 'MinPrice',
'value': '0'},
{'name': 'MaxPrice',
'value': '50.00'},
],
'sortOrder': 'BestMatch',
}
response = ebayApi.execute('findItemsAdvanced', api_request)
search_result = response.reply.get('searchResult')
item_list = search_result.get('item')
for item in item_list:
ebaytitle = item.get('title')
ebayItemID = item.get('itemId')
ebayprice = item.get('sellingStatus')
current = ebayprice.get('currentPrice')
value = current.get('value')
ebayURL = item.get('viewItemURL')
try:
findOnAmazon(ebaytitle,value,ebayItemID,ebayURL)
except Exception:
print("couldn't find matching object")
for row in allData:
filewriter.writerow(row)
path.close()
except ConnectionError as e:
print(e)
print(e.response.dict())
if __name__ == '__main__':
if args['SearchTerm']:
run()
|
11491597
|
import functools
import torch
from scipy.linalg import lapack as scll
from falkon.la_helpers import potrf
from falkon.options import FalkonOptions
from falkon.utils.helpers import choose_fn
__all__ = ("check_init", "inplace_set_diag_th", "inplace_add_diag_th",
"lauum_wrapper", "potrf_wrapper")
def check_init(*none_check):
def _checker(fun):
@functools.wraps(fun)
def wrapper(self, *args, **kwargs):
is_init = True
for el in none_check:
if getattr(self, el, None) is None:
is_init = False
break
if not is_init:
raise RuntimeError(
"FALKON preconditioner is not initialized. Please run "
"`init` before any other method on the "
"preconditioner.")
return fun(self, *args, **kwargs)
return wrapper
return _checker
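# Hedged usage sketch (class and attribute names below are illustrative, not
# part of this module): `check_init` guards methods that must only run after
# `init` has populated the listed attributes.
#   class MyPrecond:
#       def init(self, M):
#           self.fC_ = potrf_wrapper(M, clean=True, upper=False,
#                                    use_cuda=False, opt=FalkonOptions())
#       @check_init("fC_")
#       def apply(self, v):
#           return torch.cholesky_solve(v, self.fC_)
# Calling `apply` before `init` raises the RuntimeError above.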
def inplace_set_diag_th(A: torch.Tensor, k: torch.Tensor) -> torch.Tensor:
A.diagonal().copy_(k)
return A
def inplace_add_diag_th(A: torch.Tensor, k: float) -> torch.Tensor:
# Assumes A is square (wide also works).
# Need to use .diagonal() as .diag() makes a copy
A.diagonal().add_(k)
return A
def lauum_wrapper(A: torch.Tensor, upper: bool, use_cuda: bool, opt: FalkonOptions) -> torch.Tensor:
if use_cuda:
from falkon.ooc_ops.ooc_lauum import gpu_lauum
return gpu_lauum(A, upper=upper, write_opposite=True, overwrite=True, opt=opt)
else:
Anp = A.numpy()
lauum = choose_fn(Anp.dtype, scll.dlauum, scll.slauum, "LAUUM")
sol, info = lauum(Anp, lower=int(not upper), overwrite_c=1)
if info != 0:
raise RuntimeError(f"Lapack LAUUM failed with error code {info}.")
return torch.from_numpy(sol)
def potrf_wrapper(A: torch.Tensor, clean: bool, upper: bool, use_cuda: bool, opt: FalkonOptions) -> torch.Tensor:
if use_cuda:
from falkon.ooc_ops.ooc_potrf import gpu_cholesky
return gpu_cholesky(A, upper=upper, clean=clean, overwrite=True, opt=opt)
else:
return potrf(A, upper=upper, clean=clean, overwrite=True, cuda=False)
|
11491620
|
import config
import subprocess
from flask import request
from flask_restful import Resource, reqparse
import logging
import base64
from decorators import private_api
from requests import get
import json
import shlex
logger = logging.getLogger("api")
class Jobs(Resource):
@private_api
def get(self):
"""
List all jobs in the queue
---
tags:
- Scheduler
responses:
200:
description: List of all jobs
500:
description: Backend error
"""
parser = reqparse.RequestParser()
parser.add_argument('user', type=str, location='args')
args = parser.parse_args()
user = args['user']
try:
qstat_command = config.Config.PBS_QSTAT + " -f -Fjson"
try:
get_job_info = subprocess.check_output(shlex.split(qstat_command))
try:
job_info = json.loads(((get_job_info.decode('utf-8')).rstrip().lstrip()))
except Exception as err:
return {"success": False, "message": "Unable to retrieve this job. Job may have terminated."}, 500
if user is None:
return {"success": True, "message": job_info["Jobs"] if "Jobs" in job_info.keys() else {}}, 200
else:
job_for_user = {"Jobs": {}}
if "Jobs" in job_info.keys():
job_ids_key = list(job_info["Jobs"].keys())
for job_id in job_ids_key:
job_owner = job_info["Jobs"][job_id]["Job_Owner"].split("@")[0]
if job_owner == user:
job_for_user["Jobs"][job_id] = job_info["Jobs"][job_id]
return {"success": True, "message": job_for_user["Jobs"]}, 200
except Exception as err:
return {"succes": False, "message": "Unable to retrieve Job ID (job may have terminated and is no longer in the queue)"}, 500
except Exception as err:
return {"success": False, "message": "Unknown error: " + str(err)}, 500
|
11491621
|
from .c11type import C11Type
class C11TypeBool(C11Type):
def __init__(self):
C11Type.__init__(self)
self.typeName = u'bool'
def setSchema(self, schemaName, schemaValue):
C11Type.setSchema(self, schemaName, schemaValue)
@classmethod
def codeDefaultValue(cls, schemaDefaultValue):
if schemaDefaultValue is not None and schemaDefaultValue is True:
return u'true'
return u'false'
@classmethod
def codeDefaultValueArray(cls, schemaDefaultValues):
if schemaDefaultValues is None\
or not isinstance(schemaDefaultValues, list)\
or len(schemaDefaultValues) <= 0:
return u''
code_default_value = u''
for schema_value in schemaDefaultValues:
if len(code_default_value) > 0:
code_default_value = code_default_value + u', '
if schema_value == True:
code_default_value = code_default_value + u'true'
else:
code_default_value = code_default_value + u'false'
return u'{ %s }' % code_default_value
@classmethod
def codeJsonCheck(cls):
return u'IsBool()'
@classmethod
def codeJsonSet(cls, dataName, variableName):
return u'%s.%s = _JsonValue[GLTFTEXT("%s")].GetBool();' % (dataName, variableName, variableName)
@classmethod
def codeJsonGet(cls, dataName, variableName):
return u'_JsonValue[GLTFTEXT("%s")].SetBool(%s.%s);' % (variableName, dataName, variableName)
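# Illustrative output only (derived from the format strings above, not part of
# the generator): with dataName='data' and variableName='enabled' the class emits
#   data.enabled = _JsonValue[GLTFTEXT("enabled")].GetBool();
#   _JsonValue[GLTFTEXT("enabled")].SetBool(data.enabled);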
|
11491622
|
import setuptools
pkg_name="theiapod"
setuptools.setup(
name=pkg_name,
version="0.1.0",
author="<NAME>",
author_email="<EMAIL>",
description="Self-host gitpod-style workspaces for github repositories using theia",
url="https://github.com/magland/theiapod",
packages=setuptools.find_packages(),
scripts=['bin/theiapod'],
package_data={},
install_requires=[
"pyyaml",
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
)
|
11491635
|
import pytest
import os
from tests.examples import TestExample as base_class
from pandas.testing import assert_frame_equal
from yggdrasil.components import create_component
class TestExampleConditionalIO(base_class):
r"""Test the conditional_io example."""
@pytest.fixture(scope="class")
def example_name(self):
r"""str: Name of example being tested."""
return "conditional_io"
@pytest.fixture
def expected_output_files(self, yamldir):
r"""list: Examples of expected output for the run."""
return [os.path.join(yamldir, 'Output', 'output.txt')]
@pytest.fixture
def output_files(self, yamldir):
r"""list: Output files for the run."""
return [os.path.join(yamldir, 'output.txt')]
@pytest.fixture(scope="class")
def read_file(self):
r"""Read in contents from a file.
Args:
fname (str): Full path to the file that should be read.
Returns:
object: File contents.
"""
def read_file_w(fname):
x = create_component('file', 'table', name='test',
address=fname, direction='recv',
as_array=True, recv_converter='pandas')
msg = x.recv_array()[1]
if msg is not None:
msg = msg.sort_values(by=['InputMass']).reset_index(
drop=True)
x.close()
return msg
return read_file_w
@pytest.fixture(scope="class")
def check_file_contents(self, read_file):
r"""Check that the contents of a file are correct.
Args:
fname (str): Full path to the file that should be checked.
result (str): Contents of the file.
"""
def check_file_contents_w(fname, result):
ocont = read_file(fname)
assert_frame_equal(ocont, result)
return check_file_contents_w
@pytest.fixture(scope="class")
def check_file_size(self):
r"""Check that file is the correct size.
Args:
fname (str): Full path to the file that should be checked.
fsize (int): Size that the file should be in bytes.
"""
def check_file_size_w(*args, **kwargs):
pass
return check_file_size_w
|
11491639
|
import os
from tqdm import trange
import torch
from im2mesh.common import chamfer_distance
from im2mesh.training import BaseTrainer
from im2mesh.utils import visualize as vis
class Trainer(BaseTrainer):
r''' Trainer object for the Point Set Generation Network.
The PSGN network is trained on Chamfer distance. The Trainer object
obtains methods to perform a train and eval step as well as to visualize
the current training state by plotting the respective point clouds.
Args:
model (nn.Module): PSGN model
optimizer (PyTorch optimizer): The optimizer that should be used
device (PyTorch device): the PyTorch device
input_type (string): The input type (e.g. 'img')
vis_dir (string): the visualisation directory
'''
def __init__(self, model, optimizer, device=None, input_type='img',
vis_dir=None):
self.model = model
self.optimizer = optimizer
self.device = device
self.input_type = input_type
self.vis_dir = vis_dir
if vis_dir is not None and not os.path.exists(vis_dir):
os.makedirs(vis_dir)
def train_step(self, data):
r''' Performs a train step.
The chamfer loss is calculated and an appropriate backward pass is
performed.
Args:
data (tensor): training data
'''
self.model.train()
points = data.get('pointcloud').to(self.device)
inputs = data.get('inputs').to(self.device)
loss = self.compute_loss(points, inputs)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
return loss.item()
def eval_step(self, data):
r''' Performs an evaluation step.
The chamfer loss is calculated and returned in a dictionary.
Args:
data (tensor): input data
'''
self.model.eval()
device = self.device
points = data.get('pointcloud_chamfer').to(device)
inputs = data.get('inputs').to(device)
with torch.no_grad():
points_out = self.model(inputs)
loss = chamfer_distance(points, points_out).mean()
loss = loss.item()
eval_dict = {
'loss': loss,
'chamfer': loss,
}
return eval_dict
def visualize(self, data):
r''' Visualizes the current output data of the model.
The point clouds for respective input data is plotted.
Args:
data (tensor): input data
'''
device = self.device
points_gt = data.get('pointcloud').to(device)
inputs = data.get('inputs').to(device)
with torch.no_grad():
points_out = self.model(inputs)
points_out = points_out.cpu().numpy()
points_gt = points_gt.cpu().numpy()
batch_size = inputs.size(0)
for i in trange(batch_size):
input_img_path = os.path.join(self.vis_dir, '%03d_in.png' % i)
vis.visualize_data(
inputs[i].cpu(), self.input_type, input_img_path)
out_file = os.path.join(self.vis_dir, '%03d.png' % i)
out_file_gt = os.path.join(self.vis_dir, '%03d_gt.png' % i)
vis.visualize_pointcloud(points_out[i], out_file=out_file)
vis.visualize_pointcloud(points_gt[i], out_file=out_file_gt)
def compute_loss(self, points, inputs):
r''' Computes the loss.
The Point Set Generation Network is trained on the Chamfer distance.
Args:
points (tensor): GT point cloud data
inputs (tensor): input data for the model
'''
points_out = self.model(inputs)
loss = chamfer_distance(points, points_out).mean()
return loss
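# Hedged usage sketch (model constructor, data loader and batch keys are
# assumptions based on the docstrings above, not taken from this module):
#   model = PSGN(...).to(device)
#   optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
#   trainer = Trainer(model, optimizer, device=device, input_type='img', vis_dir='vis')
#   for batch in train_loader:   # batch is a dict with 'pointcloud' and 'inputs'
#       loss = trainer.train_step(batch)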
|
11491681
|
from plugin.core.constants import GUID_SERVICES
import logging
log = logging.getLogger(__name__)
class SyncMap(object):
def __init__(self, task):
self.task = task
self._by_guid = {}
self._by_key = {}
def add(self, p_section_key, p_key, guids):
if not guids or not p_key:
return False
for guid in guids:
self.add_one(p_section_key, p_key, guid)
def add_one(self, p_section_key, p_key, guid):
if guid is None or p_key is None:
return False
p_key = int(p_key)
# Flatten `guid`
if type(guid) is not tuple:
guid = (guid.service, guid.id)
if guid[0] not in GUID_SERVICES:
log.info('Unknown primary agent: %r -> %r (section: %r)', guid[0], p_key, p_section_key)
# Store in `_by_guid` map
if guid not in self._by_guid:
self._by_guid[guid] = set()
self._by_guid[guid].add((p_section_key, p_key))
# Store in `_by_key` map
if p_key not in self._by_key:
self._by_key[p_key] = set()
self._by_key[p_key].add(guid)
return True
def by_guid(self, guid):
# Flatten `guid`
if type(guid) is not tuple:
guid = (guid.service, guid.id)
return self._by_guid.get(guid, set())
def by_key(self, rating_key):
if rating_key is None:
return set()
return self._by_key.get(int(rating_key), set())
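# Hedged usage sketch (guid/key values are invented for illustration):
#   sync_map = SyncMap(task)
#   sync_map.add_one('section-1', 42, ('imdb', 'tt0111161'))
#   sync_map.by_guid(('imdb', 'tt0111161'))  # -> {('section-1', 42)}
#   sync_map.by_key(42)                      # -> {('imdb', 'tt0111161')}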
|
11491702
|
import unittest
import os
import_error = False
try:
from .. import common
except ImportError:
import_error = True
common = None
class TestCase00(unittest.TestCase):
def test_import(self):
self.assertFalse(import_error)
class TestCase01(unittest.TestCase):
def setUp(self):
if import_error:
self.skipTest('ImportError')
def test_backup1(self):
path = common.backup('common.pz', copy=False)
self.assertEqual(path, '')
def test_backup2(self):
path = common.backup(os.path.join(os.path.dirname(__file__), 'test_common.py'), copy=False)
self.assertEqual(os.path.dirname(path), os.path.dirname(__file__))
self.assertEqual(os.path.basename(path), 'test_common.py.1')
|
11491734
|
import FWCore.ParameterSet.Config as cms
from DQMOffline.JetMET.metDQMConfig_cfi import *
#correction for type 1 done in JetMETDQMOfflineSource now
METDQMAnalyzerSequence = cms.Sequence(caloMetDQMAnalyzer*pfMetDQMAnalyzer*pfChMetDQMAnalyzer*pfMetT1DQMAnalyzer)
METDQMAnalyzerSequenceMiniAOD = cms.Sequence(pfMetDQMAnalyzerMiniAOD*pfPuppiMetDQMAnalyzerMiniAOD)
METDQMAnalyzerSequenceCosmics = cms.Sequence(caloMetDQMAnalyzer)
METDQMAnalyzerSequenceHI = cms.Sequence(caloMetDQMAnalyzer*pfMetDQMAnalyzer)
|
11491737
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import yaml
from ansible.plugins.action import ActionBase
from ansible.errors import AnsibleActionFail
from ansible.utils.vars import isidentifier
from ansible.plugins.filter.core import combine
from ansible.plugins.loader import lookup_loader
from ansible_collections.arista.avd.plugins.module_utils.strip_empties import strip_null_from_data
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = {}
result = super().run(tmp, task_vars)
del tmp # tmp no longer has any effect
root_key = ""
if self._task.args:
if "root_key" in self._task.args:
n = self._task.args.get("root_key")
n = self._templar.template(n)
if not isidentifier(n):
raise AnsibleActionFail(f"The argument 'root_key' value of '{n}' is not valid. Keys must start with a letter or underscore character, "
"and contain only letters, numbers and underscores.")
root_key = n
if "templates" in self._task.args:
t = self._task.args.get("templates")
if isinstance(t, list):
template_list = t
else:
raise AnsibleActionFail("The argument 'templates' is not a list")
else:
raise AnsibleActionFail("The argument 'templates' must be set")
else:
raise AnsibleActionFail("The argument 'templates' must be set")
output = {}
template_lookup_module = lookup_loader.get('ansible.builtin.template', loader=self._loader, templar=self._templar)
template_vars = task_vars
for template_item in template_list:
template = template_item.get('template')
if not template:
raise AnsibleActionFail("Invalid template data")
template_options = template_item.get('options', {})
list_merge = template_options.get('list_merge', 'append')
strip_empty_keys = template_options.get('strip_empty_keys', True)
if root_key:
template_vars[root_key] = output
else:
template_vars = combine(task_vars, output, recursive=True)
template_output = template_lookup_module.run([template], template_vars)
template_output_data = yaml.safe_load(template_output[0])
if strip_empty_keys:
template_output_data = strip_null_from_data(template_output_data)
if template_output_data:
output = combine(output, template_output_data, recursive=True, list_merge=list_merge)
if root_key:
result['ansible_facts'] = {root_key: output}
else:
result['ansible_facts'] = output
return result
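# Hedged illustration (not part of the plugin): a playbook task driving this
# action might look like the following; the action name and template file
# names are assumptions.
#
#   - name: Build structured configuration from templates
#     arista.avd.yaml_templates_to_facts:
#       root_key: structured_config
#       templates:
#         - template: "base.j2"
#         - template: "overrides.j2"
#           options:
#             list_merge: replace
#             strip_empty_keys: false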
|
11491760
|
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from textwrap import dedent as d
import pandas as pd
#df = pd.read_hdf(r'..\examples\algae\MC\mc_out_allyears_sigma1.h5')
df = pd.read_csv(r'..\examples\algae\MC\mc_out_allyears_sigma1.dat', delim_whitespace=True)
data = df.values
param = data[:,0:8]
def generate_table():
data = dict()
for l in range(4):
for k in range(2):
data[k+2*l] = [
{
'x': df.iloc[::100, k+2*l],
'type': 'histogram'
}
]
return html.Table(
[html.Tr([
html.Td(dcc.Graph(id='hist{}'.format(k+2*l),
figure = {'data': data[k+2*l], 'layout': {}}),
style={'width': '500px'}) for k in range(2)
], style={'height': '300px'}) for l in range(4)],
)
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div(children=[
html.H1(children='Conditional probability distributions'),
html.Div([
dcc.Markdown(d("""
**Click** on bars in one histogram to plot conditional
probability distribution for other variables.
""")),
html.Button('Reset', id='reset', n_clicks_timestamp=0),
#html.Pre(id='click-data'),
]),
generate_table()
])
#@app.callback(
# Output('click-data', 'children'),
# [Input('hist0', 'clickData')])
#def display_click_data(clickData):
#return json.dumps(clickData['points'][0], indent=2)
# try:
# return json.dumps(clickData['points'][0]['binNumber'])
# except:
# return 0
def gen_callback(k):
from numpy import array
def update_figure(*args):
defaultdata = {
'data': [
{
'x': df.iloc[::100, k],
'type': 'histogram'
}
]
}
# Find out who triggered the callback,
# see https://github.com/plotly/dash/issues/291
ctx = dash.callback_context
# Update was not triggered at all
if not ctx.triggered:
return defaultdata
trigger = ctx.triggered[0]
# Reset button triggered update
if trigger['prop_id'] == 'reset.n_clicks':
return defaultdata
# Other component triggered update
try:
reducedset = array(trigger['value']['points'][0]['pointNumbers'])
except:
return defaultdata
return {
'data': [
{
'x': df.iloc[reducedset, k],
'type': 'histogram'
}
]
}
return update_figure
for k in range(8):
app.callback(
output = Output('hist{}'.format(k), 'figure'),
inputs = (
[Input('hist{}'.format(l), 'clickData') for l in range(8)]
+ [Input('reset', 'n_clicks')]))(gen_callback(k))
if __name__ == '__main__':
app.run_server(debug=True, host='0.0.0.0')
|
11491761
|
import numpy as np
import matplotlib.pyplot as plt
from pactools.utils.testing import assert_equal, assert_array_almost_equal
from pactools.utils.testing import assert_greater, assert_raises
from pactools.utils.testing import assert_array_not_almost_equal
from pactools.dar_model import DAR, AR, HAR, StableDAR
from pactools.simulate_pac import simulate_pac
ALL_MODELS = [DAR, AR, HAR, StableDAR]
# Parameters used for the simulated sigin in the test
low_fq_range = [1., 3., 5., 7.]
high_fq_range = [25., 50., 75.]
n_low = len(low_fq_range)
n_high = len(high_fq_range)
high_fq = high_fq_range[1]
low_fq = low_fq_range[1]
n_points = 1024
fs = 200.
_sigin = simulate_pac(n_points=n_points, fs=fs, high_fq=high_fq, low_fq=low_fq,
low_fq_width=1., noise_level=0.3, random_state=0)
_sigdriv = simulate_pac(n_points=n_points, fs=fs, high_fq=high_fq,
low_fq=low_fq, low_fq_width=1., noise_level=0.,
random_state=0, high_fq_amp=0, return_driver=True)
_sigdriv_imag = np.imag(_sigdriv)
_sigdriv = np.real(_sigdriv)
_noise = np.random.RandomState(0).randn(n_points)
_model_params = {'ordar': 10, 'ordriv': 2, 'criterion': False}
def fast_fitted_model(klass=DAR, model_params=_model_params, sigin=_sigin,
sigdriv=_sigdriv, sigdriv_imag=_sigdriv_imag, fs=fs,
train_weights=None, test_weights=None):
model_params = model_params.copy()
if klass == StableDAR and 'iter_newton' not in model_params:
model_params['iter_newton'] = 10
return klass(**model_params).fit(
sigin=sigin, sigdriv=sigdriv, sigdriv_imag=sigdriv_imag, fs=fs,
train_weights=train_weights, test_weights=test_weights)
def test_likelihood_ratio_noise():
# Test that likelihood_ratio returns a positive p_value with noise
sdar = fast_fitted_model(StableDAR, sigin=_noise)
dar = fast_fitted_model(DAR, sigin=_noise)
ar = fast_fitted_model(AR, sigin=_noise)
har = fast_fitted_model(HAR, sigin=_noise)
for h0 in [ar, har]:
for h1 in [dar, sdar]:
p_value = h1.likelihood_ratio(h0)
assert_greater(p_value, 0)
def test_likelihood_ratio_pac():
# Test that likelihood_ratio returns a near zero p_value with PAC
sdar = fast_fitted_model(StableDAR, sigin=_sigin)
dar = fast_fitted_model(DAR, sigin=_sigin)
ar = fast_fitted_model(AR, sigin=_sigin)
har = fast_fitted_model(HAR, sigin=_sigin)
for h0 in [ar, har]:
for h1 in [dar, sdar]:
p_value = h1.likelihood_ratio(h0)
assert_greater(1e-3, p_value)
def test_degrees_of_freedom():
# test the number of fitted parameters in the model
model_params = {'ordar': 5, 'ordriv': 2, 'criterion': False}
dar = fast_fitted_model(model_params=model_params)
assert_equal(dar.degrees_of_freedom(), 6 * 6)
model_params = {'ordar': 5, 'ordriv': 0, 'criterion': False}
dar = fast_fitted_model(model_params=model_params)
assert_equal(dar.degrees_of_freedom(), 6 * 1)
model_params = {'ordar': 1, 'ordriv': 2, 'criterion': False}
dar = fast_fitted_model(model_params=model_params)
assert_equal(dar.degrees_of_freedom(), 2 * 6)
model_params = {'ordar': 10, 'ordriv': 2, 'criterion': False}
ar = fast_fitted_model(AR, model_params=model_params)
assert_equal(ar.degrees_of_freedom(), 10 + 1)
model_params = {'ordar': 10, 'ordriv': 2, 'criterion': False}
har = fast_fitted_model(HAR, model_params=model_params)
assert_equal(har.degrees_of_freedom(), 10 + 6)
model_params = {'ordar': 5, 'ordriv': 2, 'criterion': False}
har = fast_fitted_model(model_params=model_params, sigdriv_imag=None)
assert_equal(har.degrees_of_freedom(), 6 * 3)
model_params = {'ordar': 10, 'ordriv': 2, 'criterion': False}
har = fast_fitted_model(HAR, model_params=model_params, sigdriv_imag=None)
assert_equal(har.degrees_of_freedom(), 10 + 3)
def dar_no_fit(ortho, normalize, sigdriv=_sigdriv, sigdriv_imag=_sigdriv_imag,
**model_params):
dar = DAR(ortho=ortho, normalize=normalize, **model_params)
dar.sigin = _sigin[None, :]
dar.sigdriv = sigdriv[None, :]
dar.sigdriv_imag = sigdriv_imag[None, :]
dar._make_basis()
return dar
def test_make_basis_new_sigdriv():
# Test that _make_basis works the same with a new sigdriv,
# using stored orthonormalization transform.
model_params = {'ordar': 5, 'ordriv': 2, 'criterion': False}
for normalize in (True, False):
for ortho in (True, False):
for this_sigdriv, this_sigdriv_imag in ([_sigdriv, _sigdriv_imag],
[_noise, _noise[::-1]]):
dar = dar_no_fit(
ortho=ortho, normalize=normalize, sigdriv=this_sigdriv,
sigdriv_imag=this_sigdriv_imag, **model_params)
newbasis = dar._make_basis(sigdriv=this_sigdriv,
sigdriv_imag=this_sigdriv_imag)
assert_array_almost_equal(newbasis, dar.basis_)
# Different result if we change a parameter
dar_ortho = dar_no_fit(not ortho, normalize, **model_params)
dar_norma = dar_no_fit(ortho, not normalize, **model_params)
for dar2 in [dar_ortho, dar_norma]:
assert_raises(AssertionError, assert_array_almost_equal,
dar.basis_, dar2.basis_)
def test_make_basis_ortho_normalize():
# Test the effect of ortho and normalize parameters
model_params = {'ordar': 5, 'ordriv': 2, 'criterion': False}
for normalize in (True, False):
for ortho in (True, False):
dar = dar_no_fit(ortho, normalize, **model_params)
basis = dar.basis_
basis.shape = (basis.shape[0], -1)
product = np.dot(basis, basis.T)
n_basis = product.shape[0]
n_samples = _sigin.size
# Test that the diagonal is constant if normalized
ref = np.ones(n_basis) * n_samples
if normalize:
assert_array_almost_equal(product.flat[::n_basis + 1], ref)
else:
assert_array_not_almost_equal(product.flat[::n_basis + 1], ref)
# Test that the rest is zero if orthogonalized
product.flat[::n_basis + 1] = 0
ref = np.zeros((n_basis, n_basis))
if ortho:
assert_array_almost_equal(product, ref)
else:
assert_array_not_almost_equal(product, ref)
def test_plot_comodulogram():
# Smoke test with the standard plotting function
model = fast_fitted_model()
model.plot_lines()
model.plot()
plt.close('all')
model = fast_fitted_model(sigdriv_imag=None)
model.plot_lines()
model.plot()
plt.close('all')
def test_weighting_with_ones():
# Test that weighting with ones is identical as no weighting
factor = 1.5
train_weights = np.ones_like(_sigin) * factor
for klass in ALL_MODELS:
model_2 = fast_fitted_model(klass=klass, train_weights=train_weights,
test_weights=train_weights)
model_1 = fast_fitted_model(klass=klass, train_weights=train_weights,
test_weights=None)
model_0 = fast_fitted_model(klass=klass, train_weights=None,
test_weights=None)
assert_array_almost_equal(model_1.AR_, model_0.AR_, decimal=5)
assert_array_almost_equal(model_2.AR_, model_0.AR_, decimal=5)
assert_array_almost_equal(model_1.G_, model_0.G_, decimal=5)
assert_array_almost_equal(model_2.G_, model_0.G_, decimal=5)
for train in [False, True]:
assert_array_almost_equal(
model_0._estimate_log_likelihood(train=train)[0] * factor,
model_1._estimate_log_likelihood(train=train)[0], decimal=4)
assert_array_almost_equal(
model_1._estimate_log_likelihood(train=train)[0],
model_2._estimate_log_likelihood(train=train)[0], decimal=4)
def test_weighting_with_zeros():
# Test that zero weights on the second half are equivalent to fitting on the first half only
split = int(n_points // 2)
train_weights = np.ones_like(_sigin)
train_weights[split:] = 0
sigin_half = _sigin[:split]
sigdriv_half = _sigdriv[:split]
sigdriv_imag_half = _sigdriv_imag[:split]
for klass in ALL_MODELS:
print(klass.__name__)
model_1 = fast_fitted_model(klass=klass, train_weights=train_weights)
model_0 = fast_fitted_model(
klass=klass, sigin=sigin_half, sigdriv=sigdriv_half,
sigdriv_imag=sigdriv_imag_half, train_weights=None)
assert_array_almost_equal(model_1.AR_, model_0.AR_)
assert_array_almost_equal(model_1.G_, model_0.G_)
for train in [False, True]:
assert_array_almost_equal(
model_0._estimate_log_likelihood(train=train),
model_1._estimate_log_likelihood(train=train), decimal=5)
|
11491778
|
import face_alignment
import numpy as np
import cv2
import threading
class VideoCaptureThreading:
def __init__(self, src=0, width=640, height=480):
self.src = src
self.cap = cv2.VideoCapture(self.src)
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
self.grabbed, self.frame = self.cap.read()
self.started = False
self.read_lock = threading.Lock()
def set(self, var1, var2):
self.cap.set(var1, var2)
def start(self):
if self.started:
print("[!] Threaded video capturing has already been started.")
return None
self.started = True
self.thread = threading.Thread(target=self.update, args=())
self.thread.start()
return self
def update(self):
while self.started:
grabbed, frame = self.cap.read()
with self.read_lock:
self.grabbed = grabbed
self.frame = frame
def read(self):
with self.read_lock:
frame = self.frame.copy()
grabbed = self.grabbed
return grabbed, frame
def stop(self):
self.started = False
self.thread.join()
def __exit__(self, exec_type, exc_value, traceback):
self.cap.release()
def bb_intersection_over_union(boxA, boxB):
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
iou = interArea / float(boxAArea + boxBArea - interArea)
return iou
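# Illustrative check (not in the original script): identical boxes give IoU 1.0,
# disjoint boxes give 0.0.
#   bb_intersection_over_union([0, 0, 9, 9], [0, 0, 9, 9])      # -> 1.0
#   bb_intersection_over_union([0, 0, 9, 9], [10, 10, 19, 19])  # -> 0.0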
def extract_bbox(frame, refbbox, fa):
bboxes = fa.face_detector.detect_from_image(frame[..., ::-1])
if len(bboxes) != 0:
bbox = max(
[
(bb_intersection_over_union(bbox, refbbox), tuple(bbox))
for bbox in bboxes
]
)[1]
else:
bbox = np.array([0, 0, 0, 0, 0])
return np.maximum(np.array(bbox), 0)
def get_max_length(bbox):
x1, y1, x2, y2 = bbox
w = x2 - x1
h = y2 - y1
square_length = max(w, h)
return square_length
def get_face_bbox(image, face_detector):
bboxes = face_detector.face_detector.detect_from_image(image)
if not bboxes:
return []
growth_factor = 0.4
img_h, img_w = image.shape[:2]
bbox = list(map(int, bboxes[0]))
x1, y1, x2, y2, score = bbox
square_length = get_max_length((x1, y1, x2, y2))
dlength = int(growth_factor * square_length)
x1 = max(0, x1 - dlength)
y1 = max(0, y1 - dlength)
x2 = min(img_w - 1, x2 + dlength)
y2 = min(img_h - 1, y2 + int(dlength * 0.5))
output_length = get_max_length((x1, y1, x2, y2))
return (x1, y1, x1 + output_length, y1 + output_length)
fa = face_alignment.FaceAlignment(
face_alignment.LandmarksType._2D, flip_input=True, device="cpu"
)
# cap = cv2.VideoCapture(0)
cap = VideoCaptureThreading(0)
cap.start()
bbox = []
# Capture initial video
while True:
grabbed, frame = cap.read()
if not grabbed:
break
frame = frame[..., ::-1].copy()
# bboxes = fa.face_detector.detect_from_image(frame)
# print(bboxes)
bbox = get_face_bbox(frame, fa)
if bbox:
# only unpack and draw when a face was actually found
x1, y1, x2, y2 = bbox
cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 255, 255))
cv2.imshow("cropped", frame[y1:y2, x1:x2][..., ::-1])
cv2.imshow("original", frame[..., ::-1])
key = cv2.waitKey(1)
if key == 27: # ESC
break
# cap.release()
cap.stop()
cv2.destroyAllWindows()
|
11491792
|
import numpy as np
from scipy.stats import mode, itemfreq
from scipy import delete
import matplotlib.pylab as plt
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC as SVM
from missing_data_imputation import Imputer
# declare csv headers
x = np.genfromtxt('data/adult-train-raw', delimiter=', ', dtype=object)
# remove redundant education-number feature
x = delete(x, (4, 14), 1)
# enumerate parameters and instantiate Imputer
imp = Imputer()
missing_data_cond = lambda x: x == '?'
cat_cols = (1, 3, 4, 5, 6, 7, 8, 12)
n_neighbors = 5
# # drop observations with missing variables
# print 'imputing with drop'
# data_drop = imp.drop(x, missing_data_cond)
# replace missing values with random existing values
print 'imputing with random replacement'
data_replace = imp.replace(x, missing_data_cond)
# replace missing values with feature summary
print 'imputing with feature summarization (mode)'
summ_func = lambda x: mode(x)[0]
data_mode = imp.summarize(x, summ_func, missing_data_cond)
# replace categorical features with one hot row
print 'imputing with one-hot'
data_onehot = imp.binarize_data(x, cat_cols)
# replace missing data with predictions using random forest
print 'imputing with predicted values from random forest'
clf = RandomForestClassifier(n_estimators=100, criterion='gini')
data_rf = imp.predict(x, cat_cols, missing_data_cond, clf)
# replace missing data with predictions using SVM
print 'imputing with predicted values using SVM'
clf = SVM(
penalty='l2', loss='squared_hinge', dual=True, tol=0.0001, C=1.0, multi_class='ovr',
fit_intercept=True, intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000)
data_svm = imp.predict(x, cat_cols, missing_data_cond, clf)
# replace missing data with predictions using logistic regression
print 'imputing with predicted values using logistic regression'
clf = LogisticRegression(
penalty='l2', dual=False, tol=0.0001, C=1.0, fit_intercept=True,
intercept_scaling=1)
data_logistic = imp.predict(x, cat_cols, missing_data_cond, clf)
# replace missing data with values obtained after factor analysis
print 'imputing with factor analysis'
data_facanal = imp.factor_analysis(x, cat_cols, missing_data_cond)
# replace missing data with knn
print 'imputing with K-Nearest Neighbors'
data_knn = imp.knn(x, n_neighbors, np.mean, missing_data_cond, cat_cols)
def compute_histogram(data, labels):
histogram = itemfreq(sorted(data))
for label in labels:
if label not in histogram[:,0]:
histogram = np.vstack((histogram,
np.array([[label, 0]], dtype=object)))
histogram = histogram[histogram[:,0].argsort()]
return histogram
# compute histograms
labels = np.unique(x[:,1])
freq_data = {}
freq_data['Raw data'] = compute_histogram(x[:,1], labels)
# freq_data['Drop missing'] = compute_histogram(data_drop[:,1], labels)
freq_data['Random replace'] = compute_histogram(data_replace[:,1], labels)
freq_data['Summary'] = compute_histogram(data_mode[:,1], labels)
freq_data['Random forests'] = compute_histogram(data_rf[:,1], labels)
freq_data['SVM'] = compute_histogram(data_svm[:,1], labels)
freq_data['Logistic regression'] = compute_histogram(data_logistic[:,1], labels)
freq_data['Factor analysis'] = compute_histogram(data_facanal[:,1], labels)
freq_data['KNN'] = compute_histogram(data_knn[:,1], labels)
# plot histograms given feature with missing data
n_methods = len(freq_data.keys())
bins = np.arange(len(labels))
width = .25
fig, ax = plt.subplots(figsize=(12,8))
for i in xrange(n_methods):
key = sorted(freq_data.keys())[i]
offset = i*2*width/float(n_methods)
ax.bar(bins+offset, freq_data[key][:,1].astype(int), width, label=key,
color=plt.cm.hot(i/float(n_methods)), align='center')
ax.set_xlabel('Work class categories', size=15)
ax.set_ylabel('Count', size=15)
ax.set_title('Adult training set (N= 32,561)', size=15, fontweight='bold')
ax.set_xticks(bins + width)
ax.set_xticklabels(labels, rotation=45)
plt.legend(loc=2)
plt.tight_layout()
plt.show()
|
11491834
|
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
from utils.utils import decode_mu_law
from models.layers import (
UpsampleNetwork
)
from models.subscale import (
CondintionNetwork,
Subscaler
)
from hparams import create_hparams
HPARAMS = create_hparams()
class WaveRNN(nn.Module):
def __init__(self, hparams, debug=False):
super().__init__()
self.n_classes = 2 ** hparams.bits
self.rnn_dims = hparams.rnn_dims
self.fc_dims = hparams.fc_dims
self.pad = hparams.pad
self.upsample_factors = hparams.upsample_factors
self.feat_dims = hparams.feat_dims
self.compute_dims = hparams.compute_dims
self.res_out_dims = hparams.res_out_dims
self.res_blocks = hparams.res_blocks
self.hop_length = hparams.hop_length
self.debug = debug
self.aux_dims = self.res_out_dims // 4
self.lut_x = nn.Embedding(
self.n_classes,
self.fc_dims,
max_norm=1.0
)
self.subscale = Subscaler(hparams)
self.conditioning_network = CondintionNetwork(hparams.condnet_n_layers, self.subscale.context_len,
hparams.condnet_channels, hparams.condnet_kernelsize,
hparams.condnet_drouput)
self.upsample = UpsampleNetwork(self.feat_dims, self.upsample_factors, self.compute_dims,
self.res_blocks, self.res_out_dims, self.pad)
self.fc0 = nn.Linear(self.feat_dims + self.aux_dims + hparams.condnet_channels, self.rnn_dims)
self.rnn1 = nn.GRU(self.rnn_dims, self.rnn_dims, batch_first=True)
self.rnn2 = nn.GRU(self.rnn_dims + self.aux_dims, self.rnn_dims, batch_first=True)
self.fc1 = nn.Linear(self.rnn_dims + self.aux_dims, self.fc_dims)
self.fc2 = nn.Linear(self.fc_dims + self.aux_dims, self.fc_dims)
self.fc3 = nn.Linear(self.fc_dims, self.n_classes)
def int2float(self, x):
x = 2 * x.float() / (self.n_classes - 1.) - 1.
return x
def forward(self, x, mel):
"""Method used during training. Given the chunk of a melspectrogram, and the corresponding
chunk of a waveform, this decodes autoregressively, i.e. predicts softmax probabilities for
the output waveform. Note: T_mel * hop_length = T_wav.
args:
x ([B, T_wav] torch.LongTensor): chunk of a quantized waveform (corresponds to mel)
mel ([B, num_bins, T_mel] torch.FloatTensor): chunk of a mel (corresponds to x)
returns:
soft ([B, T_wav, 2 ** bits] torch.FloatTensor): predicted softmax probabilities.
"""
mel, aux = self.upsample(mel)
mel, aux = self.subscale.pad(mel), self.subscale.pad(aux)
aux_idx = [self.aux_dims * i for i in range(5)]
a1 = aux[:, :, aux_idx[0]:aux_idx[1]]
a2 = aux[:, :, aux_idx[1]:aux_idx[2]]
a3 = aux[:, :, aux_idx[2]:aux_idx[3]]
a4 = aux[:, :, aux_idx[3]:aux_idx[4]]
x = self.int2float(x)
context = self.subscale.extract_context_from_train_batch(x)
# B, T, context_len -> B, context_len, T
context = context.permute(0, 2, 1)
conditioned = self.conditioning_network(context)
# B, n_channels, T -> B, T, n_channels
conditioned = conditioned.permute(0, 2, 1)
x = torch.cat([conditioned, mel, a1], dim=2)
x = self.subscale.stack_substensors(x)
a2 = self.subscale.stack_substensors(a2)
a3 = self.subscale.stack_substensors(a3)
a4 = self.subscale.stack_substensors(a4)
x = self.fc0(x)
res = x
self.rnn1.flatten_parameters()
x, _ = self.rnn1(x)
x = x + res
res = x
x = torch.cat([x, a2], dim=2)
self.rnn2.flatten_parameters()
x, _ = self.rnn2(x)
x = x + res
x = torch.cat([x, a3], dim=2)
x = F.relu(self.fc1(x))
x = torch.cat([x, a4], dim=2)
x = F.relu(self.fc2(x))
x = self.fc3(x)
x = self.subscale.flatten_subtensors(x)
soft = F.log_softmax(x, dim=-1)
if self.debug:
return soft, context, x
return soft
def train_mode_generate(self, x, mel):
if self.debug:
logprobs, context, x = self.forward(x, mel)
else:
logprobs = self.forward(x, mel)
probs = torch.exp(logprobs)
output = self.sampler(probs)
output = self.transform(output)
if self.debug:
return output, context, x
return output
def make_context_batch(self, x, pos_dict):
assert(list(pos_dict.keys()) == list(range(min(pos_dict), max(pos_dict) + 1)))
context_batch = []
for subt in range(min(pos_dict), max(pos_dict) + 1):
pos = pos_dict[subt]
context = self.subscale.extract_context(x, pos)
context_batch.append(context)
context_batch = torch.stack(context_batch)
context_batch = context_batch.permute(0, 2, 1)
return context_batch
# pylint: disable=R0913
def decode(self, context, m_t, a1_t, a2_t, a3_t, a4_t, h1, h2, rnn_cell1, rnn_cell2):
"""Helper function for inference mode when the ground truth waveform is not known.
"""
conditioned = self.conditioning_network(context)
# B, n_channels, 1 -> B, n_channels
conditioned = conditioned.squeeze(2)
x = torch.cat([conditioned, m_t, a1_t], dim=1)
x = self.fc0(x)
h1 = rnn_cell1(x, h1)
x = x + h1
inp = torch.cat([x, a2_t], dim=1)
h2 = rnn_cell2(inp, h2)
x = x + h2
x = torch.cat([x, a3_t], dim=1)
x = F.relu(self.fc1(x))
x = torch.cat([x, a4_t], dim=1)
x = F.relu(self.fc2(x))
x = self.fc3(x)
posterior = F.softmax(1.0 * x, dim=1)
if self.debug:
return (posterior, h1, h2), context, x
return posterior, h1, h2
# pylint: disable=R0913
def transform(self, output):
output = decode_mu_law(output.float().cpu(), self.n_classes)
output = output.cpu().numpy()
return output
def sampler(self, posterior):
distrib = torch.distributions.Categorical(posterior)
x = distrib.sample()
x = self.int2float(x)
return x
def inference(self, mel, use_tqdm=True, gt=None):
"""Given a melspectrogram of arbitrary length, this function generates the corresponding
predicted waveform. Note that T_wav = T_mel * hop_length
args:
mel ([B, num_bins, T_mel] torch.FloatTensor): melspectrogram.
use_tqdm (bool): flag to use tqdm or not. Useful to reduce logging.
returns:
outputs ([B, T_wav] torch.FloatTensor): predicted waveform
"""
rnn_cell1 = self.get_gru_cell(self.rnn1)
rnn_cell2 = self.get_gru_cell(self.rnn2)
with torch.no_grad():
aux_idx = [self.aux_dims * i for i in range(5)]
upsampled_mel, aux = self.upsample(mel[:, :, :])
a1 = aux[:, :, aux_idx[0]:aux_idx[1]]
a2 = aux[:, :, aux_idx[1]:aux_idx[2]]
a3 = aux[:, :, aux_idx[2]:aux_idx[3]]
a4 = aux[:, :, aux_idx[3]:aux_idx[4]]
output = torch.zeros(upsampled_mel.shape[0], upsampled_mel.shape[1])
if gt is not None:
gt = gt[:, :output.shape[1]]
gt = self.int2float(gt)
tester_src = torch.zeros_like(output).long()
tester_tgt = torch.arange(upsampled_mel.shape[1]).repeat(upsampled_mel.shape[0], 1)
pos_cont = {}
xs = [0] * output.shape[1]
for subt in tqdm(range(self.subscale.batch_factor)):
h1 = mel.new(mel.shape[0], self.rnn_dims).zero_()
h2 = mel.new(mel.shape[0], self.rnn_dims).zero_()
for j in tqdm(range(upsampled_mel.shape[1] // self.subscale.batch_factor)):
pos = self.subscale.inv_map_pos(subt, j)
m_t = upsampled_mel[:, pos, :]
a1_t = a1[:, pos, :]
a2_t = a2[:, pos, :]
a3_t = a3[:, pos, :]
a4_t = a4[:, pos, :]
context = gt if gt is not None else output
context = self.subscale.extract_context(context, pos)
context = context.unsqueeze(1)
# B, 1, context_len -> B, context_len, 1
context = context.permute(0, 2, 1)
if self.debug:
(posterior, h1, h2), context, x = self.decode(context, m_t, a1_t, a2_t, a3_t, a4_t, h1, h2,
rnn_cell1, rnn_cell2)
pos_cont[pos] = context
xs[pos] = x
else:
posterior, h1, h2 = self.decode(context, m_t, a1_t, a2_t, a3_t, a4_t, h1, h2, rnn_cell1,
rnn_cell2)
sample = self.sampler(posterior)
output[:, pos] = sample
tester_src[:, pos] = pos
assert(torch.eq(tester_src, tester_tgt).all())
output = self.transform(output)
if self.debug:
return output, pos_cont, torch.stack(xs, 1)
return output
def subscale_inference(self, mel, use_tqdm=True, gt=None):
"""Given a melspectrogram of arbitrary length, this function generates the corresponding
predicted waveform. Note that T_wav = T_mel * hop_length
args:
mel ([B, num_bins, T_mel] torch.FloatTensor): melspectrogram.
use_tqdm (bool): flag to use tqdm or not. Useful to reduce logging.
returns:
outputs ([B, T_wav] torch.FloatTensor): predicted waveform
"""
rnn_cell1 = self.get_gru_cell(self.rnn1)
rnn_cell2 = self.get_gru_cell(self.rnn2)
with torch.no_grad():
aux_idx = [self.aux_dims * i for i in range(5)]
upsampled_mel, aux = self.upsample(mel[:, :, :])
a1 = aux[:, :, aux_idx[0]:aux_idx[1]]
a2 = aux[:, :, aux_idx[1]:aux_idx[2]]
a3 = aux[:, :, aux_idx[2]:aux_idx[3]]
a4 = aux[:, :, aux_idx[3]:aux_idx[4]]
output = torch.zeros(upsampled_mel.shape[0], upsampled_mel.shape[1])
if gt is not None:
gt = gt[:, :output.shape[1]]
gt = self.int2float(gt)
tester_src = torch.zeros_like(output).long()
tester_tgt = torch.arange(upsampled_mel.shape[1]).repeat(upsampled_mel.shape[0], 1)
pos_cont = {}
xs = [0] * output.shape[1]
hidden_states = {
subt: [mel.new(mel.shape[0], self.rnn_dims).zero_(), mel.new(mel.shape[0], self.rnn_dims).zero_()]
for subt in range(self.subscale.batch_factor)
}
n_steps_subt_0 = upsampled_mel.shape[1] // self.subscale.batch_factor
n_steps_remaining = (self.subscale.horizon + 1) * (self.subscale.batch_factor - 1)
n_steps = n_steps_subt_0 + n_steps_remaining
for j in tqdm(range(n_steps)):
m_t, a1_t, a2_t, a3_t, a4_t = [], [], [], [], []
batch_size_upper_lim = min(j // (self.subscale.horizon + 1) + 1, self.subscale.batch_factor)
batch_size_lower_lim = max(((j - n_steps_subt_0) // (self.subscale.horizon + 1) + 1), 0)
# batch_size = batch_size_upper_lim - batch_size_lower_lim
pos_dict = {}
for subt in range(batch_size_lower_lim, batch_size_upper_lim):
shifted_j = j - (self.subscale.horizon + 1) * subt
pos = self.subscale.inv_map_pos(subt, shifted_j)
pos_dict[subt] = pos
m_t.append(upsampled_mel[:, pos, :])
a1_t.append(a1[:, pos, :])
a2_t.append(a2[:, pos, :])
a3_t.append(a3[:, pos, :])
a4_t.append(a4[:, pos, :])
m_t, a1_t, a2_t, a3_t, a4_t = tuple(map(lambda x: torch.stack(x).squeeze(1), [m_t, a1_t, a2_t,
a3_t, a4_t]))
h1 = torch.stack([hidden_states[subt][0]
for subt in range(batch_size_lower_lim, batch_size_upper_lim)]).squeeze(1)
h2 = torch.stack([hidden_states[subt][1]
for subt in range(batch_size_lower_lim, batch_size_upper_lim)]).squeeze(1)
context_source = gt if gt is not None else output
context = self.make_context_batch(context_source, pos_dict)
if self.debug:
(posterior, h1, h2), context, x = self.decode(context, m_t, a1_t, a2_t, a3_t, a4_t, h1, h2,
rnn_cell1, rnn_cell2)
for i, (subt, pos) in enumerate(pos_dict.items()):
pos_cont[pos] = context[i].unsqueeze(0)
xs[pos] = x[i].unsqueeze(0)
else:
posterior, h1, h2 = self.decode(context, m_t, a1_t, a2_t, a3_t, a4_t, h1, h2, rnn_cell1, rnn_cell2)
for i, subt in enumerate(range(batch_size_lower_lim, batch_size_upper_lim)):
hidden_states[subt][0] = h1[i].unsqueeze(0)
hidden_states[subt][1] = h2[i].unsqueeze(0)
sample = self.sampler(posterior)
for i, (subt, pos) in enumerate(pos_dict.items()):
output[:, pos] = sample[i]
tester_src[:, pos] = pos
assert(torch.eq(tester_src, tester_tgt).all())
output = self.transform(output)
if self.debug:
return output, pos_cont, torch.stack(xs, 1)
return output
@staticmethod
def get_gru_cell(gru):
"""Given a GRU object, this function returns the corresponding GRU cell with the
correct weights initialised from the GRU object.
args:
gru (nn.GRU): GRU from which to get the cell.
returns:
gru_cell (nn.GRUCell): GRU cell where the cell has been initialised from gru.
"""
gru_cell = nn.GRUCell(gru.input_size, gru.hidden_size)
gru_cell.weight_hh.data = gru.weight_hh_l0.data
gru_cell.weight_ih.data = gru.weight_ih_l0.data
gru_cell.bias_hh.data = gru.bias_hh_l0.data
gru_cell.bias_ih.data = gru.bias_ih_l0.data
return gru_cell
def load_max_state_dict(self, state_dict):
own_state = self.state_dict()
for name, param in state_dict.items():
if name not in own_state:
logging.warning("This weight is not in model: {}".format(name))
continue
try:
own_state[name].copy_(param)
except RuntimeError:
logging.warning(name)
logging.warning("Model size: {}".format(own_state[name].size()))
logging.warning("Checkpoint size: {}".format(param.size()))
|
11491841
|
import torch as th
from typing import Optional, Tuple
from tpp.utils import batch as bu
from tpp.utils.events import Events
from tpp.utils.utils import smallest_positive
from tpp.utils.index import take_2_by_2
from tpp.utils.history_bst import get_prev_times as get_prev_times_bst
def _get_rank(x: th.Tensor) -> int:
return len(x.shape)
def expand_to_rank(x: th.Tensor, rank: int, dim: int = -1) -> th.Tensor:
"""Expand a tensor to a desired rank.
Args:
x: The tensor to expand.
rank: The target rank.
dim: The dim to expand along. Defaults to `-1`.
Returns:
A tensor expanded to the given rank.
"""
x_rank = _get_rank(x)
if x_rank > rank:
raise ValueError(
"Rank of `x` ({}) greater than desired rank ({})".format(
x_rank, rank))
for _ in range(rank - x_rank):
x = x.unsqueeze(dim)
return x
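# Illustrative example (not in the original module): a [2, 3] tensor expanded
# to rank 4 along the last dim becomes [2, 3, 1, 1].
#   expand_to_rank(th.zeros(2, 3), rank=4).shape  # -> torch.Size([2, 3, 1, 1])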
def build_histories(
query: th.Tensor,
events: Events,
history_size: Optional[int] = 1) -> Tuple[th.Tensor, th.Tensor]:
"""Get the set of times corresponding to the 'history' of a query time of
fixed size.
Args:
query: [B,T] The times to create histories for.
events: [B,L] Times and labels of events to create histories from.
history_size: The size of each history. Defaults to 1.
Returns:
history (th.Tensor): [B,T,H] The history for each query time.
mask (th.Tensor): [B,T] The mask corresponding to whether a
particular query can be used or not based on the required size of
history.
"""
batch_size, max_queries = query.shape
batch_size_s, max_seq_len = events.times.shape
if batch_size_s != batch_size:
raise ValueError(
"The batch size for `query_times` "
"({}) does not match the batch size for `sequences` "
"({}).".format(batch_size, batch_size_s))
if history_size > max_seq_len:
raise ValueError(
"The chosen value for `history_size` "
"({}) is greater than the size of the largest sequence "
"({}).".format(history_size, max_seq_len))
((prev_times, prev_times_idxs),
is_event, pos_delta_mask) = get_prev_times_bst(
query=query, events=events) # ([B,T], [B,T]), [B,T], [B,T]
relative_history_idxs = th.arange(
start=1 - history_size, end=1, device=events.times.device)
batch_idxs_shift = th.arange(
start=0, end=batch_size_s, device=events.times.device) * max_seq_len
batch_idxs_shift = batch_idxs_shift.reshape([batch_size, 1, 1])
history_seq_idxs = prev_times_idxs.reshape([batch_size, max_queries, 1])
history_seq_idxs = history_seq_idxs + relative_history_idxs
batch_history_seq_idxs = history_seq_idxs + batch_idxs_shift
batch_history_seq_idxs = batch_history_seq_idxs.long() # [B,T,H]
history = th.take(events.times, batch_history_seq_idxs)
history_idxs_positive = th.prod(history_seq_idxs >= 0, dim=-1) # [B,T]
history_idxs_positive = history_idxs_positive.type(pos_delta_mask.dtype)
history_mask = pos_delta_mask * history_idxs_positive
history_mask = history_mask.type(history.dtype) # [B,T]
return history, history_mask
def get_prev_times(
query: th.Tensor,
events: Events,
allow_window: Optional[bool] = False
) -> Tuple[Tuple[th.Tensor, th.Tensor], th.Tensor, th.Tensor]:
"""For each query, get the event time that directly precedes it. If no
events precedes it (but the window start does), return the window start.
Otherwise, mask the value.
Args:
query: [B,T] Sequences of query times to evaluate the intensity
function.
events: [B,L] Times and labels of events.
allow_window: If `True`, a previous time can be the window boundary.
Defaults to `False`.
Returns:
`times` is a tuple of tensors: the values [B,T] and indices [B,T] of the
largest time value in the sequence that is strictly smaller than
the query time value, or the window start. The index only indexes
into the events; if the window start is returned, it should be dealt
with explicitly at encoding/decoding time.
`is_event` is a tensor [B,T] that indicates whether the time
corresponds to an event or not (a 1 indicates an event and a 0
indicates a window boundary).
`mask` is a tensor [B,T] that indicates whether the time difference to
those times was positive or not.
"""
event_times = events.get_times(prepend_window=allow_window) # [B,L+1]
batch_size, max_seq_len = event_times.shape
time_diffs = bu.batchwise_difference(query, event_times) # [B,T,L+1]
event_mask = events.get_mask(prepend_window=allow_window) # [B,L+1]
event_mask = event_mask.reshape([batch_size, 1, max_seq_len]) # [B,1,L+1]
time_diffs = time_diffs * event_mask # [B,T,L+1]
smallest_time_diffs, mask = smallest_positive(time_diffs, dim=-1) # [B,T]
prev_times_idxs = smallest_time_diffs.indices # [B,T]
prev_times = take_2_by_2(event_times, index=prev_times_idxs) # [B,T]
if allow_window:
# If the first event shares a time with the window boundary, make sure
# that the index returned is the index of the event, rather than the
# window boundary.
idx_is_window = (prev_times_idxs == 0).type(
prev_times_idxs.dtype) # [B,T]
do_idx_shift = events.first_event_on_window.type(
idx_is_window.dtype) # [B]
idx_shift = idx_is_window * do_idx_shift.reshape(-1, 1)
prev_times_idxs = prev_times_idxs + idx_shift
# Check the indexes in case one of the window indexes became an event.
is_event = (prev_times_idxs != 0).type(mask.dtype) # [B,T]
else:
is_event = th.ones_like(prev_times_idxs) # [B,T]
query_above_window = query > events.window_start.reshape(-1, 1)
query_below_window = query <= events.window_end.reshape(-1, 1)
query_within_window = query_above_window & query_below_window
query_within_window = query_within_window.type(mask.dtype)
mask = mask * query_within_window
return (prev_times, prev_times_idxs), is_event, mask # [B,T]
|
11491859
|
import os
import pathlib
from pytest_notebook.nb_regression import NBRegressionFixture
EXEC_CWD = str(pathlib.Path(__file__).resolve().parent.parent)
fixture = NBRegressionFixture(
exec_timeout=120,
exec_cwd=EXEC_CWD,
diff_color_words=True,
diff_ignore=(
"/cells/*/outputs/*/data/text/plain",
"/cells/*/outputs/*/data/image/svg+xml",
"/cells/*/outputs/*/text",
"/cells/*/outputs/",
"/cells/*/outputs/*/data/image/png",
"/cells/*/outputs/*/data/text/html",
"/cells/*/outputs/*/output/data/"
"/cells/*/outputs/*/data/application/vnd.plotly.v1+json",
"/cells/*/outputs/*/execution_count",
"/cells/*/execution_count",
"/cells/1/outputs/",
"/cells/*/execution_count/",
"/cells/*/metadata/",
"/cells/*/outputs/metadata/",
"/metadata/",
),
)
def test_cluster_analysis_notebook():
notebook = os.path.join(EXEC_CWD, "examples", "Cluster_Analysis.ipynb")
fixture.check(notebook, raise_errors=True)
def test_data_summary_notebook():
notebook = os.path.join(EXEC_CWD, "examples", "Data_Summary.ipynb")
fixture.check(notebook, raise_errors=True)
def test_text_preprocessing_notebook():
notebook = os.path.join(EXEC_CWD, "examples", "Text_Preprocessing.ipynb")
fixture.check(notebook, raise_errors=True)
def test_data_heatmap_notebook():
notebook = os.path.join(EXEC_CWD, "examples", "Data_Heatmap.ipynb")
fixture.check(notebook, raise_errors=True)
def test_feature_importance_notebook():
notebook = os.path.join(EXEC_CWD, "examples", "Feature_Importance.ipynb")
fixture.check(notebook, raise_errors=True)
def test_correlation_matrix_notebook():
notebook = os.path.join(EXEC_CWD, "examples", "Correlation_Matrix.ipynb")
fixture.check(notebook, raise_errors=True)
def test_scatter_plots_notebook():
notebook = os.path.join(EXEC_CWD, "examples", "Scatter_Plots.ipynb")
fixture.check(notebook, raise_errors=True)
def test_tutorial_notebook():
notebook = os.path.join(EXEC_CWD, "examples", "Quick_Start.ipynb")
fixture.check(notebook, raise_errors=True)
def test_topic_modeling_notebook():
notebook = os.path.join(EXEC_CWD, "examples", "Topic_Modeling.ipynb")
fixture.check(notebook, raise_errors=True)
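# Hedged usage note (the test file path and selection below are illustrative,
# not taken from the project): run the regression tests from the repository
# root with something like
#   pytest tests/test_notebooks.py -k cluster_analysis
# Each test executes its notebook from EXEC_CWD with a 120 s timeout and
# compares outputs, ignoring the JSON paths listed in diff_ignore above.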
|
11491983
|
from toee import *
from utilities import *
from py00439script_daemon import *
from combat_standard_routines import *
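# Overview: san_start_combat only acts while the companion has a leader and was
# not recruited through a charm spell (see san_spell_cast / san_join below). If
# the leader's group is at 40% hp or less it switches to strategy 462; otherwise
# it scans the party, marking wounded members (<= 50% hp and not dying) in
# scratch flags 250-257, and picks strategy 464 when a wounded member is
# adjacent to the companion (or when nobody is wounded), 463 otherwise.
# Flags 250-259 are cleared before returning.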
def san_start_combat( attachee, triggerer ):
if (attachee.leader_get() != OBJ_HANDLE_NULL and not npc_get(attachee,2)):
leader = attachee.leader_get()
if (group_pc_percent_hp( attachee, leader ) <= 40):
attachee.obj_set_int(obj_f_critter_strategy, 462)
elif (game.party_npc_size() + game.party_pc_size() == 8):
for pp in range(0,8):
if (game.party[pp] != OBJ_HANDLE_NULL):
if (obj_percent_hp(game.party[pp]) <= 50 and game.party[pp].stat_level_get(stat_hp_current) >= -9):
game.global_flags[250 + pp] = 1
game.global_flags[258] = 1
if (game.global_flags[250] == 1):
if (adjacent(attachee, game.party[0])):
game.global_flags[259] = 1
if (game.global_flags[251] == 1):
if (adjacent(attachee, game.party[1])):
game.global_flags[259] = 1
if (game.global_flags[252] == 1):
if (adjacent(attachee, game.party[2])):
game.global_flags[259] = 1
if (game.global_flags[253] == 1):
if (adjacent(attachee, game.party[3])):
game.global_flags[259] = 1
if (game.global_flags[254] == 1):
if (adjacent(attachee, game.party[4])):
game.global_flags[259] = 1
if (game.global_flags[255] == 1):
if (adjacent(attachee, game.party[5])):
game.global_flags[259] = 1
if (game.global_flags[256] == 1):
if (adjacent(attachee, game.party[6])):
game.global_flags[259] = 1
if (game.global_flags[257] == 1):
if (adjacent(attachee, game.party[7])):
game.global_flags[259] = 1
if (game.global_flags[258] == 1):
if (game.global_flags[259] == 1):
attachee.obj_set_int(obj_f_critter_strategy, 464)
if (triggerer.type == obj_t_npc and triggerer.leader_get() == OBJ_HANDLE_NULL):
attachee.turn_towards(triggerer)
else:
for pc in game.party:
if ( pc.has_feat(feat_animal_companion) ):
attachee.turn_towards(pc)
else:
attachee.turn_towards(game.party[0])
else:
attachee.obj_set_int(obj_f_critter_strategy, 463)
else:
attachee.obj_set_int(obj_f_critter_strategy, 464)
if (triggerer.type == obj_t_npc and triggerer.leader_get() == OBJ_HANDLE_NULL):
attachee.turn_towards(triggerer)
else:
for pc in game.party:
if ( pc.has_feat(feat_animal_companion) ):
attachee.turn_towards(pc)
else:
attachee.turn_towards(game.party[0])
elif (game.party_npc_size() + game.party_pc_size() == 7):
if (obj_percent_hp(game.party[0]) <= 50 and game.party[0].stat_level_get(stat_hp_current) >= -9):
game.global_flags[250] = 1
game.global_flags[258] = 1
if (obj_percent_hp(game.party[1]) <= 50 and game.party[1].stat_level_get(stat_hp_current) >= -9):
game.global_flags[251] = 1
game.global_flags[258] = 1
if (obj_percent_hp(game.party[2]) <= 50 and game.party[2].stat_level_get(stat_hp_current) >= -9):
game.global_flags[252] = 1
game.global_flags[258] = 1
if (obj_percent_hp(game.party[3]) <= 50 and game.party[3].stat_level_get(stat_hp_current) >= -9):
game.global_flags[253] = 1
game.global_flags[258] = 1
if (obj_percent_hp(game.party[4]) <= 50 and game.party[4].stat_level_get(stat_hp_current) >= -9):
game.global_flags[254] = 1
game.global_flags[258] = 1
if (obj_percent_hp(game.party[5]) <= 50 and game.party[5].stat_level_get(stat_hp_current) >= -9):
game.global_flags[255] = 1
game.global_flags[258] = 1
if (obj_percent_hp(game.party[6]) <= 50 and game.party[6].stat_level_get(stat_hp_current) >= -9):
game.global_flags[256] = 1
game.global_flags[258] = 1
if (game.global_flags[250] == 1):
if (adjacent(attachee, game.party[0])):
game.global_flags[259] = 1
if (game.global_flags[251] == 1):
if (adjacent(attachee, game.party[1])):
game.global_flags[259] = 1
if (game.global_flags[252] == 1):
if (adjacent(attachee, game.party[2])):
game.global_flags[259] = 1
if (game.global_flags[253] == 1):
if (adjacent(attachee, game.party[3])):
game.global_flags[259] = 1
if (game.global_flags[254] == 1):
if (adjacent(attachee, game.party[4])):
game.global_flags[259] = 1
if (game.global_flags[255] == 1):
if (adjacent(attachee, game.party[5])):
game.global_flags[259] = 1
if (game.global_flags[256] == 1):
if (adjacent(attachee, game.party[6])):
game.global_flags[259] = 1
if (game.global_flags[258] == 1):
if (game.global_flags[259] == 1):
attachee.obj_set_int(obj_f_critter_strategy, 464)
if (triggerer.type == obj_t_npc and triggerer.leader_get() == OBJ_HANDLE_NULL):
attachee.turn_towards(triggerer)
else:
for pc in game.party:
if ( pc.has_feat(feat_animal_companion) ):
attachee.turn_towards(pc)
else:
attachee.turn_towards(game.party[0])
else:
attachee.obj_set_int(obj_f_critter_strategy, 463)
else:
attachee.obj_set_int(obj_f_critter_strategy, 464)
if (triggerer.type == obj_t_npc and triggerer.leader_get() == OBJ_HANDLE_NULL):
attachee.turn_towards(triggerer)
else:
for pc in game.party:
if ( pc.has_feat(feat_animal_companion) ):
attachee.turn_towards(pc)
else:
attachee.turn_towards(game.party[0])
elif (game.party_npc_size() + game.party_pc_size() == 6):
if (obj_percent_hp(game.party[0]) <= 50 and game.party[0].stat_level_get(stat_hp_current) >= -9):
game.global_flags[250] = 1
game.global_flags[258] = 1
if (obj_percent_hp(game.party[1]) <= 50 and game.party[1].stat_level_get(stat_hp_current) >= -9):
game.global_flags[251] = 1
game.global_flags[258] = 1
if (obj_percent_hp(game.party[2]) <= 50 and game.party[2].stat_level_get(stat_hp_current) >= -9):
game.global_flags[252] = 1
game.global_flags[258] = 1
if (obj_percent_hp(game.party[3]) <= 50 and game.party[3].stat_level_get(stat_hp_current) >= -9):
game.global_flags[253] = 1
game.global_flags[258] = 1
if (obj_percent_hp(game.party[4]) <= 50 and game.party[4].stat_level_get(stat_hp_current) >= -9):
game.global_flags[254] = 1
game.global_flags[258] = 1
if (obj_percent_hp(game.party[5]) <= 50 and game.party[5].stat_level_get(stat_hp_current) >= -9):
game.global_flags[255] = 1
game.global_flags[258] = 1
if (game.global_flags[250] == 1):
if (adjacent(attachee, game.party[0])):
game.global_flags[259] = 1
if (game.global_flags[251] == 1):
if (adjacent(attachee, game.party[1])):
game.global_flags[259] = 1
if (game.global_flags[252] == 1):
if (adjacent(attachee, game.party[2])):
game.global_flags[259] = 1
if (game.global_flags[253] == 1):
if (adjacent(attachee, game.party[3])):
game.global_flags[259] = 1
if (game.global_flags[254] == 1):
if (adjacent(attachee, game.party[4])):
game.global_flags[259] = 1
if (game.global_flags[255] == 1):
if (adjacent(attachee, game.party[5])):
game.global_flags[259] = 1
if (game.global_flags[258] == 1):
if (game.global_flags[259] == 1):
attachee.obj_set_int(obj_f_critter_strategy, 464)
if (triggerer.type == obj_t_npc and triggerer.leader_get() == OBJ_HANDLE_NULL):
attachee.turn_towards(triggerer)
else:
for pc in game.party:
if ( pc.has_feat(feat_animal_companion) ):
attachee.turn_towards(pc)
else:
attachee.turn_towards(game.party[0])
else:
attachee.obj_set_int(obj_f_critter_strategy, 463)
else:
attachee.obj_set_int(obj_f_critter_strategy, 464)
if (triggerer.type == obj_t_npc and triggerer.leader_get() == OBJ_HANDLE_NULL):
attachee.turn_towards(triggerer)
else:
for pc in game.party:
if ( pc.has_feat(feat_animal_companion) ):
attachee.turn_towards(pc)
else:
attachee.turn_towards(game.party[0])
elif (game.party_npc_size() + game.party_pc_size() == 5):
if (obj_percent_hp(game.party[0]) <= 50 and game.party[0].stat_level_get(stat_hp_current) >= -9):
game.global_flags[250] = 1
game.global_flags[258] = 1
if (obj_percent_hp(game.party[1]) <= 50 and game.party[1].stat_level_get(stat_hp_current) >= -9):
game.global_flags[251] = 1
game.global_flags[258] = 1
if (obj_percent_hp(game.party[2]) <= 50 and game.party[2].stat_level_get(stat_hp_current) >= -9):
game.global_flags[252] = 1
game.global_flags[258] = 1
if (obj_percent_hp(game.party[3]) <= 50 and game.party[3].stat_level_get(stat_hp_current) >= -9):
game.global_flags[253] = 1
game.global_flags[258] = 1
if (obj_percent_hp(game.party[4]) <= 50 and game.party[4].stat_level_get(stat_hp_current) >= -9):
game.global_flags[254] = 1
game.global_flags[258] = 1
if (game.global_flags[250] == 1):
if (adjacent(attachee, game.party[0])):
game.global_flags[259] = 1
if (game.global_flags[251] == 1):
if (adjacent(attachee, game.party[1])):
game.global_flags[259] = 1
if (game.global_flags[252] == 1):
if (adjacent(attachee, game.party[2])):
game.global_flags[259] = 1
if (game.global_flags[253] == 1):
if (adjacent(attachee, game.party[3])):
game.global_flags[259] = 1
if (game.global_flags[254] == 1):
if (adjacent(attachee, game.party[4])):
game.global_flags[259] = 1
if (game.global_flags[258] == 1):
if (game.global_flags[259] == 1):
attachee.obj_set_int(obj_f_critter_strategy, 464)
if (triggerer.type == obj_t_npc and triggerer.leader_get() == OBJ_HANDLE_NULL):
attachee.turn_towards(triggerer)
else:
for pc in game.party:
if ( pc.has_feat(feat_animal_companion) ):
attachee.turn_towards(pc)
else:
attachee.turn_towards(game.party[0])
else:
attachee.obj_set_int(obj_f_critter_strategy, 463)
else:
attachee.obj_set_int(obj_f_critter_strategy, 464)
if (triggerer.type == obj_t_npc and triggerer.leader_get() == OBJ_HANDLE_NULL):
attachee.turn_towards(triggerer)
else:
for pc in game.party:
if ( pc.has_feat(feat_animal_companion) ):
attachee.turn_towards(pc)
else:
attachee.turn_towards(game.party[0])
elif (game.party_npc_size() + game.party_pc_size() == 4):
if (obj_percent_hp(game.party[0]) <= 50 and game.party[0].stat_level_get(stat_hp_current) >= -9):
game.global_flags[250] = 1
game.global_flags[258] = 1
if (obj_percent_hp(game.party[1]) <= 50 and game.party[1].stat_level_get(stat_hp_current) >= -9):
game.global_flags[251] = 1
game.global_flags[258] = 1
if (obj_percent_hp(game.party[2]) <= 50 and game.party[2].stat_level_get(stat_hp_current) >= -9):
game.global_flags[252] = 1
game.global_flags[258] = 1
if (obj_percent_hp(game.party[3]) <= 50 and game.party[3].stat_level_get(stat_hp_current) >= -9):
game.global_flags[253] = 1
game.global_flags[258] = 1
if (game.global_flags[250] == 1):
if (adjacent(attachee, game.party[0])):
game.global_flags[259] = 1
if (game.global_flags[251] == 1):
if (adjacent(attachee, game.party[1])):
game.global_flags[259] = 1
if (game.global_flags[252] == 1):
if (adjacent(attachee, game.party[2])):
game.global_flags[259] = 1
if (game.global_flags[253] == 1):
if (adjacent(attachee, game.party[3])):
game.global_flags[259] = 1
if (game.global_flags[258] == 1):
if (game.global_flags[259] == 1):
attachee.obj_set_int(obj_f_critter_strategy, 464)
if (triggerer.type == obj_t_npc and triggerer.leader_get() == OBJ_HANDLE_NULL):
attachee.turn_towards(triggerer)
else:
for pc in game.party:
if ( pc.has_feat(feat_animal_companion) ):
attachee.turn_towards(pc)
else:
attachee.turn_towards(game.party[0])
else:
attachee.obj_set_int(obj_f_critter_strategy, 463)
else:
attachee.obj_set_int(obj_f_critter_strategy, 464)
if (triggerer.type == obj_t_npc and triggerer.leader_get() == OBJ_HANDLE_NULL):
attachee.turn_towards(triggerer)
else:
for pc in game.party:
if ( pc.has_feat(feat_animal_companion) ):
attachee.turn_towards(pc)
else:
attachee.turn_towards(game.party[0])
elif (game.party_npc_size() + game.party_pc_size() == 3):
if (obj_percent_hp(game.party[0]) <= 50 and game.party[0].stat_level_get(stat_hp_current) >= -9):
game.global_flags[250] = 1
game.global_flags[258] = 1
if (obj_percent_hp(game.party[1]) <= 50 and game.party[1].stat_level_get(stat_hp_current) >= -9):
game.global_flags[251] = 1
game.global_flags[258] = 1
if (obj_percent_hp(game.party[2]) <= 50 and game.party[2].stat_level_get(stat_hp_current) >= -9):
game.global_flags[252] = 1
game.global_flags[258] = 1
if (game.global_flags[250] == 1):
if (adjacent(attachee, game.party[0])):
game.global_flags[259] = 1
if (game.global_flags[251] == 1):
if (adjacent(attachee, game.party[1])):
game.global_flags[259] = 1
if (game.global_flags[252] == 1):
if (adjacent(attachee, game.party[2])):
game.global_flags[259] = 1
if (game.global_flags[258] == 1):
if (game.global_flags[259] == 1):
attachee.obj_set_int(obj_f_critter_strategy, 464)
if (triggerer.type == obj_t_npc and triggerer.leader_get() == OBJ_HANDLE_NULL):
attachee.turn_towards(triggerer)
else:
for pc in game.party:
if ( pc.has_feat(feat_animal_companion) ):
attachee.turn_towards(pc)
else:
attachee.turn_towards(game.party[0])
else:
attachee.obj_set_int(obj_f_critter_strategy, 463)
else:
attachee.obj_set_int(obj_f_critter_strategy, 464)
if (triggerer.type == obj_t_npc and triggerer.leader_get() == OBJ_HANDLE_NULL):
attachee.turn_towards(triggerer)
else:
for pc in game.party:
if ( pc.has_feat(feat_animal_companion) ):
attachee.turn_towards(pc)
else:
attachee.turn_towards(game.party[0])
elif (game.party_npc_size() + game.party_pc_size() == 2):
if (obj_percent_hp(game.party[0]) <= 50 and game.party[0].stat_level_get(stat_hp_current) >= -9):
game.global_flags[250] = 1
game.global_flags[258] = 1
if (obj_percent_hp(game.party[1]) <= 50 and game.party[1].stat_level_get(stat_hp_current) >= -9):
game.global_flags[251] = 1
game.global_flags[258] = 1
if (game.global_flags[250] == 1):
if (adjacent(attachee, game.party[0])):
game.global_flags[259] = 1
if (game.global_flags[251] == 1):
if (adjacent(attachee, game.party[1])):
game.global_flags[259] = 1
if (game.global_flags[258] == 1):
if (game.global_flags[259] == 1):
attachee.obj_set_int(obj_f_critter_strategy, 464)
if (triggerer.type == obj_t_npc and triggerer.leader_get() == OBJ_HANDLE_NULL):
attachee.turn_towards(triggerer)
else:
for pc in game.party:
if ( pc.has_feat(feat_animal_companion) ):
attachee.turn_towards(pc)
else:
attachee.turn_towards(game.party[0])
else:
attachee.obj_set_int(obj_f_critter_strategy, 463)
else:
attachee.obj_set_int(obj_f_critter_strategy, 464)
if (triggerer.type == obj_t_npc and triggerer.leader_get() == OBJ_HANDLE_NULL):
attachee.turn_towards(triggerer)
else:
for pc in game.party:
if ( pc.has_feat(feat_animal_companion) ):
attachee.turn_towards(pc)
else:
attachee.turn_towards(game.party[0])
elif (game.party_pc_size() == 1):
if (obj_percent_hp(game.party[0]) <= 50 and game.party[0].stat_level_get(stat_hp_current) >= -9):
game.global_flags[250] = 1
game.global_flags[258] = 1
if (game.global_flags[250] == 1):
if (adjacent(attachee, game.party[0])):
game.global_flags[259] = 1
if (game.global_flags[258] == 1):
if (game.global_flags[259] == 1):
attachee.obj_set_int(obj_f_critter_strategy, 464)
if (triggerer.type == obj_t_npc and triggerer.leader_get() == OBJ_HANDLE_NULL):
attachee.turn_towards(triggerer)
else:
for pc in game.party:
if ( pc.has_feat(feat_animal_companion) ):
attachee.turn_towards(pc)
else:
attachee.turn_towards(game.party[0])
else:
attachee.obj_set_int(obj_f_critter_strategy, 463)
else:
attachee.obj_set_int(obj_f_critter_strategy, 464)
if (triggerer.type == obj_t_npc and triggerer.leader_get() == OBJ_HANDLE_NULL):
attachee.turn_towards(triggerer)
else:
for pc in game.party:
if ( pc.has_feat(feat_animal_companion) ):
attachee.turn_towards(pc)
else:
attachee.turn_towards(game.party[0])
game.global_flags[250] = 0
game.global_flags[251] = 0
game.global_flags[252] = 0
game.global_flags[253] = 0
game.global_flags[254] = 0
game.global_flags[255] = 0
game.global_flags[256] = 0
game.global_flags[257] = 0
game.global_flags[258] = 0
game.global_flags[259] = 0
return RUN_DEFAULT
def san_join( attachee, triggerer ):
if (npc_get(attachee,1)):
npc_set(attachee,2)
return RUN_DEFAULT
def san_spell_cast( attachee, triggerer, spell ):
if ( spell.spell == spell_charm_person_or_animal or spell.spell == spell_charm_monster ):
npc_set(attachee,1)
return RUN_DEFAULT
def not_adjacent( companion, target ):
if (companion.distance_to(target) >= 5):
return 1
return 0
def adjacent( companion, target ):
if (companion.distance_to(target) <= 5):
return 1
return 0
|
11491984
|
import os
import textwrap
class CodeGenerator():
def __init__(self):
self.layers = []
def addLayer(self, weights, biases, activation):
self.layers.append((weights, biases, activation))
def save(self, name):
lines = []
lines.append('// Auto generated model data')
lines.append('private void InitModel()')
lines.append('{')
numInputs = self.layers[0][0].shape[1]
lines.append('\tNumInputs = %d;' % numInputs)
numOutputs = [str(layer[0].shape[0]) for layer in self.layers]
lines.append('\tNumOutputs = {%s};' % ', '.join(numOutputs))
for i in range(len(self.layers)):
lines.append('\tLayer%d_W = %s;' % (i, self.convertToCSArray(self.layers[i][0])))
lines.append('\tLayer%d_B = %s;' % (i, self.convertToCSArray(self.layers[i][1])))
lines.append('}')
file = open(name, 'w')
for line in lines:
wrapLines = textwrap.wrap(line, width=120, tabsize=4, subsequent_indent='\t\t')
for fileLine in wrapLines:
file.write(fileLine + '\n')
file.close()
print('Saved generated code:',name)
def convertToCSArray(self, arr):
values = []
if len(arr.shape) == 1:
values = [str(x) for x in arr]
else:
for i in range(arr.shape[0]):
values.append(self.convertToCSArray(arr[i]))
return '{' + ', '.join(values) + '}'
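# --- Hedged usage sketch (weight shapes and output file name are hypothetical) ---
if __name__ == '__main__':
    import numpy as np
    gen = CodeGenerator()
    # A 4-input -> 16-unit hidden layer followed by a 16 -> 2 output layer.
    gen.addLayer(np.random.randn(16, 4), np.zeros(16), 'relu')
    gen.addLayer(np.random.randn(2, 16), np.zeros(2), 'softmax')
    gen.save('ModelData.cs')  # writes the auto-generated C# InitModel() body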
|
11491987
|
import argparse
import subprocess
import time
def track(wait_time=60):
# In the following, a select set of parameters that are passed to ffmpeg is
# described:
# -vframes -> Number of frames to output.
    # -q:v -> (alias: qscale) Quality of the output; 100 means as lossy as
    #         possible (resulting in a smaller file size).
# -vf scale=1280:-1 -> Rescale to 1280 width (and automatically keep aspect
# ratio). Uses ffmpeg filtering.
args = ("ffmpeg -f avfoundation -framerate 1 -i 1:0 -vframes 1 -q:v 100 "
"-vf scale=1280:-1")
args = args.split(" ")
while True:
time.sleep(wait_time)
filename = "output" + str(int(time.time())) + ".jpeg"
args_ = args.copy()
args_.append(filename)
p = subprocess.Popen(args_)
p.wait()
def watch():
pass # TODO
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'cmd',
nargs='?',
default='track',
help='Command to execute. Can be either track or watch.'
)
parser.add_argument(
'--waittime',
help='How many seconds to wait between each capture.',
default=60,
type=int
)
args = parser.parse_args()
if args.cmd == 'track':
track(args.waittime)
elif args.cmd == 'watch':
watch()
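# --- Hedged CLI usage sketch (the script name "tracker.py" is hypothetical) ---
# Capture one frame from avfoundation device index 1 every 30 seconds:
#   python tracker.py track --waittime 30
# Running the script with no arguments is equivalent to
#   python tracker.py track --waittime 60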
|
11491990
|
import collections
from setuptools import Extension
from buildtools import *
import torch
import os
import re
import sys
# ~~~ libnitorch files
# Note that sources are identical between libnitorch_cpu and libnitorch_cuda.
# The same code is compiled in two different ways to generate native and cuda
# code. This trick allows to minimize code duplication.
# Finally, libnitorch links to both these sub-libraries and dispatches
# according to the Tensor's device type.
libnitorch_cpu_sources = ['pushpull_common.cpp']
libnitorch_cuda_sources = ['pushpull_common.cpp']
libnitorch_sources = ['pushpull.cpp']
ext_spatial_sources = ['spatial.cpp']
# That's a bit ugly. TODO: use config files?
libnitorch_cpu_headers = ['common.h',
'interpolation.h', 'interpolation_common.h',
'bounds.h', 'bounds_common.h']
libnitorch_cuda_headers = ['common.h',
'interpolation.h', 'interpolation_common.h',
'bounds.h', 'bounds_common.h']
libnitorch_headers = ['pushpull_common.h', 'interpolation.h', 'bounds.h']
ext_spatial_headers = ['pushpull.h', 'interpolation.h', 'bounds.h']
# TODO
# . There is still quite a lot to do in setup and buildtools in order to make
# things clean and work on multiple platforms.
# . I have to add abi checks and other smart tricks as in
# torch.utils.cpp_extension
MINIMUM_GCC_VERSION = (5, 0, 0)
MINIMUM_MSVC_VERSION = (19, 0, 24215)
# ~~~ helpers
# Most of the helpers are in build tools. The remaining helpers defined
# here are specific to the version of pytorch that we compile against.
def torch_version(astuple=True):
version = list(torch.__version__.split('+')[0].split('.'))
# strip alpha tags
for n, v in enumerate(version):
for x in 'abcdefghijklmnopqrstuvwxy':
if x in v:
v = v[:v.index(x)]
version[n] = v
version = tuple(int(v) for v in version)
if len(version) == 2:
version = version + (0,)
if not astuple:
        version = version[0]*10000 + version[1]*100 + version[2]
return version
def torch_cuda_version(astuple=True):
version = torch.version.cuda.split('.')
version = tuple(int(v) for v in version)
if len(version) == 2:
version = version + (0,)
if not astuple:
        version = version[0]*10000 + version[1]*100 + version[2]
return version
def torch_cudnn_version(astuple=True):
version = torch.backends.cudnn.version()
version = (version//1000, version//100 % 10, version % 100)
if not astuple:
        version = version[0]*10000 + version[1]*100 + version[2]
return version
def torch_parallel_backend():
# check if set by user
valid_backends = ('AT_PARALLEL_OPENMP', 'AT_PARALLEL_NATIVE', 'AT_PARALLEL_NATIVE_TBB')
backend = os.environ.get('NI_PARALLEL_BACKEND', None)
if backend:
if backend not in valid_backends:
backend = None
return backend
# else, find backend used by pytorch
match = re.search('^ATen parallel backend: (?P<backend>.*)$',
torch._C._parallel_info(), re.MULTILINE)
if match is None:
return None
backend = match.group('backend')
if backend == 'OpenMP':
return 'AT_PARALLEL_OPENMP'
elif backend == 'native thread pool':
return 'AT_PARALLEL_NATIVE'
elif backend == 'native thread pool and TBB':
return 'AT_PARALLEL_NATIVE_TBB'
else:
return None
def torch_abi():
return str(int(torch._C._GLIBCXX_USE_CXX11_ABI))
def torch_omp_lib():
torch_dir = os.path.dirname(os.path.abspath(torch.__file__))
torch_library_dir = os.path.join(torch_dir, 'lib')
if is_darwin():
libtorch = os.path.join(torch_library_dir, 'libtorch.dylib')
linked_libs = os.popen('otool -L "{}"'.format(libtorch)).read()
if 'libiomp5' in linked_libs:
return 'iomp5'
elif 'libomp' in linked_libs:
return 'omp'
else:
return None
def torch_libraries(use_cuda=False):
version = torch_version(astuple=False)
if version < 10500:
libraries = ['c10', 'torch', 'torch_python']
if use_cuda:
libraries += ['cudart', 'c10_cuda']
else:
libraries = ['c10', 'torch_cpu', 'torch_python', 'torch']
if use_cuda:
libraries += ['cudart', 'c10_cuda', 'torch_cuda']
if not use_cuda and torch_parallel_backend() == 'AT_PARALLEL_OPENMP':
libraries += omp_libraries()
return libraries
def torch_library_dirs(use_cuda=False, use_cudnn=False):
torch_dir = os.path.dirname(os.path.abspath(torch.__file__))
torch_library_dir = os.path.join(torch_dir, 'lib')
library_dirs = [torch_library_dir]
if use_cuda:
if is_windows():
library_dirs += [os.path.join(cuda_home(), 'lib/x64')]
elif os.path.exists(os.path.join(cuda_home(), 'lib64')):
library_dirs += [os.path.join(cuda_home(), 'lib64')]
elif os.path.exists(os.path.join(cuda_home(), 'lib')):
library_dirs += [os.path.join(cuda_home(), 'lib')]
if use_cudnn:
if is_windows():
library_dirs += [os.path.join(cudnn_home(), 'lib/x64')]
elif os.path.exists(os.path.join(cudnn_home(), 'lib64')):
library_dirs += [os.path.join(cudnn_home(), 'lib64')]
elif os.path.exists(os.path.join(cudnn_home(), 'lib')):
library_dirs += [os.path.join(cudnn_home(), 'lib')]
if not use_cuda and torch_parallel_backend() == 'AT_PARALLEL_OPENMP':
library_dirs += omp_library_dirs()
return library_dirs
def torch_include_dirs(use_cuda=False, use_cudnn=False):
torch_dir = os.path.dirname(os.path.abspath(torch.__file__))
torch_include_dir = os.path.join(torch_dir, 'include')
include_dirs = [torch_include_dir,
os.path.join(torch_include_dir, 'torch', 'csrc', 'api', 'include'),
os.path.join(torch_include_dir, 'TH'),
os.path.join(torch_include_dir, 'THC')]
if use_cuda:
cuda_include_dir = os.path.join(cuda_home(), 'include')
if cuda_include_dir != '/usr/include':
include_dirs += [cuda_include_dir]
if use_cudnn:
include_dirs += [os.path.join(cudnn_home(), 'include')]
if not use_cuda and torch_parallel_backend() == 'AT_PARALLEL_OPENMP':
include_dirs += omp_include_dirs()
return include_dirs
def cuda_check():
local_version = cuda_version()
torch_version = torch_cuda_version()
ok = (local_version[0] == torch_version[0] and
local_version[1] == torch_version[1])
if not ok:
        print('Your version of CUDA is v{}.{} while PyTorch was compiled with '
'CUDA v{}.{}. NiTorch cannot be compiled with CUDA.'.format(
local_version[0], local_version[1],
torch_version[0], torch_version[1]))
return ok
def cudnn_check():
local_version = cudnn_version()
torch_version = torch_cudnn_version()
ok = (local_version[0] == torch_version[0] and
local_version[1] == torch_version[1])
if not ok:
        print('Your version of CuDNN is v{}.{} while PyTorch was compiled with '
'CuDNN v{}.{}. NiTorch cannot be compiled with CuDNN.'.format(
local_version[0], local_version[1],
torch_version[0], torch_version[1]))
return ok
def cuda_arch_flags():
"""
Determine CUDA arch flags to use.
For an arch, say "6.1", the added compile flag will be
``-gencode=arch=compute_61,code=sm_61``.
For an added "+PTX", an additional
``-gencode=arch=compute_xx,code=compute_xx`` is added.
See select_compute_arch.cmake for corresponding named and supported arches
when building with CMake.
"""
# Note: keep combined names ("arch1+arch2") above single names, otherwise
# string replacement may not do the right thing
named_arches = collections.OrderedDict([
('Kepler+Tesla', '3.7'),
('Kepler', '3.5+PTX'),
('Maxwell+Tegra', '5.3'),
('Maxwell', '5.0;5.2+PTX'),
('Pascal', '6.0;6.1+PTX'),
('Volta', '7.0+PTX'),
('Turing', '7.5+PTX'),
('Ampere', '8.0;8.6+PTX'),
])
supported_arches = ['3.5', '3.7', '5.0', '5.2', '5.3', '6.0', '6.1', '6.2',
'7.0', '7.2', '7.5', '8.0', '8.6']
valid_arch_strings = supported_arches + [s + "+PTX" for s in supported_arches]
# The default is sm_30 for CUDA 9.x and 10.x
# First check for an env var (same as used by the main setup.py)
# Can be one or more architectures, e.g. "6.1" or "3.5;5.2;6.0;6.1;7.0+PTX"
# See cmake/Modules_CUDA_fix/upstream/FindCUDA/select_compute_arch.cmake
arch_list = os.environ.get('TORCH_CUDA_ARCH_LIST', 'mine')
# If not given, look into libtorch_cuda
if not arch_list or arch_list.lower() == 'all':
cuobjdump = os.path.join(cuda_home(), 'bin', 'cuobjdump')
torchdir = os.path.dirname(os.path.abspath(torch.__file__))
libtorch = os.path.join(torchdir, 'lib')
if is_windows():
libtorch = os.path.join(libtorch, 'torch_cuda.lib')
else:
assert not is_darwin()
libtorch = os.path.join(libtorch, 'libtorch_cuda.so')
arch_list = os.popen(cuobjdump + " '" + libtorch + \
"' -lelf | awk -F. '{print $3}' | " \
"grep sm | sort -u").read().split('\n')
arch_list = [arch[3] + '.' + arch[4] for arch in arch_list if arch]
ptx_list = os.popen(cuobjdump + " '" + libtorch + \
"' -lptx | awk -F. '{print $3}' | " \
"grep sm | sort -u").read().split('\n')
ptx_list = [arch[3] + '.' + arch[4] for arch in ptx_list if arch]
arch_list = [arch + '+PTX' if arch in ptx_list else arch
for arch in arch_list]
elif arch_list == 'mine':
# this bit was in the torch extension util but I have replaced
# it with the bit above that looks into libtorch
capability = torch.cuda.get_device_capability()
arch_list = ['{}.{}'.format(capability[0], capability[1])]
else:
# Deal with lists that are ' ' separated (only deal with ';' after)
arch_list = arch_list.replace(' ', ';')
# Expand named arches
for named_arch, archval in named_arches.items():
arch_list = arch_list.replace(named_arch, archval)
arch_list = arch_list.split(';')
flags = []
for arch in arch_list:
if arch not in valid_arch_strings:
raise ValueError("Unknown CUDA arch ({}) or GPU not supported".format(arch))
else:
num = arch[0] + arch[2]
flags.append('-gencode=arch=compute_{},code=sm_{}'.format(num, num))
if arch.endswith('+PTX'):
flags.append('-gencode=arch=compute_{},code=compute_{}'.format(num, num))
return list(set(flags))
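# Illustrative example (not executed; the arch list is an assumed value): with
# TORCH_CUDA_ARCH_LIST="6.1;7.5+PTX" set in the environment, cuda_arch_flags()
# would return, in some order:
#   -gencode=arch=compute_61,code=sm_61
#   -gencode=arch=compute_75,code=sm_75
#   -gencode=arch=compute_75,code=compute_75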
def torch_extension_flags(name):
return ['-DTORCH_EXTENSION_NAME={}'.format(name),
'-DTORCH_API_INCLUDE_EXTENSION_H']
def gcc_clang_flags():
flags = ['-fPIC', '-std=c++14']
if is_darwin() and darwin_cc_type() == 'apple_clang':
flags += ['-stdlib=libc++']
return flags
def msvc_flags():
return ['/MD', '/wd4819', '/EHsc']
def nvcc_flags():
return [
'-x=cu',
'-D__CUDA_NO_HALF_OPERATORS__',
'-D__CUDA_NO_HALF_CONVERSIONS__',
'-D__CUDA_NO_HALF2_OPERATORS__',
'--expt-relaxed-constexpr']
def darwin_cc_type():
CC = os.environ.get('CC', 'clang')
CC_name = os.popen(CC + ' --version').read().split(' ')[0]
if CC_name == 'Apple':
CC_type = 'apple_clang'
elif CC_name == 'clang':
CC_type = 'other_clang'
else:
CC_type = 'other'
return CC_type
def find_omp_darwin():
"""Set the correct openmp flag on MacOS.
LLVM's clang has both GCC's and Intel's implementations of OpenMP
(gomp, omp5), which can be specifically used with the flag
'-fopenmp=libiomp5' or '-fopenmp=libgomp'. If no implementation is
specified, the behaviour depends on the version of LLVM
(it used to be gomp, now I believe it is iomp5). Note that iomp5 is
binary compatible with gomp (does that mean we can link with both
without conflicts?).
Apple's clang does not ship any OpenMP implementation, so the user
needs to install one herself, which we need to find and reference
properly.
Return (cflag, lflag, lib_name, lib_dir)."""
# TODO: LLVM's clang embeds OpenMP for version >= 3.8.0
# I need to add a special case for earlier versions.
# There are other ABI incompatibilities as well:
# https://openmp.llvm.org
# Various references that helped me in this maze:
# https://iscinumpy.gitlab.io/post/omp-on-high-sierra/
# https://stackoverflow.com/questions/37362414/
# https://reviews.llvm.org/D2841
def find_lib(names):
for name in names:
if not name:
continue
dirs = ['.', '/usr/', '/usr/local/', '/opt/local/', '/usr/local/opt/libomp/']
if os.environ.get('LD_LIBRARY_PATH'):
dirs += os.environ.get('LD_LIBRARY_PATH').split(':')
for dir in dirs:
if os.path.exists(os.path.join(dir, 'lib', 'lib' + name + '.dylib')):
return name, dir
return None, None
# First, check which clang we're dealing with
# (gcc, apple clang or external clang)
CC_type = darwin_cc_type()
# If not apple clang: openmp should be packaged with the compiler:
if CC_type != 'apple_clang':
flag = '-fopenmp'
if CC_type == 'other_clang':
if torch_omp_lib() == 'iomp5':
flag = '-fopenmp=libiomp5'
elif torch_omp_lib() == 'omp':
flag = '-fopenmp=libgomp'
return [flag], [flag], [], None
    # Else, openmp is no different from any other dependency
# First, check if omp/iomp5 has been installed (e.g., using homebrew)
lib_name, lib_dir = find_lib([torch_omp_lib(), 'iomp5', 'omp'])
if lib_name is None:
# OpenMP not found.
# Let's just hope that the compiler knows what it's doing.
return ['-fopenmp'], ['-fopenmp'], [], None
else:
return ['-Xpreprocessor', '-fopenmp'], [], [], lib_dir
# return ['-Xpreprocessor', '-fopenmp'], [lib_name], [], lib_dir
# It is super weird:
# - precompiled torch wheels on mac link against libomp (or libiomp5)
# - but openmp was not detected in their compilation chain, so
# it was actually compiled *without* openmp (pragmas were not used,
# parallel loops are actually sequential, (set/get)_num_threads
# is always 1)
# - I can't get my mac to link against the correct omp lib (it
# links against libomp even though I ask it to link against
# libiomp5)
# - If I don't link against openmp at all, things seem to work (!!)
# - I really need to rewrite our compilation stuff anyway
def omp_flags():
if is_windows():
return ['/openmp']
elif is_darwin():
return find_omp_darwin()[0]
else:
return ['-fopenmp']
def omp_link_flags():
if is_darwin():
return find_omp_darwin()[1]
else:
return []
def omp_libraries():
if is_darwin():
return find_omp_darwin()[2]
else:
return []
def omp_library_dirs():
if is_darwin():
ompdir = find_omp_darwin()[3]
return [os.path.join(ompdir, 'lib')] if ompdir else []
else:
return []
def omp_include_dirs():
if is_darwin():
ompdir = find_omp_darwin()[3]
return [os.path.join(ompdir, 'include')] if ompdir else []
else:
return []
def common_flags():
if is_windows():
return msvc_flags()
else:
return gcc_clang_flags()
def torch_flags(cuda=False):
version = torch_version()
version = version[0]*10000+version[1]*100+version[2]
flags = ['-DNI_TORCH_VERSION=' + str(version)]
backend = torch_parallel_backend()
flags += [
'-D' + torch_parallel_backend() + '=1',
'-D_GLIBCXX_USE_CXX11_ABI=' + torch_abi()]
if not cuda and backend == 'AT_PARALLEL_OPENMP':
flags += omp_flags()
return flags
def torch_link_flags(cuda=False):
backend = torch_parallel_backend()
flags = []
if not cuda and backend == 'AT_PARALLEL_OPENMP':
flags += omp_link_flags()
return flags
def common_links_flags():
if is_darwin() and darwin_cc_type() == 'apple_clang':
return ['-stdlib=libc++']
return []
def cuda_flags():
flags = nvcc_flags() + cuda_arch_flags()
if is_windows():
for flag in common_flags():
flags = ['-Xcompiler', flag] + flags
else:
for flag in common_flags():
flags += ['--compiler-options', flag]
return flags
def abspathC(files):
scriptdir = os.path.abspath(os.path.dirname(__file__))
sourcedir = os.path.join(scriptdir, 'nitorch', '_C')
return [os.path.join(sourcedir, f) for f in files]
def prepare_extensions():
build_extensions = []
# ~~~ checks
use_cuda = bool(int(os.environ.get('NI_USE_CUDA', '1')))
use_cuda = use_cuda and cuda_home() and cuda_check()
use_cudnn = False # cudnn_home() and cudnn_check()
nitorch_lib = []
nitorch_libext = []
# ~~~ setup libraries
NiTorchCPULibrary = SharedLibrary(
name='lib.nitorch_cpu',
sources=abspathC(libnitorch_cpu_sources),
depends=abspathC(libnitorch_cpu_headers),
libraries=torch_libraries(),
library_dirs=torch_library_dirs(),
include_dirs=torch_include_dirs(),
extra_compile_args=common_flags() + torch_flags(),
extra_link_args=common_links_flags() + torch_link_flags(),
language='c++',
)
build_extensions += [NiTorchCPULibrary]
nitorch_libext += [NiTorchCPULibrary]
nitorch_lib += ['nitorch_cpu']
if use_cuda:
NiTorchCUDALibrary = SharedLibrary(
name='lib.nitorch_cuda',
sources=abspathC(libnitorch_cuda_sources),
depends=abspathC(libnitorch_cuda_headers),
libraries=torch_libraries(use_cuda),
library_dirs=torch_library_dirs(use_cuda, use_cudnn),
include_dirs=torch_include_dirs(use_cuda, use_cudnn),
extra_compile_args=cuda_flags() + torch_flags(cuda=True),
extra_link_args=torch_link_flags(cuda=True),
language='cuda',
)
build_extensions += [NiTorchCUDALibrary]
nitorch_libext += [NiTorchCUDALibrary]
nitorch_lib += ['nitorch_cuda']
NiTorchLibrary = SharedLibrary(
name='lib.nitorch',
sources=abspathC(libnitorch_sources),
depends=nitorch_libext + abspathC(libnitorch_headers),
libraries=torch_libraries() + nitorch_lib,
library_dirs=torch_library_dirs(),
include_dirs=torch_include_dirs(),
extra_compile_args=common_flags() + torch_flags() + (['-DNI_WITH_CUDA'] if use_cuda else []),
extra_link_args=common_links_flags(),
runtime_library_dirs=[link_relative('.')],
language='c++',
)
build_extensions += [NiTorchLibrary]
nitorch_libext = [NiTorchLibrary]
nitorch_lib = ['nitorch']
# ~~~ setup extensions
python_library_dirs = [os.path.join(sys.exec_prefix, 'lib')]
SpatialExtension = Extension(
name='_C.spatial',
sources=abspathC(ext_spatial_sources),
depends=nitorch_libext + abspathC(ext_spatial_headers),
libraries=torch_libraries(use_cuda) + nitorch_lib,
library_dirs=torch_library_dirs(use_cuda, use_cudnn) + python_library_dirs,
include_dirs=torch_include_dirs(use_cuda, use_cudnn),
extra_compile_args=common_flags() + torch_flags() + torch_extension_flags('spatial'),
extra_link_args=common_links_flags(),
runtime_library_dirs=[link_relative(os.path.join('..', 'lib'))]
)
build_extensions += [SpatialExtension]
return build_extensions
|
11492066
|
from typing import Optional
import re
from collections import OrderedDict
from requests import Response as ServerResponse
from instagram_api.response.mapper import ApiResponse
from .account_disabled import AccountDisabledException
from .bad_request import BadRequestException
from .challenge_required import ChallengeRequiredException
from .checkpoint_required import CheckpointRequiredException
from .consent_required import ConsentRequiredException
from .endpoint import EndpointException
from .feedback_required import FeedbackRequiredException
from .forced_password_reset import ForcedPasswordResetException
from .incorrect_password import IncorrectPasswordException
from .instagram import InstagramException
from .invalid_sms_code import InvalidSmsCodeException
from .invalid_user import InvalidUserException
from .login_required import LoginRequiredException
from .not_found import NotFoundException
from .sentry_block import SentryBlockException
# Obtain the compiled regex pattern class in the simplest way available
SRE_Pattern = re.compile('test').__class__
__all__ = ['ServerMessageRaiser']
class ServerMessageRaiser:
EXCEPTION_MAP = OrderedDict([
#
# WARNING: We MUST be sure to list these exception messages in an order
# which guarantees that they will be properly detected without being
# detected as something else!
#
# For example, the "challenge_required" string ALSO exists inside of
# "checkpoint_challenge_required", so if we check for ChallengeRequired
# problems above CheckpointRequired, then we would ALWAYS detect
# checkpoints as "challenge required" since that string exists in both
# of them.
#
# Always list all exceptions in an order that guarantees that they
# cannot be misdetected as each other! The exceptions with the longest
# strings, in case of similar strings, MUST be checked/listed EARLIER!
#
# So in that example, CheckpointRequired MUST be listed above
# ChallengeRequired!
#
(LoginRequiredException, [
'login_required'
]),
(CheckpointRequiredException, [
'checkpoint_required', # message
'checkpoint_challenge_required', # error_type
]),
(ChallengeRequiredException, [
'challenge_required'
]),
(FeedbackRequiredException, [
'feedback_required'
]),
(ConsentRequiredException, [
'consent_required'
]),
(IncorrectPasswordException, [
# "The password you entered is incorrect".
re.compile(r'password(.+?)incorrect', re.I | re.U), # message
'bad_password', # error_type
]),
(InvalidSmsCodeException, [
# "Please check the security code we sent you and try again".
re.compile(r'check(.+?)security(.+?)code', re.I | re.U), # message
'sms_code_validation_code_invalid', # error_type
]),
(AccountDisabledException, [
# "Your account has been disabled for violating our terms".
re.compile(r'account(.+?)disabled(.+?)violating', re.I | re.U),
]),
(SentryBlockException, [
'sentry_block',
]),
(InvalidUserException, [
# "The username you entered doesn't appear to belong to an account"
re.compile(r'username(.+?)doesn\'t(.+?)belong', re.I | re.U), # message
'invalid_user', # error_type
]),
(ForcedPasswordResetException, [
re.compile(r'reset(.+?)password', re.I | re.U),
]),
])
@classmethod
def auto_raise(cls,
prefix_string: Optional[str],
server_message: str,
api_response: ApiResponse = None,
http_response: ServerResponse = None):
messages = [server_message]
server_error_type = None
if isinstance(api_response, ApiResponse):
if api_response.error_type is not None:
server_error_type = api_response.error_type
messages.append(server_error_type)
exception_class = None
def check_patterns(message, patterns):
for pattern in patterns:
if isinstance(pattern, SRE_Pattern):
if pattern.search(message):
return True
elif isinstance(pattern, str):
if pattern in message:
return True
else:
raise ValueError(f'Unknown pattern type: {type(pattern)}, {pattern}')
return False
def iter_exceptions(message):
for exception, patterns in cls.EXCEPTION_MAP.items():
if check_patterns(message, patterns):
return exception
for msg in messages:
exception_class = iter_exceptions(msg)
if exception_class is not None:
break
if exception_class is None:
            http_status_code = http_response.status_code if http_response is not None else None
if http_status_code == 400:
exception_class = BadRequestException
elif http_status_code == 404:
exception_class = NotFoundException
else:
exception_class = EndpointException
display_message = server_message if server_message else server_error_type
if display_message is None:
display_message = 'Request failed.'
message_text = f'{prefix_string}: {display_message}' if prefix_string else display_message
message_text = cls.prettify_message(message_text)
args = [message_text]
kwargs = {}
if isinstance(api_response, ApiResponse) and issubclass(exception_class, InstagramException):
kwargs.update({
'response': api_response,
})
raise exception_class(*args, **kwargs)
@staticmethod
def prettify_message(message: str):
last_char = message[-1] if message else ''
if last_char not in ['', '.', '!', '?']:
message += '.'
message = message.capitalize()
message = message.replace('_', ' ')
return message
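# --- Hedged usage note (illustrative only) ---
# prettify_message() turns raw server error strings into readable sentences:
#   ServerMessageRaiser.prettify_message('login_required')    -> 'Login required.'
#   ServerMessageRaiser.prettify_message('feedback_required') -> 'Feedback required.'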
|
11492073
|
import pytest
import fuzz_lightyear
from fuzz_lightyear.exceptions import ConflictingHandlers
from fuzz_lightyear.supplements.abstraction import get_abstraction
class TestMakeRequest:
def test_basic(self):
def request_handler(operation_id):
assert operation_id == 'status'
fuzz_lightyear.make_request(request_handler)
get_abstraction().request_method(
'status',
)
def test_passes_other_arguments(self):
def request_handler(operation_id, *args, **kwargs):
assert kwargs['headers']['session'] == 'id_token'
fuzz_lightyear.make_request(request_handler)
get_abstraction().request_method(
'status',
headers={
'session': 'id_token',
},
)
def test_throws_error_if_multiple_handlers(self):
def request_handler_one(*args):
pass
def request_handler_two(*args):
pass
fuzz_lightyear.make_request(request_handler_one)
with pytest.raises(ConflictingHandlers):
fuzz_lightyear.make_request(request_handler_two)
def test_custom_swagger_client():
def declaration():
return 1
fuzz_lightyear.custom_swagger_client(declaration)
assert get_abstraction().client == 1
|
11492159
|
import numpy as np
import pandas as pd
from sfrmaker.routing import find_path, make_graph
def valid_rnos(rnos):
"""Check that unique reach numbers (rno in MODFLOW 6)
are consecutive and start at 1.
"""
sorted_reaches = sorted(rnos)
consecutive = np.diff(sorted_reaches).sum() \
== len(rnos) - 1
onebased = np.min(sorted_reaches) == 1
return consecutive & onebased
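# Illustrative examples (not part of the original module):
#   valid_rnos([1, 2, 3, 4])  -> True   (consecutive and one-based)
#   valid_rnos([2, 3, 4])     -> False  (does not start at 1)
#   valid_rnos([1, 2, 4])     -> False  (gap between 2 and 4)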
def valid_nsegs(nsegs, outsegs=None, increasing=True):
"""Check that segment numbers are valid.
Parameters
----------
nsegs : list of segment numbers
outsegs : list of corresponding routing connections
Required if increasing=True.
increasing : bool
If True, segment numbers must also only increase downstream.
"""
# cast to array if list or series
nsegs = np.atleast_1d(nsegs)
outsegs = np.atleast_1d(outsegs)
consecutive_and_onebased = valid_rnos(nsegs)
if increasing:
assert outsegs is not None
graph = make_graph(nsegs, outsegs, one_to_many=False)
monotonic = []
for s in nsegs:
            try:
                seg_sequence = find_path(graph.copy(), s)[:-1]  # last number is 0 for outlet
            except Exception:
                # if no path can be found, treat this segment as non-monotonic
                monotonic.append(False)
                continue
            monotonic.append(np.all(np.diff(np.array(seg_sequence)) > 0))
monotonic = np.all(monotonic)
return consecutive_and_onebased & monotonic
else:
return consecutive_and_onebased
def is_to_one(toid_sequence):
if isinstance(toid_sequence, dict):
toid_sequence = list(toid_sequence.values())
squeezed = np.squeeze(list(toid_sequence))
if squeezed.ndim == 0:
return True
else:
return np.isscalar(np.squeeze(list(toid_sequence))[0])
def rno_nseg_routing_consistent(nseg, outseg, iseg, ireach, rno, outreach):
"""Check that routing of segments (MODFLOW-2005 style) is consistent
with routing between unique reach numbers (rno; MODFLOW 6 style)
Parameters
----------
nseg : list or 1D numpy array
outseg : list or 1D numpy array
iseg : list or 1D numpy array
ireach : list or 1D numpy array
rno : list or 1D numpy array
outreach : list or 1D numpy array
Returns
-------
consistent : bool
"""
df = pd.DataFrame({'nseg': nseg,
'outseg': outseg,
'iseg': iseg,
'ireach': ireach,
'rno': rno,
'outreach': outreach})
df.sort_values(by=['iseg', 'ireach'], inplace=True)
segment_routing = dict(zip(nseg, outseg))
seg_groups = df.groupby('iseg')
# segments associated with reach numbers that are first reaches
first_reaches = seg_groups.first()
rno1_segments = dict(zip(first_reaches.rno, first_reaches.index))
segments_consistent = []
for s, g in seg_groups:
# since the segment is sorted,
# rno[i+1] should == outreach[i]
if len(g) > 1:
preceding_consistent = np.array_equal(g.rno.values[1:],
g.outreach.values[:-1])
# segments of length 1 don't have any internal routing to check
else:
preceding_consistent = True
# check that last reach goes to same segment
# as outseg in segment_routing
last_outreach = g.outreach.iloc[-1]
next_segment = rno1_segments.get(last_outreach, 0)
last_consistent = next_segment == segment_routing[s]
segments_consistent.append(preceding_consistent &
last_consistent)
return np.all(segments_consistent)
def routing_numbering_is_valid(nseg, outseg, iseg, ireach,
rno, outreach, increasing_nseg=True):
"""Check that routing numbering for an SFR dataset is valid.
* verify that segment numbering is consecutive and starts at 1
* optionally verify that segment number only increase downstream
* verify that unique reach numbering (e.g. rno in MODFLOW 6)
is consecutive and starts at 1
* check that routing is consistent between segment connections
(MODFLOW-2005 convention of nseg -> outseg)
and reach connections (MODFLOW 6 convention based on rno)
An additional check would be all non-outlet connections are
listed in nseg and/or rno, but these can be assumed to be outlets
(converted to 0) without modifying the nseg or rno.
Parameters
----------
nseg : list or 1D numpy array
outseg : list or 1D numpy array
iseg : list or 1D numpy array
ireach : list or 1D numpy array
rno : list or 1D numpy array
outreach : list or 1D numpy array
increasing_nseg : bool
If True, segment numbers must also only increase downstream.
Returns
-------
valid
"""
return valid_rnos(rno) & \
valid_nsegs(nseg, outseg, increasing=increasing_nseg) & \
rno_nseg_routing_consistent(nseg, outseg, iseg, ireach,
rno, outreach)
def routing_is_circular(fromid, toid):
"""Verify that segments or reaches never route to themselves.
Parameters
----------
fromid : list or 1D array
e.g. COMIDS, segments, or rnos
toid : list or 1D array
routing connections
"""
fromid = np.atleast_1d(fromid)
toid = np.atleast_1d(toid)
graph = make_graph(fromid, toid, one_to_many=False)
paths = {fid: find_path(graph, fid) for fid in graph.keys()}
# a fromid should not appear more than once in its sequence
for k, v in paths.items():
if v.count(k) > 1:
return True
return False
def same_sfr_numbering(reach_data1, reach_data2):
"""Compare two sets of reach data.
Parameters
----------
reach_data1 : DataFrame
Must have columns:
i : zero-based row
j : zero-based column
iseg : segment number
ireach : reach number
reach_data2 : DataFrame
Must have same columns as reach_data1
Returns
-------
issame : bool
        Whether the two datasets have the same numbering for i, j and iseg/ireach.
Notes
-----
k (layer) is not tested because k can be different for the same SFR package depending on the context.
For example, a reach might have k=1 in the input file, and k=3 in the output file if
the flux was placed in the highest active layer.
"""
cols = ['i', 'j', 'iseg', 'ireach']
rd1 = reach_data1[cols].sort_values(by=['iseg', 'ireach']).copy()
rd2 = reach_data2[cols].sort_values(by=['iseg', 'ireach']).copy()
col_equal = []
for c in rd1.columns:
col_equal.append(np.array_equal(rd1[c], rd2[c]))
return np.all(col_equal)
def reach_elevations_decrease_downstream(reach_data):
"""Verify that reach values decrease monotonically in the downstream direction."""
rd = reach_data.reset_index()
return check_monotonicity(rd.rno, rd.outreach, rd.strtop)
#elev = dict(zip(rd.rno, rd.strtop))
#dnelev = {rid: elev[rd.outreach[i]] if rd.outreach[i] != 0
#else -9999 for i, rid in enumerate(rd.rno)}
#diffs = np.array([(dnelev[i] - elev[i]) if dnelev[i] != -9999
# else -.001 for i in rd.rno])
#return np.max(diffs) <= 0
def check_monotonicity(ids, toids, values, decrease=True):
"""Verify that values decrease or increase monotonically
in the downstream direction.
Parameters
----------
ids : sequence
Sequence of line identifiers (e.g. COMIDs)
toids : sequence
Sequence of downstream routing connections (line identifiers)
values : numeric
Values to check.
decrease : bool
If True, verify that values strictly decrease in the downstream direction,
if False, verify that values strictly increase in the downstream direction.
Returns
-------
is_monotonic : bool
Whether or not values change monotonically in the downstream direction.
"""
if isinstance(ids, pd.Series):
ids = ids.values
if isinstance(toids, pd.Series):
toids = toids.values
# a fill value that is larger than any value in values
# (in absolute terms)
default = -10 * np.max(np.abs(values))
values = np.array(values)
if not decrease:
values *= -1
values_dict = dict(zip(ids, values))
downstream_values = {rid: values_dict[toids[i]] if toids[i] != 0
else default for i, rid in enumerate(ids)}
diffs = np.array([(downstream_values[i] - values_dict[i]) if downstream_values[i] != default
else -.001 for i in ids])
return np.max(diffs) <= 0
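# --- Hedged usage sketch (toy routing, illustrative only) ---
# Three reaches routing 1 -> 2 -> 3 -> outlet (0):
#   check_monotonicity([1, 2, 3], [2, 3, 0], [10., 5., 1.])  -> True   (values drop downstream)
#   check_monotonicity([1, 2, 3], [2, 3, 0], [1., 5., 10.])  -> False  (values rise downstream)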
|
11492202
|
import copy
import json
import re
import numpy as np
import torch
import transformers.data.processors.squad as squad
from tqdm import tqdm
class SquadExample:
"""
A single training/test example for the Squad dataset, as loaded from disk.
Args:
qas_id: The example's unique identifier
question_text: The question string
context_text: The context string
answer_text: The answer string
start_position_character: The character position of the start of the answer
title: The title of the example
answers: None by default, this is used during evaluation. Holds answers as well as their start positions.
is_impossible: False by default, set to True if the example has no possible answer.
"""
def __init__(
self,
qas_id,
question_text,
context_text,
answer_text,
start_position_character,
title,
answers=[],
is_impossible=False,
doc_tokens=None,
char_to_word_offset=None,
):
self.qas_id = qas_id
self.question_text = question_text
self.context_text = context_text
self.answer_text = answer_text
self.title = title
self.is_impossible = is_impossible
self.answers = answers
self.start_position, self.end_position = 0, 0
if doc_tokens is None:
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
# Split on whitespace so that different tokens may be attributed to their original position.
for c in self.context_text:
if squad._is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
self.doc_tokens = doc_tokens
self.char_to_word_offset = char_to_word_offset
        # Start and end positions only have a value during evaluation.
if start_position_character is not None and not is_impossible:
self.start_position = char_to_word_offset[start_position_character]
self.end_position = char_to_word_offset[
min(start_position_character + len(answer_text) - 1, len(char_to_word_offset) - 1)
]
def squad_convert_example_to_features(
example,
tokenizer,
max_seq_length,
doc_stride,
max_query_length,
padding_strategy,
is_training,
tok_to_orig_index=None,
orig_to_tok_index=None,
all_doc_tokens=None,
):
features = []
if is_training and not example.is_impossible:
# Get start and end position
start_position = example.start_position
end_position = example.end_position
# If the answer cannot be found in the text, then skip this example.
actual_text = " ".join(example.doc_tokens[start_position : (end_position + 1)])
cleaned_answer_text = " ".join(squad.whitespace_tokenize(example.answer_text))
if actual_text.find(cleaned_answer_text) == -1:
return [], None, None, None
if tok_to_orig_index is None:
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
if tokenizer.__class__.__name__ in [
"RobertaTokenizer",
"LongformerTokenizer",
"BartTokenizer",
"RobertaTokenizerFast",
"LongformerTokenizerFast",
"BartTokenizerFast",
]:
sub_tokens = tokenizer.tokenize(token, add_prefix_space=True)
else:
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
if is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = squad._improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.answer_text
)
spans = []
truncated_query = tokenizer.encode(
example.question_text, add_special_tokens=False, truncation=True, max_length=max_query_length
)
# Tokenizers who insert 2 SEP tokens in-between <context> & <question> need to have special handling
# in the way they compute mask of added tokens.
tokenizer_type = type(tokenizer).__name__.replace("Tokenizer", "").lower()
sequence_added_tokens = (
tokenizer.model_max_length - tokenizer.max_len_single_sentence + 1
if tokenizer_type in squad.MULTI_SEP_TOKENS_TOKENIZERS_SET
else tokenizer.model_max_length - tokenizer.max_len_single_sentence
)
sequence_pair_added_tokens = tokenizer.model_max_length - tokenizer.max_len_sentences_pair
span_doc_tokens = all_doc_tokens
while len(spans) * doc_stride < len(all_doc_tokens):
# Define the side we want to truncate / pad and the text/pair sorting
if tokenizer.padding_side == "right":
texts = truncated_query
pairs = span_doc_tokens
truncation = squad.TruncationStrategy.ONLY_SECOND.value
else:
texts = span_doc_tokens
pairs = truncated_query
truncation = squad.TruncationStrategy.ONLY_FIRST.value
encoded_dict = tokenizer.encode_plus( # TODO(thom) update this logic
texts,
pairs,
truncation=truncation,
padding=padding_strategy,
max_length=max_seq_length,
return_overflowing_tokens=True,
stride=max_seq_length - doc_stride - len(truncated_query) - sequence_pair_added_tokens,
return_token_type_ids=True,
)
paragraph_len = min(
len(all_doc_tokens) - len(spans) * doc_stride,
max_seq_length - len(truncated_query) - sequence_pair_added_tokens,
)
if tokenizer.pad_token_id in encoded_dict["input_ids"]:
if tokenizer.padding_side == "right":
non_padded_ids = encoded_dict["input_ids"][: encoded_dict["input_ids"].index(tokenizer.pad_token_id)]
else:
last_padding_id_position = (
len(encoded_dict["input_ids"]) - 1 - encoded_dict["input_ids"][::-1].index(tokenizer.pad_token_id)
)
non_padded_ids = encoded_dict["input_ids"][last_padding_id_position + 1 :]
else:
non_padded_ids = encoded_dict["input_ids"]
tokens = tokenizer.convert_ids_to_tokens(non_padded_ids)
token_to_orig_map = {}
for i in range(paragraph_len):
index = len(truncated_query) + sequence_added_tokens + i if tokenizer.padding_side == "right" else i
token_to_orig_map[index] = tok_to_orig_index[len(spans) * doc_stride + i]
encoded_dict["paragraph_len"] = paragraph_len
encoded_dict["tokens"] = tokens
encoded_dict["token_to_orig_map"] = token_to_orig_map
encoded_dict["truncated_query_with_special_tokens_length"] = len(truncated_query) + sequence_added_tokens
encoded_dict["token_is_max_context"] = {}
encoded_dict["start"] = len(spans) * doc_stride
encoded_dict["length"] = paragraph_len
spans.append(encoded_dict)
if "overflowing_tokens" not in encoded_dict or (
"overflowing_tokens" in encoded_dict and len(encoded_dict["overflowing_tokens"]) == 0
):
break
span_doc_tokens = encoded_dict["overflowing_tokens"]
for doc_span_index in range(len(spans)):
for j in range(spans[doc_span_index]["paragraph_len"]):
is_max_context = squad._new_check_is_max_context(spans, doc_span_index, doc_span_index * doc_stride + j)
index = (
j
if tokenizer.padding_side == "left"
else spans[doc_span_index]["truncated_query_with_special_tokens_length"] + j
)
spans[doc_span_index]["token_is_max_context"][index] = is_max_context
for span in spans:
# Identify the position of the CLS token
cls_index = span["input_ids"].index(tokenizer.cls_token_id)
p_mask = np.array([])
# # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer)
# # Original TF implementation also keep the classification token (set to 0)
# p_mask = np.ones_like(span["token_type_ids"])
# if tokenizer.padding_side == "right":
# p_mask[len(truncated_query) + sequence_added_tokens :] = 0
# else:
# p_mask[-len(span["tokens"]) : -(len(truncated_query) + sequence_added_tokens)] = 0
# pad_token_indices = np.where(span["input_ids"] == tokenizer.pad_token_id)
# special_token_indices = np.asarray(
# tokenizer.get_special_tokens_mask(span["input_ids"], already_has_special_tokens=True)
# ).nonzero()
# p_mask[pad_token_indices] = 1
# p_mask[special_token_indices] = 1
# # Set the cls index to 0: the CLS index can be used for impossible answers
# p_mask[cls_index] = 0
span_is_impossible = example.is_impossible
start_position = 0
end_position = 0
if is_training and not span_is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = span["start"]
doc_end = span["start"] + span["length"] - 1
out_of_span = False
if not (tok_start_position >= doc_start and tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = cls_index
end_position = cls_index
span_is_impossible = True
else:
if tokenizer.padding_side == "left":
doc_offset = 0
else:
doc_offset = len(truncated_query) + sequence_added_tokens
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
features.append(
squad.SquadFeatures(
span["input_ids"],
span["attention_mask"],
span["token_type_ids"],
cls_index,
p_mask.tolist(),
example_index=0, # Can not set unique_id and example_index here. They will be set after multiple processing.
unique_id=0,
paragraph_len=span["paragraph_len"],
token_is_max_context=span["token_is_max_context"],
tokens=span["tokens"],
token_to_orig_map=span["token_to_orig_map"],
start_position=start_position,
end_position=end_position,
is_impossible=span_is_impossible,
qas_id=example.qas_id,
)
)
return features, tok_to_orig_index, orig_to_tok_index, all_doc_tokens
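# Note (added): the tok_to_orig_index / orig_to_tok_index / all_doc_tokens lists are returned
# alongside the features so that sample_writer below can pass them back in for the next question
# on the same context, avoiding re-tokenizing the shared paragraph.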
def sample_writer(data, config, tokenizer, is_train):
"""process and write single 'paragraph-context' from squad-formed QA dataset"""
context = data["context"]
example = None
tok_to_orig_index = None
orig_to_tok_index = None
all_doc_tokens = None
write_data = []
for qas in data["qas"]:
is_impossible = qas.get("is_impossible", False)
example = SquadExample(
qas_id=qas["id"],
question_text=qas["question"],
context_text=context,
answer_text="" if is_impossible else qas["answers"][0]["text"],
start_position_character=0 if is_impossible else qas["answers"][0]["answer_start"],
title="",
answers=qas["answers"],
is_impossible=is_impossible,
doc_tokens=(None if example is None else example.doc_tokens),
char_to_word_offset=(None if example is None else example.char_to_word_offset),
)
features, tok_to_orig_index, orig_to_tok_index, all_doc_tokens = squad_convert_example_to_features(
example=example,
tokenizer=tokenizer,
max_seq_length=config.max_seq_length,
doc_stride=config.doc_stride,
max_query_length=config.max_query_length,
padding_strategy="max_length",
is_training=is_train,
tok_to_orig_index=tok_to_orig_index,
orig_to_tok_index=orig_to_tok_index,
all_doc_tokens=all_doc_tokens,
)
for i, feature in enumerate(features):
write_datum = {
"input_ids": feature.input_ids,
"attention_mask": feature.attention_mask,
"token_type_ids": feature.token_type_ids,
"start_position": feature.start_position,
"end_position": feature.end_position,
}
if not is_train:
write_datum["id"] = feature.qas_id
write_datum["unique_id"] = "{}_{}".format(feature.qas_id, i)
write_datum["tokens"] = feature.tokens
write_datum["token_to_orig_map"] = feature.token_to_orig_map
write_datum["paragraph_len"] = feature.paragraph_len
write_datum["token_is_max_context"] = feature.token_is_max_context
if i == 0: # only store example data for the first feature from the example
write_datum["is_impossible"] = example.is_impossible
write_datum["answers"] = example.answers
write_datum["doc_tokens"] = example.doc_tokens
else:
write_datum["is_impossible"] = None
write_datum["answers"] = None
write_datum["doc_tokens"] = None
write_data.append(write_datum)
return write_data
def process_korquad_1(config, data_file, train):
with open(data_file) as handle:
load_data = json.load(handle)["data"]
data = []
for datum in load_data:
data.extend(datum["paragraphs"])
return data
def process_korquad_2(config, data_file, train):
TAGS = [
"<!DOCTYPE html>",
"<html>",
"</html>",
"<meta>",
"<head>",
"</head>",
"<body>",
"</body>",
"<label>",
"</label>",
"<div>",
"</div>",
"<a>",
"</a>",
"<span>",
"</span>",
"<link>",
"<img>",
"<sup>",
"</sup>",
"<input>",
"<noscript></noscript>",
"<p>",
"</p>",
"<cite>",
"</cite>",
"<tbody>",
"</tbody>",
"</br>",
"<br/>",
"<h1>",
"</h1>",
"<h2>",
"</h2>",
"<h3>",
"</h3>",
"<form>",
"</form>",
"<small>",
"</small>",
"<big>",
"</big>",
"<b>",
"</b>",
"<abbr>",
"</abbr>",
"[편집]",
]
def remove_tags(text):
# only retain meaningful html tags to extract answers
# tags like <table> <tr> <td> <ul> <li> will remain
text = re.sub("<title>.*?</title>", " ", text)
text = text.replace('rowspan="', "r") # summarize attribute name for col, row spans
text = text.replace('colspan="', "c") # which indicate merged table cells
for tag in TAGS:
text = text.replace(tag, "")
text = re.sub(" +", " ", text)
text = re.sub("\n+", "\n", text)
return text
with open(data_file) as handle:
data = json.load(handle)["data"]
qas_num = []
for datum in data:
context = datum["context"]
datum["context"] = remove_tags(context)
del datum["raw_html"]
if train and not config.all_korquad_2_sample: # only use first sample from context for train split
datum["qas"] = datum["qas"][:1]
for qas in datum["qas"]:
del qas["answer"]["html_answer_text"]
del qas["answer"]["html_answer_start"]
# qas["id"] = "{}-{}".format(i, qas["id"])
answer_prev_text = context[: qas["answer"]["answer_start"]]
answer_prev_len = len(answer_prev_text)
remove_tag_text = remove_tags(answer_prev_text)
# adjust span position according to text without tags
qas["answer"]["answer_start"] -= answer_prev_len - len(remove_tag_text)
qas["answer"]["text"] = remove_tags(qas["answer"]["text"])
qas["answers"] = [qas.pop("answer")]
qas_num.append(len(datum["qas"]))
    # some paragraphs in korquad_2 have too many question samples,
    # which slows down the multiprocessing job,
    # so split them into smaller chunks
limit_qas_num = int(np.percentile(np.array(qas_num), 50))
flat_data = []
for datum in data:
qas_num = len(datum["qas"])
if qas_num > limit_qas_num:
for j in range(0, qas_num, limit_qas_num):
_datum = copy.deepcopy(datum)
_datum["qas"] = datum["qas"][j : j + limit_qas_num]
flat_data.append(_datum)
else:
flat_data.append(datum)
data = flat_data
return data
def process_kluemrc(config, data_file, train):
data = []
with open(data_file) as handle:
for datum in json.load(handle)["data"]:
datum = datum["paragraphs"]
for qas in datum:
for q in qas["qas"]:
q["id"] = q.pop("guid")
data.extend(datum)
return data
def process_tydiqa(config, data_file, train):
data = []
total = sum([1 for _ in open(data_file)])
for line in tqdm(open(data_file), total=total, dynamic_ncols=True):
datum = json.loads(line)
if datum["language"].lower().strip() != "korean":
continue
span_byte_map = {}
prev_bytes = 0
for i, char in enumerate(datum["document_plaintext"]):
byte_len = len(char.encode("utf-8"))
for j in range(byte_len):
span_byte_map[prev_bytes + j] = i
prev_bytes += byte_len
answers = []
bool_answers = []
is_impossible = False
for annot in datum["annotations"]:
spans = annot["minimal_answer"]
start = spans["plaintext_start_byte"]
end = spans["plaintext_end_byte"]
yesno = None if annot["yes_no_answer"] == "NONE" else annot["yes_no_answer"]
if yesno is not None:
bool_answers.append(yesno)
continue
if spans["plaintext_start_byte"] == -1:
is_impossible = True
else:
start = span_byte_map[start]
end = span_byte_map[end]
answers.append(
{
"text": datum["document_plaintext"][start:end],
"answer_start": start,
}
)
# skip boolqa samples
if len(bool_answers) != 0:
continue
if len(answers) != 0:
is_impossible = False
else:
is_impossible = True
data.append(
{
"context": datum["document_plaintext"],
"qas": [
{
"id": len(data),
"question": datum["question_text"],
"is_impossible": is_impossible,
"answers": answers,
}
],
}
)
return data
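# The helper below is an added illustration (not part of the original pipeline) of why the
# byte-to-character map above is needed: TyDi QA stores answer spans as UTF-8 byte offsets,
# while Python strings index by character, and multi-byte Hangul makes the two diverge.
def _demo_byte_to_char_map():
    text = "한a"  # "한" occupies 3 bytes, "a" occupies 1 byte
    span_byte_map, prev_bytes = {}, 0
    for i, char in enumerate(text):
        byte_len = len(char.encode("utf-8"))
        for j in range(byte_len):
            span_byte_map[prev_bytes + j] = i
        prev_bytes += byte_len
    assert span_byte_map[3] == 1  # byte offset 3 falls inside the second character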
process_map = {
"korquad_1": process_korquad_1,
"korquad_2": process_korquad_2,
"tydiqa": process_tydiqa,
"kluemrc": process_kluemrc,
}
def collate_fn(features):
input_ids = [sample["input_ids"] for sample in features]
attention_mask = [sample["attention_mask"] for sample in features]
token_type_ids = [sample["token_type_ids"] for sample in features]
start_position = [sample["start_position"] for sample in features]
end_position = [sample["end_position"] for sample in features]
input_ids = torch.tensor(np.array(input_ids).astype(np.int64), dtype=torch.long)
attention_mask = torch.tensor(np.array(attention_mask).astype(np.int8), dtype=torch.long)
token_type_ids = torch.tensor(np.array(token_type_ids).astype(np.int8), dtype=torch.long)
start_position = torch.tensor(np.array(start_position).astype(np.int64), dtype=torch.long)
end_position = torch.tensor(np.array(end_position).astype(np.int64), dtype=torch.long)
inputs = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
"start_positions": start_position,
"end_positions": end_position,
}
if "unique_id" in features[0]:
inputs["unique_id"] = [sample["unique_id"] for sample in features]
return inputs
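# Added usage sketch (assumption, not from the original training script): the dicts produced by
# sample_writer can be fed straight to a PyTorch DataLoader with collate_fn as the batching
# function. The dummy feature below only illustrates the expected keys and per-sample shapes.
if __name__ == "__main__":
    _dummy = {
        "input_ids": [101, 102] + [0] * 6,
        "attention_mask": [1, 1] + [0] * 6,
        "token_type_ids": [0] * 8,
        "start_position": 1,
        "end_position": 1,
    }
    _batch = collate_fn([_dummy, _dummy])
    print({k: tuple(v.shape) for k, v in _batch.items()})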
|
11492204
|
import pytest
from typus import en_typus, ru_typus
from typus.chars import *
QUOTES = (
''.join((LAQUO, RAQUO, DLQUO, LDQUO)),
''.join((LDQUO, RDQUO, LSQUO, RSQUO)),
)
TYPUSES = (
(ru_typus, {}),
(en_typus, str.maketrans(*QUOTES)),
)
@pytest.fixture(name='assert_typus', scope='module', params=TYPUSES)
def get_assert_typus(request):
typus, charmap = request.param
def assert_typus(source, expected):
assert typus(source) == expected.translate(charmap)
return assert_typus
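# Note (added): TYPUSES pairs each typus with a translation table so one expected string serves
# both locales; for en_typus the «» and „“ quotes written in the expectations are re-mapped to
# the English “” and ‘’ before comparison, while ru_typus compares the string unchanged.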
def test_debug():
assert ru_typus('1m', debug=True) == '1_m'
@pytest.mark.parametrize('source, expected', (
('00 "11" 00', '00 «11» 00'),
# clashes with digit_spaces
(
'''00" "11 '22' 11"? "11 '22 "33 33?"' 11" 00 "11 '22' 11" 0"''',
f'00{DPRIME} «11 „22“ 11»? «11 „22 «33{NBSP}33?»“ 11» '
f'00 «11 „22“ 11» 0{DPRIME}'
),
))
def test_quotes(assert_typus, source, expected):
assert_typus(source, expected)
@pytest.mark.parametrize('source, expected', (
('--', '--'),
('foo - foo', f'foo{MDASH_PAIR}foo'),
# Leading comma case
(', - foo', f',{MDASH}{THNSP}foo'),
(', -- foo', f',{MDASH}{THNSP}foo'),
# if line begins, adds nbsp after mdash
('-- foo', f'{MDASH}{NBSP}foo'),
# if line ends, adds nbsp before mdash
('foo --', f'foo{NBSP}{MDASH}'),
('foo -- bar', f'foo{MDASH_PAIR}bar'),
# Python markdown replaces dash with ndash, don't know why
(f'foo {NDASH} foo', f'foo{MDASH_PAIR}foo'),
# This one for ru_typus
('foo - "11" 00', f'foo{MDASH_PAIR}«11» 00'),
('2 - 2foo', f'2{MDASH_PAIR}2foo'), # no units clash
('2 - 2', f'2{NBSP}{MINUS}{NBSP}2'), # + minus
('Winnie-the-Pooh', 'Winnie-the-Pooh'),
))
def test_mdash(assert_typus, source, expected):
assert_typus(source, expected)
@pytest.mark.parametrize('source, expected', (
('"4"', '«4»'),
('4\'', '4' + SPRIME),
('4"', '4' + DPRIME),
('" 22"', '" 22' + DPRIME),
))
def test_primes(assert_typus, source, expected):
assert_typus(source, expected)
@pytest.mark.parametrize('source, expected', (
('25-foo', '25-foo'),
('2-3', f'2{NDASH}3'),
('2,5-3', f'2,5{NDASH}3'),
('0.5-3', f'0.5{NDASH}3'),
('2-3 foo', f'2{NDASH}3{NBSP}foo'), # + ranges
('(15-20 items)', f'(15{NDASH}20{NBSP}items)'),
# Float
('0,5-3', f'0,5{NDASH}3'),
('-0,5-3', f'{MINUS}0,5{NDASH}3'),
('-5.5-3', f'{MINUS}5.5{NDASH}3'),
('-5,5-3', f'{MINUS}5,5{NDASH}3'),
('-5,5-3.5', f'{MINUS}5,5{NDASH}3.5'),
('2 - 3', f'2{NBSP}{MINUS}{NBSP}3'),
('2-3 x 4', f'2{MINUS}3{NBSP}{TIMES}{NBSP}4'),
('2-3 * 4', f'2{MINUS}3{NBSP}{TIMES}{NBSP}4'),
('2-3 - 4', f'2{MINUS}3{NBSP}{MINUS}{NBSP}4'),
))
def test_ranges(assert_typus, source, expected):
assert_typus(source, expected)
@pytest.mark.parametrize('source, expected', (
# Minus
(f'3{NBSP}-{NBSP}2', f'3{NBSP}{MINUS}{NBSP}2'),
# This one clashes with range
('2-3', f'2{NDASH}3'),
# This one clashes with mdash
(f'x{NBSP}-{NBSP}3', f'x{NNBSP}{MDASH}{THNSP}3'),
('-3', f'{MINUS}3'),
# Star
('3*2', f'3{TIMES}2'),
('*3', f'{TIMES}3'),
(f'3{NBSP}*{NBSP}2', f'3{NBSP}{TIMES}{NBSP}2'),
(f'x{NBSP}*{NBSP}2', f'x{NBSP}{TIMES}{NBSP}2'),
# 'x'
('3x2', f'3{TIMES}2'),
('x3', f'{TIMES}3'),
(f'3{NBSP}x{NBSP}2', f'3{NBSP}{TIMES}{NBSP}2'),
(f'x{NBSP}x{NBSP}2', f'x{NBSP}{TIMES}{NBSP}2'),
# and Russian "х"
('3х2', f'3{TIMES}2'),
('х3', f'{TIMES}3'),
(f'3{NBSP}х{NBSP}2', f'3{NBSP}{TIMES}{NBSP}2'),
(f'x{NBSP}х{NBSP}2', f'x{NBSP}{TIMES}{NBSP}2'),
))
def test_math(assert_typus, source, expected):
assert_typus(source, expected)
@pytest.mark.parametrize('source, expected', (
    ('aaa 2a', f'aaa 2a'),  # doesn't clash with units
))
def test_pairs(assert_typus, source, expected):
assert_typus(source, expected)
@pytest.mark.parametrize('source, expected', (
('4444444 fooo', '4444444 fooo'),
('444 foo', f'444{NBSP}foo'),
('444 +', f'444{NBSP}+'),
('444 4444 bucks', f'444{NBSP}4444 bucks'),
    ('4444444 foo', f'4444444 foo'),  # no units clash
('444 -', f'444{NBSP}{MDASH}'),
))
def test_digit_spaces(assert_typus, source, expected):
assert_typus(source, expected)
def test_example(assert_typus):
source = (
'Излучение, как следует из вышесказанного, концентрирует '
'внутримолекулярный предмет - деятельности . "...ff \'Можно?\' '
'предположить, что силовое - "поле "мент "d" ально" отклоняет" '
'сенсибельный \'квазар !..\' cc", не учитывая мнения авторитетов. '
'Искусство испускает данный электрон, учитывая опасность, '
'<code> "d" test -- test(c)</code> которую представляли '
'собой писания Дюринга для не окрепшего еще немецкого рабочего '
'движения. Смысл жизни -- амбивалентно (с) дискредитирует '
'закон (r) исключённого(tm) третьего (тм)... \n\n\n'
'1500 мА*ч\n\n'
'1-2=4\n'
'- Химическое соединение (p) ненаблюдаемо контролирует экран-ый '
'квазар (р). Идеи 3/4 гедонизма занимают b & b центральное место '
'в утилитаризме(sm) "<NAME>", однако <- гравитирующая -> '
'сфера масштабирует фотон, +-2мм изменяя привычную == реальность. '
'Силовое *3 поле -3 реально 3 * 2 /= 6 3x3 восстанавливает '
'трансцендентальный 3" 2\' принцип 1000р. восприятия.'
'"...\'test\'" (c) m&m\'s\n\n\n'
)
expected = (
'Излучение, как следует из_вышесказанного, концентрирует '
'внутримолекулярный предмет\u202f—\u2009деятельности. «…ff „Можно?“ '
'предположить, что силовое\u202f—\u2009„поле «мент „d“ ально» '
'отклоняет“ '
'сенсибельный „квазар!..“ cc», не_учитывая мнения авторитетов. '
'Искусство испускает данный электрон, учитывая опасность, '
'<code> "d" test -- test(c)</code> которую представляли собой '
'писания Дюринга для не_окрепшего еще немецкого рабочего '
'движения. Смысл жизни\u202f—\u2009амбивалентно ©_дискредитирует '
'закон® исключённого™ третьего™…\n\n'
'1500_мА•ч\n\n'
'1−2=4\n'
'—_Химическое соединение℗ ненаблюдаемо контролирует экран-ый '
'квазар℗. Идеи ¾_гедонизма занимают b_&_b_центральное место '
'в_утилитаризме℠ «Милля и_Бентама», однако ←_гравитирующая_→ '
'сфера масштабирует фотон, ±2_мм изменяя привычную_≡_реальность. '
'Силовое ×3_поле −3_реально 3_×_2_≠_6 3×3 восстанавливает '
'трансцендентальный 3″ 2′ принцип 1000_₽ восприятия.'
'«…„test“» ©_m&m’s'
).replace('_', NBSP)
assert_typus(source, expected)
|
11492211
|
from pandas_confusion import ConfusionMatrix
def write_prediction_file(path, test_unique_ids, Y_test_indices, y_prediction_classes):
with open(path, 'w') as prediction_handler:
prediction_handler.write('{}\t{}\t{}\n'.format("UniqueID", "Gold_Label", "Prediction"))
for index, val in enumerate(test_unique_ids):
prediction_handler.write('{}\t{}\t{}\n'.format(val, Y_test_indices[index], y_prediction_classes[index]))
def write_score_file(score_file, f1_mean, f1, model, Y_test_indices, y_prediction_classes):
with open(score_file, 'w') as score_handler:
score_handler.write("Micro-averaged F1: {}\n".format(f1_mean))
score_handler.write("Individual scores: {}\n".format(f1))
score_handler.write("Confusion matrix:\n")
score_handler.write(str(ConfusionMatrix(Y_test_indices, y_prediction_classes)))
score_handler.write("\n\n\nModel summary\n")
model.summary(print_fn=lambda x: score_handler.write(x + '\n'))
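# Added usage sketch (file name and labels below are illustrative, not from the original run):
# write_prediction_file emits a three-column TSV of ids, gold labels and predictions.
if __name__ == "__main__":
    write_prediction_file("predictions.tsv", ["ex-1", "ex-2"], ["POS", "NEG"], ["POS", "POS"])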
|
11492227
|
import numpy as np
from bokeh.core.properties import Float
from bokeh.io import save
from bokeh.model import DataModel
from bokeh.models import ColumnDataSource, CustomJS
from bokeh.plotting import figure
class Params(DataModel):
amp = Float(default=0.1, help="Amplitude")
freq = Float(default=0.1, help="Frequency")
phase = Float(default=0, help="Phase")
offset = Float(default=-5, help="Offset")
params = Params(amp=2, freq=3, phase=0.4, offset=1)
A = params.amp
k = params.freq
phi = params.phase
B = params.offset
x = np.linspace(0, 10, 100)
y = A*np.sin(k*x + phi) + B
source = ColumnDataSource(data=dict(x=x, y=y))
plot = figure(tags=[params], y_range=(-10, 10), title="Data models example")
plot.line("x", "y", source=source, line_width=3, line_alpha=0.6)
callback = CustomJS(args=dict(source=source, params=params), code="""
const data = source.data
const A = params.amp
const k = params.freq
const phi = params.phase
const B = params.offset
const {x, y} = data
for (let i = 0; i < x.length; i++) {
y[i] = A*Math.sin(k*x[i] + phi) + B
}
source.change.emit()
""")
params.js_on_change("amp", callback)
params.js_on_change("freq", callback)
params.js_on_change("phase", callback)
params.js_on_change("offset", callback)
save(plot)
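# Added note (hypothetical extension, not in the original example): the saved page is static
# because nothing on it changes the DataModel properties; linking a widget to one of them would
# trigger the CustomJS callbacks, e.g.
#
# from bokeh.layouts import column
# from bokeh.models import Slider
# amp_slider = Slider(start=0, end=5, value=params.amp, step=0.1, title="amp")
# amp_slider.js_link("value", params, "amp")
# save(column(amp_slider, plot))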
|
11492280
|
import os
from pathlib import Path
def write_nblink(fpath, route):
rpath = Path(route, fpath)
new_name = fpath.name.replace(".ipynb", ".nblink")
fpath = fpath.with_name(new_name)
with open(fpath, "w") as f:
f.write("{ \n")
f.write(f' "path": "{rpath.as_posix()}"\n')
f.write("}\n")
dir_docs = Path("./")
dir_examples_root = Path("../")
dir_examples = Path(dir_examples_root, "examples")
for root, subdirectories, files in os.walk(dir_examples):
for file in files:
fpath = Path(root, file)
if ".ipynb_checkpoints" not in str(fpath) and fpath.suffix == ".ipynb":
# Find path relative to examples root, e.g.: examples/filters/x.ipynb
rpath = fpath.relative_to(dir_examples_root)
# Create a similar directory structure under docs/
rpath.parent.mkdir(parents=True, exist_ok=True)
# Generate and write .nblink file in the created directory structure
num_dirs_deep = len(rpath.parent.parts)
route = Path("../" * num_dirs_deep, dir_examples_root)
write_nblink(rpath, route)
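# Added illustration (hypothetical notebook path): for an input notebook two directories deep,
# e.g. examples/filters/x.ipynb, the loop above writes examples/filters/x.nblink under docs/
# containing:
# {
#   "path": "../../../examples/filters/x.ipynb"
# }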
|
11492282
|
import re
import SourceModel.SM_CaseStmt
import SourceModel.SM_Class
import SourceModel.SM_Constants as SMCONSTS
import SourceModel.SM_Define
import SourceModel.SM_Element
import SourceModel.SM_Exec
import SourceModel.SM_FileResource
import SourceModel.SM_IfStmt
import SourceModel.SM_IncludeResource
import SourceModel.SM_LCOM
import SourceModel.SM_Node
import SourceModel.SM_PackageResource
import SourceModel.SM_ServiceResource
import SourceModel.SM_User
from SmellDetector import Utilities
class SM_File:
def __init__(self, file=""):
if file != "":
curFile = open(file, 'rt', errors='ignore')
self.fileText = curFile.read()
self.resourceBodyText = self.fileText
self.fileName = file
curFile.close()
def setText(self, text):
self.fileText = text
def getNoOfClassDeclarations(self):
return self.countEntityDeclaration(SMCONSTS.CLASS_REGEX, "class")
def getNoOfDefineDeclarations(self):
return self.countEntityDeclaration(SMCONSTS.DEFINE_REGEX, "define")
def getNoOfFileDeclarations(self):
return self.countEntityDeclaration(SMCONSTS.FILE_REGEX, "file")
def getNoOfPackageDeclarations(self):
return self.countEntityDeclaration(SMCONSTS.PACKAGE_REGEX, "package")
def getNoOfServiceDeclarations(self):
return self.countEntityDeclaration(SMCONSTS.SERVICE_REGEX, "service")
def getNoOfExecDeclarations(self):
return self.countEntityDeclaration(SMCONSTS.EXEC_REGEX, "exec")
def getLinesOfCode(self):
counter = self.countEntityDeclaration(SMCONSTS.LOC_REGEX, "newLine")
if counter > 0:
return counter+1
if (len(self.fileText) > 0):
return 1
return 0
def getLinesOfCodeWithoutComments(self):
totalLines = self.getLinesOfCode()
totalCommentsLines = self.getLinesOfComments()
return totalLines - totalCommentsLines
def getLinesOfComments(self):
counter = self.countEntityDeclaration(SMCONSTS.COMMENT_REGEX, "newLine")
return counter
def countEntityDeclaration(self, regEx, entityType):
compiledRE = re.compile(regEx)
Utilities.myPrint("Identified " + entityType + " declarations: " + str(compiledRE.findall(self.fileText)) + \
" Size: " + str(len(compiledRE.findall(self.fileText))))
return len(compiledRE.findall(self.fileText))
def getFileResourceList(self):
compiledRE = re.compile(SMCONSTS.FILE_REGEX)
fileResourceList = []
for match in (compiledRE.findall(self.fileText)):
fileResourceText = self.extractResourceText(match)
Utilities.myPrint("Extracted file declaration: " + fileResourceText)
fileResourceObj = SourceModel.SM_FileResource.SM_FileResource(fileResourceText)
fileResourceList.append(fileResourceObj)
return fileResourceList
def extractResourceText(self, initialString):
index = self.fileText.find(initialString)
if index < 0:
return initialString
compiledRE1 = re.compile(r'\{')
compiledRE2 = re.compile(r'\}')
curBracketCount = len(compiledRE1.findall(initialString)) - len(compiledRE2.findall(initialString))
curIndex = index + len(initialString) + 1
if curBracketCount == 0:
#This is to find the first "{" since currently there is no { which may happen in case of multi-line def
found = False
while curIndex < len(self.fileText) and not found:
if self.fileText[curIndex] == '{':
found = True
curBracketCount = 1
curIndex += 1
while curBracketCount > 0 and curIndex < len(self.fileText):
if self.fileText[curIndex] == '}':
curBracketCount -= 1
if self.fileText[curIndex] == '{':
curBracketCount += 1
curIndex +=1
return self.fileText[index:curIndex]
def getServiceResourceList(self):
compiledRE = re.compile(SMCONSTS.SERVICE_REGEX)
serviceResourceList = []
for match in (compiledRE.findall(self.fileText)):
serviceResourceText = self.extractResourceText(match)
Utilities.myPrint("Extracted service declaration: " + serviceResourceText)
serviceResourceObj = SourceModel.SM_ServiceResource.SM_ServiceResource(serviceResourceText)
serviceResourceList.append(serviceResourceObj)
return serviceResourceList
def getPackageResourceList(self):
compiledRE = re.compile(SMCONSTS.PACKAGE_REGEX)
packageResourceList = []
for match in (compiledRE.findall(self.fileText)):
packageResourceText = self.extractResourceText(match)
Utilities.myPrint("Extracted package declaration: " + packageResourceText)
packageResourceObj = SourceModel.SM_PackageResource.SM_PackageResource(packageResourceText)
packageResourceList.append(packageResourceObj)
return packageResourceList
def getClassDeclarationList(self):
compiledRE = re.compile(SMCONSTS.CLASS_REGEX)
compiledClassNameRE = re.compile(SMCONSTS.CLASS_NAME_REGEX)
classList = []
for match in compiledRE.findall(self.fileText):
className = compiledClassNameRE.findall(match)[0]
#print("Class name: %s" % (className))
classText = self.extractResourceText(match)
Utilities.myPrint("Extracted class declaration: " + classText)
classObj = SourceModel.SM_Class.SM_Class(classText, className)
classList.append(classObj)
return classList
def getDefineDeclarationList(self):
compiledRE = re.compile(SMCONSTS.DEFINE_REGEX)
defineList = []
for match in compiledRE.findall(self.fileText):
defineText, s, e = self.extractElementText(match)
Utilities.myPrint("Extracted define declaration: " + defineText)
defineObj = SourceModel.SM_Define.SM_Define(defineText)
defineList.append(defineObj)
return defineList
def getLCOM(self):
return SourceModel.SM_LCOM.getLCOM(self.getOuterElementList())
def getBodyTextSize(self):
loc = self.getLinesOfCode()
return loc, len(self.resourceBodyText)
def getOuterClassList(self):
outerElementList = self.getOuterElementList()
classList = []
for element in outerElementList:
if type(element) is SourceModel.SM_Class.SM_Class:
classList.append(element)
return classList
def getOuterDefineList(self):
outerElementList = self.getOuterElementList()
defineList = []
for element in outerElementList:
if type(element) is SourceModel.SM_Define.SM_Define:
defineList.append(element)
return defineList
# exElementList = []
# exElementList.extend(self.getElementList(SMCONSTS.DEFINE_REGEX))
# filteredList = self.filterOutInnerElements(exElementList)
# return filteredList
def getOuterElementList(self):
exElementList = []
exElementList.extend(self.getElementList(SMCONSTS.CLASS_REGEX))
exElementList.extend(self.getElementList(SMCONSTS.SERVICE_REGEX))
exElementList.extend(self.getElementList(SMCONSTS.CASE_REGEX))
exElementList.extend(self.getElementList(SMCONSTS.DEFINE_REGEX))
exElementList.extend(self.getElementList(SMCONSTS.EXEC_REGEX))
exElementList.extend(self.getElementList(SMCONSTS.FILE_REGEX))
exElementList.extend(self.getElementList(SMCONSTS.IF_REGEX))
exElementList.extend(self.getElementList(SMCONSTS.PACKAGE_REGEX))
exElementList.extend(self.getElementList(SMCONSTS.USER_REGEX))
filteredList = self.filterOutInnerElements(exElementList)
return filteredList
def getElementList(self, regex):
compiledRE = re.compile(regex)
exElementList = []
        for match in compiledRE.findall(self.fileText):
            elementText, startIndex, endIndex = self.extractElementText(match)
elementObj = self.getElementObject(elementText, regex)
exElementList.append(ExElement(elementObj, startIndex, endIndex))
return exElementList
# TODO: Handle variables
# Unwrap classes from list
def getIncludeClasses(self):
compiledIncludeRE = re.compile(SMCONSTS.DECLARE_INCLUDE_REGEX)
compiledResourceRE = re.compile(SMCONSTS.DECLARE_RESOURCE_REGEX)
declareClassList = []
declareClassName = ""
for match in (compiledIncludeRE.findall(self.fileText)):
#print(match)
declareClassText = match
cleanInclude = re.sub(r'^\s*include \[?(.+)\]?\s*$', r'\1', declareClassText)
#print("Clean include: %s" % cleanInclude)
class_name = r'(?:Class\[)?\'?\:{0,2}([\w\d\:\-_\$]+)\'?\]?'
classRE = re.compile(class_name)
if ',' in cleanInclude:
classes = cleanInclude.split(',')
for c in classes:
for m in classRE.findall(c):
# Find a variable value in text
if m.startswith('$'):
#print("Variable: %s" % m)
varRE = r'(?:^|\n)\s*\$[\w\d\-_]+\s?=\s?\'?\"?([\w\d\-_]+)\'?\"?\n'
compiledVarRE = re.compile(varRE)
for v in (compiledVarRE.findall(self.fileText)):
#print(v)
declareClassName = v
Utilities.myPrint("Extracted include class declaration: " + declareClassText)
declareResourceObj = SourceModel.SM_IncludeResource.SM_IncludeResource(declareClassText, declareClassName)
declareClassList.append(declareResourceObj)
break
#print("Variable %s value)
#print("Extracted class name: %s" % m)
else:
declareClassName = m
Utilities.myPrint("Extracted include class declaration: " + declareClassText)
declareResourceObj = SourceModel.SM_IncludeResource.SM_IncludeResource(declareClassText, declareClassName)
declareClassList.append(declareResourceObj)
else:
for c in classRE.findall(cleanInclude):
#print("Extracted class name: %s" % c)
declareClassName = c
#print("%s" % includeClassText)
Utilities.myPrint("Extracted include class declaration: " + declareClassText)
declareResourceObj = SourceModel.SM_IncludeResource.SM_IncludeResource(declareClassText, declareClassName)
declareClassList.append(declareResourceObj)
for match in (compiledResourceRE.findall(self.fileText)):
#print(match)
declareClassText = match
declareClassName = declareClassText
#print("%s" % includeClassText)
Utilities.myPrint("Extracted resource class declaration: " + declareClassText)
declareResourceObj = SourceModel.SM_IncludeResource.SM_IncludeResource(declareClassText, declareClassName)
declareClassList.append(declareResourceObj)
return declareClassList
def extractElementText(self, initialString):
compiledRE1 = re.compile(r'\{')
compiledRE2 = re.compile(r'\}')
curBracketCount = len(compiledRE1.findall(initialString)) - len(compiledRE2.findall(initialString))
index = self.fileText.find(initialString)
if index < 0:
return initialString, 0, len(initialString)
curIndex = index + len(initialString) + 1
if curBracketCount == 0:
#And now we need to find the corresponding ')' to avoid any errors where curly brackets are matched
#in the parameters itself.
found = False
while curIndex < len(self.fileText) and not found:
if self.fileText[curIndex] == ')':
found = True
curIndex +=1
#This is to find the first "{" since currently there is no { which may happen in case of multi-line class def
found = False
while curIndex < len(self.fileText) and not found:
if self.fileText[curIndex] == '{':
found = True
curBracketCount = 1
curIndex += 1
while curBracketCount > 0 and curIndex < len(self.fileText):
if self.fileText[curIndex] == '}':
curBracketCount -= 1
if self.fileText[curIndex] == '{':
curBracketCount += 1
curIndex +=1
return self.fileText[index:curIndex], index, curIndex
def getElementObject(self, elementText, regex):
if regex == SMCONSTS.CLASS_REGEX:
return SourceModel.SM_Class.SM_Class(elementText)
if regex == SMCONSTS.DEFINE_REGEX:
return SourceModel.SM_Define.SM_Define(elementText)
if regex == SMCONSTS.EXEC_REGEX:
return SourceModel.SM_Exec.SM_Exec(elementText)
if regex == SMCONSTS.FILE_REGEX:
return SourceModel.SM_FileResource.SM_FileResource(elementText)
if regex == SMCONSTS.PACKAGE_REGEX:
return SourceModel.SM_PackageResource.SM_PackageResource(elementText)
if regex == SMCONSTS.SERVICE_REGEX:
return SourceModel.SM_ServiceResource.SM_ServiceResource(elementText)
if regex == SMCONSTS.DECLARE_INCLUDE_REGEX or regex == SMCONSTS.DECLARE_RESOURCE_REGEX:
return SourceModel.SM_IncludeResource.SM_IncludeResource(elementText)
if regex == SMCONSTS.IF_REGEX:
return SourceModel.SM_IfStmt.SM_IfStmt(elementText)
if regex == SMCONSTS.CASE_REGEX:
return SourceModel.SM_CaseStmt.SM_CaseStmt(elementText)
if regex == SMCONSTS.USER_REGEX:
return SourceModel.SM_User.SM_User(elementText)
def sort(self, exClassElementList):
result = []
while len(exClassElementList) > 0:
largest = self.findLargest(exClassElementList)
result.append(largest)
exClassElementList.remove(largest)
return result
def findLargest(self, exClassElementList):
if len(exClassElementList) > 0:
largest = exClassElementList[0]
for item in exClassElementList:
                if (item.endIndex - item.startIndex) > (largest.endIndex - largest.startIndex):
largest = item
return largest
def filterOutInnerElements(self, exClassElementList):
filteredList = []
exClassElementList = self.sort(exClassElementList)
for element in exClassElementList:
found = False
for filteredItem in filteredList:
if element.startIndex >= filteredItem.startIndex and element.endIndex <= filteredItem.endIndex:
found = True
break
if found == False:
filteredList.append(element)
classElementList = []
for item in filteredList:
classElementList.append(item.elementObj)
return classElementList
def getMaxNestingDepth(self):
maxNestingDepth = 0
curIndex = 0
curBracketCount = 0
while curIndex < len(self.fileText):
if self.fileText[curIndex] == '}':
curBracketCount -= 1
if self.fileText[curIndex] == '{':
curBracketCount += 1
if curBracketCount > maxNestingDepth:
maxNestingDepth = curBracketCount
curIndex +=1
return maxNestingDepth
def getHardCodedStatments(self):
compiledRE = re.compile(SMCONSTS.HARDCODED_VALUE_REGEX)
hardCodedStmtList = compiledRE.findall(self.fileText)
filteredList = []
for item in hardCodedStmtList:
#print(item)
if not (item.__contains__("$") or item.__contains__("Package") or item.__contains__("Service") \
or item.__contains__("File")):
filteredList.append(item)
#print(filteredList)
return filteredList
def getClassHierarchyInfo(self):
classDecls = self.getClassDeclarationList()
classList = []
parentClassList = []
for aClass in classDecls:
classes, pClasses = aClass.getClassHierarchyInfo()
if len(classes) > 0:
classList.append(classes)
if len(pClasses) > 0:
parentClassList.append(pClasses)
return classList, parentClassList
def getNodeDeclarations(self):
compiledRE = re.compile(SMCONSTS.NODE_REGEX)
nodeResourceList = []
for match in (compiledRE.findall(self.fileText)):
nodeResourceText = self.extractResourceText(match)
Utilities.myPrint("Extracted node declaration: " + nodeResourceText)
nodeResourceObj = SourceModel.SM_Node.SM_Node(nodeResourceText)
nodeResourceList.append(nodeResourceObj)
return nodeResourceList
class ExElement(object):
def __init__(self, elementObj, startIndex, endIndex):
self.elementObj = elementObj
self.startIndex = startIndex
self.endIndex = endIndex
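# Added usage sketch (assumes the SourceModel package and its regex constants are importable;
# the Puppet snippet is made up): SM_File can be fed source text directly via setText and then
# queried for simple size and nesting metrics.
if __name__ == "__main__":
    _sm = SM_File()
    _sm.setText('class nginx {\n  package { "nginx":\n    ensure => installed,\n  }\n}\n')
    print(_sm.getLinesOfCode(), _sm.getMaxNestingDepth())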
|
11492315
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import xgboost as xgb
from xgboost import XGBClassifier, XGBRegressor
from xgboost import plot_importance
from catboost import CatBoostRegressor
from matplotlib import pyplot
import shap
from time import time
from tqdm import tqdm_notebook as tqdm
from collections import Counter
from scipy import stats
import lightgbm as lgb
from sklearn.metrics import cohen_kappa_score, mean_squared_error
from sklearn.model_selection import KFold, StratifiedKFold
import gc
import json
from random import choice
import tensorflow as tf
from sklearn.preprocessing import StandardScaler, MinMaxScaler, OneHotEncoder
from typing import Dict  # used by the per-session counter annotations below
pd.set_option('display.max_columns', 1000)
# fast cappa eval function for lgb
def eval_qwk_lgb_regr(y_true, y_pred):
dist = Counter(reduce_train['accuracy_group'])
for k in dist:
dist[k] /= len(reduce_train)
reduce_train['accuracy_group'].hist()
acum = 0
bound = {}
for i in range(3):
        acum += dist[i]
bound[i] = np.percentile(y_pred, acum * 100)
def classify(x):
if x <= bound[0]:
return 0
elif x<= bound[1]:
return 1
elif x <= bound[2]:
return 2
else :
return 3
y_pred = np.array(list(map(classify, y_pred))).reshape(y_true.shape)
return 'cappa', cohen_kappa_score(y_true, y_pred, weights = 'quadratic'), True
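# Added illustration (standalone, made-up numbers): the percentile-threshold trick used above
# turns continuous regression outputs into the 0-3 accuracy groups so that the predicted class
# distribution matches the training label distribution.
def _demo_percentile_rounding():
    preds = np.array([0.1, 0.4, 0.9, 1.6, 2.2, 2.9])
    train_dist = {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25}  # assumed label frequencies
    acum, bound = 0.0, {}
    for i in range(3):
        acum += train_dist[i]
        bound[i] = np.percentile(preds, acum * 100)
    return [0 if p <= bound[0] else 1 if p <= bound[1] else 2 if p <= bound[2] else 3 for p in preds]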
def cohenkappa(y_pred, y):
y = y.get_label().astype("int")
    y_pred = y_pred.reshape((4, -1)).argmax(axis = 0)
    loss = cohen_kappa_score(y, y_pred, weights = 'quadratic')
return "cappa", loss, True
def read_data():
print('Reading train.csv file...')
train = pd.read_csv('train.csv')
print('Reading test.csv file...')
test = pd.read_csv('test.csv')
print('Reading train_labels.csv file...')
train_labels = pd.read_csv('train_labels.csv')
print('Reading specs.csv file...')
specs = pd.read_csv('specs.csv')
print('Reading sample_submission.csv file...')
    sample_submission = pd.read_csv('sample_submission.csv')
return train, test, train_labels, specs, sample_submission
def encode_title(train, test, train_labels):
# encode title
train['title_event_code'] = list(map(lambda x,y : str(x) + '_' +str(y), train['title'], train['event_code']))
test['title_event_code'] = list(map(lambda x,y : str(x) + '_' + str(y), test['title'], test['event_code']))
all_title_event_code = list(set(train['title_event_code'].unique()). union(test["title_event_code"].unique()))
# make a list with all unique "titles"
list_of_user_activities = list(set(train['title'].unique()).union(set(test['title'].unique())))
# make a list with all the unique 'event code'
list_of_event_code = list(set(train['event_code'].unique()).union(set(test['event_code'].unique())))
list_of_event_id = list(set(train['event_id'].unique()).union(set(test['event_id'].unique())))
# make a list with all the unique worlds
list_of_worlds = list(set(train['world'].unique()).union(set(test['world'].unique())))
    # create a dictionary enumerating the titles
activities_map = dict(zip(list_of_user_activities, np.arange(len(list_of_user_activities))))
activities_labels = dict(zip(np.arange(len(list_of_user_activities)), list_of_user_activities))
activities_world = dict(zip(list_of_worlds, np.arange(len(list_of_worlds))))
assess_titles = list(set(train[train['type'] == 'Assessment']['title'].value_counts().index).union(
set(test[test['type'] == 'Assessment']['title'].value_counts().index)))
# replace the text titles with numbers
train['title'] = train['title'].map(activities_map)
test['title'] = test['title'].map(activities_map)
train['world'] = train['world'].map(activities_world)
test['world'] = test['world'].map(activities_world)
train_labels['title'] = train_labels['title'].map(activities_map)
win_code = dict(zip(activities_map.values(), (4100 * np.ones(len(activities_map))).astype('int')))
win_code[activities_map['Bird Measurer (Assessment)']] = 4110
# convert text into datetime
train['timestamp'] = pd.to_datetime(train['timestamp'])
test['timestamp'] = pd.to_datetime(test['timestamp'])
return train, test, train_labels, win_code, list_of_user_activities, list_of_event_code, activities_labels, assess_titles, list_of_event_id, all_title_event_code
# this is the function that convert the raw data into processed features
def get_data(user_sample, test_set=False):
# Constants and parameters declaration
last_activity = 0
user_activities_count = {'Clip': 0, 'Activity': 0, 'Assessment': 0, 'Game': 0}
# new features: time spent in each activity
last_session_time_sec = 0
accuracy_groups = {0: 0, 1: 0, 2: 0, 3: 0}
all_assessments = []
accumulated_accuracy_group = 0
accumulated_accuracy = 0
accumulated_correct_attempts = 0
accumulated_uncorrect_attempts = 0
accumulated_actions = 0
counter = 0
time_first_activity = float(user_sample['timestamp'].values[0])
durations = []
last_accuracy_title = {'acc_' + title: -1 for title in assess_titles}
event_code_count: Dict[str, int] = {ev: 0 for ev in list_of_event_code}
event_id_count: Dict[str, int] = {eve: 0 for eve in list_of_event_id}
title_count: Dict[str, int] = {eve: 0 for eve in activities_labels.values()}
title_event_code_count: Dict[str, int] = {t_eve: 0 for t_eve in all_title_event_code}
# last features
sessions_count = 0
    # iterates through each session of one installation_id
for i, session in user_sample.groupby('game_session', sort=False):
# i = game_session_id
# session is a DataFrame that contain only one game_session
# get some sessions information
session_type = session['type'].iloc[0]
session_title = session['title'].iloc[0]
session_title_text = activities_labels[session_title]
        # for each assessment, and only this kind of session, the features below are processed
        # and a record is generated
if (session_type == 'Assessment') & (test_set or len(session) > 1):
# search for event_code 4100, that represents the assessments trial
all_attempts = session.query(f'event_code == {win_code[session_title]}')
# then, check the numbers of wins and the number of losses
true_attempts = all_attempts['event_data'].str.contains('true').sum()
false_attempts = all_attempts['event_data'].str.contains('false').sum()
            # copy a dict to use as a feature template; it's initialized with some items:
            # {'Clip':0, 'Activity': 0, 'Assessment': 0, 'Game':0}
features = user_activities_count.copy()
features.update(last_accuracy_title.copy())
features.update(event_code_count.copy())
features.update(event_id_count.copy())
features.update(title_count.copy())
features.update(title_event_code_count.copy())
features.update(last_accuracy_title.copy())
features['installation_session_count'] = sessions_count
variety_features = [('var_event_code', event_code_count),
('var_event_id', event_id_count),
('var_title', title_count),
('var_title_event_code', title_event_code_count)]
for name, dict_counts in variety_features:
arr = np.array(list(dict_counts.values()))
features[name] = np.count_nonzero(arr)
# get installation_id for aggregated features
features['installation_id'] = session['installation_id'].iloc[-1]
# add title as feature, remembering that title represents the name of the game
features['session_title'] = session['title'].iloc[0]
# the 4 lines below add the feature of the history of the trials of this player
# this is based on the all time attempts so far, at the moment of this assessment
features['accumulated_correct_attempts'] = accumulated_correct_attempts
features['accumulated_uncorrect_attempts'] = accumulated_uncorrect_attempts
accumulated_correct_attempts += true_attempts
accumulated_uncorrect_attempts += false_attempts
# the time spent in the app so far
if durations == []:
features['duration_mean'] = 0
features['duration_std'] = 0
else:
features['duration_mean'] = np.mean(durations)
features['duration_std'] = np.std(durations)
durations.append((session.iloc[-1, 2] - session.iloc[0, 2]).seconds)
            # the accuracy is the all-time wins divided by the all-time attempts
features['accumulated_accuracy'] = accumulated_accuracy / counter if counter > 0 else 0
accuracy = true_attempts / (true_attempts + false_attempts) if (true_attempts + false_attempts) != 0 else 0
accumulated_accuracy += accuracy
last_accuracy_title['acc_' + session_title_text] = accuracy
# a feature of the current accuracy categorized
# it is a counter of how many times this player was in each accuracy group
if accuracy == 0:
features['accuracy_group'] = 0
elif accuracy == 1:
features['accuracy_group'] = 3
elif accuracy == 0.5:
features['accuracy_group'] = 2
else:
features['accuracy_group'] = 1
features.update(accuracy_groups)
accuracy_groups[features['accuracy_group']] += 1
# mean of the all accuracy groups of this player
features['accumulated_accuracy_group'] = accumulated_accuracy_group / counter if counter > 0 else 0
accumulated_accuracy_group += features['accuracy_group']
# how many actions the player has done so far, it is initialized as 0 and updated some lines below
features['accumulated_actions'] = accumulated_actions
            # there are some conditions for these features to be inserted into the dataset
            # if it's a test set, all sessions belong to the final dataset
            # if it's a train set, it must pass this clause: session.query(f'event_code == {win_code[session_title]}')
            # that means an event_code 4100 or 4110 must exist
if test_set:
all_assessments.append(features)
elif true_attempts + false_attempts > 0:
all_assessments.append(features)
counter += 1
sessions_count += 1
        # this piece counts how many actions were made in each event_code so far
def update_counters(counter: dict, col: str):
num_of_session_count = Counter(session[col])
for k in num_of_session_count.keys():
x = k
if col == 'title':
x = activities_labels[k]
counter[x] += num_of_session_count[k]
return counter
event_code_count = update_counters(event_code_count, "event_code")
event_id_count = update_counters(event_id_count, "event_id")
title_count = update_counters(title_count, 'title')
title_event_code_count = update_counters(title_event_code_count, 'title_event_code')
# counts how many actions the player has done so far, used in the feature of the same name
accumulated_actions += len(session)
if last_activity != session_type:
user_activities_count[session_type] += 1
            last_activity = session_type
    # if it's the test_set, only the last assessment must be predicted, the previous ones are discarded
if test_set:
return all_assessments[-1]
    # in the train_set, all assessments go into the dataset
return all_assessments
# data split
def get_train_and_test(train, test):
compiled_train = []
compiled_test = []
for i, (ins_id, user_sample) in tqdm(enumerate(train.groupby('installation_id', sort = False)), total = 17000):
compiled_train += get_data(user_sample)
for ins_id, user_sample in tqdm(test.groupby('installation_id', sort=False), total=1000):
test_data = get_data(user_sample, test_set=True)
compiled_test.append(test_data)
reduce_train = pd.DataFrame(compiled_train)
reduce_test = pd.DataFrame(compiled_test)
categoricals = ['session_title']
return reduce_train, reduce_test, categoricals
class Base_Model(object):
def __init__(self, train_df, test_df, features, categoricals = [], n_splits = 5, verbose = True):
self.train_df = train_df
self.test_df = test_df
self.features = features
self.categoricals = categoricals
        self.target = 'accuracy_group'
        self.n_splits = n_splits
        self.cv = self.get_cv()
self.verbose = verbose
self.params = self.get_params()
self.y_pred, self.score, self.model = self.fit()
def train_model(self, train_set, val_set):
raise NotImplementedError
def get_cv(self):
        cv = StratifiedKFold(n_splits = self.n_splits, shuffle = True, random_state = 42)
return cv.split(self.train_df, self.train_df[self.target])
def get_params(self):
raise NotImplementedError
def convert_dataset(self, x_train, y_train, x_val, y_val):
raise NotImplementedError
def convert_x(self, x):
return x
def fit(self):
        oof_pred = np.zeros((len(self.train_df), ))
        y_pred = np.zeros((len(self.test_df), ))
for fold, (train_idx, val_idx) in enumerate(self.cv):
x_train, x_val = self.train_df[self.features].iloc[train_idx], self.train_df[self.features].iloc[val_idx]
y_train, y_val = self.train_df[self.target][train_idx], self.train_df[self.target][val_idx]
train_set, val_set = self.convert_dataset(x_train, y_train, x_val, y_val)
model = self.train_model(train_set, val_set)
conv_x_val = self.convert_x(x_val)
oof_pred[val_idx] = model.predict(conv_x_val).reshape(oof_pred[val_idx].shape)
x_test = self.convert_x(self.test_df[self.features])
y_pred += model.predict(x_test).reshape(y_pred.shape) / self.n_splits
print('Partial score of fold {} is: {}'.format(fold, eval_qwk_lgb_regr(y_val, oof_pred[val_idx])[1]))
_, loss_score, _ = eval_qwk_lgb_regr(self.train_df[self.target], oof_pred)
if self.verbose:
print('Our oof cohen kappa score is: ', loss_score)
return y_pred, loss_score, model
# light GBM model
class Lgb_Model(Base_Model):
def train_model(self, train_set, val_set):
verbosity = 100 if self.verbose else 0
return lgb.train(self.params, train_set, valid_sets=[train_set, val_set], verbose_eval=verbosity)
def convert_dataset(self, x_train, y_train, x_val, y_val):
train_set = lgb.Dataset(x_train, y_train, categorical_feature=self.categoricals)
val_set = lgb.Dataset(x_val, y_val, categorical_feature=self.categoricals)
return train_set, val_set
def get_params(self):
params = {'n_estimators': 5000,
'boosting_type': 'gbdt',
'objective': 'regression',
'metric': 'rmse',
'subsample': 0.75,
'subsample_freq': 1,
'learning_rate': 0.01,
'feature_fraction': 0.9,
'max_depth': 15,
'lambda_l1': 1,
'lambda_l2': 1,
'early_stopping_rounds': 100
}
return params
class Xgb_Model(Base_Model):
def train_model(self, train_set, val_set):
verbosity = 100 if self.verbose else 0
return xgb.train(self.params, train_set, num_boost_round = 5000, evals = [(train_set, 'train'), (val_set, 'val')], verbose_eval = verbosity, early_stopping_rounds = 100)
def convert_dataset(self, x_train, y_train, x_val, y_val):
        train_set = xgb.DMatrix(x_train, y_train)
val_set = xgb.DMatrix(x_val, y_val)
return train_set, val_set
def convert_x(self, x):
return xgb.DMatrix(x)
def get_params(self):
params = {'colsample_bytree': 0.8,
'learning_rate': 0.01,
'max_depth': 10,
'subsample': 1,
'objective': 'reg:squarederror',
# 'eval_metric':'rmse',
'min_child_weight': 3,
'gamma': 0.25,
'n_estimators': 5000}
return params
# CatBoost Model
class Catb_Model(Base_Model):
def train_model(self, train_set, val_set):
verbosity = 100 if self.verbose else 0
clf = CatBoostRegressor(**self.params)
clf.fit(train_set['X'],
train_set['y'],
eval_set = (val_set['X'], val_set['y']),
verbose = verbosity,
cat_features = self.categoricals)
return clf
def convert_dataset(self, x_train, y_train, x_val, y_val):
        train_set = {'X': x_train, 'y': y_train}
val_set = {'X': x_val, 'y' : y_val}
return train_set, val_set
def get_params(self):
params = {'loss_function': 'RMSE',
'task_type': "CPU",
'iterations': 5000,
'od_type': "Iter",
'depth': 10,
'colsample_bylevel': 0.5,
'early_stopping_rounds': 300,
'l2_leaf_reg': 18,
'random_seed': 42,
'use_best_model': True
}
return params
class Nn_Model(Base_Model):
    def __init__(self, train_df, test_df, features, categoricals = [], n_splits = 5, verbose = True):
features = features.copy()
if len(categoricals) > 0 :
for cat in categoricals:
enc = OneHotEncoder()
train_cats = enc.fit_transform(train_df[[cat]])
test_cats = enc.transform(test_df[[cat]])
                cat_cols = ['{}_{}'.format(cat, str(col)) for col in enc.active_features_]
                features += cat_cols
train_cats = pd.DataFrame(train_cats.toarray(), columns = cat_cols)
test_cats = pd.DataFrame(test_cats.toarray(), columns = cat_cols)
train_df = pd.concat([train_df, train_cats], axis = 1)
test_df = pd.concat([test_df, test_cats], axis = 1)
scaler = MinMaxScaler()
train_df[features] = scaler.fit_transform(train_df[features])
        test_df[features] = scaler.transform(test_df[features])
print(train_df[features].shape)
super().__init__(train_df, test_df, features, categoricals, n_splits, verbose)
def train_model(self, train_set, val_set):
verbosity = 100 if self.verbose else 0
model = tf.keras.models.Sequential([
            tf.keras.layers.Input(shape = (train_set['X'].shape[1],)),
            tf.keras.layers.Dense(200, activation = 'relu'),
tf.keras.layers.LayerNormalization(),
tf.keras.layers.Dropout(0.3),
tf.keras.layers.Dense(100, activation = 'relu'),
tf.keras.layers.LayerNormalization(),
tf.keras.layers.Dropout(0.3),
tf.keras.layers.Dense(50, activation = 'relu'),
tf.keras.layers.LayerNormalization(),
tf.keras.layers.Dropout(0.3),
tf.keras.layers.Dense(25, activation = 'relu'),
tf.keras.layers.LayerNormalization(),
tf.keras.layers.Dropout(0.3),
tf.keras.layers.Dense(1, activation = 'relu')
])
        model.compile(optimizer = tf.keras.optimizers.Adam(learning_rate = 4e-4), loss = 'mse')
print(model.summary())
save_best = tf.keras.callbacks.ModelCheckpoint('nn_model.w8', save_weights_only=True, save_best_only=True,
verbose=1)
early_stop = tf.keras.callbacks.EarlyStopping(patience=20)
model.fit(train_set['X'],
train_set['y'],
validation_data=(val_set['X'], val_set['y']),
epochs=100,
callbacks=[save_best, early_stop])
model.load_weights('nn_model.w8')
return model
def convert_dataset(self, x_train, y_train, x_val, y_val):
train_set = {'X': x_train, 'y': y_train}
val_set = {'X': x_val, 'y': y_val}
return train_set, val_set
def get_params(self):
return None
class Cnn_Model(Base_Model):
def __init__(self, train_df, test_df, features, categoricals = [], n_splits = 5, verbose = True):
features = features.copy()
if len(categoricals) > 0:
for cat in categoricals:
enc = OneHotEncoder()
train_cats = enc.fit_transform(train_df[[cat]])
test_cats = enc.transform(test_df[[cat]])
cat_cols = ['{}_{}'.format(cat, str(col)) for col in enc.active_features_]
features += cat_cols
train_cats = pd.DataFrame(train_cats.toarray(), columns=cat_cols)
test_cats = pd.DataFrame(test_cats.toarray(), columns=cat_cols)
train_df = pd.concat([train_df, train_cats], axis=1)
test_df = pd.concat([test_df, test_cats], axis=1)
# scaler
scalar = MinMaxScaler()
train_df[features] = scalar.fit_transform(train_df[features])
test_df[features] = scalar.transform(test_df[features])
self.create_feat_2d(features)
super().__init__(train_df, test_df, features, categoricals, n_splits, verbose)
def create_feat_2d(self, features, n_feats_repeat=50):
self.n_feats = len(features)
self.n_feats_repeat = n_feats_repeat
self.mask = np.zeros((self.n_feats_repeat, self.n_feats), dtype=np.int32)
for i in range(self.n_feats_repeat):
l = list(range(self.n_feats))
for j in range(self.n_feats):
c = l.pop(choice(range(len(l))))
self.mask[i, j] = c
self.mask = tf.convert_to_tensor(self.mask)
print(self.mask.shape)
def train_model(self, train_set, val_set):
verbosity = 100 if self.verbose else 0
        inp = tf.keras.layers.Input(shape=(self.n_feats,))
x = tf.keras.layers.Lambda(lambda x: tf.gather(x, self.mask, axis=1))(inp)
x = tf.keras.layers.Reshape((self.n_feats_repeat, self.n_feats, 1))(x)
x = tf.keras.layers.Conv2D(18, (50, 50), strides=50, activation='relu')(x)
x = tf.keras.layers.Flatten()(x)
# x = tf.keras.layers.Dense(200, activation='relu')(x)
# x = tf.keras.layers.LayerNormalization()(x)
# x = tf.keras.layers.Dropout(0.3)(x)
x = tf.keras.layers.Dense(100, activation='relu')(x)
x = tf.keras.layers.LayerNormalization()(x)
x = tf.keras.layers.Dropout(0.3)(x)
x = tf.keras.layers.Dense(50, activation='relu')(x)
x = tf.keras.layers.LayerNormalization()(x)
x = tf.keras.layers.Dropout(0.3)(x)
out = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inp, out)
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4), loss='mse')
print(model.summary())
save_best = tf.keras.callbacks.ModelCheckpoint('nn_model.w8', save_weights_only=True,
save_best_only=True, verbose=1)
early_stop = tf.keras.callbacks.EarlyStopping(patience=20)
model.fit(train_set['X'],
train_set['y'],
validation_data=(val_set['X'], val_set['y']),
epochs=100,
callbacks=[save_best, early_stop])
model.load_weights('nn_model.w8')
return model
def convert_dataset(self, x_train, y_train, x_val, y_val):
train_set = {'X': x_train, 'y': y_train}
val_set = {'X': x_val, 'y': y_val}
return train_set, val_set
def get_params(self):
return None
# read data
train, test, train_labels, specs, sample_submission = read_data()
# get useful dicts with the encoding maps
train, test, train_labels, win_code, list_of_user_activities, list_of_event_code, activities_labels, assess_titles, list_of_event_id, all_title_event_code = encode_title(
train, test, train_labels)
# transform function to get the train and test set
reduce_train, reduce_test, categoricals = get_train_and_test(train, test)
# feature engineering function
features = reduce_train.loc[(reduce_train.sum(axis=1) != 0), (reduce_train.sum(axis=0) != 0)].columns # delete useless columns
features = [x for x in features if x not in ['accuracy_group', 'installation_id']]
counter = 0
to_remove = []
for feat_a in features:
for feat_b in features:
if feat_a != feat_b and feat_a not in to_remove and feat_b not in to_remove:
c = np.corrcoef(reduce_train[feat_a], reduce_train[feat_b])[0][1]
if c > 0.995:
counter += 1
to_remove.append(feat_b)
print('{}: FEAT_A: {} FEAT_B: {} - Correlation: {}'.format(counter, feat_a, feat_b, c))
to_exclude = []
ajusted_test = reduce_test.copy()
for feature in ajusted_test.columns:
    if feature not in ['accuracy_group', 'installation_id', 'session_title']:
data = reduce_train[feature]
train_mean = data.mean()
data = ajusted_test[feature]
test_mean = data.mean()
try:
error = stract_hists(feature, adjust=True)
ajust_factor = train_mean / test_mean
if ajust_factor > 10 or ajust_factor < 0.1:# or error > 0.01:
to_exclude.append(feature)
print(feature, train_mean, test_mean, error)
else:
ajusted_test[feature] *= ajust_factor
except:
to_exclude.append(feature)
print(feature, train_mean, test_mean)
features = [x for x in features if x not in (to_exclude + to_remove)]
# modeling
#cat_model = Catb_Model(reduce_train, ajusted_test, features, categoricals=categoricals)
lgb_model = Lgb_Model(reduce_train, ajusted_test, features, categoricals=categoricals)
xgb_model = Xgb_Model(reduce_train, ajusted_test, features, categoricals=categoricals)
#cnn_model = Cnn_Model(reduce_train, ajusted_test, features, categoricals=categoricals)
nn_model = Nn_Model(reduce_train, ajusted_test, features, categoricals=categoricals)
weights = {'lgb': 0.60, 'cat': 0, 'xgb': 0.20, 'nn': 0.20}
final_pred = (lgb_model.y_pred * weights['lgb']) + (xgb_model.y_pred * weights['xgb']) + (nn_model.y_pred * weights['nn'])
#final_pred = cnn_model.y_pred
print(final_pred.shape)
dist = Counter(reduce_train['accuracy_group'])
for k in dist:
dist[k] /= len(reduce_train)
reduce_train['accuracy_group'].hist()
acum = 0
bound = {}
for i in range(3):
acum += dist[i]
bound[i] = np.percentile(final_pred, acum * 100)
print(bound)
def classify(x):
if x <= bound[0]:
return 0
elif x <= bound[1]:
return 1
elif x <= bound[2]:
return 2
else:
return 3
final_pred = np.array(list(map(classify, final_pred)))
sample_submission['accuracy_group'] = final_pred.astype(int)
sample_submission.to_csv('submission.csv', index=False)
sample_submission['accuracy_group'].value_counts(normalize=True)
sample_submission.to_csv('submission_1', header = False, index = False)
|
11492324
|
import pytest
from collections import namedtuple
from stack.kvm import Hypervisor, VmException
from unittest.mock import create_autospec, patch
from stack.commands import DatabaseConnection
from stack.argument_processors.vm import VmArgProcessor
from stack.commands.set.host.power.imp_kvm import Implementation
from stack.commands.set.host.power import Command
class TestKVMImp:
mock_tuple = namedtuple('output', 'out debug success')
def mock_vm_exception(self, *args):
raise VmException('Oh no something went wrong!')
@pytest.fixture
def kvm_imp(self):
"""
A fixture for mocking the Implementation instance
"""
mock_command = create_autospec(
spec = Command,
instance = True
)
mock_command.db = create_autospec(
spec = DatabaseConnection,
spec_set = True,
instance = True
)
return Implementation(mock_command)
@patch.object(target = Implementation, attribute = 'get_hypervisor_by_name', autospec = True)
def test_no_kvm_host(self, mock_get_hypervisor_name, kvm_imp):
mock_get_hypervisor_name.return_value = None
output = kvm_imp.run(args = ['foo', 'on', self.mock_tuple])
assert output == self.mock_tuple('', 'No KVM host specified for virtual machine "foo"', False)
@patch('stack.kvm.Hypervisor', autospec = True)
def test_kvm_imp_on(self, mock_hypervisor, kvm_imp):
hypervisor = mock_hypervisor.return_value.__enter__.return_value
hypervisor.start_domain.return_value = None
output = kvm_imp.run(args = ['foo', 'on', self.mock_tuple])
hypervisor.start_domain.assert_called_once_with('foo')
assert output == self.mock_tuple('', '', True)
@patch('stack.kvm.Hypervisor', autospec = True)
def test_kvm_imp_off(self, mock_hypervisor, kvm_imp):
hypervisor = mock_hypervisor.return_value.__enter__.return_value
hypervisor.stop_domain.return_value = None
output = kvm_imp.run(args = ['foo', 'off', self.mock_tuple])
hypervisor.stop_domain.assert_called_once_with('foo')
assert output == self.mock_tuple('', '', True)
@patch('stack.kvm.Hypervisor', autospec = True)
def test_kvm_imp_reset(self, mock_hypervisor, kvm_imp):
hypervisor = mock_hypervisor.return_value.__enter__.return_value
hypervisor.start_domain.return_value = None
hypervisor.stop_domain.return_value = None
output = kvm_imp.run(args = ['foo', 'reset', self.mock_tuple])
hypervisor.stop_domain.assert_called_once_with('foo')
hypervisor.start_domain.assert_called_once_with('foo')
assert output == self.mock_tuple('', '', True)
VM_STATUS_DEFINED = [
([{'status': 'on'}], mock_tuple('Chassis Power is on', '', True)),
([{'status': 'off'}], mock_tuple('Chassis Power is off', '', True)),
([{'status': 'undefined'}], mock_tuple('', f'Cannot find host foo defined on hypervisor', False)),
([{'status': 'Connection failed'}], mock_tuple('', f'Cannot find host foo defined on hypervisor', False))
]
@patch('stack.kvm.Hypervisor', autospec = True)
@pytest.mark.parametrize('return_status, output', VM_STATUS_DEFINED)
def test_kvm_imp_status(self, mock_hypervisor, kvm_imp, return_status, output):
kvm_imp.owner.call.return_value = return_status
run_output = kvm_imp.run(args = ['foo', 'status', self.mock_tuple])
kvm_imp.owner.call.assert_called_once()
assert all([
output.out in run_output.out,
output.debug in run_output.debug,
output.success == run_output.success
])
@patch('stack.kvm.Hypervisor', autospec = True)
def test_kvm_imp_vm_exception(self, mock_hypervisor, kvm_imp):
hypervisor = mock_hypervisor.return_value.__enter__.return_value
hypervisor.start_domain.side_effect = self.mock_vm_exception
output = kvm_imp.run(args = ['foo', 'on', self.mock_tuple])
hypervisor.start_domain.assert_called_once_with('foo')
assert output == self.mock_tuple('', 'Oh no something went wrong!', False)
|
11492346
|
import argparse
import collections
import os
import pickle
import pytest
import re
import signal
import subprocess
import sys
import time
import numpy as np
import cntk as C
TIMEOUT_SECONDS = 300
NUM_WORKERS = 4
NUM_BATCHES = 10
BATCH_SIZE_PER_WORKER = 20
def mpiexec_execute(script, mpiexec_params, params, timeout_seconds=TIMEOUT_SECONDS):
cmd = ['mpiexec'] + mpiexec_params + ['python', script] + params
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
if sys.version_info[0] < 3:
out = p.communicate()[0]
else:
try:
out = p.communicate(timeout=timeout_seconds)[0] # in case we have a hang
except subprocess.TimeoutExpired:
os.kill(p.pid, signal.CTRL_C_EVENT)
raise RuntimeError('Timeout in mpiexec, possibly hang')
str_out = out.decode(sys.getdefaultencoding())
return str_out
BlockMomentumConfig = collections.namedtuple('BlockMomentumConfig', 'block_momentum_as_time_constant block_learning_rate block_size distributed_after')
DataParallelConfig = collections.namedtuple('DataParallelConfig', 'num_quantization_bits distributed_after')
class SimpleTrainer:
def __init__(self, mode, config):
self.create_model()
self.create_trainer(mode, config)
def create_model(self):
self.input_dim = 1000
self.embed_dim = 30
i = C.input_variable((self.input_dim,), is_sparse=True)
self.p = C.parameter(shape=(self.input_dim, self.embed_dim), init=1)
o = C.times(i, self.p)
self.z = C.reduce_sum(o)
def create_trainer(self, mode, config):
learner = self.create_distributed_learner(mode, config)
self.trainer = C.Trainer(self.z, (self.z, None), learner, []) if learner else None
def create_distributed_learner(self, mode, config):
local_learner = C.sgd(self.z.parameters, C.learning_parameter_schedule_per_sample(0.01))
try:
if mode == 'data_parallel':
if config is None:
config = DataParallelConfig(num_quantization_bits=32, distributed_after=0)
learner = C.data_parallel_distributed_learner(local_learner, num_quantization_bits=config.num_quantization_bits, distributed_after=config.distributed_after)
elif mode == 'block_momentum':
if config is None:
# the default config to match data parallel SGD
config = BlockMomentumConfig(block_momentum_as_time_constant=0, block_learning_rate=1, block_size=NUM_WORKERS, distributed_after=0)
learner = C.block_momentum_distributed_learner(local_learner, block_momentum_as_time_constant=config.block_momentum_as_time_constant, block_learning_rate=config.block_learning_rate, block_size=config.block_size, distributed_after=config.distributed_after)
else:
learner = local_learner
except RuntimeError:
learner = None
return learner
def train_minibatch(self, input_indices):
data = C.Value.one_hot(input_indices, num_classes=self.input_dim)
self.trainer.train_minibatch(data)
def set_np_random_seed(rank, batch):
np.random.seed(rank + 10 * batch)
def distributed_worker(outdir, gpu, mode, config):
if gpu:
# test with only one GPU
C.try_set_default_device(C.gpu(0))
else:
# CPU sparse aggregation is not implemented, so turn it off
# note we only need to explicitly do this when running with CPU device on a GPU build
# For CPU build it's disabled by default
C.cntk_py.use_sparse_gradient_aggregation_in_data_parallel_sgd(False)
trainer = SimpleTrainer(mode, config)
for batch in range(NUM_BATCHES):
set_np_random_seed(C.Communicator.rank(), batch)
indices = (np.random.random((BATCH_SIZE_PER_WORKER,))*(trainer.input_dim-1)).astype(np.int)
trainer.train_minibatch(indices)
checkpoint_file = os.path.join(outdir, mode+str(batch))
trainer.trainer.save_checkpoint(checkpoint_file)
trainer.trainer.restore_from_checkpoint(checkpoint_file)
# save a checkpoint to force sync after last minibatch
trainer.trainer.save_checkpoint(os.path.join(outdir, mode+'_last'))
np.save(os.path.join(outdir, mode+str(C.Communicator.rank())), trainer.p.value)
TRAINING_SETTINGS = [
('data_parallel', None),
('block_momentum', None),
('block_momentum', BlockMomentumConfig(block_momentum_as_time_constant=4000, block_learning_rate=2, block_size=NUM_WORKERS*BATCH_SIZE_PER_WORKER*3, distributed_after=NUM_WORKERS*BATCH_SIZE_PER_WORKER*2)),
('data_parallel', DataParallelConfig(num_quantization_bits=1, distributed_after=NUM_WORKERS*BATCH_SIZE_PER_WORKER*2)),
]
@pytest.mark.parametrize("mode, config", TRAINING_SETTINGS)
def test_distributed_training_accuracy(tmpdir, device_id, mode, config):
ref_trainer = SimpleTrainer(None, None)
# test if mode is available
if not ref_trainer.create_distributed_learner(mode, config):
pytest.skip("unsupported distributed learner mode")
# run distributed training and check if all workers get the same model
launch_args = ['--outputdir', str(tmpdir), '--mode', mode]
if config:
config_filename = os.path.join(str(tmpdir),'config.pkl')
with open(config_filename, 'wb') as pkl:
pickle.dump(config, pkl)
launch_args += ['--config', config_filename]
if device_id >= 0:
launch_args += ['--gpu']
mpiexec_execute(__file__, ['-n', str(NUM_WORKERS)], launch_args)
p0 = np.load(os.path.join(str(tmpdir), mode+'0.npy'))
for rank in range(NUM_WORKERS):
p = np.load(os.path.join(str(tmpdir), mode+str(rank)+'.npy'))
assert np.allclose(p0, p)
# only compares with single worker with default config
if config is not None:
return
# reference training on single worker, by concatenating data on all workers
for batch in range(NUM_BATCHES):
indices = None
for rank in range(NUM_WORKERS):
set_np_random_seed(rank, batch)
rank_indices = (np.random.random((BATCH_SIZE_PER_WORKER,))*(ref_trainer.input_dim-1)).astype(np.int)
indices = np.concatenate([indices, rank_indices]) if indices is not None else rank_indices
ref_trainer.train_minibatch(indices)
assert np.allclose(p0, ref_trainer.p.value)
#mpiexec entrance
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-outputdir', '--outputdir')
parser.add_argument('-mode', '--mode')
parser.add_argument('-gpu', '--gpu', action='store_true')
parser.add_argument('-config', '--config', required=False, default=None)
args = vars(parser.parse_args())
config = None
if args['config'] is not None:
with open(args['config'], 'rb') as pkl:
config = pickle.load(pkl)
distributed_worker(args['outputdir'], args['gpu'], args['mode'], config)
C.Communicator.finalize()
|
11492350
|
name = 'medcat'
# Hacky patch to the built-in copy module because otherwise thinc.config.Config.copy will fail on Python <= 3.6.
# (fixed in Python 3.7: https://docs.python.org/3/whatsnew/3.7.html#re)
import sys # noqa
if sys.version_info.major == 3 and sys.version_info.minor <= 6:
import copy
import re
copy._deepcopy_dispatch[type(re.compile(''))] = lambda r, _: r # type: ignore
|
11492416
|
from django.urls import include
from django.views import defaults
__all__ = ['handler400', 'handler403', 'handler404', 'handler500', 'include']
handler400 = defaults.bad_request
handler403 = defaults.permission_denied
handler404 = defaults.page_not_found
handler500 = defaults.server_error
|
11492429
|
import tensorflow as tf
import numpy as np
import coremltools as ct
from coremltools.models.neural_network import quantization_utils
import coremltools.proto.FeatureTypes_pb2 as FeatureTypes_pb2
path = "/Users/william/Downloads/AttGAN_384/generator.pb"
# Load the protobuf file from the disk and parse it to retrieve the
# graph_def
with tf.io.gfile.GFile(path, "rb") as f:
graph_def = tf.compat.v1.GraphDef()
graph_def.ParseFromString(f.read())
# Import the graph_def into a new Graph
with tf.Graph().as_default() as graph:
tf.import_graph_def(graph_def, name="generator")
x = np.random.rand(1, 384, 384, 3)
y = np.ones([1, 13], dtype=float)
with tf.compat.v1.Session(graph = graph) as sess:
tf_out = sess.run('generator/xb:0',
feed_dict={'generator/xa:0': x, 'generator/b_:0':y})
mlmodel = ct.convert(graph, source="tensorflow",
inputs=[ct.TensorType(shape=x.shape), ct.TensorType(shape=y.shape)])
model_fp8 = quantization_utils.quantize_weights(mlmodel, nbits=8)
spec = model_fp8.get_spec()
input = spec.description.input[1]
input.type.imageType.colorSpace = FeatureTypes_pb2.ImageFeatureType.RGB
input.type.imageType.height = 384
input.type.imageType.width = 384
ct.utils.rename_feature(spec, 'generator/xa', 'input')
ct.utils.rename_feature(spec, 'generator/b_', 'style')
ct.utils.rename_feature(spec, 'generator/xb', 'output')
# ct.utils.save_spec(spec, "attgen8.mlmodel")
model_fp8._spec = spec
model_fp8.save("attgen8.mlmodel")
# Core ML model prediction
# coreml_out_dict = model_fp8.predict({"input" : x, "style": y}, useCPUOnly=True)
# coreml_out = list(coreml_out_dict.values())[0]
# np.testing.assert_allclose(tf_out, coreml_out, rtol=1e-3, atol=1e-2)
|
11492446
|
from generativepy.drawing import make_image, setup
from generativepy.color import Color
from generativepy.geometry import Circle, Square, Text
'''
Create clipped regions using the geometry module.
'''
def draw(ctx, width, height, frame_no, frame_count):
setup(ctx, width, height, width=5, background=Color(0.8))
# Create a circular clip region and draw some squares in it
ctx.save()
Circle(ctx).of_center_radius((1.9, 1.9), 1).clip()
Square(ctx).of_corner_size((1, 1), .8).fill(Color('red'))
Square(ctx).of_corner_size((1, 2), .8).fill(Color('green'))
Square(ctx).of_corner_size((2, 1), .8).fill(Color('blue'))
Square(ctx).of_corner_size((2, 2), .8).fill(Color('black'))
ctx.restore()
ctx.save()
Text(ctx).of("ABC", (1.5, 3.5)).font("Times").size(1.5).align_left().align_top().clip()
circles = [(2, 3.8, 'orange'), (2, 4.5, 'cyan'), (3, 3.8, 'green'),
(3, 4.5, 'purple'), (4, 3.8, 'yellow'), (4, 4.5, 'blue')]
for x, y, color in circles:
Circle(ctx).of_center_radius((x, y), 0.7).fill(Color(color))
ctx.restore()
make_image("/tmp/geometry-clip.png", draw, 500, 500)
|
11492484
|
import cv2
import numpy as np
from pyaffx import Affine
from _griffig import BoxData, OrthographicImage
from ..utility.image import draw_line, get_inference_image
from ..utility.model_data import ModelArchitecture
class Heatmap:
def __init__(self, inference, a_space=None):
self.inference = inference
if a_space is not None:
self.inference.a_space = a_space
def calculate_heat(self, reward, size_cropped, size_result):
size_reward_center = (reward.shape[1] / 2, reward.shape[2] / 2)
scale = self.inference.size_area_cropped[0] / self.inference.size_result[0] * ((size_cropped[0] - 30) / reward.shape[1])
a_space_idx = range(len(self.inference.a_space))
heat_values = np.zeros(size_result[::-1], dtype=np.float)
for i in a_space_idx:
a = self.inference.a_space[i]
rot_mat = cv2.getRotationMatrix2D(size_reward_center, -a * 180.0 / np.pi, scale)
rot_mat[0][2] += size_result[0] / 2 - size_reward_center[0]
rot_mat[1][2] += size_result[1] / 2 - size_reward_center[1]
heat_values += cv2.warpAffine(reward[i], rot_mat, size_result, borderValue=0)
norm = (5 * heat_values.max() + len(a_space_idx)) / 6
return (heat_values / norm * 255.0).astype(np.uint8)
@staticmethod
def get_background(image, use_rgb):
if len(image.mat.shape) >= 3 and image.mat.shape[-1] >= 3:
if use_rgb:
back = cv2.cvtColor(image.mat[:, :, :3], cv2.COLOR_RGB2GRAY)
return cv2.cvtColor(back, cv2.COLOR_GRAY2RGB)
return cv2.cvtColor(image.mat[:, :, 3:], cv2.COLOR_GRAY2RGB)
return cv2.cvtColor(image.mat, cv2.COLOR_GRAY2RGB)
def render(
self,
image: OrthographicImage,
object_image: OrthographicImage = None,
goal_image: OrthographicImage = None,
box_data: BoxData = None,
alpha=0.5,
use_rgb=True, # Otherwise depth
save_path=None,
reward_index=None,
draw_lateral=False,
draw_shifts=False,
draw_indices=None,
alpha_human=0.0,
):
input_images = self.inference.get_input_images(image, box_data)
# if goal_image:
# input_images += self.inference.get_input_images(goal_image, box_data)
if self.inference.model_data.architecture == ModelArchitecture.ActorCritic:
estimated_reward, actor_result = self.inference.model(input_images)
elif self.inference.model_data.architecture == ModelArchitecture.PlanarSemantic:
input_object_images = [get_inference_image(object_image, Affine(a=a), (224, 224), (224, 224), (224, 224), return_mat=True) for a in self.inference.a_space]
input_object_images = np.array(input_object_images) / np.iinfo(object_image.mat.dtype).max
estimated_grasp_reward, estimated_object_reward = self.inference.model([input_images, [input_object_images]])
estimated_reward = estimated_object_reward * estimated_grasp_reward
else:
estimated_reward = self.inference.model(input_images)
actor_result = None
if reward_index is not None:
estimated_reward = estimated_reward[reward_index]
if estimated_reward.shape[-1] > 4: # self.inference.model_data.output[0] == 'reward+human':
estimated_reward = (1 - alpha_human) * estimated_reward[:, :, :, :4] + alpha_human * estimated_reward[:, :, :, 4:]
# reward_reduced = np.maximum(estimated_reward, 0)
reward_reduced = np.mean(estimated_reward, axis=3)
# reward_reduced = estimated_reward[:, :, :, 0]
# For heatmapping the actor
# reward_reduced = actor_result[:, :, :, 2]
# reward_reduced = (reward_reduced - np.min(reward_reduced)) / np.ptp(reward_reduced)
# reward_reduced += 0.5
size_cropped = input_images[0].shape[1::-1]
size_result = image.mat.shape[1::-1]
heat = self.calculate_heat(reward_reduced, size_cropped, size_result)
heat = cv2.applyColorMap(heat.astype(np.uint8), cv2.COLORMAP_JET)
background = self.get_background(image, use_rgb)
if background.dtype == np.uint16:
background = background.astype(np.float32) / 255
else:
background = background.astype(np.float32)
result = (1 - alpha) * background + alpha * heat
result = OrthographicImage(result.astype(np.float32), image.pixel_size, image.min_depth, image.max_depth)
if draw_indices is not None:
self.draw_indices(result, reward_reduced, draw_indices)
if draw_lateral:
for _ in range(10):
index = np.unravel_index(estimated_reward.argmax(), estimated_reward.shape)
action = actor_result[index[0], index[1], index[2]]
self.draw_lateral(result, estimated_reward.shape, index, action)
estimated_reward[np.unravel_index(estimated_reward.argmax(), estimated_reward.shape)] = 0
if draw_shifts:
for _ in range(10):
self.draw_arrow(result, reward_reduced, np.unravel_index(reward_reduced.argmax(), reward_reduced.shape))
reward_reduced[np.unravel_index(reward_reduced.argmax(), reward_reduced.shape)] = 0
if save_path:
cv2.imwrite(str(save_path), result.mat)
return result.mat
def draw_lateral(self, image: OrthographicImage, reward_shape, index, action):
pose = self.inference.pose_from_index(index, reward_shape, image)
arrow_color = (255*255, 255*255, 255*255)
draw_line(image, pose, Affine(0.0, 0.0), Affine(a=pose.a, b=action[1], c=action[2]) * Affine(0.0, 0.0, -0.14), color=arrow_color, thickness=1)
def draw_indices(self, image: OrthographicImage, reward_shape, indices):
point_color = (255, 255, 255)
for index in indices:
pose = self.inference.pose_from_index(index, reward_shape, image)
pose.x /= reward_shape[1] / 40
pose.y /= reward_shape[2] / 40
draw_line(image, pose, Affine(-0.001, 0), Affine(0.001, 0), color=point_color, thickness=1)
draw_line(image, pose, Affine(0, -0.001), Affine(0, 0.001), color=point_color, thickness=1)
def draw_arrow(self, image: OrthographicImage, reward_shape, index):
pose = self.inference.pose_from_index(index, reward_shape, image)
arrow_color = (255, 255, 255)
draw_line(image, pose, Affine(0, 0), Affine(0.036, 0), color=arrow_color, thickness=2)
draw_line(image, pose, Affine(0.036, 0.0), Affine(0.026, -0.008), color=arrow_color, thickness=2)
draw_line(image, pose, Affine(0.036, 0.0), Affine(0.026, 0.008), color=arrow_color, thickness=2)
|
11492525
|
# -*- coding: utf-8 -*-
## @package guided_filter.results.performance
#
#  Simple performance test.
#  @author      tody
#  @date        2015/08/26
from guided_filter.datasets.google_image import dataFile
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
from guided_filter.io_util.image import loadRGB
from guided_filter.cv.image import to32F
from guided_filter.util.timer import timing_func, Timer
from guided_filter.core.filters import GuidedFilter, FastGuidedFilter
from guided_filter.results.results import resultFile
## Bilateral filter class with the same interface with GuidedFilter class.
#
# Input I for the constructor will be discarded.
class BilateralFilter:
def __init__(self, I, radius=5, epsilon=0.2):
self._epsilon = epsilon
self._radius = radius
def filter(self, I):
return cv2.bilateralFilter(I, 0, self._epsilon, self._radius)
## Performance test for the input image and the target filter.
def performanceTestFilter(C_noise, filter):
t = Timer()
filter.filter(C_noise)
t.stop()
return t.seconds()
## Generate filter variations for the target filter class, sigmas.
def generateFilterVariations(C_32F, filter_class, sigmas):
filters = []
for sigma in sigmas:
filters.append(filter_class(C_32F, radius=sigma))
return filters
## Performance test for the target sigmas.
def performanceTestSigmas(C_32F, sigmas, filter_types, ax):
h, w, cs = C_32F.shape
C_noise = np.float32(C_32F + 0.3 * np.random.rand(h, w, cs))
C_noise = np.clip(C_noise, 0.0, 1.0)
for type_name, filter_class_color in filter_types.items():
filter_class, color = filter_class_color
filters = generateFilterVariations(C_32F, filter_class, sigmas)
times = []
for filter in filters:
times.append(performanceTestFilter(C_noise, filter))
ax.plot(sigmas, times, label=type_name, color=color)
ax.set_xlabel('radius $r$')
ax.set_ylabel('time (secs)')
ax.legend(bbox_to_anchor=(0.88, 0.8), loc=2)
## Performance test for the image file.
def performanceTest(image_file):
C_8U = loadRGB(image_file)
C_32F = to32F(C_8U)
h, w = C_32F.shape[:2]
image_size_str = "Image size: %s x %s" %(w, h)
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(8, 8))
fig.subplots_adjust(left=0.1, right=0.7, top=0.86, hspace=0.4)
fig.suptitle("Peformance of guided filter\n%s" % image_size_str)
filter_types = {"Bilateral Filter": (BilateralFilter, "r"),
"Guided Filter": (GuidedFilter, "g"),
"Fast Guided Filter": (FastGuidedFilter, "b")}
sigmas = range(3, 31, 2)
axes[0].set_title('For small radius $r$')
performanceTestSigmas(C_32F, sigmas, filter_types, axes[0])
sigmas = range(10, 100, 5)
filter_types = {"Guided Filter": (GuidedFilter, "g"),
"Fast Guided Filter": (FastGuidedFilter, "b")}
axes[1].set_title('For large radius $r$')
performanceTestSigmas(C_32F, sigmas, filter_types, axes[1])
result_name = "performance"
result_file = resultFile(result_name)
plt.savefig(result_file)
## Performance tests for the data names, IDs.
def performanceTests(data_names, data_ids):
for data_name in data_names:
print "Performance tests: %s" % data_name
for data_id in data_ids:
print "Data ID: %s" % data_id
image_file = dataFile(data_name, data_id)
performanceTest(image_file)
if __name__ == '__main__':
data_names = ["flower"]
data_ids = [0]
performanceTests(data_names, data_ids)
|
11492528
|
import collections
import csv
import glob
import os
import pandas as pd
import cea
import cea.config
import cea.inputlocator
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, Architecture and Building Systems - ETH Zurich"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
COLUMNS_SCHEDULES = ['DAY',
'HOUR',
'OCCUPANCY',
                     'APPLIANCES',
'LIGHTING',
'WATER',
'HEATING',
'COOLING',
'PROCESSES',
'SERVERS']
DAY = ['WEEKDAY'] * 24 + ['SATURDAY'] * 24 + ['SUNDAY'] * 24
HOUR = list(range(1, 25)) + list(range(1, 25)) + list(range(1, 25))
def read_cea_schedule(path_to_cea_schedule):
"""
reader for the files ``locator.get_building_weekly_schedules``
:param str path_to_cea_schedule: path to the cea schedule file to read.
(E.g. inputs/building-properties/schedules/B001.csv)
:return: schedule data, schedule complementary data
"""
with open(path_to_cea_schedule) as f:
reader = csv.reader(f)
for i, row in enumerate(reader):
if i == 0:
metadata = row[1]
elif i == 1:
monthly_multiplier = [round(float(x), 2) for x in row[1:]]
else:
# skip all the other rows
break
schedule_data = pd.read_csv(path_to_cea_schedule, skiprows=2).T
schedule_data = dict(zip(schedule_data.index, schedule_data.values))
schedule_complementary_data = {'METADATA': metadata, 'MONTHLY_MULTIPLIER': monthly_multiplier}
return schedule_data, schedule_complementary_data
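# A hypothetical file in the layout read above (values are made up for illustration):
#
#   METADATA,mixed-schedule
#   MONTHLY_MULTIPLIER,0.8,0.8,0.9,1.0,1.0,1.0,1.0,1.0,1.0,0.9,0.8,0.8
#   DAY,HOUR,OCCUPANCY,APPLIANCES,LIGHTING,...
#   WEEKDAY,1,0.0,0.1,0.1,...
#   WEEKDAY,2,0.0,0.1,0.1,...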
def save_cea_schedule(schedule_data, schedule_complementary_data, path_to_building_schedule):
METADATA = ['METADATA'] + [schedule_complementary_data['METADATA']]
MULTIPLIER = ['MONTHLY_MULTIPLIER'] + list(schedule_complementary_data['MONTHLY_MULTIPLIER'])
COLUMNS_SCHEDULES = schedule_data.keys()
RECORDS_SCHEDULES = map(list, zip(*schedule_data.values()))
with open(path_to_building_schedule, "w", newline="", encoding="utf-8") as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',')
csvwriter.writerow(METADATA)
csvwriter.writerow(MULTIPLIER)
csvwriter.writerow(COLUMNS_SCHEDULES)
for row in RECORDS_SCHEDULES:
csvwriter.writerow(row)
def get_all_schedule_names(schedules_folder):
"""Get all schedule names from path"""
schedule_files = glob.glob(os.path.join(schedules_folder, "*.csv"))
return [os.path.splitext(os.path.basename(schedule_file))[0] for schedule_file in schedule_files]
# TODO: Replace usages of `read_cea_schedule` to this function
def schedule_to_dataframe(schedule_path):
out = collections.OrderedDict()
with open(schedule_path) as f:
reader = csv.reader(f)
out['METADATA'] = pd.DataFrame({'metadata': [next(reader)[1]]})
out['MONTHLY_MULTIPLIER'] = pd.DataFrame({m + 1: [round(float(v), 2)] for m, v in enumerate(next(reader)[1:])},
columns=[m for m in range(1, 13)])
# Filter empty columns
columns = [col for col in next(reader) if col != '']
schedule_data = pd.read_csv(schedule_path, skiprows=2, usecols=columns).set_index(
['DAY', 'HOUR']).unstack().reindex(['WEEKDAY', 'SATURDAY', 'SUNDAY'])
for t, df in schedule_data.groupby(axis=1, level=0, sort=False):
df.columns = [i for i in range(1, 25)]
out[t] = df.reset_index()
return out
def schedule_to_file(schedule, schedule_path):
schedule_df = pd.DataFrame()
metadata = ['METADATA']
multiplier = ['MONTHLY_MULTIPLIER']
for key, data in schedule.items():
if key == 'METADATA':
metadata += [schedule['METADATA']['metadata'].iloc[0]]
elif key == 'MONTHLY_MULTIPLIER':
multiplier += [i for i in schedule['MONTHLY_MULTIPLIER'].iloc[0].values]
else:
schedule_column_data = data.set_index(['DAY']).reindex(['WEEKDAY', 'SATURDAY', 'SUNDAY']).stack()
schedule_column_data.index.names = ['DAY', 'HOUR']
schedule_df[key] = schedule_column_data
schedule_df = schedule_df.reset_index()
with open(schedule_path, "w", newline="", encoding="utf-8") as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',')
csv_writer.writerow(metadata)
csv_writer.writerow(multiplier)
csv_writer.writerow(schedule_df.columns)
for row in schedule_df.values:
csv_writer.writerow(row)
print('Schedule file written to {}'.format(schedule_path))
def main(config):
locator = cea.inputlocator.InputLocator(scenario=config.scenario)
path_to_building_schedule = locator.get_database_standard_schedules_use('MULTI_RES')
# print(read_cea_schedule(path_to_building_schedule))
print(schedule_to_dataframe(path_to_building_schedule))
if __name__ == '__main__':
main(cea.config.Configuration())
|
11492533
|
import os
import pytest
from waterbutler.core import streams
DUMMY_FILE = os.path.join(os.path.dirname(__file__), 'fixtures/dummy.txt')
class TestFileStreamReader:
@pytest.mark.asyncio
async def test_file_stream_reader(self):
with open(DUMMY_FILE, 'r') as fp:
reader = streams.FileStreamReader(fp)
assert reader.size == 27
data = await reader.read()
assert data == 'abcdefghijklmnopqrstuvwxyz\n'
at_eof = reader.at_eof()
assert not at_eof
data = await reader.read()
assert data == b''
at_eof = reader.at_eof()
assert at_eof
reader.close()
at_eof = reader.at_eof()
assert at_eof
with pytest.raises(ValueError):
fp.read()
@pytest.mark.asyncio
async def test_file_stream_reader_after_seek(self):
with open(DUMMY_FILE, 'r') as fp:
fp.seek(3)
reader = streams.FileStreamReader(fp)
assert reader.size == 27 # still gives full size
assert fp.tell() == 3 # returns to original seek position
data = await reader.read()
assert data == 'abcdefghijklmnopqrstuvwxyz\n' # always reads full data
at_eof = reader.at_eof()
assert not at_eof
data = await reader.read()
assert data == b''
at_eof = reader.at_eof()
assert at_eof
@pytest.mark.asyncio
async def test_file_stream_reader_subset(self):
with open(DUMMY_FILE, 'r') as fp:
reader = streams.FileStreamReader(fp)
data = await reader.read(10)
assert data == 'abcdefghij'
at_eof = reader.at_eof()
assert not at_eof
data = await reader.read(2)
assert data == 'kl'
at_eof = reader.at_eof()
assert not at_eof
data = await reader.read()
assert data == 'mnopqrstuvwxyz\n'
at_eof = reader.at_eof()
assert not at_eof
data = await reader.read()
assert data == b''
at_eof = reader.at_eof()
assert at_eof
class TestPartialFileStreamReader:
@pytest.mark.asyncio
@pytest.mark.parametrize("byte_range,size,is_partial,content_range,expected", [
((0, 26), 27, False, 'bytes 0-26/27', 'abcdefghijklmnopqrstuvwxyz\n'),
((0, 5), 6, True, 'bytes 0-5/27', 'abcdef'),
((2, 10), 9, True, 'bytes 2-10/27', 'cdefghijk'),
((20, 26), 7, True, 'bytes 20-26/27', 'uvwxyz\n'),
((2, 2), 1, True, 'bytes 2-2/27', 'c'),
])
async def test_partial_file_stream_reader(self, byte_range, size, is_partial, content_range,
expected):
with open(DUMMY_FILE, 'r') as fp:
reader = streams.PartialFileStreamReader(fp, byte_range)
assert reader.size == size
assert reader.total_size == 27
assert reader.partial == is_partial
assert reader.content_range == content_range
data = await reader.read()
assert data == expected
at_eof = reader.at_eof()
assert not at_eof
data = await reader.read()
assert data == b''
at_eof = reader.at_eof()
assert at_eof
@pytest.mark.asyncio
@pytest.mark.parametrize("byte_range,size,is_partial,content_range,expected", [
((0, 26), 27, False, 'bytes 0-26/27', 'abcdefghijklmnopqrstuvwxyz\n'),
((0, 5), 6, True, 'bytes 0-5/27', 'abcdef'),
((2, 10), 9, True, 'bytes 2-10/27', 'cdefghijk'),
((20, 26), 7, True, 'bytes 20-26/27', 'uvwxyz\n'),
((2, 2), 1, True, 'bytes 2-2/27', 'c'),
])
async def test_partial_file_stream_reader_with_size(self, byte_range, size, is_partial,
content_range, expected):
"""Test that range is respected even when large size values are passed to ``.read()``."""
with open(DUMMY_FILE, 'r') as fp:
reader = streams.PartialFileStreamReader(fp, byte_range)
assert reader.size == size
assert reader.total_size == 27
assert reader.partial == is_partial
assert reader.content_range == content_range
data = await reader.read(500)
assert data == expected
at_eof = reader.at_eof()
assert not at_eof
data = await reader.read(500)
assert data == b''
at_eof = reader.at_eof()
assert at_eof
|
11492549
|
from colorclass import Color
from terminaltables import SingleTable
class ArticlePrinter(object):
"""Class which can be used to print Article stats on the command line"""
def __init__(self, article_obj):
self.article = article_obj
def print_article_stats(self):
"""This method is called to present overall article stats on a command line."""
table_data = [
[Color('{autocyan}Overall Stats{/autocyan}')],
['Reading time', str(self.article.reading_time) + ' mins'],
['Flesch Reading Ease', self.article.get_flesch_reading_score()],
['Dale Chall Readability Score', self.article.get_dale_chall_reading_score()],
['Paragraphs', self.article.total_paragraphs],
['Avg sentences per paragraph', self.article.avg_sentences_per_para],
['Total sentences in longest paragraph', self.article.len_of_longest_paragraph],
['Sentences', self.article.total_sentences],
['Avg words per sentence', self.article.avg_words_per_sentence],
['Longest sentence', "%s..." % str(self.article.longest_sentence)[0:30]],
['Words in longest sentence', self.article.len_of_longest_sentence],
['Words', self.article.total_words],
['"and" frequency"', self.article.get_and_frequency()],
['Compulsive Hedgers', len(self.article.get_compulsive_hedgers())],
['Intensifiers', len(self.article.get_intensifiers())],
['Vague words', len(self.article.get_vague_words())],
]
table_instance = SingleTable(table_data)
table_instance.inner_heading_row_border = True
table_instance.inner_row_border = True
table_instance.justify_columns = {0: 'left', 1: 'center'}
print(table_instance.table)
self.print_detail()
def print_paragraph_stats(self):
"""This method, along with print_article_stats(), can be called to present paragraph stats on a command line.
Ideally first call print_article_stats() and then this method.
        It shows the sentence count, average words per sentence, longest sentence, and readability scores (Flesch reading ease and
Dale Chall readability scores) of paragraphs.
"""
sentence_tag = Color('{blue}sentences{/blue}')
word_tag = Color('{blue}words{/blue}')
avg_word_tag = Color('{blue}Avg words per sentence{/blue}')
long_tag = Color('{red}longest{/red}')
table_data = [
[Color('{autocyan}Paragraph Stats{/autocyan}')],
['Paragraph #', '']
]
for item, para in enumerate(self.article.paragraphs):
sentences = Color('{red}%s{/red}' % str(len(para))) if len(para) > 5 else str(len(para))
avg_words_per_sentence = Color(
'{red}%s{/red}' % str(para.avg_words_per_sentence)) if para.avg_words_per_sentence > 25 else str(
para.avg_words_per_sentence)
table_data.append([item + 1,
'{sentences} {sent_tag}. {words} {word_tag}. {avg_words} {avg_word_tag}. '
'"{longest_sent}..." is the {long_tag} sentence.'.format(
sentences=sentences, sent_tag=sentence_tag, words=para.total_words,
word_tag=word_tag, avg_words=avg_words_per_sentence, avg_word_tag=avg_word_tag,
longest_sent=str(para.longest_sentence)[0:10], long_tag=long_tag
)])
table_data.append(["", "Flesh Reading score={flesch_reading}, Dale Chall Readability= {dale_chall}".format(
flesch_reading=para.get_flesch_reading_score(), dale_chall=para.get_dale_chall_reading_score()
)])
table_instance = SingleTable(table_data)
table_instance.inner_heading_row_border = True
table_instance.inner_row_border = True
table_instance.justify_columns = {0: 'center', 1: 'left'}
print(table_instance.table)
def _print_detail_of(self, words_list, heading, display_count=True):
words = [str(word) for word in words_list]
if len(words) >= 1:
words_unique_list = list(set(words))
format_str = "{word} ({count})"
if display_count:
msg = '{red} **- %s: %s {/red}\r\n' % (heading, ', '.join(format_str.format(word=str(word),
count=words.count(word)) for word in words_unique_list))
else:
msg = '{red} **- %s: %s {/red}\r\n' % (heading, ', '.join(word for word in words_unique_list))
print(Color(msg))
def print_detail(self):
self._print_detail_of(self.article.get_compulsive_hedgers(), "Compulsive Hedgers", display_count=False)
self._print_detail_of(self.article.get_intensifiers(), "Intensifiers", display_count=False)
self._print_detail_of(self.article.get_vague_words(), "Vague words")
self._print_detail_of(self.article.ten_words_with_most_syllables(), "10 words with most syllables", display_count=False)
self._print_detail_of(self.article.get_n_most_repeated_words(20), "20 most repeated words", display_count=False)
|
11492587
|
def gcd(u, v):
while v > 0:
tmp = v
v = u % v
u = tmp
return u
t = int(input())
for _ in range(t):
a, b, x, y = map(int, input().split())
# gcd definitions says:
# gcd (a + m * b, b) = gcd(a, b)
# such as m being an integer
# hence:
# gcd (a + b, b)
# gcd (a - b, b)
# gcd (a, a + b)
# gcd (a, a - b)
# must all be equal to gcd(a, b)
# thus it means it's possible to
# move from (a,b) point to (x, y) point
# iff gcd(a, b) == gcd(x, y)
if gcd(a, b) == gcd(x, y):
print("YES")
else:
print("NO")
|
11492593
|
import torch
import numpy as np
from torch import nn, optim
from .data_helper import get_data_loader
history = {}
def root_mean_squared_error(y_true, y_pred):
""" RMSE implementation.
:param y_true: The correct labels
:param y_pred: The predicted labels
:return: RMSE score
"""
return np.sqrt(((y_true - y_pred) ** 2).mean())
def data_result_size(data):
""" Count all samples in all objects for all workers in a grid search response.
:param data: Grid search response data
:return: Total number of data samples
"""
total = 0
for i in range(len(data)):
for j in range(len(data[i])):
total += data[i][j].shape[0]
return total
def train(model, data, labels, criterion):
""" Train the model with the given data in a federated way.
:param model: Model to train
:param data: Data to use for training
:param labels: Labels to use for loss calculation
:param criterion: Criterion to use for loss calculations
:return: Loss of this epoch
"""
model.train()
epoch_total = data_result_size(data)
running_loss = 0
for i in range(len(data)):
        # initialize a dedicated optimizer for every worker to prevent errors with Adam's momentum
optimizer = optim.Adam(model.parameters())
for j in range(len(data[i])):
# check the location of the data and send the model there
worker = data[i][j].location
model.send(worker)
# train one step
optimizer.zero_grad()
output = model(data[i][j])
loss = criterion(output, labels[i][j])
loss.backward()
optimizer.step()
# get the updated model and the loss back from the worker
model.get()
loss = loss.get()
running_loss += loss.item() * data[i][j].shape[0]
epoch_loss = running_loss / epoch_total
# TODO: add EarlyStopping
return epoch_loss
def test(model, test_loader, criterion):
""" Test the model.
:param model: The model to test
:param test_loader: DataLoader with the test data
:param criterion: Criterion to use for loss calculation
:return: Loss of the test
"""
model.eval()
test_loss = 0
with torch.no_grad():
for data, target in test_loader:
output = model(data)
test_loss += criterion(output, target).item() * data.size(0)
test_loss /= len(test_loader.dataset)
return test_loss
def start_federated_training(model, train_data, train_labels, val_data, val_labels, epochs):
""" Start federated training.
:param model: Model to train
:param train_data: Training data
:param train_labels: Training labels
:param val_data: Validation data
:param val_labels: Validation labels
:param epochs: Number of epochs
:return: The trained model
"""
global history
torch.manual_seed(1)
np.random.seed(51)
criterion = nn.L1Loss() # mae
# manually track the losses
history['epoch'] = []
history['loss'] = []
history['val_loss'] = []
val_loader = get_data_loader(val_data, val_labels)
for epoch in range(1, epochs + 1):
train_loss = train(model, train_data, train_labels, criterion)
val_loss = test(model, val_loader, criterion)
print('Epoch: {}/{}\tloss: {:.4f}\tval_loss: {:.4f}'.format(epoch, epochs, train_loss, val_loss))
history['epoch'].append(epoch)
history['loss'].append(train_loss)
history['val_loss'].append(val_loss)
return model
def get_model_error(model, test_data, test_labels):
""" Calculate the RMSE for the given model and test data.
:param model: Model to use
:param test_data: test data
:param test_labels: test labels
:return: RMSE score
"""
model.eval()
with torch.no_grad():
output = model(test_data)
rmse = root_mean_squared_error(test_labels, output)
return rmse
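# A minimal usage sketch of the helpers above (assumes `model` is a torch.nn.Module and
# that the train/val/test arguments are the nested lists of pointer tensors this module
# expects; the variable names are placeholders, not part of this module):
#
#   model = start_federated_training(model, train_data, train_labels,
#                                    val_data, val_labels, epochs=50)
#   print(get_model_error(model, test_data, test_labels))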
|
11492647
|
import os, os.path, errno, sys, traceback, subprocess
import re, html.entities
import json
import logging
import yaml
from bs4 import BeautifulSoup
from datetime import datetime
import requests
import urllib.parse
import io
import gzip
import docx
import zipfile
from urllib.parse import urljoin
import inspect
import pdfrw
from . import admin
logging.getLogger("pdfrw").setLevel(logging.CRITICAL)
# scraper should be instantiated at class-load time, so that it can rate limit appropriately
import scrapelib
scraper = scrapelib.Scraper(requests_per_minute=120, retry_attempts=3)
scraper.user_agent = "unitedstates/inspectors-general (https://github.com/unitedstates/inspectors-general)"
scraper.timeout = 60
class Soft404HttpAdapter(requests.adapters.HTTPAdapter):
"""Transport adapter that checks all responses against a blacklist of "file
not found" pages that are served with 200 status codes."""
SOFT_404_BODY_SIGNATURES = {
"si.edu": b"<title>Page Not FoundSmithsonian</title>",
}
def build_response(self, req, resp):
domain = urllib.parse.urlparse(req.url)[1].split(':')[0]
base_domain = ".".join(domain.split(".")[-2:])
if base_domain in self.SOFT_404_BODY_SIGNATURES:
if resp.getheader("Content-Type") in ["text/html; charset=utf-8",
"text/html"]:
data = resp.data
headers = resp.headers
if resp.getheader("Content-Encoding") == "gzip":
decompressed_data = gzip.decompress(data)
else:
decompressed_data = data
if resp.getheader("Transfer-Encoding") == "chunked":
headers.pop("Transfer-Encoding")
body = io.BytesIO(data)
resp = requests.packages.urllib3.response.HTTPResponse(
body=body,
headers=headers,
status=resp.status,
version=resp.version,
reason=resp.reason,
strict=resp.strict,
preload_content=False,
)
if decompressed_data.find(self.SOFT_404_BODY_SIGNATURES[base_domain], 0, 10240) != -1:
result = super(Soft404HttpAdapter, self).build_response(req, resp)
result.status_code = 404 # tells scrapelib to not retry
return result
result = super(Soft404HttpAdapter, self).build_response(req, resp)
return result
scraper.mount("http://www.si.edu/", Soft404HttpAdapter())
scraper.mount("http://si.edu/", Soft404HttpAdapter())
scraper.mount("https://www.si.edu/", Soft404HttpAdapter())
scraper.mount("https://si.edu/", Soft404HttpAdapter())
WHITELIST_INSECURE_DOMAINS = (
"https://www.ncua.gov/", # incomplete chain as of 12/10/2016
)
# Special case handling for governmentattic.org, va.gov, etc.:
# These pages are served without an encoding in the HTTP headers,
# and with the encoding specified in a <meta> tag inside the document.
# See https://gist.github.com/divergentdave/0985ae1f2fd2a7235ccef0d5bbb6aaa3
# for data collection script.
META_CHARSETS = {
"http://www.usda.gov/oig": "iso-8859-1",
"http://www.cftc.gov/": "utf-8",
"http://cftc.gov/": "utf-8",
"https://www.cftc.gov/": "utf-8",
"https://cftc.gov/": "utf-8",
"http://www.dodig.mil/": "utf-8",
"https://www.dodig.mil/": "utf-8",
"https://oig.justice.gov/": "utf-8",
"https://www.fca.gov/": "utf-8",
"https://fca.gov/": "utf-8",
"http://www.fec.gov/": "iso-8859-1",
"https://www.fec.gov/": "iso-8859-1",
"https://oig.federalreserve.gov/": "iso-8859-1",
"http://www.gao.gov/": "utf-8",
"http://gao.gov/": "utf-8",
"https://www.gao.gov/": "utf-8",
"https://gao.gov/": "utf-8",
"http://www.governmentattic.org/": "utf-8",
"http://governmentattic.org/": "utf-8",
"https://www.governmentattic.org/": "utf-8",
"https://governmentattic.org/": "utf-8",
"https://oig.nasa.gov/": "utf-8",
"http://www.nrc.gov/": "utf-8",
"https://www.nrc.gov/": "utf-8",
"http://oig.pbgc.gov/": "utf-8",
"https://oig.pbgc.gov/": "utf-8",
"https://www.treasury.gov/tigta/": "iso-8859-1",
"http://oig.tva.gov/": "utf-8",
"https://oig.tva.gov/": "utf-8",
"https://www.va.gov/oig/": "utf-8",
}
# will pass correct options on to individual scrapers whether
# run through ./igs or individually, because argv[1:] is the same
def run(run_method, additional=None):
cli_options = options()
configure_logging(cli_options)
if additional:
cli_options.update(additional)
try:
return run_method(cli_options)
except Exception as exception:
admin.log_exception(exception)
# read options from the command line
# e.g. ./inspectors/usps.py --since=2012-03-04 --debug
# => {"since": "2012-03-04", "debug": True}
AVAILABLE_OPTIONS = (
"archive",
"bulk",
"component",
"debug",
"dry_run",
"end",
"ig",
"limit",
"log",
"only",
"pages",
"quick",
"report_id",
"safe",
"since",
"skip_downloaded",
"start",
"topics",
"types",
"year",
)
def options():
options = {}
for arg in sys.argv[1:]:
if arg.startswith("--"):
if "=" in arg:
key, value = arg.split('=')
else:
key, value = arg, "true"
key = key.split("--")[1]
key = key.lower()
value = value.lower()
if key not in AVAILABLE_OPTIONS:
print("Unknown option: \"%s\"\n"
"The following options are recognized\n"
" %s"% (key, ", ".join(AVAILABLE_OPTIONS)))
sys.exit(1)
if value == 'true': value = True
elif value == 'false': value = False
options[key] = value
return options
def configure_logging(options=None):
options = {} if not options else options
if options.get('debug', False):
log_level = "debug"
else:
log_level = options.get("log", "warn")
if log_level not in ["debug", "info", "warn", "error"]:
print("Invalid log level (specify: debug, info, warn, error).")
sys.exit(1)
logging.basicConfig(format='%(message)s', level=log_level.upper())
# download the data at url
def download(url, destination=None, options=None, scraper_slug=None):
options = {} if not options else options
cache = options.get('cache', True) # default to caching
binary = options.get('binary', False) # default to assuming text
# check cache first
if destination and cache and os.path.exists(destination):
logging.info("## Cached: (%s, %s)" % (destination, url))
# if a binary file is cached, we're done
if binary:
return True
# otherwise, decode it for return
with open(destination, 'r', encoding='utf-8') as f:
body = f.read()
# otherwise, download from the web
else:
logging.warn(url)
logging.info("## Downloading: %s" % url)
if binary:
if destination:
logging.info("## \tto: %s" % destination)
else:
raise Exception("A destination path is required for downloading a binary file")
try:
mkdir_p(os.path.dirname(destination))
verify_options = domain_verify_options(url)
scraper.urlretrieve(url, destination, verify=verify_options)
except connection_errors() as e:
admin.log_http_error(e, url, scraper_slug)
return None
else: # text
try:
if destination: logging.info("## \tto: %s" % destination)
# Special handler for downloading reports whose server has
# misconfigured their HTTPS, and for which no alternative
# exists.
# This happens very rarely, and scrapelib has a bug with
# verification options, so this disables the rate limiting
# provided by scrapelib.
verify_options = domain_verify_options(url)
response = scraper.get(url, verify=verify_options)
except connection_errors() as e:
admin.log_http_error(e, url, scraper_slug)
return None
for prefix, charset in META_CHARSETS.items():
if url.startswith(prefix):
response.encoding = charset
break
body = response.text
if not isinstance(body, str): raise ValueError("Content not decoded.")
# don't allow 0-byte files
if (not body) or (not body.strip()):
return None
# cache content to disk
if destination:
write(body, destination, binary=binary)
# don't return binary content
if binary:
return True
else:
# whether from disk or web, unescape HTML entities
return unescape(body)
def beautifulsoup_from_url(url):
caller_filename = inspect.stack()[1][1]
caller_scraper = os.path.splitext(os.path.basename(caller_filename))[0]
body = download(url, scraper_slug=caller_scraper)
if body is None: return None
doc = BeautifulSoup(body, "lxml")
# Some of the pages will return meta refreshes
if doc.find("meta") and doc.find("meta").attrs.get('http-equiv') == 'REFRESH':
redirect_url = urljoin(url, doc.find("meta").attrs['content'].split("url=")[1])
return beautifulsoup_from_url(redirect_url)
else:
return doc
def post(url, data=None, headers=None, **kwargs):
response = None
try:
verify_options = domain_verify_options(url)
response = scraper.post(url, data=data, headers=headers, verify=verify_options)
except connection_errors() as e:
admin.log_http_error(e, url)
return None
return response
def resolve_redirect(url):
res = scraper.request(method='HEAD', url=url, allow_redirects=False)
if "Location" in res.headers:
return res.headers["Location"]
else:
return url
def connection_errors():
return (scrapelib.HTTPError, requests.exceptions.ConnectionError, requests.packages.urllib3.exceptions.MaxRetryError)
# uses BeautifulSoup to do a naive extraction of text from HTML,
# then writes it and returns the /data-relative path.
def text_from_html(real_html_path, real_text_path):
html = open(real_html_path, encoding='utf-8').read()
doc = BeautifulSoup(html, "lxml")
for node in doc.findAll(['script', 'style']):
node.extract()
text = doc.text
lines = text.splitlines()
for i in range(len(lines)):
lines[i] = lines[i].strip()
lines = filter(None, lines)
text = "\n".join(lines)
write(text, real_text_path, binary=False)
def domain_verify_options(url):
for domain in WHITELIST_INSECURE_DOMAINS:
if url.startswith(domain):
logging.warn("SKIPPING HTTPS VERIFICATION.")
return False
return True
_tool_present_cache = {}
def check_tool_present(*args):
if args in _tool_present_cache:
return _tool_present_cache[args]
try:
subprocess.Popen(args,
shell=False,
stdout=subprocess.DEVNULL,
stderr=subprocess.STDOUT).communicate()
result = True
except FileNotFoundError:
result = False
_tool_present_cache[args] = result
return result
# read the PDF's trailer dictionary to determine if we need to decrypt it
def check_pdf_decryption(pdf_path):
try:
doc = pdfrw.PdfReader(pdf_path)
return "/Encrypt" in doc
except:
return False
# uses qpdf to decrypt a PDF
def decrypt_pdf(source_path, destination_path):
if not check_tool_present("qpdf", "--version"):
logging.warn("Install qpdf to decrypt PDFs! "
"The qpdf executable must be in a directory that is in "
"your PATH environment variable.")
return False
  try:
    subprocess.check_call(["qpdf",
                           "--decrypt",
                           source_path,
                           destination_path], shell=False)
  except subprocess.CalledProcessError as exc:
    logging.warn("Error decrypting %s:\n\n%s" %
                 (source_path, format_exception(exc)))
    return False
  if not os.path.exists(destination_path):
    logging.warn("PDF not decrypted to %s" % destination_path)
    return False
  return True
# uses pdftotext to get text out of PDFs,
# then writes it and returns the /data-relative path.
def text_from_pdf(real_pdf_path, real_text_path):
if not check_tool_present("pdftotext", "-v"):
logging.warn("Install pdftotext to extract text! "
"The pdftotext executable must be in a directory that is in "
"your PATH environment variable.")
return
try:
subprocess.check_call(["pdftotext",
"-layout",
"-nopgbrk",
real_pdf_path,
real_text_path], shell=False)
except subprocess.CalledProcessError as exc:
logging.warn("Error extracting text to %s:\n\n%s" %
(real_text_path, format_exception(exc)))
return
if not os.path.exists(real_text_path):
logging.warn("Text not extracted to %s" % real_text_path)
def text_from_doc(real_doc_path, real_text_path):
if not check_tool_present("abiword", "-?"):
logging.warn("Install AbiWord to extract text! "
"The abiword executable must be in a directory that is in "
"your PATH environment variable.")
return
try:
subprocess.check_call(["abiword",
real_doc_path,
"--to",
"txt"], shell=False)
except subprocess.CalledProcessError as exc:
logging.warn("Error extracting text to %s:\n\n%s" %
(real_text_path, format_exception(exc)))
return
if not os.path.exists(real_text_path):
logging.warn("Text not extracted to %s" % real_text_path)
def text_from_docx(real_docx_path, real_text_path):
def text_from_paragraphs(paragraphs):
return "\n\n".join([paragraph.text for paragraph in paragraphs])
def text_from_tables(tables):
return "\n\n".join([text_from_table(table) for table in tables])
def text_from_table(table):
return "\n\n".join([text_from_row(row) for row in table.rows])
def text_from_row(row):
return "\n\n".join([text_from_doc_or_cell(cell) for cell in row.cells])
def text_from_doc_or_cell(cell):
part1 = text_from_paragraphs(cell.paragraphs)
part2 = text_from_tables(cell.tables)
if part1 and part2:
return "%s\n\n%s" % (part1, part2)
else:
return part1 + part2
try:
document = docx.Document(real_docx_path)
text = text_from_doc_or_cell(document)
write(text, real_text_path, binary=False)
except zipfile.BadZipFile as exc:
logging.warn("Error extracting text to %s:\n\n%s" %
(real_text_path, format_exception(exc)))
return None
PDF_PAGE_RE = re.compile("Pages: +([0-9]+)\r?\n")
PDF_CREATION_DATE_RE = re.compile("CreationDate: +([^\r\n]*)\r?\n")
PDF_MOD_DATE_RE = re.compile("ModDate: +([^\r\n]*)\r?\n")
PDF_TITLE_RE = re.compile("Title: +([^\r\n]*)\r?\n")
PDF_KEYWORDS_RE = re.compile("Keywords: +([^\r\n]*)\r?\n")
PDF_AUTHOR_RE = re.compile("Author: +([^\r\n]*)\r?\n")
def parse_pdf_datetime(raw):
if raw.strip() == "":
return None
my_datetime = None
datetime_formats = [
'%m/%d/%y %H:%M:%S',
'%a %b %d %H:%M:%S %Y',
'%A, %B %d, %Y %I:%M:%S %p',
'%a %b %d %H:%M:%S %Y %Z'
]
for datetime_format in datetime_formats:
try:
my_datetime = datetime.strptime(raw, datetime_format)
break
except ValueError:
pass
if my_datetime:
return datetime.strftime(my_datetime, '%Y-%m-%d')
else:
logging.warn('Could not parse PDF date: %s' % raw)
return None
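# e.g. parse_pdf_datetime("06/18/14 10:51:42") returns '2014-06-18' (matched by the
# first entry in datetime_formats); unparseable strings log a warning and return None.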
def metadata_from_pdf(pdf_path):
if not check_tool_present("pdfinfo", "-v"):
logging.warn("Install pdfinfo to extract metadata! "
"The pdfinfo executable must be in a directory that is in "
"your PATH environment variable.")
return None
real_pdf_path = os.path.expandvars(os.path.join(data_dir(), pdf_path))
real_pdf_path = os.path.abspath(real_pdf_path)
try:
output = subprocess.check_output(["pdfinfo", real_pdf_path], shell=False)
output = output.decode('utf-8', errors='replace')
except subprocess.CalledProcessError as exc:
logging.warn("Error extracting metadata for %s:\n\n%s" %
(pdf_path, format_exception(exc)))
return None
metadata = {}
page_match = PDF_PAGE_RE.search(output)
if page_match:
metadata['page_count'] = int(page_match.group(1))
creation_date_match = PDF_CREATION_DATE_RE.search(output)
if creation_date_match:
metadata['creation_date'] = parse_pdf_datetime(creation_date_match.group(1))
mod_date_match = PDF_MOD_DATE_RE.search(output)
if mod_date_match:
metadata['modification_date'] = parse_pdf_datetime(mod_date_match.group(1))
title_match = PDF_TITLE_RE.search(output)
if title_match:
metadata['title'] = title_match.group(1)
keywords_match = PDF_KEYWORDS_RE.search(output)
if keywords_match:
metadata['keywords'] = keywords_match.group(1)
author_match = PDF_AUTHOR_RE.search(output)
if author_match:
metadata['author'] = author_match.group(1)
if metadata:
return metadata
return None
def check_report_url(report_url):
try:
verify_options = domain_verify_options(report_url)
scraper.request(method='HEAD', url=report_url, verify=verify_options)
except connection_errors() as e:
admin.log_http_error(e, report_url)
DOC_PAGE_RE = re.compile("Number of Pages: ([0-9]*),")
DOC_CREATION_DATE_RE = re.compile("Create Time/Date: ([A-Za-z 0-9:]*),")
DOC_MOD_DATE_RE = re.compile("Last Saved Time/Date: ([A-Za-z 0-9:]*),")
DOC_TITLE_RE = re.compile("Title: ([^,]*),")
DOC_AUTHOR_RE = re.compile("Author: ([^,]*),")
def parse_doc_datetime(raw):
if raw.strip() == "":
return None
my_datetime = None
try:
my_datetime = datetime.strptime(raw, '%a %b %d %H:%M:%S %Y')
except ValueError:
pass
if my_datetime:
return datetime.strftime(my_datetime, '%Y-%m-%d')
else:
logging.warn('Could not parse DOC date: %s' % raw)
return None
def metadata_from_doc(doc_path):
if not check_tool_present("file", "-v"):
logging.warn("Install file to extract metadata! "
"The file executable must be in a directory that is in your "
"PATH environment variable.")
return None
real_doc_path = os.path.expandvars(os.path.join(data_dir(), doc_path))
real_doc_path = os.path.abspath(real_doc_path)
try:
output = subprocess.check_output(["file", real_doc_path], shell=False)
output = output.decode('utf-8', errors='replace')
except subprocess.CalledProcessError as exc:
logging.warn("Error extracting metadata for %s:\n\n%s" %
(doc_path, format_exception(exc)))
return None
metadata = {}
page_match = DOC_PAGE_RE.search(output)
if page_match:
metadata['page_count'] = int(page_match.group(1))
creation_date_match = DOC_CREATION_DATE_RE.search(output)
if creation_date_match:
metadata['creation_date'] = parse_doc_datetime(creation_date_match.group(1))
mod_date_match = DOC_MOD_DATE_RE.search(output)
if mod_date_match:
metadata['mod_date'] = parse_doc_datetime(mod_date_match.group(1))
title_match = DOC_TITLE_RE.search(output)
if title_match:
metadata['title'] = title_match.group(1)
author_match = DOC_AUTHOR_RE.search(output)
if author_match:
metadata['author'] = author_match.group(1)
if metadata:
return metadata
return None
def metadata_from_docx(docx_path):
try:
real_docx_path = os.path.expandvars(os.path.join(data_dir(), docx_path))
real_docx_path = os.path.abspath(real_docx_path)
document = docx.Document(real_docx_path)
core_props = document.core_properties
metadata = {}
if core_props.author:
metadata['author'] = core_props.author
if core_props.title:
metadata['title'] = core_props.title
if core_props.created:
metadata['creation_date'] = datetime.strftime(core_props.created, '%Y-%m-%d')
if core_props.modified:
      metadata['mod_date'] = datetime.strftime(core_props.modified, '%Y-%m-%d')
if core_props.keywords:
metadata['keywords'] = core_props.keywords
if metadata:
return metadata
return None
except zipfile.BadZipFile as exc:
logging.warn("Error extracting metadata for %s:\n\n%s" %
(docx_path, format_exception(exc)))
return None
def format_exception(exception):
exc_type, exc_value, exc_traceback = sys.exc_info()
return "\n".join(traceback.format_exception(exc_type, exc_value, exc_traceback))
# assumes working dir is the root dir
def data_dir():
if admin.config and admin.config.get('data_directory'):
return admin.config.get('data_directory')
return "data"
def write(content, destination, binary=False):
mkdir_p(os.path.dirname(destination))
if binary:
f = open(destination, 'bw')
else:
f = open(destination, 'w', encoding='utf-8')
f.write(content)
f.close()
def json_for(object):
return json.dumps(object, sort_keys=True, indent=2, default=format_datetime)
def format_datetime(obj):
  if isinstance(obj, datetime):
return obj.isoformat()
elif isinstance(obj, str):
return obj
else:
return None
# mkdir -p in python, from:
# http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST:
pass
else:
raise
# taken from http://effbot.org/zone/re-sub.htm#unescape-html
def unescape(text):
def remove_unicode_control(str):
remove_re = re.compile('[\x00-\x08\x0B-\x0C\x0E-\x1F\x7F]')
return remove_re.sub('', str)
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return chr(int(text[3:-1], 16))
else:
return chr(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = chr(html.entities.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
text = re.sub("&#?\w+;", fixup, text)
text = remove_unicode_control(text)
return text
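# e.g. unescape("&#65;&amp;") returns "A&" (numeric and named entities are decoded,
# and most ASCII control characters are stripped).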
# 'safe' scrapers listed in safe.yml
def safe_igs():
  return yaml.safe_load(open("safe.yml"))
|
11492648
|
from xwing.network.transport.socket.backend.rfc1078 import send, recv
class Connection:
def __init__(self, loop, sock):
self.loop = loop
self.sock = sock
async def recv(self):
        '''Try to receive data. If no data is received, a NoData
        exception will be raised.
        '''
return await recv(self.loop, self.sock)
async def recv_str(self, encoding='utf-8'):
data = await self.recv()
if encoding:
data = data.decode(encoding)
return data
async def send(self, data):
'''Send data to connected client.
:param data: Data to send.
'''
return await send(self.loop, self.sock, data)
async def send_str(self, data, encoding='utf-8'):
if encoding:
data = bytes(data, encoding)
return await self.send(data)
def close(self):
self.sock.close()
|
11492659
|
import os
import uuid
import boto3
from six.moves import configparser
from botocore.exceptions import ClientError
from .aws_util_exceptions import ProfileParsingError
from .aws_util_exceptions import RoleNotFoundError
from .aws_util_exceptions import AssumeRoleError
def get_aws_account_id(profile=None):
if profile:
session = boto3.Session(profile_name=profile)
client = session.client('sts')
else:
client = boto3.client('sts')
account_id = client.get_caller_identity()['Account']
return account_id
def get_credential_method_description(session):
"""Provides a helpful message describing the current IAM execution context."""
profile = ''
try:
profile = session.profile_name
except:
pass
try:
credentials = session.get_credentials()
return "{} ({}{})".format(
credentials.method,
"profile {} -> ".format(profile) if profile != 'default' else '',
credentials.access_key
)
except:
return 'error describing session credentials'
def get_boto3_session(aws_creds):
if aws_creds:
session_token = None
if 'AWS_SESSION_TOKEN' in aws_creds:
session_token = aws_creds['AWS_SESSION_TOKEN']
return boto3.Session(
aws_access_key_id=aws_creds['AWS_ACCESS_KEY_ID'],
aws_secret_access_key=aws_creds['AWS_SECRET_ACCESS_KEY'],
aws_session_token=session_token,
)
else:
return boto3.Session()
def get_aws_profile_credentials(profile_name, verbose=False):
aws_creds = {}
# the source_profile may be overridden if a source_profile is indicated in ~/.aws/config
source_profile = profile_name
config = configparser.ConfigParser()
config_file_path = os.path.join(os.path.expanduser("~"),'.aws/config')
config.read([config_file_path])
try:
section = 'profile {}'.format(profile_name)
if not config.has_section(section):
msg = "Profile {} not found in {}".format(profile_name, config_file_path)
raise ProfileParsingError(msg)
if not config.has_option(section, 'role_arn'):
if verbose:
print("Profile {} in ~/.aws/config does not indicate a role_arn".format(profile_name))
else:
aws_creds['role_arn'] = config.get(section, 'role_arn')
print("Profile {} indicates role to assume: {}".format(profile_name, aws_creds['role_arn']))
if config.has_option(section, 'source_profile'):
source_profile = config.get(section, 'source_profile')
print("Found profile {} in ~/.aws/config, indicated source_profile {}".format(
profile_name, source_profile
))
else:
msg = "Profile {} in ~/.aws/config does not indicate a source_profile needed to assume role {}".format(
profile_name, aws_creds['role_arn']
)
raise ProfileParsingError(msg)
except configparser.ParsingError:
print('Error parsing AWS config file')
raise
except (configparser.NoSectionError, configparser.NoOptionError):
print('Error parsing sections or options for AWS profile {} in {}'.format(
profile_name,
config_file_path))
raise
credentials = configparser.ConfigParser()
credentials_file_path = os.path.join(os.path.expanduser("~"),'.aws/credentials')
credentials.read([credentials_file_path])
try:
aws_creds['AWS_ACCESS_KEY_ID'] = credentials.get(source_profile, 'aws_access_key_id')
aws_creds['AWS_SECRET_ACCESS_KEY'] = credentials.get(source_profile, 'aws_secret_access_key')
try:
aws_creds['AWS_SESSION_TOKEN'] = credentials.get(source_profile, 'aws_session_token')
except:
pass
if verbose:
print("Found source profile {} in ~/.aws/credentials, access key: {}".format(
source_profile, aws_creds['AWS_ACCESS_KEY_ID']
))
except configparser.ParsingError:
print('Error parsing AWS credentials file')
raise
except (configparser.NoSectionError, configparser.NoOptionError):
print('Unable to find AWS profile named {} in {}'.format(
profile_name,
credentials_file_path))
raise
return aws_creds
def get_role_arn_from_name(aws_creds, role_name, verbose=False):
try:
session = get_boto3_session(aws_creds)
iam_client = session.client('iam')
role_arn = iam_client.get_role(RoleName=role_name)['Role']['Arn']
return role_arn
except ClientError as e:
if verbose:
print(e)
method = get_credential_method_description(session)
if e.response['Error']['Code'] == 'NoSuchEntity':
raise RoleNotFoundError(method, e)
else:
raise AssumeRoleError(method, "Error reading role arn for role name {}: {}".format(role_name, e))
except Exception as e:
if verbose:
print(e)
method = get_credential_method_description(session)
raise AssumeRoleError(method, "Error reading role arn for role name {}: {}".format(role_name, e))
def generate_aws_temp_creds(role_arn, aws_creds=None, verbose=False):
session = get_boto3_session(aws_creds)
sts_client = session.client('sts')
aws_creds = {}
try:
random_session = uuid.uuid4().hex
assumed_role_object = sts_client.assume_role(
RoleArn=role_arn,
RoleSessionName="iamstarter-session-{}".format(random_session),
DurationSeconds=3600 # 1 hour max
)
aws_creds['AWS_ACCESS_KEY_ID'] = assumed_role_object["Credentials"]["AccessKeyId"]
aws_creds['AWS_SECRET_ACCESS_KEY'] = assumed_role_object["Credentials"]["SecretAccessKey"]
aws_creds['AWS_SESSION_TOKEN'] = assumed_role_object["Credentials"]["SessionToken"]
except Exception as e:
if verbose:
print(e)
method = get_credential_method_description(session)
raise AssumeRoleError(method, "Error assuming role {}: {}".format(role_arn, e))
return aws_creds
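# Minimal usage sketch (illustrative; "dev" is a hypothetical profile name in
# ~/.aws/config that assumes a role via a source_profile):
#
#   creds = get_aws_profile_credentials("dev", verbose=True)
#   if "role_arn" in creds:
#       temp_creds = generate_aws_temp_creds(creds["role_arn"], aws_creds=creds)
#       print(temp_creds["AWS_ACCESS_KEY_ID"])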
|
11492668
|
from socket import AF_INET, SOCK_DGRAM, socket
# import requests
import sys
host = "172.16.58.3"
seq_num = sys.argv[1]
method = sys.argv[2]
buf = 2048
# url = "http://localhost:8002/getUDPPort"
# response = requests.get(url)
# data = response.content
# port = int(data.decode('utf-8'))
port = 20000
addr = (host, port)
udp_string = seq_num + "," + method
udp_socket = socket(AF_INET, SOCK_DGRAM)
udp_socket.sendto(udp_string.encode(), addr)
print("Sending %s ..." % udp_string)
udp_socket.close()
|
11492669
|
import sys
sys.path.insert(0, '..')
import unittest
import json
import numpy as np
import torch
from model import DurIAN
from base import suite, BaseModelForwardPassTest
seed = 0
np.random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
class DurIANForwardPassTest(BaseModelForwardPassTest):
def __init__(self, *args, **kwargs):
super(DurIANForwardPassTest, self).__init__(*args, **kwargs)
self.CLASS_TYPE = DurIAN
with open('../configs/default.json') as f:
self.config = json.load(f)
self.config['n_symbols'] = 100
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite([DurIANForwardPassTest]))
|
11492695
|
import copy
import random
import gym
import numpy as np
from gym import spaces
from gym.utils import seeding
class Bit_Flipping_Environment(gym.Env):
environment_name = "Bit Flipping Game"
def __init__(self, environment_dimension=20, deterministic=False):
self.action_space = spaces.Discrete(environment_dimension)
self.observation_space = spaces.Dict(dict(
desired_goal=spaces.Box(0, 1, shape=(environment_dimension,), dtype='float32'),
achieved_goal=spaces.Box(0, 1, shape=(environment_dimension,), dtype='float32'),
observation=spaces.Box(0, 1, shape=(environment_dimension,), dtype='float32'),
))
self.seed()
self.reward_threshold = 0.0
self.trials = 50
self.max_episode_steps = environment_dimension
self.id = "Bit Flipping"
self.environment_dimension = environment_dimension
self.reward_for_achieving_goal = self.environment_dimension
self.step_reward_for_not_achieving_goal = -1
self.deterministic = deterministic
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self):
if not self.deterministic:
self.desired_goal = self.randomly_pick_state_or_goal()
self.state = self.randomly_pick_state_or_goal()
else:
self.desired_goal = [0 for _ in range(self.environment_dimension)]
self.state = [1 for _ in range(self.environment_dimension)]
self.state.extend(self.desired_goal)
self.achieved_goal = self.state[:self.environment_dimension]
self.step_count = 0
return {"observation": np.array(self.state[:self.environment_dimension]), "desired_goal": np.array(self.desired_goal),
"achieved_goal": np.array(self.achieved_goal)}
def randomly_pick_state_or_goal(self):
return [random.randint(0, 1) for _ in range(self.environment_dimension)]
def step(self, action):
"""Conducts the discrete action chosen and updated next_state, reward and done"""
if type(action) is np.ndarray:
action = action[0]
assert action <= self.environment_dimension + 1, "You picked an invalid action"
self.step_count += 1
if action != self.environment_dimension + 1: #otherwise no bit is flipped
self.next_state = copy.copy(self.state)
self.next_state[action] = (self.next_state[action] + 1) % 2
if self.goal_achieved(self.next_state):
self.reward = self.reward_for_achieving_goal
self.done = True
else:
self.reward = self.step_reward_for_not_achieving_goal
if self.step_count >= self.environment_dimension:
self.done = True
else:
self.done = False
self.achieved_goal = self.next_state[:self.environment_dimension]
self.state = self.next_state
return {"observation": np.array(self.next_state[:self.environment_dimension]),
"desired_goal": np.array(self.desired_goal), "achieved_goal": np.array(self.achieved_goal)}, self.reward, self.done, {}
def goal_achieved(self, next_state):
return next_state[:self.environment_dimension] == next_state[-self.environment_dimension:]
def compute_reward(self, achieved_goal, desired_goal, info):
"""Computes the reward we would have got with this achieved goal and desired goal. Must be of this exact
interface to fit with the open AI gym specifications"""
if (achieved_goal == desired_goal).all():
reward = self.reward_for_achieving_goal
else:
reward = self.step_reward_for_not_achieving_goal
return reward
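if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): run one episode
    # with random actions in a small deterministic environment.
    env = Bit_Flipping_Environment(environment_dimension=5, deterministic=True)
    obs = env.reset()
    done, total_reward = False, 0
    while not done:
        obs, reward, done, _ = env.step(env.action_space.sample())
        total_reward += reward
    print("Episode finished with total reward:", total_reward)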
|
11492702
|
import pykd
import re
from common.v_0_0_3.common_utils import *
print('='*10 + ' Start ' + '='*10)
runCmd(r'bc *;g')
#runCmd(r'!idna.tt 4159E80000018') # Abnormal:ieframe!CDownloadWindowItem::_SetState
#runCmd(r'!idna.tt 54BF700000644') # Abnormal:wininet!CommitUrlCacheEntryW
#runCmd(r'!idna.tt CC66400005FC') # Normal: wininet!CommitUrlCacheEntryW
runCmd(r'bp ieframe!CDownloadSecurity::_SendSecurityErrorMessage')
runCmd(r'bp ieframe!CDownloadWindowItem::_SetState')
runCmd(r'bp ieframe!CNotificationBar2::SetFormattedText')
runCmd(r'bp wininet!CommitUrlCacheEntryW')
runCmd(r'bp wininet!CCacheServerContainer::AddUrl')
runCmd(r'bp wininet!CCacheClientContainer::AddUrl')
#runCmd(r'bp rpcrt4!LRPC_BASE_CCALL::SendReceive') # Detail * 0.5
#runCmd(r'bp ntdll!ZwAlpcSendWaitReceivePort') # Detail * 0.5
#runCmd(r'bp rpcrt4!NdrClientCall3') # Detail
#runCmd(r'bp rpcrt4!NdrpClientCall3') # Detail
#runCmd(r'bp rpcrt4!Ndr64pClientUnMarshal') # Detail
#runCmd(r'bp ntdll!memcpy') # Detail *2
runCmd(r'bd *;g-;be *')
runCmdLog(r'bl', False)
while True:
ret = runCmd(r'g')
if ttt_test2end(ret):
pyLog('='*10 + ' End ' + '='*10)
break
runCmd(r'.time')
runCmdLog(r'kL3', False)
#if test2Time('54C4AC0000046'): # Abnormal: Out of wininet!CommitUrlCacheEntryW
#if test2Time('CC79C000003A'): # Normal: Out of wininet!CommitUrlCacheEntryW
# break
ret = runCmd(r'kP')
for line in ret.split('\n'):
if 'eState = DLState' in line \
or 'wchar_t * pwzOriginDownloadUrl = ' in line \
or 'wchar_t * pwzDestinationFilePath = ' in line \
or 'wchar_t * psz' in line:
pyLog(line)
#if 'ieframe!CDownloadSecurity::_SendSecurityErrorMessage' in ret:
# break
ret = runCmd(r'!mex.t')
for line in ret.split('\n'):
if 'webcache_' in line:
pyLog(line)
LOG_FILE.flush()
LOG_FILE.close()
|
11492704
|
import enum
from typing import NamedTuple, Optional
@enum.unique
class ErdAcFanSetting(enum.Enum):
DEFAULT = 0
AUTO = 1
LOW = 2
LOW_AUTO = 3
MED = 4
MED_AUTO = 5
HIGH = 8
HIGH_AUTO = 9
def stringify(self, **kwargs):
return self.name.replace("_"," ").title()
@enum.unique
class ErdAcOperationMode(enum.Enum):
COOL = 0
FAN_ONLY = 1
ENERGY_SAVER = 2
HEAT = 3
DRY = 4
AUTO = 5
DEFAULT = 9
def stringify(self, **kwargs):
return self.name.replace("_"," ").title()
@enum.unique
class ErdAcFilterStatus(enum.Enum):
OK = 0
CLEAN = 1
DEFAULT = -1
def stringify(self, **kwargs):
return self.name.replace("_"," ").title()
def boolify(self) -> Optional[bool]:
return self != ErdAcFilterStatus.OK
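if __name__ == "__main__":
    # Illustrative only: stringify() renders enum names as display text and
    # boolify() reports whether the filter needs attention.
    print(ErdAcFanSetting.LOW_AUTO.stringify())      # -> "Low Auto"
    print(ErdAcOperationMode.FAN_ONLY.stringify())   # -> "Fan Only"
    print(ErdAcFilterStatus.CLEAN.boolify())         # -> True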
|
11492726
|
import unittest, sys
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_common
print "Assumes you ran ../build_for_clone.py in this directory"
print "Using h2o-nodes.json. Also the sandbox dir"
# uses a reduced common class
class releaseTest(h2o_common.ReleaseCommon2, unittest.TestCase):
def test_shutdown(self):
h2o.nodes[0].shutdown_all()
if __name__ == '__main__':
h2o.unit_main()
|
11492728
|
from setuptools import setup, find_packages
with open("README.md") as f:
long_description = f.read()
setup(
name="pytest-pylenium",
description="selenium wrapper for pytest to aid with system testing",
license="Apache Software License 2.0",
author="<NAME>",
url="https://github.com/symonk/pytest-pylenium",
version="0.0.1",
author_email="<EMAIL>",
maintainer="<NAME>",
maintainer_email="<EMAIL>",
packages=find_packages(where="src"),
package_dir={"": "src"},
entry_points={"pytest11": ["pylenium = pylenium.plugin"]},
setup_requires=["setuptools_scm"],
zip_safe=False,
classifiers=[
"Development Status :: 4 - Beta",
"Framework :: Pytest",
"Topic :: Software Development :: Testing",
"Topic :: Software Development :: Quality Assurance",
"Programming Language :: Python :: 3.7",
"Operating System :: OS Independent",
"License :: OSI Approved :: Apache Software License",
],
)
|
11492764
|
from __future__ import print_function
from bs4 import BeautifulSoup
from urllib.request import urlopen
import re
import pandas as pd
import pickle
import threading
import sys
class FOMC (object):
'''
A convenient class for extracting meeting minutes from the FOMC website
Example Usage:
fomc = FOMC()
df = fomc.get_statements()
    fomc.pick_df("./df_minutes.pickle")
'''
def __init__(self, base_url='https://www.federalreserve.gov',
calendar_url='https://www.federalreserve.gov/monetarypolicy/fomccalendars.htm',
historical_date = 2011,
verbose = True,
max_threads = 10):
self.base_url = base_url
self.calendar_url = calendar_url
self.df = None
self.links = None
self.dates = None
self.articles = None
self.verbose = verbose
self.HISTORICAL_DATE = historical_date
self.MAX_THREADS = max_threads
def _get_links(self, from_year):
        '''
        Private function that collects all statement links for FOMC meetings
        from the given from_year up to the most recent year.
        '''
if self.verbose:
print("Getting links...")
self.links = []
fomc_meetings_socket = urlopen(self.calendar_url)
soup = BeautifulSoup(fomc_meetings_socket, 'html.parser')
statements = soup.find_all('a', href=re.compile('^/newsevents/pressreleases/monetary\d{8}a.htm'))
self.links = [statement.attrs['href'] for statement in statements]
if from_year <= self.HISTORICAL_DATE:
for year in range(from_year, self.HISTORICAL_DATE + 1):
fomc_yearly_url = self.base_url + '/monetarypolicy/fomchistorical' + str(year) + '.htm'
fomc_yearly_socket = urlopen(fomc_yearly_url)
soup_yearly = BeautifulSoup(fomc_yearly_socket, 'html.parser')
statements_historical = soup_yearly.findAll('a', text = 'Statement')
for statement_historical in statements_historical:
self.links.append(statement_historical.attrs['href'])
def _date_from_link(self, link):
date = re.findall('[0-9]{8}', link)[0]
if date[4] == '0':
date = "{}/{}/{}".format(date[:4], date[5:6], date[6:])
else:
date = "{}/{}/{}".format(date[:4], date[4:6], date[6:])
return date
def _add_article(self, link, index=None):
        '''
        Adds the article found at one link to the instance variable.
        index is the position in self.articles to write to. Due to concurrent
        processing, we need to make sure the articles are stored in the
        right order.
        '''
if self.verbose:
sys.stdout.write(".")
sys.stdout.flush()
# date of the article content
self.dates.append(self._date_from_link(link))
statement_socket = urlopen(self.base_url + link)
statement = BeautifulSoup(statement_socket, 'html.parser')
paragraphs = statement.findAll('p')
self.articles[index]= "\n\n".join([paragraph.get_text().strip() for paragraph in paragraphs])
def _get_articles_multi_threaded(self):
'''
gets all articles using multi-threading
'''
if self.verbose:
print("Getting articles - Multi-threaded...")
self.dates, self.articles = [], ['']*len(self.links)
jobs = []
# initiate and start threads:
index = 0
while index < len(self.links):
if len(jobs) < self.MAX_THREADS:
t = threading.Thread(target=self._add_article, args=(self.links[index],index,))
jobs.append(t)
t.start()
index += 1
else: # wait for threads to complete and join them back into the main thread
t = jobs.pop(0)
t.join()
for t in jobs:
t.join()
for row in range(len(self.articles)):
self.articles[row] = self.articles[row].strip()
def get_statements(self, from_year=1994):
        '''
        Returns a Pandas DataFrame of meeting statements with the date as the index,
        covering the range from from_year to the most recent meeting.
        from_year only affects the historical archive pages (years at or before the
        historical cutoff, 2011 by default); more recent statements are always taken
        from the current calendar page.
        '''
self._get_links(from_year)
print("There are", len(self.links), 'statements')
self._get_articles_multi_threaded()
self.df = pd.DataFrame(self.articles, index = pd.to_datetime(self.dates)).sort_index()
self.df.columns = ['statements']
return self.df
def pick_df(self, filename="../data/minutes.pickle"):
if filename:
if self.verbose:
print("Writing to", filename)
with open(filename, "wb") as output_file:
pickle.dump(self.df, output_file)
if __name__ == '__main__':
#Example Usage
fomc = FOMC()
df = fomc.get_statements()
    fomc.pick_df("./df_minutes.pickle")
|
11492766
|
import copy
import tensorflow as tf
import numpy as np
from onnx_tf.handlers.backend_handler import BackendHandler
from onnx_tf.handlers.handler import onnx_op
from onnx_tf.handlers.handler import tf_func
@onnx_op("LRN")
@tf_func(tf.nn.lrn)
class LRN(BackendHandler):
@classmethod
def _common(cls, node, **kwargs):
attrs = copy.deepcopy(node.attrs)
alpha = attrs.get("alpha", 1e-4)
attrs.setdefault("beta", 0.75)
size = attrs["size"]
attrs["alpha"] = alpha / size
attrs["depth_radius"] = np.floor([(size - 1) / 2.])[0]
# TODO: LRN in tf accepts radius
# but in ONNX/Caffe accepts diameter.
# This could be a problem.
return [
cls.make_tensor_from_onnx_node(
node, attrs=attrs, c_last_only=True, **kwargs)
]
@classmethod
def version_1(cls, node, **kwargs):
return cls._common(node, **kwargs)
@classmethod
def version_13(cls, node, **kwargs):
return cls._common(node, **kwargs)
|
11492812
|
from .._public.artifactstore.api.ArtifactStoreApi import ArtifactStoreApi
from .._public.modeldb.api.CommentApi import CommentApi
from .._public.modeldb.api.DatasetServiceApi import DatasetServiceApi
from .._public.modeldb.api.DatasetVersionServiceApi import DatasetVersionServiceApi
from .._public.modeldb.api.ExperimentRunServiceApi import ExperimentRunServiceApi
from .._public.modeldb.api.ExperimentServiceApi import ExperimentServiceApi
from .._public.modeldb.api.HydratedServiceApi import HydratedServiceApi
from .._public.modeldb.api.JobApi import JobApi
from .._public.modeldb.api.LineageApi import LineageApi
from .._public.modeldb.api.ProjectServiceApi import ProjectServiceApi
class ClientSet(object):
def __init__(self, client):
self.client = client
self.artifactStoreService = ArtifactStoreApi(client, "/api/v1/modeldb")
self.commentService = CommentApi(client, "/api/v1/modeldb")
self.datasetService = DatasetServiceApi(client, "/api/v1/modeldb")
self.datasetVersionService = DatasetVersionServiceApi(client, "/api/v1/modeldb")
self.experimentRunService = ExperimentRunServiceApi(client, "/api/v1/modeldb")
self.experimentService = ExperimentServiceApi(client, "/api/v1/modeldb")
self.hydratedService = HydratedServiceApi(client, "/api/v1/modeldb")
self.jobService = JobApi(client, "/api/v1/modeldb")
self.lineageService = LineageApi(client, "/api/v1/modeldb")
self.projectService = ProjectServiceApi(client, "/api/v1/modeldb")
|
11492818
|
from ghizmo.commands import lib
import os
import yaml
import urllib.parse
from collections import defaultdict
_AUTHORS_INFO_FILES = ["authors-info.yml", "authors-info.json", "admin/authors-info.yml", "admin/authors-info.json"]
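# Illustrative shape of an authors-info.yml file (keys inferred from the code
# below; all values here are hypothetical):
#
#   header: "Thanks to everyone who has contributed."
#   footer: "See the contributing guide for how to get involved."
#   exclude: ["some-bot-account"]
#   roles:
#     some-login: "project lead"
#   groups:
#     - name: "Leads"
#       members: ["some-login"]
#     - name: "Contributors"   # a group without members collects everyone else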
def assemble_authors(config, args):
"""
    Assemble a list of authors as an AUTHORS.md file based on GitHub repo history and an
    authors-info.{yml,json} file. Supports roles (for each person) and groups of people
(leads, contributors, etc.).
"""
github = config.github
repo = config.repo
authors_info_filename = None
for filename in _AUTHORS_INFO_FILES:
if os.path.isfile(filename):
authors_info_filename = filename
header = None
footer = None
groups = []
exclude = []
# Assemble roles, keyed by login.
roles = {}
if authors_info_filename:
yield lib.status("Info from: %s" % authors_info_filename)
with open(authors_info_filename, "r", encoding="utf-8") as f:
info = yaml.safe_load(f)
header = info.get("header")
footer = info.get("footer")
roles = info.get("roles")
groups = info.get("groups")
exclude = info.get("exclude")
yield {"roles": roles, "groups": groups}
else:
yield lib.status("No roles file")
login_to_user = {}
for contributor in repo.contributors():
user = github.user(contributor.login)
login_to_user[user.login] = user
    # If any roles are listed but were somehow missing from the contributors returned by the API
    # (for example the commits weren't linked up to the account properly), include them too.
contributors_found = {contributor.login for contributor in repo.contributors()}
unknown_contributors = []
for login in roles:
if login not in contributors_found:
user = github.user(login)
if user:
yield lib.status("Author has a role but is not returned by GitHub as a contributor: %s (%s)" % (login, user))
else:
yield lib.status("Author has a role but is not a contributor or a known user: %s [%s]" % (user, type(user)))
unknown_contributors.append(login)
login_to_user[login] = user
yield lib.status("Found %s authors" % len(login_to_user))
yield lib.status("Found without GitHub user info: %s" % unknown_contributors)
# Get a list of each group of logins.
grouped_authors = [[] for g in groups]
default_group = None
assigned_logins = set()
for count, group in enumerate(groups):
yield lib.status("Group %s: %s" % (count, group))
group["number"] = count
if "members" in group:
members = group["members"]
assert isinstance(members, list)
lib.status("members %s" % members)
for login in members:
grouped_authors[count].append(login)
assigned_logins.add(login)
else:
yield lib.status("group has no members: %s %s %s" % (count, group, grouped_authors[count]))
default_group = grouped_authors[count]
yield {"all_logins": list(login_to_user.keys()), "assigned_logins": list(assigned_logins)}
# Put all unassigned logins into last group.
assert default_group is not None, "must have a group with no explicit members for unassigned contributors"
default_group.extend(set(login_to_user.keys()).difference(assigned_logins))
# Sort each group alphabetically by login.
for logins in grouped_authors:
logins.sort(key=lambda login: login.lower())
def format_user(login, name):
if login and name:
return "%s (%s)" % (name, login)
elif login:
return login
else:
raise ValueError("Missing login name")
commit_tallies = {}
for stat in repo.contributor_statistics():
yield lib.status("contrib stat: login '%s' total '%s'" % (stat.author.login, stat.total))
commit_tallies[stat.author.login] = stat.total
yield lib.status("Read %s contributor stats" % len(commit_tallies))
issue_tallies = defaultdict(int)
for issue in repo.issues(state="all"):
issue_tallies[issue.user.login] += 1
yield lib.status("Read %s issues/PRs" % len(issue_tallies))
yield {"commit_tallies": commit_tallies, "issue_tallies": issue_tallies}
with open("AUTHORS.md", "w", encoding="utf-8") as f:
f.write("# Authors\n\n")
if header:
f.write("%s\n\n" % header)
for group_number, logins in enumerate(grouped_authors):
f.write("\n*%s*\n\n" % groups[group_number]["name"])
for login in logins:
user = login_to_user.get(login)
role = roles.get(login)
name = user.name if user else None
if login in exclude:
continue
user_url = "https://github.com/%s" % login if user else None
# Link to commits by that author
commits_count = commit_tallies.get(login, 0)
commits_url = "%s/commits?author=%s" % (repo.html_url, urllib.parse.quote_plus(login))
# Link to issues and PRs by that author.
issues_count = issue_tallies.get(login, 0)
issues_url = "%s/issues?q=%s" % (repo.html_url, urllib.parse.quote_plus("author:%s" % login))
yield lib.status("login '%s' commits %s issues %s" % (login, commits_count, issues_count))
user_link = format_user(login, name)
if user_url:
user_link = "[%s](%s)" % (user_link, user_url)
f.write("* %s" % user_link)
if commits_count or issues_count:
f.write(" — [%s+](%s)/[%s+](%s)" % (commits_count, commits_url, issues_count, issues_url))
if role:
f.write(" — _%s_" % role)
f.write("\n")
if footer:
f.write("\n%s\n\n" % footer)
f.write("\n(This file was auto-generated by [ghizmo assemble-authors](https://github.com/jlevy/ghizmo).)")
|
11492819
|
from typing import Any
from typing import Union
import numpy as np
__all__ = ["Face", "Integer", "Float", "Complex", "Bool", "Void"]
Face = Any
_NumpyInt = Union[
np.int_,
np.intc,
np.intp,
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
]
_NumpyFloat = Union[np.float_, np.float16, np.float32, np.float64]
_NumpyComplex = Union[np.complex_, np.complex64, np.complex128]
Integer = Union[int, _NumpyInt]
Float = Union[float, _NumpyFloat]
Complex = Union[complex, _NumpyComplex]
Bool = Union[bool, np.bool_]
Void = Union[None, np.void]
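# Illustrative use (not part of the original module): the aliases are intended
# for annotations that accept both built-in and NumPy scalars, e.g.
#
#   def scale(value: Float, factor: Integer = 2) -> Float:
#       return value * factor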
|
11492853
|
import pandas as pd
def compute_quotient_metrics(filename,
index_col=0,
resample_period='1M',
shift=0,
quotient_metrics=[('Volume', 'max', 'median'),
('Close', 'max', 'min')]):
"""Compute quotient metrics from CSV data.
    Quotient metrics are given in the form: (Name, Num, Den), where Name is
the corresponding column from the CSV file to be measured, Num is the
metric to be used as the numerator, and Den is the metric to be used as
the denominator.
    The column specified by index_col is assumed to contain the dates, which
    are then used to bin each metric over the resample period. By default, the
metrics are computed for the same time period, but this can be changed by
setting the shift value. For example, setting shift=1 will cause the
metric computed for February to be divided by the metric computed in
January.
For example:
compute_quotient_metrics(filename, index_col=0, resample_period='1M',
shift=0, quotient_metrics = [('Volume', 'max',
'median'), ('Close', 'max', 'min')]):
computes for each month in the data, the maximum volume divided by the
median volume, and the maximum close price over the minimum close price.
The output metrics can be further reduced by computing the maximum monthly
metric.
See pandas.series.resample for examples of valid resample periods and
metrics.
"""
data = pd.read_csv(filename, index_col=index_col, parse_dates=True)
def compute_quotient_metric(name, num_metric, den_metric):
series = pd.TimeSeries(data[name])
num_period = series.resample(resample_period, how=num_metric)
den_period = series.resample(resample_period, how=den_metric)
return num_period[shift:]/den_period[:len(num_period)-shift].values
return [compute_quotient_metric(*qm) for qm in quotient_metrics]
def compute_quotient_metric(data, resample_period, shift, name, num_metric, den_metric):
series = pd.TimeSeries(data[name])
num_period = series.resample(resample_period, how=num_metric)
den_period = series.resample(resample_period, how=den_metric)
return num_period[shift:]/den_period[:len(num_period)-shift].values
if __name__ == '__main__':
v, p = compute_quotient_metrics('plpl.csv', shift=1)
print "Maximum Monthly Volume Max/Median for PLPL : %6.6g" % v.max()
print "Maximum Monthly Close Price Max/Min for PLPL: %6.6g" % p.max()
|
11492872
|
from datetime import datetime
class Field(object):
to_py = lambda self, value: value
to_linode = to_py
def __init__(self, field):
self.field = field
class IntField(Field):
def to_py(self, value):
if value is not None and value != '':
return int(value)
to_linode = to_py
class FloatField(Field):
def to_py(self, value):
if value is not None:
return float(value)
to_linode = to_py
class CharField(Field):
to_py = lambda self, value: str(value)
to_linode = to_py
class BoolField(Field):
def to_py(self, value):
if value in (1, '1'): return True
else: return False
def to_linode(self, value):
if value: return 1
else: return 0
class ChoiceField(Field):
to_py = lambda self, value: value
def __init__(self, field, choices=[]):
Field.__init__(self, field)
self.choices = choices
def to_linode(self, value):
if value in self.choices:
return value
else:
raise AttributeError
class ListField(Field):
def __init__(self, field, type=Field(''), delim=','):
Field.__init__(self, field)
self.__type=type
self.__delim=delim
def to_linode(self, value):
return self.__delim.join([str(self.__type.to_linode(v)) for v in value])
def to_py(self, value):
return [self.__type.to_py(v) for v in value.split(self.__delim) if v != '']
class DateTimeField(Field):
to_py = lambda self, value: datetime.strptime(value, '%Y-%m-%d %H:%M:%S.0')
to_linode = lambda self, value: value.strftime('%Y-%m-%d %H:%M:%S.0')
class ForeignField(Field):
def __init__(self, field):
self.field = field.primary_key
self.__model = field
def to_py(self, value):
return self.__model.get(id=value)
def to_linode(self, value):
if isinstance(value, int):
return value
else:
return value.id
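if __name__ == '__main__':
    # Minimal round-trip sketch (illustrative): converting between API values
    # and Python values with a few of the field types above.
    print(BoolField('enabled').to_py('1'))                        # -> True
    print(BoolField('enabled').to_linode(True))                   # -> 1
    print(ListField('ids', type=IntField('id')).to_py('1,2,3'))   # -> [1, 2, 3]
    print(DateTimeField('created').to_py('2020-01-02 03:04:05.0'))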
|
11492919
|
import os
import numpy as np
import PIL
import cv2
import tifffile
from scipy.signal import convolve2d
import merlin
from merlin.core import dataset
from merlin.data import codebook as cb
class MERFISHDataFactory(object):
"""
A class for simulating MERFISH data sets.
"""
def __init__(self):
self.codebookPath = 'L26E1.csv'
self.psfSigma = 1.2
self.imageSize = np.array([1024, 1024])
self.upsampleFactor = 10
self.fluorophoreBrightness = 1000
self.fiducialBrightness = 10000
self.background = 100
self.bitOrganization = [[0, 1], [0, 0], [1, 0], [1, 1],
[2, 1], [2, 0], [3, 1], [3, 0], [4, 0], [4, 1],
[5, 1], [5, 0], [6, 1], [6, 0], [7, 0], [7, 1]]
def simulate_image(self, spotPositions: np.ndarray=None,
addNoise: bool=False) -> np.ndarray:
"""Simulate a single image consisting of point sources with a Gaussian
point spread function
Args:
spotPositions: a n x 2 numpy array containing the positions to
simulate the point sources. If not specified, 1000 random
positions are selected.
addNoise: flag indicating whether poisson noise should be added
to the simulated image.
Returns:
the simulated image
"""
if spotPositions is None:
spotPositions = np.random.uniform(size=(1000, 2))
spotPositions[:, 0] *= self.imageSize[0]
spotPositions[:, 1] *= self.imageSize[1]
upsampledImage = np.zeros(self.upsampleFactor*self.imageSize)
for p in spotPositions:
upsampledImage[int(np.floor(p[0]*self.upsampleFactor)),
int(np.floor(p[1]*self.upsampleFactor))] += 1000
return self._downsample_image_stack([upsampledImage],
addNoise=addNoise)[0]
def simulate_dataset(self, datasetName, abundanceScale=1,
fluorophoreCount=5, fovCount=10):
"""Simulate a full MERFISH dataset"""
dataDir = os.sep.join([merlin.DATA_HOME, datasetName])
if not os.path.exists(dataDir):
os.mkdir(dataDir)
simDataset = dataset.DataSet(datasetName)
codebook = cb.Codebook(simDataset, self.codebookPath)
barcodeNumber = codebook.get_barcode_count()
barcodeAbundances = abundanceScale*np.array(
[10**np.random.uniform(3) for i in range(barcodeNumber)])
barcodeAbundances[:10] = 0
for i in range(fovCount):
merfishImages, rnaPositions = self._simulate_single_fov(
codebook, barcodeAbundances, fluorophoreCount)
fiducialImage = self._simulate_fiducial_image()
tifffile.imsave(
os.sep.join([dataDir, 'full_stack_' + str(i) + '.tiff']),
merfishImages.astype(np.uint16))
imageCount = np.max([x[0] for x in self.bitOrganization]) + 1
for j in range(imageCount):
fileName = 'Conventional_750_650_561_488_405_' + str(i) + \
'_' + str(j) + '.tiff'
filePath = os.sep.join([dataDir, fileName])
imageData = np.zeros(
shape=(5, *self.imageSize), dtype=np.uint16)
firstBitIndex = [i for i,x in enumerate(self.bitOrganization) \
if x[0] == j and x[1] == 0][0]
secondBitIndex = [i for i,x in enumerate(self.bitOrganization) \
if x[0] == j and x[1] == 1][0]
imageData[0,:,:] = merfishImages[firstBitIndex]
imageData[1,:,:] = merfishImages[secondBitIndex]
imageData[2,:,:] = fiducialImage
tifffile.imsave(filePath, imageData)
np.save(os.sep.join(
[dataDir, 'true_positions_' + str(i) + '.npy']), rnaPositions)
def _simulate_fiducial_image(self):
fiducialPositions = np.random.uniform(size=(1000,2))
upsampledFiducials = self.fiducialBrightness*np.histogram2d(
fiducialPositions[:,0]*self.imageSize[0],
fiducialPositions[:,1]*self.imageSize[1],
bins=self.upsampleFactor*self.imageSize)[0]
return self._downsample_image_stack([upsampledFiducials])[0]
def _simulate_single_fov(self, codebook, barcodeAbundances,
fluorophoreCount):
barcodeCount = len(barcodeAbundances)
bitNumber = codebook.get_bit_count()
imageSize = self.imageSize
rnaCounts = np.random.poisson(barcodeAbundances)
rnaPositions = [np.random.uniform(size=(c, 2)) for c in rnaCounts]
for b in range(barcodeCount):
rnaPositions[b][:, 0] *= imageSize[0]
rnaPositions[b][:, 1] *= imageSize[1]
upsampledStack = np.zeros((bitNumber, *self.upsampleFactor*imageSize))
for b in range(barcodeCount):
self._add_spots_for_barcode(
codebook.get_barcode(b), rnaPositions[b], fluorophoreCount,
upsampledStack)
imageStack = self._downsample_image_stack(upsampledStack)
return imageStack, rnaPositions
def _add_spots_for_barcode(self, barcode, positions, fluorophoreCount,
upsampledStack):
upsampledImage = np.zeros(self.upsampleFactor*self.imageSize)
for p in positions:
upsampledImage[int(np.floor(p[0]*self.upsampleFactor)), \
int(np.floor(p[1]*self.upsampleFactor))] += 1
upsampledImage = self.fluorophoreBrightness*np.random.poisson(
upsampledImage*fluorophoreCount)
for i in np.where(barcode)[0]:
np.add(upsampledStack[i], upsampledImage, out=upsampledStack[i])
def _downsample_image_stack(self, upsampledStack, addNoise=True):
imageStack = np.zeros((len(upsampledStack), *self.imageSize))
for i in range(len(imageStack)):
blurredImage = cv2.GaussianBlur(upsampledStack[i].astype(float),
ksize=(51, 51), sigmaX=self.upsampleFactor*self.psfSigma)
downsampledImage = np.array(PIL.Image.fromarray(
convolve2d(blurredImage,
np.ones((self.upsampleFactor, self.upsampleFactor))))\
.resize(self.imageSize, PIL.Image.BILINEAR))
if addNoise:
imageStack[i] = np.random.poisson(
downsampledImage + self.background)
else:
imageStack[i] = downsampledImage + self.background
return imageStack
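# Minimal usage sketch (illustrative; the default 1024x1024 image at 10x
# upsampling is large, so this is slow):
#
#   factory = MERFISHDataFactory()
#   image = factory.simulate_image(addNoise=True)   # single 1024 x 1024 frame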
|
11492939
|
def helper(lst1,lst2,n,x):
    # Greedy feasibility check: pair the largest elements of lst1 with the
    # smallest elements of lst2 and verify every pair sums to at most x.
    lst1.sort(reverse=True)
    lst2.sort()
    for i in range(0,n):
        if(lst1[i] + lst2[i] <= x):
            continue
        else:
            return False
    return True
t = int(input())
while t > 0:
n,x = map(int,input().split())
lst1 = list(map(int,input().split()))
lst2 = list(map(int,input().split()))
if(helper(lst1,lst2,n,x)):
print("YES")
else:
print("NO")
t-=1
|
11492957
|
import pygame as pg
from Entity import Entity
from Const import *
class Goombas(Entity):
def __init__(self, x_pos, y_pos, move_direction):
super().__init__()
self.rect = pg.Rect(x_pos, y_pos, 32, 32)
if move_direction:
self.x_vel = 1
else:
self.x_vel = -1
self.crushed = False
self.current_image = 0
self.image_tick = 0
self.images = [
pg.image.load('images/goombas_0.png').convert_alpha(),
pg.image.load('images/goombas_1.png').convert_alpha(),
pg.image.load('images/goombas_dead.png').convert_alpha()
]
self.images.append(pg.transform.flip(self.images[0], 0, 180))
def die(self, core, instantly, crushed):
if not instantly:
core.get_map().get_player().add_score(core.get_map().score_for_killing_mob)
core.get_map().spawn_score_text(self.rect.x + 16, self.rect.y)
if crushed:
self.crushed = True
self.image_tick = 0
self.current_image = 2
self.state = -1
core.get_sound().play('kill_mob', 0, 0.5)
self.collision = False
else:
self.y_vel = -4
self.current_image = 3
core.get_sound().play('shot', 0, 0.5)
self.state = -1
self.collision = False
else:
core.get_map().get_mobs().remove(self)
def check_collision_with_player(self, core):
if self.collision:
if self.rect.colliderect(core.get_map().get_player().rect):
if self.state != -1:
if core.get_map().get_player().y_vel > 0:
self.die(core, instantly=False, crushed=True)
core.get_map().get_player().reset_jump()
core.get_map().get_player().jump_on_mob()
else:
if not core.get_map().get_player().unkillable:
core.get_map().get_player().set_powerlvl(0, core)
def update_image(self):
self.image_tick += 1
if self.image_tick == 14:
self.current_image = 1
elif self.image_tick == 28:
self.current_image = 0
self.image_tick = 0
def update(self, core):
if self.state == 0:
self.update_image()
if not self.on_ground:
self.y_vel += GRAVITY
blocks = core.get_map().get_blocks_for_collision(int(self.rect.x // 32), int(self.rect.y // 32))
self.update_x_pos(blocks)
self.update_y_pos(blocks)
self.check_map_borders(core)
elif self.state == -1:
if self.crushed:
self.image_tick += 1
if self.image_tick == 50:
core.get_map().get_mobs().remove(self)
else:
self.y_vel += GRAVITY
self.rect.y += self.y_vel
self.check_map_borders(core)
def render(self, core):
core.screen.blit(self.images[self.current_image], core.get_map().get_camera().apply(self))
|
11492987
|
import sys, pytest
sys.path.append("../")
LIST_ONE = ["a", "b", "c", "d", "e"]
LIST_TWO = ["v", "d", "s", "t", "z"]
print(LIST_ONE)
print(LIST_TWO)
from tkinter import OptionMenu
from appJar import gui
def get(btn):
a = app.getOptionBoxWidget("l1")
b = app.getOptionBoxWidget("l2")
c = app.getOptionBoxWidget("l3")
print("VAR ARRAY: ", app.n_optionVars["l1"], app.n_optionVars["l2"], app.n_optionVars["l2"])
print("LINKED VARS: ", a.var, b.var, c.var)
print("ORIG GET: ", app.getOptionBox("l1"), app.getOptionBox("l2"))
print("LINKED VARS: ", a.var.get(), b.var.get())
print("STORED VARS: ", app.n_optionVars["l1"].get(), app.n_optionVars["l2"].get())
def test0(btn=None):
print(LIST_ONE[0], LIST_TWO[0])
assert app.getOptionBox("l1") == LIST_ONE[0]
assert app.getOptionBox("l2") == LIST_TWO[0]
def test1(btn=None):
print(LIST_ONE[0], LIST_TWO[0])
obs = app.getAllOptionBoxes()
assert obs["l1"] == LIST_ONE[0]
assert obs["l2"] == LIST_TWO[0]
def test2(btn=None):
# select new items - by position
app.setOptionBox("l1", 3)
app.setOptionBox("l2", 2)
print( LIST_ONE[3], LIST_TWO[2])
assert app.getOptionBox("l1") == LIST_ONE[3]
assert app.getOptionBox("l2") == LIST_TWO[2]
def test3(btn=None):
app.clearOptionBox("l1")
print( LIST_ONE[0], LIST_TWO[2])
assert app.getOptionBox("l1") == LIST_ONE[0]
assert app.getOptionBox("l2") == LIST_TWO[2]
def test4(btn=None):
app.setOptionBox("l1", 2)
app.clearAllOptionBoxes()
print( LIST_ONE[0], LIST_TWO[0])
assert app.getOptionBox("l1") == LIST_ONE[0]
assert app.getOptionBox("l2") == LIST_TWO[0]
def test5(btn=None):
# select new items - by position
app.setOptionBox("l1", 2)
app.setOptionBox("l2", 3)
def test6(btn=None):
# select new items - by value
app.setOptionBox("l1", LIST_ONE[3])
app.setOptionBox("l2", LIST_TWO[1])
app.renameOptionBoxItem("l2", LIST_TWO[0], "newName")
assert app.getOptionBox("l1") == LIST_ONE[3]
with gui() as app:
print("\tTesting options")
# add two option boxes
assert isinstance(app.addOptionBox("l1", LIST_ONE), OptionMenu)
app.addOptionBox("l2", LIST_TWO)
with pytest.raises(Exception):
app.addOptionBox("l2", LIST_TWO)
app.addOptionBox("l3", LIST_TWO)
app.addButtons(["0", "1", "2", "3", "4", "5", "6"], [test0, test1, test2, test3, test4, test5, test6])
app.addButton("get", get)
|
11493020
|
import unittest
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from PySeismoSoil.class_ground_motion import Ground_Motion as GM
from PySeismoSoil.class_Vs_profile import Vs_Profile
from PySeismoSoil.class_frequency_spectrum import Frequency_Spectrum
import os
from os.path import join as _join
f_dir = _join(os.path.dirname(os.path.realpath(__file__)), 'files')
class Test_Class_Ground_Motion(unittest.TestCase):
def test_loading_data__two_columns_from_file(self):
# Two columns from file
gm = GM(_join(f_dir, 'sample_accel.txt'), unit='gal')
PGA_benchmark = 294.30 # unit: cm/s/s
PGV_benchmark = 31.46 # unit: cm/s
PGD_benchmark = 38.77 # unit: cm
tol = 1e-2
self.assertAlmostEqual(gm.pga_in_gal, PGA_benchmark, delta=tol)
self.assertAlmostEqual(gm.pgv_in_cm_s, PGV_benchmark, delta=tol)
self.assertAlmostEqual(gm.pgd_in_cm, PGD_benchmark, delta=tol)
self.assertAlmostEqual(gm.peak_Arias_Intensity, 1.524, delta=tol)
self.assertAlmostEqual(gm.rms_accel, 0.4645, delta=tol)
def test_loading_data__two_columns_from_numpy_array(self):
# Two columns from numpy array
gm = GM(np.array([[0.1, 0.2, 0.3, 0.4], [1, 2, 3, 4]]).T, unit='m/s/s')
self.assertAlmostEqual(gm.pga, 4)
def test_loading_data__one_column_from_file(self):
# One column from file
gm = GM(_join(f_dir, 'one_column_data_example.txt'), unit='g', dt=0.2)
self.assertAlmostEqual(gm.pga_in_g, 12.0)
def test_loading_data__one_column_from_numpy_array(self):
# One column from numpy array
gm = GM(np.array([1, 2, 3, 4, 5]), unit='gal', dt=0.1)
self.assertAlmostEqual(gm.pga_in_gal, 5.0)
def test_loading_data__one_column_without_specifying_dt(self):
# One column without specifying dt
error_msg = 'is needed for one-column `data`.'
with self.assertRaisesRegex(ValueError, error_msg):
gm = GM(np.array([1, 2, 3, 4, 5]), unit='gal')
def test_loading_data__test_invalid_unit_names(self):
# Test invalid unit names
with self.assertRaisesRegex(ValueError, 'Invalid `unit` name.'):
GM(np.array([1, 2, 3, 4, 5]), unit='test', dt=0.1)
with self.assertRaisesRegex(ValueError, r"use '/s/s' instead of 's\^2'"):
GM(np.array([1, 2, 3, 4, 5]), unit='m/s^2', dt=0.1)
def test_differentiation(self):
veloc = np.array([[.1, .2, .3, .4, .5, .6], [1, 3, 7, -1, -3, 5]]).T
gm = GM(veloc, unit='m', motion_type='veloc')
accel_benchmark = np.array(
[[.1, .2, .3, .4, .5, .6],
[0, 20, 40, -80, -20, 80]]
).T
self.assertTrue(np.allclose(gm.accel, accel_benchmark))
def test_integration__artificial_example(self):
gm = GM(_join(f_dir, 'two_column_data_example.txt'), unit='m/s/s')
v_bench = np.array([[0.1000, 0.1000], # from MATLAB
[0.2000, 0.3000],
[0.3000, 0.6000],
[0.4000, 1.0000],
[0.5000, 1.5000],
[0.6000, 1.7000],
[0.7000, 2.0000],
[0.8000, 2.4000],
[0.9000, 2.9000],
[1.0000, 3.5000],
[1.1000, 3.8000],
[1.2000, 4.2000],
[1.3000, 4.7000],
[1.4000, 5.3000],
[1.5000, 6.0000]])
u_bench = np.array([[0.1000, 0.0100], # from MATLAB
[0.2000, 0.0400],
[0.3000, 0.1000],
[0.4000, 0.2000],
[0.5000, 0.3500],
[0.6000, 0.5200],
[0.7000, 0.7200],
[0.8000, 0.9600],
[0.9000, 1.2500],
[1.0000, 1.6000],
[1.1000, 1.9800],
[1.2000, 2.4000],
[1.3000, 2.8700],
[1.4000, 3.4000],
[1.5000, 4.0000]])
self.assertTrue(np.allclose(gm.veloc, v_bench))
self.assertTrue(np.allclose(gm.displ, u_bench))
def test_integration__real_world_example(self):
# Note: In this test, the result by cumulative trapezoidal numerical
# integration is used as the benchmark. Since it is infeasible to
# achieve perfect "alignment" between the two time histories,
# we check the correlation coefficient instead of element-wise
# check.
veloc_ = np.genfromtxt(_join(f_dir, 'sample_accel.txt'))
gm = GM(veloc_, unit='m/s', motion_type='veloc')
displ = gm.displ[:, 1]
displ_cumtrapz = np.append(0, sp.integrate.cumtrapz(veloc_[:, 1], dx=gm.dt))
        r = np.corrcoef(displ_cumtrapz, displ)[0, 1]  # cross-correlation
self.assertTrue(r >= 0.999)
def test_fourier_transform(self):
gm = GM(_join(f_dir, 'two_column_data_example.txt'), unit='m/s/s')
freq, spec = gm.get_Fourier_spectrum(real_val=False).raw_data.T
freq_bench = [
0.6667, 1.3333, 2.0000, 2.6667, 3.3333, 4.0000, 4.6667, 5.3333,
]
FS_bench = [
60.0000 + 0.0000j, -1.5000 + 7.0569j, -1.5000 + 3.3691j,
-7.5000 +10.3229j, -1.5000 + 1.3506j, -1.5000 + 0.8660j,
-7.5000 + 2.4369j, -1.5000 + 0.1577j,
]
self.assertTrue(np.allclose(freq, freq_bench, atol=0.0001, rtol=0.0))
self.assertTrue(np.allclose(spec, FS_bench, atol=0.0001, rtol=0.0))
def test_baseline_correction(self):
gm = GM(_join(f_dir, 'sample_accel.txt'), unit='m/s/s')
corrected = gm.baseline_correct(show_fig=True)
self.assertTrue(isinstance(corrected, GM))
def test_high_pass_filter(self):
gm = GM(_join(f_dir, 'sample_accel.txt'), unit='m')
hp = gm.highpass(cutoff_freq=1.0, show_fig=True)
self.assertTrue(isinstance(hp, GM))
def test_low_pass_filter(self):
gm = GM(_join(f_dir, 'sample_accel.txt'), unit='m')
lp = gm.lowpass(cutoff_freq=1.0, show_fig=True)
self.assertTrue(isinstance(lp, GM))
def test_band_pass_filter(self):
gm = GM(_join(f_dir, 'sample_accel.txt'), unit='m')
bp = gm.bandpass(cutoff_freq=[0.5, 8], show_fig=True)
self.assertTrue(isinstance(bp, GM))
def test_band_stop_filter(self):
gm = GM(_join(f_dir, 'sample_accel.txt'), unit='m')
bs = gm.bandstop(cutoff_freq=[0.5, 8], show_fig=True)
self.assertTrue(isinstance(bs, GM))
def test_amplify_via_profile(self):
gm = GM(_join(f_dir, 'sample_accel.txt'), unit='m')
vs_prof = Vs_Profile(_join(f_dir, 'profile_FKSH14.txt'))
output_motion = gm.amplify(vs_prof, boundary='elastic')
self.assertTrue(isinstance(output_motion, GM))
def test_deconvolution(self):
# Assert `deconvolve()` & `amplify()` are reverse operations to each other.
gm = GM(_join(f_dir, 'sample_accel.txt'), unit='m')
vs_prof = Vs_Profile(_join(f_dir, 'profile_FKSH14.txt'))
for boundary in ['elastic', 'rigid']:
deconv_motion = gm.deconvolve(vs_prof, boundary=boundary)
output_motion = deconv_motion.amplify(vs_prof, boundary=boundary)
self.assertTrue(self.nearly_identical(gm.accel, output_motion.accel))
amplified_motion = gm.amplify(vs_prof, boundary=boundary)
output_motion = amplified_motion.deconvolve(vs_prof, boundary=boundary)
self.assertTrue(self.nearly_identical(gm.accel, output_motion.accel))
def test_plot(self):
filename = _join(f_dir, 'sample_accel.txt')
gm = GM(filename, unit='m')
fig, axes = gm.plot() # automatically generate fig/ax objects
self.assertTrue(isinstance(axes, tuple))
self.assertEqual(len(axes), 3)
self.assertEqual(axes[0].title.get_text(), os.path.split(filename)[1])
fig2 = plt.figure(figsize=(8, 8))
fig2_, axes = gm.plot(fig=fig2) # feed an external figure object
self.assertTrue(np.allclose(fig2_.get_size_inches(), (8, 8)))
def test_unit_convert(self):
data = np.array([1, 3, 7, -2, -10, 0])
gm = GM(data, unit='m', dt=0.1)
accel = gm.accel[:, 1]
accel_in_m = gm._unit_convert(unit='m/s/s')[:, 1]
accel_in_gal = gm._unit_convert(unit='gal')[:, 1]
accel_in_g = gm._unit_convert(unit='g')[:, 1]
self.assertTrue(np.allclose(accel_in_m, accel))
self.assertTrue(np.allclose(accel_in_gal, accel * 100))
self.assertTrue(np.allclose(accel_in_g, accel / 9.81))
def test_scale_motion(self):
data = np.array([1, 3, 7, -2, -10, 0])
gm = GM(data, unit='g', dt=0.1)
gm_scaled_1 = gm.scale_motion(factor=2.0) # scale by 2.0
gm_scaled_2 = gm.scale_motion(target_PGA_in_g=5.0) # scale by 0.5
self.assertTrue(np.allclose(gm.accel[:, 1] * 2, gm_scaled_1.accel[:, 1]))
self.assertTrue(np.allclose(gm.accel[:, 1] * 0.5, gm_scaled_2.accel[:, 1]))
def test_amplify_by_tf__case_1_an_artificial_transfer_function(self):
gm = GM(_join(f_dir, 'sample_accel.txt'), unit='gal')
ratio_benchmark = 2.76
freq = np.arange(0.01, 50, step=0.01)
tf = ratio_benchmark * np.ones_like(freq)
transfer_function = Frequency_Spectrum(np.column_stack((freq, tf)))
new_gm = gm.amplify_by_tf(transfer_function, show_fig=False)
ratio = new_gm.accel[:, 1] / gm.accel[:, 1]
self.assertTrue(np.allclose(ratio, ratio_benchmark))
def test_amplify_by_tf__case_2_a_transfer_function_from_a_Vs_profile(self):
gm = GM(_join(f_dir, 'sample_accel.txt'), unit='gal')
vs_prof = Vs_Profile(_join(f_dir, 'profile_FKSH14.txt'))
tf_RO, tf_BH, _ = vs_prof.get_transfer_function()
gm_with_tf_RO = gm.amplify_by_tf(tf_RO)
gm_with_tf_BH = gm.amplify_by_tf(tf_BH)
gm_with_tf_RO_ = gm.amplify(vs_prof, boundary='elastic')
gm_with_tf_BH_ = gm.amplify(vs_prof, boundary='rigid')
# Assert that `amplify_by_tf()` and `amplify()` can generate
# nearly identical results
self.assertTrue(
self.nearly_identical(gm_with_tf_RO.accel, gm_with_tf_RO_.accel)
)
self.assertTrue(
self.nearly_identical(gm_with_tf_BH.accel, gm_with_tf_BH_.accel)
)
@staticmethod
def nearly_identical(motion_1, motion_2, thres=0.99):
"""
Assert that two ground motions are nearly identical, by checking the
correlation coefficient between two time series.
Parameters
----------
motion_1 : numpy.ndarray
Two-column array (time, acceleration).
motion_2 : numpy.ndarray
Two-column array (time, acceleration).
thres : float
The threshold that the correlation coefficient must be above (or
equal to).
Returns
-------
result : bool
Whether the motions are nearly identical
"""
if not np.allclose(motion_1[:, 0], motion_2[:, 0], rtol=0.001, atol=0.0):
return False
r = np.corrcoef(motion_1[:, 1], motion_2[:, 1])
if r[1, 0] < thres:
return False
return True
if __name__ == '__main__':
SUITE = unittest.TestLoader().loadTestsFromTestCase(Test_Class_Ground_Motion)
unittest.TextTestRunner(verbosity=2).run(SUITE)
|
11493041
|
from toee import *
import char_editor
def CheckPrereq(attachee, classLevelled, abilityScoreRaised):
#Str requirement enforced by the engine
if not char_editor.has_feat(feat_two_weapon_fighting) and not char_editor.has_feat(feat_two_weapon_fighting_ranger):
return 0
return 1
|
11493045
|
from .AbstractAdjacencyCompander import AbstractAdjacencyCompander
import numpy as np
import sys
import os
import os.path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),'../../../../preprocessing')))
from mnist_to_graph_tensor import mnist_adj_mat
class ImageAdjacencyCompander(AbstractAdjacencyCompander):
def __init__(self,V,A):
super(ImageAdjacencyCompander, self).__init__(V, A)
self.NUM_DIRS = 8
def contractA(self):
self.flatA = self.A.sum(axis=1)
return self.flatA
def expandA(self):
return self.A
def update(self, P):
Ptiled = np.tile(np.expand_dims(P,axis=1),(1,self.NUM_DIRS,1))
Ptranspose = np.transpose(Ptiled, axes=[1, 2, 0])
Pnottranspose = np.transpose(Ptiled, axes=[1, 0, 2])
Abatched = np.transpose(self.A, axes=[1, 0, 2])
leftMultiply = np.matmul(Ptranspose, Abatched)
rightMultiply = np.matmul(leftMultiply, Pnottranspose)
self.A = np.transpose(rightMultiply, axes=[1, 0, 2])
self.V = np.dot(P.transpose(), self.V)
self.N = self.V.shape[0]
|
11493048
|
import os
from infiniteremixer.utils.io import load
class BatchExtractor:
"""Batch extract features dynamically from a list of audio files."""
def __init__(self, sample_rate=22050):
self.sample_rate = sample_rate
self.extractors = []
self._features = {}
def add_extractor(self, extractor):
"""Add a concrete Extractor to the extractors.
:param extractor: (Extractor) Concrete Extractor (e.g., MFCCExtractor)
"""
self.extractors.append(extractor)
def extract(self, dir):
"""Extract features from all the audio files in the directory.
:param dir: (str) Path to directory with audio files to analyse
:return: (dict): Dictionary with audio file paths as keys and
extracted features in the form:
{
"filepath1": {
"chromogram": np.ndarray([[], [], ...]),
"mfcc": np.ndarray([[], [], ...])
},
"filepath2": {
"chromogram": np.ndarray([[], [], ...]),
"mfcc": np.ndarray([[], [], ...])
},
...
}
"""
features = {}
for root, _, files in os.walk(dir):
for file in files:
file_path = os.path.join(root, file)
file_features = self._extract_features_for_file(file_path)
features[file_path] = file_features
return features
def _extract_features_for_file(self, file_path):
features = {}
signal = load(file_path, self.sample_rate)
for extractor in self.extractors:
feature = extractor.extract(signal, self.sample_rate)
features[extractor.feature_name] = feature
return features
if __name__ == "__main__":
from infiniteremixer.data.extraction.mfccextractor import MFCCExtractor
from infiniteremixer.data.extraction.chromogramextractor import ChromogramExtractor
num_mfccs = 13
frame_size = 1024
hop_size = 512
dir = "/home/valerio/datasets/infinitemixer/beats"
mfcc_extractor = MFCCExtractor(frame_size, hop_size, num_mfccs)
chromogram_extractor = ChromogramExtractor(frame_size, hop_size)
batch_extractor = BatchExtractor(22050)
batch_extractor.add_extractor(mfcc_extractor)
batch_extractor.add_extractor(chromogram_extractor)
features = batch_extractor.extract(dir)
a = 1
|
11493136
|
import numpy as np
from scipy.special import jv as besselj
from Solvers.QSP_solver import QSP_Solver
from math import ceil
# --------------------------------------------------------------------------
# Test case 1: Hamiltonian simulation
#
# Here we want to approximate e^{-i\tau x} by the Jacobi-Anger expansion:
#
# e^{-i\tau x} = J_0(\tau)+2\sum_{k even} (-1)^{k/2}J_{k}(\tau)T_k(x)+2i\sum_{k odd} (-1)^{(k-1)/2}J_{k}(\tau) T_k(x)
#
# We truncate the series up to N = 1.4\tau+log(10^{14}), which gives a polynomial approximation of e^{-i\tau x} with
# accuracy 10^{-14}. Besides, we deal with the real and imaginary parts of the truncated series separately and divide them
# by a constant factor 2 to enhance stability.
#
# parameters
# tau: the duration \tau in Hamiltonian simulation
# criteria: stop criteria, default 1e-12
# plot_phase: whether plot phase factors
#
# --------------------------------------------------------------------------
#
# Reference: <NAME>, <NAME>, <NAME> and <NAME>
# Efficient Phase Factor Evaluation in Quantum Signal Processing
#
# Author: <NAME>, <NAME>
# Version 1.0
# Last Update 06/2020
#
# --------------------------------------------------------------------------
# setup parameters
tau = 1000
criteria = 1e-12
plot_phase = True
opts = dict()
# --------------------------------------------------------------------------
# find phase factors
opts["criteria"] = criteria
max_order = ceil(1.4 * tau + np.log(1e14))
if np.mod(max_order, 2) == 1:
max_order -= 1
# --------------------------------------------------------------------------
# even part
coeff = np.zeros((max_order//2 + 1, 1))
for i in range(len(coeff)):
coeff[i] = (-1)**(i) * besselj(2*i, tau)
coeff[0] /= 2
[phi1, out1] = QSP_Solver(coeff, 0, opts)
print("- Info: \t\tQSP phase factors --- solved by L-BFGS\n")
print("- Parity: \t\t%s\n- Degree: \t\t%d\n", "even", max_order)
print("- Iteration times: \t%d\n", out1["iter"])
print("- CPU time: \t%.1f s\n", out1["time"])
#--------------------------------------------------------------------------
# odd part
coeff = np.zeros((max_order//2 + 1, 1))
for i in range(len(coeff)):
coeff[i] = (-1)**(i) * besselj(2*i + 1, tau)
[phi2,out2] = QSP_Solver(coeff, 1, opts)
#--------------------------------------------------------------------------
# output
print("- Info: \t\tQSP phase factors --- solved by L-BFGS\n")
print("- Parity: \t\t%s\n- Degree: \t\t%d\n", "odd", max_order + 1)
print("- Iteration times: \t%d\n", out2["iter"])
print("- CPU time: \t%.1f s\n", out2["time"])
#--------------------------------------------------------------------------
# plot phase factors
## Won't plot until necessary
|
11493153
|
from django.contrib.auth import get_user_model
from django.db.models.signals import m2m_changed
from django.dispatch import receiver
from guardian.shortcuts import assign_perm, remove_perm
from grandchallenge.blogs.models import Post
@receiver(m2m_changed, sender=Post.authors.through)
def update_permissions_on_authors_changed(
instance, action, reverse, pk_set, **_
):
if action not in ["post_add", "post_remove", "pre_clear"]:
# nothing to do for the other actions
return
if reverse:
users = [instance]
if pk_set is None:
# When using a _clear action, pk_set is None
# https://docs.djangoproject.com/en/2.2/ref/signals/#m2m-changed
posts = instance.blog_authors.all()
else:
posts = Post.objects.filter(pk__in=pk_set)
else:
posts = Post.objects.get(pk=instance.pk)
if pk_set is None:
# When using a _clear action, pk_set is None
# https://docs.djangoproject.com/en/2.2/ref/signals/#m2m-changed
users = instance.authors.all()
else:
users = get_user_model().objects.filter(pk__in=pk_set)
op = assign_perm if "add" in action else remove_perm
for user in users:
op("change_post", user, posts)
|
11493156
|
from django.conf import settings
from django.core import validators
class BookmarkURLValidator(validators.URLValidator):
"""
Extends default Django URLValidator and cancels validation if it is disabled in settings.
This allows to switch URL validation on/off dynamically which helps with testing
"""
def __call__(self, value):
if settings.LD_DISABLE_URL_VALIDATION:
return
super().__call__(value)
|
11493185
|
import time
def initGPIO():
try:
with open("/sys/class/gpio/export", "w") as fp:
fp.write("477")
except:
pass
try:
with open("/sys/class/gpio/export", "w") as fp:
fp.write("476")
except:
pass
try:
with open("/sys/class/gpio/gpio477/direction", "w") as fp:
fp.write("out")
except:
pass
try:
with open("/sys/class/gpio/gpio476/direction", "w") as fp:
fp.write("out")
except:
pass
def gpioWriteDC(val):
try:
with open("/sys/class/gpio/gpio477/value", "w") as fp:
fp.write(str(val))
except:
pass
def gpioWriteReset(val):
try:
with open("/sys/class/gpio/gpio476/value", "w") as fp:
fp.write(str(val))
except:
pass
def gpioDoReset():
gpioWriteReset(1)
time.sleep(0.1)
gpioWriteReset(0)
time.sleep(0.1)
gpioWriteReset(1)
time.sleep(0.1)
|
11493202
|
import pytest
from shortcuts import FMT_SHORTCUT, FMT_TOML
from shortcuts.cli import _get_format
class Test_get_format:
@pytest.mark.parametrize('filepath,exp_format', [
('file.shortcut', FMT_SHORTCUT),
('file.plist', FMT_SHORTCUT),
('file.toml', FMT_TOML),
('https://icloud.com/shortcuts/some-id/', 'url'),
])
def test_for_url(self, filepath, exp_format):
assert _get_format(filepath) == exp_format
def test_raises(self):
with pytest.raises(RuntimeError):
_get_format('abc')
|
11493211
|
from sentry_relay import DataCategory, SPAN_STATUS_CODE_TO_NAME
def test_parse_data_category():
assert DataCategory.parse("default") == DataCategory.DEFAULT
assert DataCategory.parse("transaction") == DataCategory.TRANSACTION
assert DataCategory.parse("") is None
assert DataCategory.parse(None) is None
assert DataCategory.parse("something completely different") is None
def test_data_category_from_event_type():
assert DataCategory.from_event_type("transaction") == DataCategory.TRANSACTION
# Special case!
assert DataCategory.from_event_type("default") == DataCategory.ERROR
# Anything unknown is coerced to "default", which is ERROR
assert DataCategory.from_event_type("") == DataCategory.ERROR
assert DataCategory.from_event_type(None) == DataCategory.ERROR
def test_data_category_api_name():
assert DataCategory.ERROR.api_name() == "error"
def test_data_category_compatibility():
assert 1 == DataCategory.ERROR
assert 1 in DataCategory.event_categories()
assert DataCategory.ERROR in (0, 1, 2)
def test_span_mapping():
# This is a pure regression test to protect against accidental renames.
assert SPAN_STATUS_CODE_TO_NAME == {
0: "ok",
1: "cancelled",
2: "unknown",
3: "invalid_argument",
4: "deadline_exceeded",
5: "not_found",
6: "already_exists",
7: "permission_denied",
8: "resource_exhausted",
9: "failed_precondition",
10: "aborted",
11: "out_of_range",
12: "unimplemented",
13: "internal_error",
14: "unavailable",
15: "data_loss",
16: "unauthenticated",
}
|
11493213
|
import argparse
from typing import List
import tqdm
import canrevan.parsing as parsing
import canrevan.utils as utils
from canrevan.crawling import Crawler
DEFAULT_USER_AGENT_STRING = (
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/87.0.4280.66 "
"Safari/537.36"
)
def _main():
args = _create_argument_parser().parse_args()
# Create a crawler for collecting article urls and news contents.
crawler = Crawler(
concurrent_tasks=args.max_jobs,
num_parsing_processes=args.num_cores,
request_headers={"user-agent": args.user_agent},
request_timeout=args.timeout,
)
# Collect article urls from navigation pages.
nav_urls = _prepare_nav_urls(args)
print(f"[*] navigation pages: {len(nav_urls)}")
with tqdm.tqdm(nav_urls, desc="[*] collect article urls") as tbar:
article_urls = crawler.reduce_to_array(
nav_urls, parse_fn=parsing.extract_article_urls, update_fn=tbar.update
)
# Flatten the grouped urls and remove duplicates from the array.
article_urls = {url for urls in article_urls for url in urls}
print(f"[*] total collected articles: {len(article_urls)}")
# Crawl news articles from the collected article urls and save the content to the
# output file.
with tqdm.tqdm(article_urls, desc="[*] crawl news article contents") as tbar:
total_contents = crawler.reduce_to_file(
article_urls,
args.output_path,
parse_fn=parsing.parse_article_content,
update_fn=tbar.update,
)
print(
f"[*] finish crawling {total_contents} news articles to "
f"[{args.output_path}]"
)
def _prepare_nav_urls(args: argparse.Namespace) -> List[str]:
return [
f"https://news.naver.com/main/list.nhn?mode=LSD&mid=shm"
f"&sid1={category}&date={date}&page={page}"
for category in args.category
for date in utils.drange(args.start_date, args.end_date, args.skip_days)
for page in range(1, args.max_page + 1)
]
def _create_argument_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
prog="canrevan", description="crawl naver news articles"
)
parser.add_argument(
"--output_path", default="articles.txt", help="output file path"
)
parser.add_argument(
"--category",
required=True,
nargs="*",
type=int,
help="list of news article categories",
)
parser.add_argument(
"--start_date", required=True, help="minimum date of news articles"
)
parser.add_argument(
"--end_date", required=True, help="maximum date of news articles"
)
parser.add_argument(
"--skip_days", default=1, type=int, help="number of days to skip from crawling"
)
parser.add_argument(
"--max_page", default=10, type=int, help="maximum number of pages to navigate"
)
parser.add_argument(
"--timeout", default=5, type=float, help="timeout for the whole request"
)
parser.add_argument(
"--max_jobs",
default=500,
type=int,
help="maximum number of concurrent requests",
)
parser.add_argument(
"--num_cores",
default=4,
type=int,
help="number of multi-processing cores for parsing",
)
parser.add_argument(
"--user-agent",
default=DEFAULT_USER_AGENT_STRING,
help="use custom user-agent string",
)
return parser
|
11493241
|
import pandas as pd
from tqdm import tqdm
import json
import torch
from nltk.corpus import stopwords
from transformers import GPT2LMHeadModel
from model.ConditionalLM import ConditionalLM
from util.utils import load_args, compute_logProb, init_tokenizer
from util.CLMDataset import CLMDataset
from torch.utils.data import DataLoader
from collections import Counter
import json
from tqdm import tqdm
import os
class LocatingModule():
def __init__(self, config='configs/locating.yaml'):
self.args = load_args(config)
print(self.args)
if not os.path.isdir('output/locating/{}'.format(self.args.dataset)):
os.mkdir('output/locating/{}'.format(self.args.dataset))
self.tokenizer = init_tokenizer(self.args.gpt_path)
def load_utt_intent(self, fname):
df = pd.read_csv(fname)
return [(utt, intent) for utt, intent in zip(list(df['utt']), list(df['intent']))]
def load_semantic(self, fname):
with open(fname) as f:
word_score = json.load(f)
intent_related_words = {}
for intent, scores in word_score.items():
intent_related_words[intent] = []
for score in scores:
if score[1] > self.args.threshold:
intent_related_words[intent].append(score[0])
return intent_related_words
def token_score_to_word_score(self, utt, score):
m_score = []
# score = [float(ts) for ts in scores[i].split(' ')[1:]] # ts: token score
utt_tokens_with_space = utt.split(' ')
with_space_index = 0
utt_tokens_without_space = utt.split()
score_index = 0
m_score = [] # merge score
for index, token in enumerate(utt_tokens_without_space):
if index == 0:
length = len(self.tokenizer.tokenize(token))
with_space_index += 1
else:
if utt_tokens_with_space[with_space_index] == '':
with_space_index += 2
length = len(self.tokenizer.tokenize('one ' + token)) - 1
else:
with_space_index += 1
length = len(self.tokenizer.tokenize('one ' + token)) - 1
tmp = 0
for i in range(score_index, score_index + length):
tmp += score[i]
score_index += length
m_score.append(tmp)
assert len(m_score) == len(utt_tokens_without_space)
# m_scores.append(' '.join([str(ts)[:5] if len(str(ts)) >= 5 else str(ts) + '0' * (5 - len(str(ts))) for ts in m_score]))
return m_score
@torch.no_grad()
def generate_word_score(self):
output = 'output/locating/{}/{}.txt'.format(self.args.dataset, self.args.llr_name)
delimiter = '##'
gpt2 = GPT2LMHeadModel.from_pretrained(self.args.gpt_path, return_dict=True).to(self.args.device)
clm = ConditionalLM(self.args.gpu, self.args.dataset, self.args.label_num).to(self.args.device)
clm.load_state_dict(torch.load('output/params/{}/{}.pt'.format(self.args.dataset, self.args.cond_name), \
map_location='cuda:{}'.format(self.args.gpu) if self.args.gpu != -1 else 'cpu'))
gpt2.eval()
clm.eval()
train_loader = self.create_loader(self.args.train, False)
with open(output, 'w', encoding='utf-8') as f:
for X_in, X_out, mask, lengths, y, intents in tqdm(train_loader):
logProb_Cond = compute_logProb(X_out, clm(X_in, y))
logProb = compute_logProb(X_out, gpt2(input_ids=X_in, attention_mask=mask).logits)
llrs = logProb_Cond - logProb
for x, llr, intent, length in zip(X_out, llrs, intents, lengths):
f.write("{}{}".format(intent, delimiter))
x = self.tokenizer.batch_decode(x.reshape(1,-1))[0].replace("<|endoftext|>","")
x_word = x.split()
x_token_llrs = [r.item() for r in llr[:length]]
x_word_scores = self.token_score_to_word_score(x, x_token_llrs)
assert len(x_word) == len(x_word_scores)
f.write(delimiter.join(x_word))
f.write("\n{}".format(delimiter)+ delimiter.join([str(s) for s in x_word_scores])+ "\n")
en_stopwords = list(stopwords.words('english'))
summary = {}
with open(output) as f:
for i, line in enumerate(f):
line = line.strip()
if i % 2 == 0:
text = line.split(delimiter)
else:
score = list(map(float, line.split(delimiter)[1:]))
if text[0] not in summary:
summary[text[0]]={}
for j, w in enumerate(text[1:]):
if w in en_stopwords: # not necessary
continue
if w not in summary[text[0]]:
summary[text[0]][w]=score[j]
else:
summary[text[0]][w]+=score[j]
old_summary = summary
summary = {}
for key in old_summary:
summary[key] = Counter(old_summary[key]).most_common(10)
with open('output/locating/{}/{}.json'.format(self.args.dataset, self.args.word_score_name), 'w') as f:
json.dump(summary, f, indent=4)
def create_loader(self, file_name, shuffle):
dataset = CLMDataset(file_name, self.tokenizer, self.args.device)
loader = DataLoader(dataset=dataset, batch_size=self.args.batch_size, shuffle=shuffle, drop_last=False)
return loader
def generate_masked_utts(self):
utt_intents = self.load_utt_intent(self.args.train)
intent_related_words = self.load_semantic('output/locating/{}/{}.json'.format(self.args.dataset, self.args.word_score_name))
# print(intent_related_words)
masked_utts = {'utt':[],'masked_word':[],'intent':[]}
for utt_intent in tqdm(utt_intents):
utt, intent = utt_intent
if intent_related_words.get(intent, None) is None:
continue
for word in intent_related_words[intent]:
if utt.count(word) == 1:
masked_utts['utt'].append(utt.replace(word, '[MASK]'))
masked_utts['masked_word'].append(word)
masked_utts['intent'].append(intent)
# for intent, num in intent_num.items():
# print(intent, num)
df = pd.DataFrame(masked_utts, columns=list(masked_utts.keys()), index=None)
df.to_csv('output/locating/{}/{}.csv'.format(self.args.dataset, self.args.masked_utt_name))
if __name__ == '__main__':
l = LocatingModule()
l.generate_word_score()
l.generate_masked_utts()
|
11493266
|
import queue
visited = []
MAX = 20
adj = {1: set([2,3,5]), 2: set([3]), 3: set([8,4]), 4:6}
q = queue.Queue(MAX)
def dfs(s):
if s in visited :
return
visited.append(s)
print(s)
children = adj.get(s)
    if children is not None:
if isinstance(children,int): # Case : values are not a set
dfs(children)
else : # Case : values are a set
for i in children:
dfs(i)
return
def test():
global visited
visited = []
print("dfs : ")
dfs(1)
test()
|
11493278
|
class Solution:
def convert(self, s, numRows):
if numRows == 1 or numRows >= len(s): return s
row, direction, res = 0, -1, [""] * numRows
for char in s:
res[row] += char
if row == 0 or row == numRows - 1: direction *= -1
row += direction
return "".join(res)
|
11493288
|
import os
import sys
import argparse
def genera(envF):
"""
Read genera per environment
:param envF:
:return:
"""
    with open(envF, 'r') as f:
        # Placeholder body (assumption): the original parsing logic is missing,
        # so return the stripped lines until the real implementation is restored.
        return [line.strip() for line in f]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Calculate shared genera per env")
parser.add_argument('-f', help='file of environments and genera')
args = parser.parse_args()
|
11493328
|
import numpy as np
import multiprocessing
import timeit
def start():
a = np.random.rand(1000, 1000)
b = np.random.rand(1000, 1000)
np.multiply(a,b)
start2 = timeit.default_timer()
for i in range(500):
    start()
end = timeit.default_timer()
print(end-start2)
start2 = timeit.default_timer()
pool = multiprocessing.Pool(multiprocessing.cpu_count())
liste = [pool.apply_async(start, ()) for i in range(500)]
[p.get() for p in liste]
end = timeit.default_timer()
print(end - start2)
|
11493338
|
import collections
class Solution:
def maxNumberOfBalloons(self, text: str) -> int:
cnt = collections.Counter(text)
ans = float('inf')
for ch in 'ban':
ans = min(ans, cnt.get(ch, 0))
for ch in 'lo':
ans = min(ans, cnt.get(ch, 0) // 2)
return ans
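# Quick sanity check: "loonbalxballpoon" contains the letters of "balloon"
# twice, so the expected answer is 2.
assert Solution().maxNumberOfBalloons("loonbalxballpoon") == 2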
|
11493352
|
from numpy import zeros, copy
from itertools import accumulate
M = 1e9 + 7
class Solution:
def numOfArrays(self, n: int, m: int, k: int) -> int:
k, m = k + 1, m + 1
f1 = zeros((k, m))
f1[1, 1:] = 1
for _ in range(1, n):
f2 = copy(f1)
for j in range(1, k):
item2s = list(accumulate(f1[j - 1]))
for c in range(1, m):
f2[j, c] = (f1[j, c] * c + item2s[c - 1]) % M
f1 = f2
return int(sum(f1[k - 1, 1:]) % M)
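# Quick sanity check (LeetCode 1420 example): with n=2, m=3, k=1 there are
# 6 valid arrays, e.g. [1, 1], [2, 1], [3, 3].
assert Solution().numOfArrays(2, 3, 1) == 6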
|
11493378
|
import pandas as pd
import numpy as np
def dataset():
train_data = pd.read_csv('train.csv', index_col=None)
direction_data = train_data['direction']
sentence_data = train_data['sentence']
rel_data = train_data['relation']
term1_data = train_data['term1']
term2_data = train_data['term2']
m = []
fd = pd.read_csv('train.csv', usecols = ['_unit_id'])
count = 1
total = 0
ls = []
#print(len(sentence_data))
for i in fd['_unit_id']:
ls.append(i)
ls.append(0) # so that last set of sentences are counted
j = 0
i = 0
for j in range(0, len(ls)-1):
if ls[j] == ls[j+1]:
count = count + 1
else:
            dir_data = direction_data.loc[i:i+count-1].sort_values()
s = set(dir_data)
a = []
for x in s:
a.append(x)
#print(len(a))
c1 = 0
c2 = 0
c3 = 0
if len(a) != 1:
for j in dir_data:
if a[0] == j:
c1+=1
elif a[1] == j:
c2+=1
else:
c3+=1
else:
c1 = count
c2 = c3 = 0
if c1 > c2 and c1 > c3:
                m.append([a[0], sentence_data.loc[i], rel_data[i], term1_data[i], term2_data[i]])
            elif c2 > c1 and c2 > c3:
                m.append([a[1], sentence_data.loc[i], rel_data[i], term1_data[i], term2_data[i]])
            elif c3 > c1 and c3 > c2:
                m.append([a[2], sentence_data.loc[i], rel_data[i], term1_data[i], term2_data[i]])
i += count
count = 1
columns= ['direction','sentence', 'relation', 'term1', 'term2']
data = pd.DataFrame(m, columns=columns)
return data
|
11493410
|
from ..cw_model import CWModel
class Report(CWModel):
def __init__(self, json_dict=None):
self.column_definitions = None # (JObject[])
self.row_values = None # (JObject[])
# initialize object with json dict
super().__init__(json_dict)
|
11493416
|
import dataclasses
import re
from typing import Any, ClassVar, Match, Optional, Pattern, Set, Union
BASE_PATTERN = re.compile(
r"^arn:"
r"(?P<partition>.+?):"
r"(?P<service>.+?):"
r"(?P<region>.*?):"
r"(?P<account>.*?):"
r"(?P<rest>.*)$"
)
class InvalidArnException(Exception):
"""Raised when the value cannot be parsed as a valid ARN."""
def __init__(self, arn: str):
self.arn = arn
super().__init__(f"{arn} is not a valid ARN")
class InvalidArnRestException(Exception):
"""Raised when the value can be parsed as a valid ARN but the rest cannot."""
def __init__(self, rest: str, class_name: str) -> None:
self.rest = rest
self.class_name = class_name
super().__init__(f"{rest} is not a valid rest expression for type {class_name}")
class ConflictingFieldNamesException(Exception):
"""A subclass tried to use reserved field names.
The :py:attr:`partition`, :py:attr:`service`, :py:attr:`region`, and
:py:attr:`account` names are reserved and cannot be used as attributes.
"""
def __init__(self, field_names: Set[str]) -> None:
self.field_names = field_names
super().__init__(
f"Fields {', '.join(field_names)} are reserved and "
f"cannot be used as field names"
)
@dataclasses.dataclass()
class Arn:
"""The base class that represents an AWS ARN.
This class is meant to be used as a superclass for more specific ARN classes.
Instantiating this class with a valid ARN will parse out the common fields
(:py:attr:`partition`, :py:attr:`service`, :py:attr:`region`, and
:py:attr:`account`), and will place the rest of the ARN string in the
:py:attr:`rest` field.
"""
REST_PATTERN: ClassVar[Union[str, Pattern]] = re.compile(r"(?P<rest>.*)")
"""The pattern that parses the "rest" of the ARN. The "rest" of and ARN is the part
that is specific to the AWS service that the ARN represents. When overriding in a
subclass, this value can be either an `re.Pattern`_ or an ``str``.
.. _re.Pattern: https://docs.python.org/3/library/re.html#regular-expression-objects
"""
input_arn: Any
"""The instance that was parsed, unchanged."""
partition: str = ""
"""The partition of the AWS of the resource."""
service: str = ""
"""The AWS service of the resource."""
region: str = ""
"""The AWS region in which the resource is located."""
account: str = "" # str because some pre-built resources have "aws" as the account
"""The AWS account ID of the resource."""
rest: str = dataclasses.field(init=False, default="")
"""The rest of the ARN, as matched by :py:const:`REST_PATTERN`."""
def __post_init__(self) -> None:
if isinstance(self.input_arn, bytes):
arn = self.input_arn.decode()
elif isinstance(self.input_arn, str):
arn = self.input_arn
else:
arn = str(self.input_arn)
base_match = BASE_PATTERN.match(arn)
if not base_match:
raise InvalidArnException(arn)
self._assign_fields_from_match(base_match)
rest = base_match["rest"]
rest_match = self.match_rest(rest)
if not rest_match:
raise InvalidArnRestException(rest, self.__class__.__name__)
reserved_field_names = {f.name for f in dataclasses.fields(Arn) if f.init}
subclass_field_names = set(rest_match.re.groupindex.keys())
conflicting_field_names = reserved_field_names & subclass_field_names
if conflicting_field_names:
raise ConflictingFieldNamesException(conflicting_field_names)
self.assign_rest(rest_match)
def __str__(self):
return (
f"arn:"
f"{self.partition}:"
f"{self.service}:"
f"{self.region}:"
f"{self.account}:"
f"{self.format_rest()}"
)
def match_rest(self, rest: str) -> Optional[Match]:
"""Convert the rest of the ARN into an `re.Match`_.
By default, matches the rest of the ARN against :py:const:`REST_PATTERN`.
Override this metod to match against a pattern dynamically. For an example,
see :py:meth:`arn.ecs.ServiceArn.match_rest`.
.. _re.Match: https://docs.python.org/3/library/re.html#match-objects
"""
return re.match(self.REST_PATTERN, rest)
def assign_rest(self, match: Match):
"""Assign an `re.Match`_'s groups to fields on ``self``.
By default, assigns all named groups in :py:const:`REST_PATTERN` as strings.
Override this method to cast group matches to a more appropriate type. For an
example, see :py:meth:`arn.ecs.TaskDefinitionArn.assign_rest`.
.. _re.Match: https://docs.python.org/3/library/re.html#match-objects
"""
self._assign_fields_from_match(match)
def format_rest(self) -> str:
"""Produce a formatted representation of the rest of the ARN.
This method is essentially the reverse of :py:meth:`match_rest` and
:py:meth:`assign_rest`. By default returns :py:data:`rest`. Override this method
to allow users to override specific fields and get a ``str(...)``
representation that includes the override.
"""
return self.rest
def _assign_fields_from_match(self, match):
for key in match.re.groupindex.keys():
if not getattr(self, key):
setattr(self, key, match[key])
|
11493443
|
import datetime
from dojo.tools.stackhawk.parser import StackHawkParser
from dojo.models import Test, Finding
from unittests.dojo_test_case import DojoTestCase
class TestStackHawkParser(DojoTestCase):
__test_datetime = datetime.datetime(2022, 2, 16, 23, 7, 19, 575000, datetime.timezone.utc)
def test_invalid_json_format(self):
testfile = open("unittests/scans/stackhawk/invalid.json")
parser = StackHawkParser()
with self.assertRaises(ValueError):
parser.get_findings(testfile, Test())
def test_parser_ensures_data_is_for_stackhawk_before_parsing(self):
testfile = open("unittests/scans/stackhawk/oddly_familiar_json_that_isnt_us.json")
parser = StackHawkParser()
with self.assertRaises(ValueError):
parser.get_findings(testfile, Test())
def test_stackhawk_parser_with_no_vuln_has_no_findings(self):
testfile = open("unittests/scans/stackhawk/stackhawk_zero_vul.json")
parser = StackHawkParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.assertEqual(0, len(findings))
def test_stackhawk_parser_with_one_high_vuln_has_one_findings(self):
testfile = open("unittests/scans/stackhawk/stackhawk_one_vul.json")
parser = StackHawkParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.__assertAllEndpointsAreClean(findings)
self.assertEqual(1, len(findings))
finding = findings[0]
self.__assertFindingEquals(
finding,
"Anti CSRF Tokens Scanner",
self.__test_datetime,
"Secured Application",
"Development",
"High",
"https://app.stackhawk.com/scans/e2ff5651-7eef-47e9-b743-0c2f7d861e27/finding/20012",
"20012",
"10",
False,
False
)
def test_stackhawk_parser_with_many_vuln_has_many_findings_and_removes_duplicates(self):
testfile = open("unittests/scans/stackhawk/stackhawk_many_vul.json")
parser = StackHawkParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.__assertAllEndpointsAreClean(findings)
self.assertEqual(6, len(findings))
self.__assertFindingEquals(
findings[0],
"Cookie Slack Detector",
self.__test_datetime,
"Secured Application",
"Development",
"Low",
"https://app.stackhawk.com/scans/e2ff5651-7eef-47e9-b743-0c2f7d861e27/finding/90027",
"90027",
"10",
False,
False
)
self.__assertFindingEquals(
findings[1],
"Proxy Disclosure",
self.__test_datetime,
"Secured Application",
"Development",
"Medium",
"https://app.stackhawk.com/scans/e2ff5651-7eef-47e9-b743-0c2f7d861e27/finding/40025",
"40025",
"10",
False,
False
)
self.__assertFindingEquals(
findings[2],
"Anti CSRF Tokens Scanner",
self.__test_datetime,
"Secured Application",
"Development",
"High",
"https://app.stackhawk.com/scans/e2ff5651-7eef-47e9-b743-0c2f7d861e27/finding/20012",
"20012",
"10",
False,
False
)
self.__assertFindingEquals(
findings[3],
"Cross Site Scripting Weakness (Reflected in JSON Response)",
self.__test_datetime,
"Secured Application",
"Development",
"High",
"https://app.stackhawk.com/scans/e2ff5651-7eef-47e9-b743-0c2f7d861e27/finding/40012",
"40012",
"1",
False,
False
)
self.__assertFindingEquals(
findings[4],
"Content Security Policy (CSP) Header Not Set",
self.__test_datetime,
"Secured Application",
"Development",
"Medium",
"https://app.stackhawk.com/scans/e2ff5651-7eef-47e9-b743-0c2f7d861e27/finding/10038",
"10038",
"12",
False,
False
)
self.__assertFindingEquals(
findings[5],
"Permissions Policy Header Not Set",
self.__test_datetime,
"Secured Application",
"Development",
"Low",
"https://app.stackhawk.com/scans/e2ff5651-7eef-47e9-b743-0c2f7d861e27/finding/10063",
"10063",
"12",
False,
False
)
def test_that_a_scan_import_updates_the_test_description(self):
testfile = open("unittests/scans/stackhawk/stackhawk_zero_vul.json")
parser = StackHawkParser()
test = Test()
parser.get_findings(testfile, test)
testfile.close()
self.assertEqual(
test.description,
'View scan details here: ' +
'[https://app.stackhawk.com/scans/e2ff5651-7eef-47e9-b743-0c2f7d861e27]' +
'(https://app.stackhawk.com/scans/e2ff5651-7eef-47e9-b743-0c2f7d861e27)'
)
def test_that_a_scan_with_all_false_positive_endpoints_on_a_finding_marks_as_false_positive(self):
testfile = open("unittests/scans/stackhawk/stackhawk_one_vuln_all_endpoints_false_positive.json")
parser = StackHawkParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.__assertAllEndpointsAreClean(findings)
self.assertEqual(1, len(findings))
self.__assertFindingEquals(
findings[0],
"Cookie Slack Detector",
self.__test_datetime,
"Secured Application",
"Development",
"Low",
"https://app.stackhawk.com/scans/e2ff5651-7eef-47e9-b743-0c2f7d861e27/finding/90027",
"90027",
"3",
True,
False
)
def test_that_a_scan_with_all_risk_accepted_endpoints_on_a_finding_marks_as_risk_accepted(self):
testfile = open("unittests/scans/stackhawk/stackhawk_one_vuln_all_endpoints_risk_accepted.json")
parser = StackHawkParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.__assertAllEndpointsAreClean(findings)
self.assertEqual(1, len(findings))
self.__assertFindingEquals(
findings[0],
"Cookie Slack Detector",
self.__test_datetime,
"Secured Application",
"Development",
"Low",
"https://app.stackhawk.com/scans/e2ff5651-7eef-47e9-b743-0c2f7d861e27/finding/90027",
"90027",
"3",
False,
True
)
def test_that_a_scan_with_endpoints_in_differing_statuses_does_not_mark_as_risk_accepted_or_false_positive(self):
testfile = open("unittests/scans/stackhawk/stackhawk_one_vuln_all_endpoints_have_different_status.json")
parser = StackHawkParser()
findings = parser.get_findings(testfile, Test())
testfile.close()
self.__assertAllEndpointsAreClean(findings)
self.assertEqual(1, len(findings))
self.__assertFindingEquals(
findings[0],
"Cookie Slack Detector",
self.__test_datetime,
"Secured Application",
"Development",
"Low",
"https://app.stackhawk.com/scans/e2ff5651-7eef-47e9-b743-0c2f7d861e27/finding/90027",
"90027",
"3",
False,
False
)
def __assertFindingEquals(
self,
actual_finding: Finding,
title,
date: datetime.datetime,
application_name,
environment,
severity,
finding_url,
finding_id,
count,
false_positive,
risk_accepted
):
self.assertEqual(title, actual_finding.title)
self.assertEqual(date, actual_finding.date)
self.assertEqual(application_name, actual_finding.component_name)
self.assertEqual(environment, actual_finding.component_version)
self.assertEqual(severity, actual_finding.severity)
self.assertEqual("View this finding in the StackHawk platform at:\n[" + finding_url + '](' + finding_url + ')',
actual_finding.description)
        self.assertRegex(
actual_finding.steps_to_reproduce,
"Use a specific message link and click 'Validate' to see the cURL!.*"
)
self.assertFalse(actual_finding.static_finding)
self.assertTrue(actual_finding.dynamic_finding)
self.assertEqual(finding_id, actual_finding.vuln_id_from_tool)
self.assertEqual(count, actual_finding.nb_occurences)
self.assertEqual(application_name, actual_finding.service)
self.assertEqual(false_positive, actual_finding.false_p)
self.assertEqual(risk_accepted, actual_finding.risk_accepted)
# The following fields should be not be set from this parser.
self.assertIsNone(actual_finding.unique_id_from_tool)
def __assertAllEndpointsAreClean(self, findings):
for finding in findings:
for endpoint in finding.unsaved_endpoints:
endpoint.clean()
|
11493478
|
import argparse
import logging
import os
import subprocess
from concurrent import futures
from enum import Enum
from os import PathLike
from pathlib import Path
from tempfile import NamedTemporaryFile
from threading import Thread
from typing import List
from buglab.controllers.helper.randombugselectorserver import random_bug_selector_server
from buglab.data.deduplication import DuplicationIndex
from buglab.utils.logging import configure_logging
LOGGER = logging.getLogger(__name__)
class ExtractJob(Enum):
BUG = 1
TYPE = 2
def create_container_and_extract(
package_name: str, target_dir: PathLike, bug_selector_server_address: str, extract_job: ExtractJob
):
docker_command = f'docker run --network="host" --rm -it -v {target_dir}:/data/targetDir buglab-base:latest '
if extract_job == ExtractJob.BUG:
docker_command += (
f"python3.8 -m buglab.controllers.packageextracttodisk {package_name} "
f"--bug-selector-server {bug_selector_server_address}"
)
elif extract_job == ExtractJob.TYPE:
docker_command += f"python3.8 -m buglab.controllers.typelessextracttodisk {package_name} "
else:
raise ValueError(f"Unknown extraction job type: {extract_job}")
_ = subprocess.run(docker_command, shell=True)
def extract_from_packages(
packages: List[str], target_dir: PathLike, bug_selector_server_address: str, extract_job: ExtractJob
):
with futures.ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
all_futures = {
executor.submit(
create_container_and_extract, pkg, target_dir, bug_selector_server_address, extract_job
): pkg
for pkg in packages
}
for future in futures.as_completed(all_futures):
try:
_ = future.result()
except Exception as exc:
LOGGER.exception("Failure for `%s` package", all_futures[future], exc_info=exc)
if __name__ == "__main__":
configure_logging()
parser = argparse.ArgumentParser(description="Orchestrator to extract graphs across multiple packages.")
parser.add_argument(
"package_list_path",
type=str,
help="the path to a txt file containing the names of the packages to be considered",
)
parser.add_argument("target_dir", type=str, help="the target directory to store the results.")
parser.add_argument(
"--extract-types",
action="store_true",
help="Set this flag if you want to remove type information instead of extracting bugs.",
)
args = parser.parse_args()
f = NamedTemporaryFile()
duplication_index = DuplicationIndex(Path(f.name))
duplication_server_thread = Thread(target=lambda: duplication_index.server(address="tcp://*:5555"), daemon=True)
duplication_server_thread.start()
rewrite_selector_server_port: str = "8345"
extract_job = ExtractJob.TYPE
if not args.extract_types:
extract_job = ExtractJob.BUG
server_thread = Thread(
target=lambda: random_bug_selector_server("tcp://*:" + rewrite_selector_server_port), daemon=True
)
server_thread.start()
all_packages = []
with open(args.package_list_path) as f:
for line in f.readlines():
pkg_name = line.strip()
if len(pkg_name) > 0:
all_packages.append(pkg_name)
extract_from_packages(all_packages, args.target_dir, "tcp://localhost:" + rewrite_selector_server_port, extract_job)
|
11493505
|
import re
import pandas as pd
from config_db import DB_CONNECTION
__all__ = []
OPTIONS = dict(
if_exists='replace',
index=False
)
def gsheet_csv_url(url):
"""
Returns the url for the Google Sheet csv export.
Parameters
----------
url : str
The editor url string as found when viewing the sheet in a browser.
"""
def get_sheet():
for i, x in enumerate(s):
if x == 'gid':
return s[i+1]
raise ValueError('Sheet ID not found in url {}'.format(url))
s = re.split('/|#|=|&', url)
key = s[5]
sheet = get_sheet()
return 'https://docs.google.com/spreadsheets/d/{}/export?gid={}&format=csv'.format(key, sheet)
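# Illustrative example (the key and gid are made up): an editor url such as
#   https://docs.google.com/spreadsheets/d/ABC123/edit#gid=0
# is turned into the csv export url
#   https://docs.google.com/spreadsheets/d/ABC123/export?gid=0&format=csv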
def make_unique_columns(df):
"""
Renames the data frame columns to be unique when exported to an SQL
database.
Parameters
----------
df : :class:`pandas.DataFrame`
The data frame.
"""
names = {}
lnames = set()
for x in list(df):
n = x
# Get a unique name
while n.lower() in lnames:
n = n + '_'
lnames.add(n.lower())
names[x] = n
df.rename(columns=names, inplace=True)
return df
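# Illustrative example: a frame with columns ['Name', 'name', 'area'] would be
# renamed to ['Name', 'name_', 'area'], since column names are compared
# case-insensitively here before export to SQL.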
def rename_columns_15_0(df):
"""
Renames the version 15.0 data frame columns.
Parameters
----------
df : :class:`pandas.DataFrame`
The data frame.
"""
names = {
'AISC_Manual_Label': 'name',
'W': 'unit_weight',
'A': 'area',
'Ix': 'inertia_x',
'Iy': 'inertia_y',
'Iz': 'inertia_z',
'Zx': 'plast_sect_mod_x',
'Zy': 'plast_sect_mod_y',
'Sx': 'elast_sect_mod_x',
'Sy': 'elast_sect_mod_y',
'Sz': 'elast_sect_mod_z',
'rx': 'gyradius_x',
'ry': 'gyradius_y',
'rz': 'gyradius_z',
'J': 'inertia_t',
}
df.rename(columns=names, inplace=True)
return df
def write_aisc_metric_15_0():
"""
Writes the 'aisc_metric_15_0' table to the database.
"""
url = 'https://docs.google.com/spreadsheets/d/1RwpcQxKsQmb_ylxR5Zx4JYWf9_A4Cd-6KrUrXt3ynls/edit#gid=55672305'
table = 'aisc_metric_15_0'
url = gsheet_csv_url(url)
df = pd.read_csv(url)
rename_columns_15_0(df)
make_unique_columns(df)
df.to_sql(table, DB_CONNECTION, **OPTIONS)
def write_aisc_imperial_15_0():
"""
Writes the 'aisc_imperial_15_0' table to the database.
"""
url = 'https://docs.google.com/spreadsheets/d/1RwpcQxKsQmb_ylxR5Zx4JYWf9_A4Cd-6KrUrXt3ynls/edit#gid=1797343786'
table = 'aisc_imperial_15_0'
url = gsheet_csv_url(url)
df = pd.read_csv(url)
rename_columns_15_0(df)
make_unique_columns(df)
df.to_sql(table, DB_CONNECTION, **OPTIONS)
def write_database():
"""
Writes all data to the database.
"""
write_aisc_metric_15_0()
write_aisc_imperial_15_0()
if __name__ == '__main__':
write_database()
|
11493509
|
from django.urls import path
from custom_auth.oauth.views import login_with_github, login_with_google
app_name = "oauth"
urlpatterns = [
path("google/", login_with_google, name="google-oauth-login"),
path("github/", login_with_github, name="github-oauth-login"),
]
|
11493574
|
import random
class Position:
def __init__(self, x, y):
self.x = x
self.y = y
def __hash__(self):
return hash((self.x, self.y))
def __eq__(self, other):
return self.x == other.x and self.y == other.y
def __repr__(self):
return "Position=[x={},y={}]".format(self.x, self.y)
def get_potential_knight_moves(start, size, visited):
moves = list()
moves.append(Position(start.x + 1, start.y + 2))
moves.append(Position(start.x + 1, start.y - 2))
moves.append(Position(start.x - 1, start.y + 2))
moves.append(Position(start.x - 1, start.y - 2))
moves.append(Position(start.x + 2, start.y + 1))
moves.append(Position(start.x + 2, start.y - 1))
moves.append(Position(start.x - 2, start.y + 1))
moves.append(Position(start.x - 2, start.y - 1))
valid_moves = [pos for pos in moves if
pos.x >= 0 and pos.x < size and
pos.y >= 0 and pos.y < size and
pos not in visited]
return valid_moves
def run_knights_tour(start, size, visited):
if len(visited) == size * size:
return 1
moves = get_potential_knight_moves(start, size, visited)
count = 0
for move in moves:
tmp_visted = visited.copy()
tmp_visted.add(move)
count += run_knights_tour(move, size, tmp_visted)
return count
def count_knights_tours(size):
count = 0
for i in range(size):
for j in range(size):
start = Position(i, j)
count += run_knights_tour(start, size, set([start]))
return count
assert count_knights_tours(1) == 1
assert count_knights_tours(2) == 0
assert count_knights_tours(3) == 0
assert count_knights_tours(4) == 0
assert count_knights_tours(5) == 1728
|
11493590
|
from .base import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.db',
}
}
# Required to serve the files while we use Gunicorn in dev env
MIDDLEWARE += [
'whitenoise.middleware.WhiteNoiseMiddleware',
]
|
11493634
|
import pytest
from dbnd._core.settings.tracking_config import (
TrackingConfig,
ValueTrackingLevel,
get_value_meta,
)
from targets import target as target_factory
from targets.value_meta import ValueMetaConf
from targets.values import ListValueType, ObjectValueType, StrValueType, TargetValueType
class TestTrackingConfig:
@pytest.mark.parametrize(
"value, value_type, target, expected_value_preview, expected_data_schema",
[
(10, None, None, "10", {"type": "int"}),
(10, ObjectValueType(), None, "10", {"type": "int"}),
(10, TargetValueType(), target_factory("/path"), "10", {"type": "int"}),
(10, StrValueType(), None, "10", {"type": "str"}),
([10], ListValueType(), None, "[10]", {"type": "List"}),
],
)
def test_get_value_meta(
self, value, value_type, target, expected_value_preview, expected_data_schema,
):
tracking_config = TrackingConfig.current()
tracking_config.value_reporting_strategy = ValueTrackingLevel.ALL
result = get_value_meta(
value,
ValueMetaConf(),
tracking_config,
value_type=value_type,
target=target,
)
assert result.value_preview == expected_value_preview
assert result.data_schema == expected_data_schema
|
11493637
|
from ..utility.expr_wrap_util import symbolic
from ..expr import BVV, BVS
from ..utility.models_util import get_arg_k
from ..sym_state import State
MAX_READ = 100
def read_handler(state: State, view):
fd = get_arg_k(state, 1, 4, view)
buf = get_arg_k(state, 2, state.arch.bits() // 8, view)
count = get_arg_k(state, 3, 4, view)
assert not symbolic(fd) or not state.solver.symbolic(fd)
fd = fd.value
assert state.os.is_open(fd)
if symbolic(count):
count = state.solver.max(count)
count = MAX_READ if count > MAX_READ else count
else:
count = count.value
res = state.os.read(fd, count)
for i, b in enumerate(res):
state.mem.store(buf + i, b)
state.events.append(
"read from fd %d, count %d" % (fd, count)
)
return BVV(count, 32)
def write_handler(state: State, view):
fd = get_arg_k(state, 1, 4, view)
buf = get_arg_k(state, 2, state.arch.bits() // 8, view)
count = get_arg_k(state, 3, 4, view)
assert not symbolic(fd) or not state.solver.symbolic(fd)
fd = fd.value
if symbolic(count):
count = state.solver.max(count)
count = MAX_READ if count > MAX_READ else count
else:
count = count.value
data = []
for i in range(count):
b = state.mem.load(buf + i, 1)
data.append(b)
state.os.write(fd, data)
state.events.append(
"write to fd %d, count %d" % (fd, count)
)
return BVV(count, 32)
stat_idx = 0
def _stat(state: State, statbuf):
global stat_idx
long_t = state.arch.bits()
int_t = 32
st_dev = BVS('stat_st_dev_%d' % stat_idx, long_t)
st_ino = BVS('stat_st_ino_%d' % stat_idx, long_t)
st_mode = BVS('stat_st_mode_%d' % stat_idx, long_t)
st_nlink = BVS('stat_st_nlink_%d' % stat_idx, long_t)
st_uid = BVS('stat_st_uid_%d' % stat_idx, int_t)
st_gid = BVS('stat_st_gid_%d' % stat_idx, int_t)
st_rdev = BVS('stat_st_rdev_%d' % stat_idx, long_t)
st_size = BVS('stat_st_size_%d' % stat_idx, long_t)
st_blksize = BVS('stat_st_blksize_%d' % stat_idx, long_t)
st_blocks = BVS('stat_st_blocks_%d' % stat_idx, long_t)
st_atim_tv_sec = BVS('stat_atim.sec_%d' % stat_idx, long_t)
st_atim_tv_nsec = BVS('stat_atim.nsec_%d' % stat_idx, long_t)
st_mtim_tv_sec = BVS('stat_mtim.sec_%d' % stat_idx, long_t)
st_mtim_tv_nsec = BVS('stat_mtim.nsec_%d' % stat_idx, long_t)
st_ctim_tv_sec = BVS('stat_ctim.sec_%d' % stat_idx, long_t)
st_ctim_tv_nsec = BVS('stat_ctim.nsec_%d' % stat_idx, long_t)
stat_idx += 1
state.mem.store(statbuf + 0, st_dev, state.arch.endness())
state.mem.store(statbuf + 8, st_ino, state.arch.endness())
state.mem.store(statbuf + 16, st_nlink, state.arch.endness())
state.mem.store(statbuf + 24, st_mode, state.arch.endness())
state.mem.store(statbuf + 32, st_uid, state.arch.endness())
state.mem.store(statbuf + 36, st_gid, state.arch.endness())
state.mem.store(statbuf + 40, BVV(0, 8*8)) # padding
state.mem.store(statbuf + 48, st_rdev, state.arch.endness())
state.mem.store(statbuf + 56, st_size, state.arch.endness())
state.mem.store(statbuf + 64, st_blksize, state.arch.endness())
state.mem.store(statbuf + 72, st_blocks, state.arch.endness())
state.mem.store(statbuf + 80, st_atim_tv_sec, state.arch.endness())
state.mem.store(statbuf + 88, st_atim_tv_nsec, state.arch.endness())
state.mem.store(statbuf + 96, st_mtim_tv_sec, state.arch.endness())
state.mem.store(statbuf + 104, st_mtim_tv_nsec, state.arch.endness())
state.mem.store(statbuf + 112, st_ctim_tv_sec, state.arch.endness())
state.mem.store(statbuf + 120, st_ctim_tv_nsec, state.arch.endness())
state.mem.store(statbuf + 128, BVV(0, 8*16)) # reserved (zero (?))
return BVV(0, 32)
def stat_handler(state: State, view):
global stat_idx
pathname = get_arg_k(state, 1, state.arch.bits() // 8, view)
statbuf = get_arg_k(state, 2, state.arch.bits() // 8, view)
path = ""
if not symbolic(pathname):
i = 0
c = state.mem.load(pathname, 1)
while not symbolic(c) and c.value != 0 and i < 100:
path += chr(c.value)
i += 1
c = state.mem.load(pathname+i, 1)
else:
path = "<symbolic>"
state.events.append(
"stat on %s" % path
)
return _stat(state, statbuf)
def xstat_handler(state: State, view):
version = get_arg_k(state, 1, 4, view)
pathname = get_arg_k(state, 2, state.arch.bits() // 8, view)
statbuf = get_arg_k(state, 3, state.arch.bits() // 8, view)
path = ""
if not symbolic(pathname):
i = 0
c = state.mem.load(pathname, 1)
while not symbolic(c) and c.value != 0 and i < 100:
path += chr(c.value)
i += 1
c = state.mem.load(pathname+i, 1)
else:
path = "<symbolic>"
if not symbolic(version):
version = str(version.value)
else:
version = "<symbolic>"
state.events.append(
"__xstat on %s. version %s" % (path, version)
)
return _stat(state, statbuf)
|
11493750
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange
# TODO
# - Proper tests of everything
# - be clearer about meaning of t_elapsed, t_ix and either (t)
# - Time Since Event is a ticking bomb. Needs better naming/definitions
# to ensure that it's either inverse TTE or a feature or if they coincide.
def roll_fun(x, size, fun=np.mean, reverse=False):
"""Like cumsum but with any function `fun`.
"""
y = np.copy(x)
n = len(x)
size = min(size, n)
if size <= 1:
return x
for i in xrange(size):
y[i] = fun(x[0:(i + 1)])
for i in xrange(size, n):
y[i] = fun(x[(i - size + 1):(i + 1)])
return y
def carry_forward_if(x, is_true):
"""Locomote forward `x[i]` if `is_true[i]`.
remain x untouched before first pos of truth.
:param Array x: object whos elements are to carry forward
:param Array is_true: same length as x containing true/false boolean.
:return Array x: forwarded object
"""
    cargo = None
    for i in xrange(len(x)):
        if is_true[i]:
            cargo = x[i]
        if cargo is not None:
            x[i] = cargo
    return x
def carry_backward_if(x, is_true):
"""Locomote backward `x[i]` if `is_true[i]`.
remain x untouched after last pos of truth.
:param Array x: object whos elements are to carry backward
:param Array is_true: same length as x containing true/false boolean.
:return Array x: backwarded object
"""
    cargo = None
    for i in reversed(xrange(len(x))):
        if is_true[i]:
            cargo = x[i]
        if cargo is not None:
            x[i] = cargo
    return x
def steps_since_true_minimal(is_event):
"""(Time) since event over discrete (padded) event vector.
:param Array is_event: a vector of 0/1s or boolean
:return Array x: steps since is_event was true
"""
n = len(is_event)
z = -1 # at the latest on step before
x = np.int32(is_event)
for i in xrange(n):
if is_event[i]:
z = i
x[i] = i - z
return x
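# Worked example: steps_since_true_minimal([0, 1, 0, 0, 1]) -> [1, 0, 1, 2, 0]
# (before the first event the counter runs from the assumed event one step
# before the window).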
def steps_to_true_minimal(is_event):
"""(Time) to event for discrete (padded) event vector.
:param Array is_event: a vector of 0/1s or boolean
:return Array x: steps until is_event is true
"""
n = len(is_event)
z = n # at the earliest on step after
x = np.int32(is_event)
for i in reversed(xrange(n)):
if is_event[i]:
z = i
x[i] = z - i
return x
def get_tte_discrete(is_event, t_elapsed=None):
"""Calculates discretely measured tte over a vector.
:param Array is_event: Boolean array
:param IntArray t_elapsed: integer array with same length as `is_event`. If none, it will use `xrange(len(is_event))`
:return Array tte: Time-to-event array (discrete version)
- Caveats
tte[i] = numb. timesteps to timestep with event
Step of event has tte = 0 \
(event happened at time [t,t+1))
tte[-1]=1 if no event (censored data)
"""
n = len(is_event)
tte = np.int32(is_event)
stepsize = 1
if t_elapsed is None:
t_elapsed = xrange(n)
t_next = t_elapsed[-1] + stepsize
for i in reversed(xrange(n)):
if is_event[i]:
t_next = t_elapsed[i]
tte[i] = t_next - t_elapsed[i]
return tte
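# Worked example: get_tte_discrete([0, 0, 1, 0, 1]) -> [2, 1, 0, 1, 0]
# (steps remaining until the next event, 0 on the event step itself).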
def get_tte_continuous(is_event, t_elapsed):
"""Calculates time to (pointwise measured) next event over a vector.
:param Array is_event: Boolean array
:param IntArray t_elapsed: integer array with same length as `is_event` that supports vectorized subtraction. If none, it will use `xrange(len(is_event))`
:return Array tte: Time-to-event (continuous version)
TODO::
Should support discretely sampled, continuously measured TTE
.. Caveats::
tte[i] = time to *next* event at time t[i]
(t[i] is exactly at event&/or query)
tte[-1]=0 always
(since last time is a *point*)
Last datpoints are right censored.
"""
n = len(is_event)
if t_elapsed is None:
t_elapsed = np.int32(xrange(n))
t_next = t_elapsed[-1]
# lazy initialization to autoinit if difftime
tte = t_elapsed - t_next
for i in reversed(xrange(n)):
tte[i] = t_next - t_elapsed[i]
if is_event[i]:
t_next = t_elapsed[i]
return tte
def get_tte(is_event, discrete_time, t_elapsed=None):
""" wrapper to calculate *Time To Event* for input vector.
:param Boolean discrete_time: if `True`, use `get_tte_discrete`. If `False`, use `get_tte_continuous`.
"""
if discrete_time:
return get_tte_discrete(is_event, t_elapsed)
else:
return get_tte_continuous(is_event, t_elapsed)
def get_tse(is_event, t_elapsed=None):
""" Wrapper to calculate *Time Since Event* for input vector.
Inverse of tte. Safe to use as a feature.
Always "continuous" method of calculating it.
tse >0 at time of event
(if discrete we dont know about the event yet, if continuous
we know at record of event so superfluous to have tse=0)
tse = 0 at first step
:param Array is_event: Boolean array
:param IntArray t_elapsed: None or integer array with same length as `is_event`.
* If none, it will use `t_elapsed.max() - t_elapsed[::-1]`.
.. TODO::
reverse-indexing is pretty slow and ugly and not a helpful template for implementing in other languages.
"""
if t_elapsed is not None:
t_elapsed = t_elapsed.max() - t_elapsed[::-1]
return get_tte_continuous(is_event[::-1], t_elapsed)[::-1]
def get_is_not_censored(is_event, discrete_time=True):
""" Calculates non-censoring indicator `u` for one vector.
:param array is_event: logical or numeric array indicating event.
:param Boolean discrete_time: if `True`, last observation is conditionally censored.
"""
n = len(is_event)
is_not_censored = np.copy(is_event)
if discrete_time:
# Last obs is conditionally censored
event_seen = is_event[-1]
for i in reversed(xrange(n)):
if is_event[i] and not event_seen:
event_seen = is_event[i]
is_not_censored[i] = event_seen
else:
# Last obs is always censored
event_seen = False
for i in reversed(xrange(n)):
is_not_censored[i] = event_seen
if is_event[i] and not event_seen:
event_seen = is_event[i]
return is_not_censored
|
11493754
|
from typing import Optional
from dissect.cstruct import Instance
from structlog import get_logger
from unblob.extractors.command import Command
from unblob.file_utils import InvalidInputFormat
from ...models import File, HexString, StructHandler, ValidChunk
logger = get_logger()
VALID_NT_SIGNATURES = [0x28, 0x29]
VALID_MEDIAS = [0xF0, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF]
class FATHandler(StructHandler):
NAME = "fat"
PATTERNS = [
HexString(
"""
// An initial x86 short jump instruction
// OEMName (8 bytes)
// BytesPerSec (2 bytes)
// SecPerClus (1 byte) "Must be one of 1, 2, 4, 8, 16, 32, 64, 128."
// 495 (0x1EF) bytes of whatever
// 55 AA is the "signature". "This will be the end of the sector only in case the
// sector size is 512."
( EB | E9 ) [13] ( 01 | 02 | 04 | 08 | 10 | 20 | 40 | 80 ) [495] 55 AA
"""
)
]
C_DEFINITIONS = r"""
// Common between FAT12, FAT16 and FAT32.
typedef struct bios_param_common {
char JmpBoot[3]; // An x86 JMP - e.g. EB 3C 90 ("One finds either eb xx 90, or e9 xx xx.")
char OEMName[8]; // OEM name/version (E.g. "IBM 3.3", "IBM 20.0", "MSDOS5.0", "MSWIN4.0".)
// "BIOS Parameter Block" starts here
uint16 BytesPerSec; // Almost always 512? Microsoft operating systems
// will properly support 1024, 2048, and 4096.
uint8 SecPerClus; // The legal values are 1, 2, 4, 8, 16, 32, 64,
// and 128.
uint16 RsvdSecCnt; // Must not be 0. Should be 1 on FAT12/16? 32 on FAT32?
uint8 NumFATs; // Should always be 2?
uint16 RootEntCnt; // Number of 32-byte dir entries. Must be 0 for FAT32.
uint16 TotSectors; // Old 16-bit count of all sectors on volume
// This field can be 0; if it is 0, then TotSec32 must be
// non-zero. For FAT32, this field must be 0. For FAT12 and
// FAT16 volumes, this field contains the sector count, and
// TotSec32 is 0 if the total sector count fits (is less than
// 0x10000).
uint8 Media; // The valid values for this field is 0xF0, 0xF8, 0xF9, 0xFA, 0xFB,
// 0xFC, 0xFD, 0xFE and 0xFF.
uint16 FATSz16; // This field is the FAT12/FAT16 16-bit count of sectors occupied by
// ONE FAT. On FAT32 volumes this field must be 0, and
// FATSz32 contains the FAT size count.
uint16 SecPerTrk; // Sectors per track for interrupt 0x13.
uint16 NumHeads; // Number of heads for interrupt 0x13
} bios_param_common_t;
// BIOS params for FAT16.
typedef struct fat12_16_bootsec {
bios_param_common_t common;
uint32 NumHidden;
uint32 NumSectors;
uint8 DrvNum;
uint8 Reserved1;
uint8 BootSig; // If it's 0x29 (or 0x28 on NT), means that the next 3 fields are present
char VolID[4];
char VolLab[11];
char FileSysType[8]; // Filesystem type (E.g. "FAT12 ", "FAT16 ", "FAT ", or all zero.)
} fat12_16_bootsec_t;
// BIOS params for FAT32.
typedef struct fat32_bootsec {
bios_param_common_t common;
uint32 Num_Hidden;
uint32 TotSec32;
uint32 FATSz32;
uint16 ExtFlags;
uint16 FSVer;
uint32 RootClus;
uint16 FSInfo;
uint16 BkBootSec;
uint8 Reserved[12];
uint8 DrvNum;
uint8 Reserved1;
uint8 BootSig;
char VolID[4];
char VolLab[11];
char FileSysType[8];
} fat32_bootsec_t;
typedef struct fat_unknown {
bios_param_common_t common;
} fat_unknown_t;
"""
EXTRACTOR = Command("7z", "x", "-y", "{inpath}", "-o{outdir}")
def valid_name(self, name: bytes) -> bool:
try:
name.decode("utf-8")
except UnicodeDecodeError:
return False
return True
def valid_fat32_header(self, header: Instance) -> bool:
if header.common.RootEntCnt != 0:
return False
if header.common.TotSectors != 0:
return False
if header.common.FATSz16 != 0:
return False
return True
def valid_header(self, header: Instance) -> bool:
if not self.valid_name(header.common.OEMName):
return False
if header.common.BytesPerSec % 2 != 0:
return False
if header.common.RsvdSecCnt == 0:
return False
if header.common.Media not in VALID_MEDIAS:
return False
if header.BootSig not in VALID_NT_SIGNATURES:
return False
if header.FileSysType not in (b"FAT12 ", b"FAT16 "):
return self.valid_fat32_header(header)
return True
def calculate_chunk(self, file: File, start_offset: int) -> Optional[ValidChunk]:
header = self.cparser_le.fat12_16_bootsec_t(file)
if header.FileSysType in (b"FAT12 ", b"FAT16 "):
if header.common.TotSectors == 0:
sector_count = header.NumSectors
else:
sector_count = header.common.TotSectors
else:
file.seek(start_offset)
header = self.cparser_le.fat32_bootsec_t(file)
sector_count = header.TotSec32
if not self.valid_header(header):
raise InvalidInputFormat("Invalid FAT header.")
logger.debug("FAT header parsed", header=header, _verbosity=3)
if sector_count == 0x0:
sector_count = header.common.TotSectors
size = header.common.BytesPerSec * sector_count
return ValidChunk(
start_offset=start_offset,
end_offset=start_offset + size,
)
|
11493758
|
from deepmatch.models import NCF
from ..utils import get_xy_fd_ncf
def test_NCF():
model_name = "NCF"
x, y, user_feature_columns, item_feature_columns = get_xy_fd_ncf(False)
model = NCF(user_feature_columns, item_feature_columns, )
model.compile('adam', "binary_crossentropy")
model.fit(x, y, batch_size=10, epochs=2, validation_split=0.5)
if __name__ == "__main__":
pass
|