1654907
import os
import numpy as np
import sys
import gc
import torch
from .tools import fix_seed
from torch_geometric.utils import is_undirected
fix_seed(1234)
class AutoEDA(object):
"""
A tool box for Exploratory Data Analysis (EDA)
Parameters:
----------
n_class: int
number of classes
----------
"""
def __init__(self, n_class):
self.info = {'n_class': n_class}
def get_info(self, data):
self.get_feature_info(data['fea_table'])
self.get_edge_info(data['edge_file'])
self.set_priori_knowledges()
self.get_label_weights(data, reweighting=True)
return self.info
def get_feature_info(self, df):
"""
Get information of the original node features: number of nodes, number of features, etc.
Remove those features which have only one value.
"""
        unique_counts = df.nunique()
        constant_cols = unique_counts[unique_counts == 1]
        df.drop(constant_cols.index, axis=1, inplace=True)
self.info['num_nodes'] = df.shape[0]
self.info['num_features'] = df.shape[1] - 1
print('Number of Nodes:', self.info['num_nodes'])
print('Number of Original Features:', self.info['num_features'])
def get_edge_info(self, df):
"""
Get information of the edges: number of edges, if weighted, if directed, Max / Min weight, etc.
"""
self.info['num_edges'] = df.shape[0]
min_weight, max_weight = df['edge_weight'].min(), df['edge_weight'].max()
if min_weight != max_weight:
self.info['weighted'] = True
else:
self.info['weighted'] = False
edge_index = df[['src_idx', 'dst_idx']].to_numpy()
edge_index = sorted(edge_index, key=lambda d: d[0])
edge_index = torch.tensor(edge_index, dtype=torch.long).transpose(0, 1)
self.info['directed'] = not is_undirected(edge_index, num_nodes=self.info['num_nodes'])
print('Number of Edges:', self.info['num_edges'])
print('Is Directed Graph:', self.info['directed'])
        print('Is Weighted Graph:', self.info['weighted'])
print('Max Weight:', max_weight, 'Min Weight:', min_weight)
def set_priori_knowledges(self):
"""
Set some hyper parameters to their initial value according to some priori knowledges.
"""
if self.info['num_features'] == 0:
if self.info['directed']:
self.info['dropedge_rate'] = 0.5
self.info['chosen_models'] = ['ResGCN', 'GraphConvNet', 'GraphSAGE']
self.info['ensemble_threshold'] = 0.01
else:
self.info['dropedge_rate'] = 0
self.info['chosen_models'] = ['GraphConvNet','GIN','GraphSAGE']
self.info['ensemble_threshold'] = 0.01
else:
if self.info['directed']:
self.info['dropedge_rate'] = 0.5
self.info['chosen_models'] = ['GraphConvNet','GraphSAGE','ResGCN']
self.info['ensemble_threshold'] = 0.02
else:
if self.info['num_edges'] / self.info['num_nodes']>= 10:
self.info['dropedge_rate'] = 0.5
self.info['chosen_models'] = ['ARMA','GraphSAGE', 'IncepGCN']
self.info['ensemble_threshold'] = 0.02
else:
self.info['dropedge_rate'] = 0.5
self.info['chosen_models'] = ['ARMA','IncepGCN','GraphConvNet','SG']
self.info['ensemble_threshold'] = 0.03
if self.info['num_edges'] / self.info['num_nodes'] >= 200:
self.info['num_layers'] = 1
self.info['init_hidden_size'] = 5
elif self.info['num_edges'] / self.info['num_nodes'] >= 100:
self.info['num_layers'] = 2
self.info['init_hidden_size'] = 5
else:
self.info['num_layers'] = 2
self.info['init_hidden_size'] = 7
if self.info['num_edges'] / self.info['num_nodes'] >= 10:
self.info['use_linear'] = True
self.info['dropout_rate'] = 0.2
else:
self.info['use_linear'] = False
self.info['dropout_rate'] = 0.5
self.info['lr'] = 0.005
if self.info['num_features'] == 0:
self.info['feature_type'] = ['svd'] # one_hot / svd / degree / node2vec / adj
else:
self.info['feature_type'] = ['original', 'svd']
self.info['normalize_features'] = 'None'
def get_label_weights(self, data, reweighting=True):
"""
Compute the weights of labels as the weight when computing loss.
"""
if not reweighting:
self.info['label_weights'] = None
return
        groupby_data_original = data['train_label'].groupby('label').count()
        label_weights = groupby_data_original.iloc[:, 0]
if len(label_weights) < 10 or max(label_weights) < min(label_weights) * 10:
self.info['label_weights'] = None
return
label_weights = 1 / np.sqrt(label_weights)
self.info['label_weights'] = torch.tensor(label_weights.values,dtype=torch.float32)
print('Label Weights:', self.info['label_weights'])
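# Hedged usage sketch, not part of the original file: the loader that builds
# `data` is assumed; the keys and columns mirror what get_info() accesses above.
#
#   import pandas as pd
#   data = {
#       'fea_table': pd.read_csv('features.csv'),        # node features table
#       'edge_file': pd.read_csv('edges.csv'),           # src_idx, dst_idx, edge_weight
#       'train_label': pd.read_csv('train_labels.csv'),  # includes a 'label' column
#   }
#   info = AutoEDA(n_class=10).get_info(data)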
1654948
import os
import importlib.util
import click
import sys
sys.path.append("libraries/common")
sys.path.append("libraries/layouts")
sys.path.append("libraries/keycodes")
def load_module(file, name):
spec = importlib.util.spec_from_file_location(name, file)
foo = importlib.util.module_from_spec(spec)
spec.loader.exec_module(foo)
return foo
@click.group(invoke_without_command=True)
@click.argument("keycodes", nargs=2)
@click.option("--lname", "-l", default="left")
@click.option("--rname", "-r", default="right")
def main(keycodes, lname, rname):
print(keycodes)
left = load_module(keycodes[0], "left")
right = load_module(keycodes[1], "right")
lASCII = left.KeyboardLayout.ASCII_TO_KEYCODE
rASCII = right.KeyboardLayout.ASCII_TO_KEYCODE
for x in range(len(lASCII)):
c = chr(x)
if lASCII[x] != rASCII[x]:
print(f"{x}/{repr(c)}: {repr(lASCII[x])} ≠ {repr(rASCII[x])}")
if left.KeyboardLayout.NEED_ALTGR != right.KeyboardLayout.NEED_ALTGR:
print("NEED_ALTGR")
print(" ", left.KeyboardLayout.NEED_ALTGR)
print(" ", right.KeyboardLayout.NEED_ALTGR)
lHIGH = left.KeyboardLayout.HIGHER_ASCII
rHIGH = right.KeyboardLayout.HIGHER_ASCII
for H in lHIGH:
if H not in rHIGH:
print(f"Only {lname} :", repr(H), hex(lHIGH[H]))
for H in rHIGH:
if H not in lHIGH:
print(f"Only {rname}:", repr(H), hex(rHIGH[H]))
elif rHIGH[H] != lHIGH[H]:
print("Different :", repr(H), hex(lHIGH[H]), hex(rHIGH[H]))
if __name__ == "__main__":
main()
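# Hedged invocation sketch (file names are placeholders): each positional
# argument must be a module defining a KeyboardLayout class with
# ASCII_TO_KEYCODE, NEED_ALTGR and HIGHER_ASCII attributes, as accessed above.
#
#   python diff_keycodes.py keycodes_left.py keycodes_right.py -l left -r right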
1654962
import sqlite3
from argparse import ArgumentParser
import numpy as np
from openDAM.dataio.create_dam_db_from_csv import insert_in_table, create_tables
PUN_ZONES = {"SICI": 17, "SVIZ": 6}
MIN_RATIO = 0.1
PERIODS = range(9, 21)
N_BLOCKS_PER_ZONE = 25
QUANTITY_RANGE = [1, 75]
MEAN_PRICE = 50
STDEV_PRICE = 10
def populate_block_orders(connection, day_id):
block_id = 0
    for zone, zone_id in PUN_ZONES.items():  # dict.iteritems() is Python 2 only; this file otherwise targets Python 3
print("Creating blocks for zone " + zone)
data = []
profile_data = []
for block in range(1, N_BLOCKS_PER_ZONE + 1):
block_id += 1
price = np.random.normal(MEAN_PRICE, STDEV_PRICE)
data.append([day_id, block_id, zone_id, price, MIN_RATIO])
for period in range(1, 25):
quantity = 0.0
if period in PERIODS:
quantity = np.random.uniform(QUANTITY_RANGE[0], QUANTITY_RANGE[1])
profile_data.append([day_id, block_id, period, quantity])
insert_in_table(connection, "BLOCKS", data)
insert_in_table(connection, "BLOCK_DATA", profile_data)
        connection.commit()  # commit on the connection passed in, not the module-level `conn`
print(block_id)
if __name__ == "__main__":
parser = ArgumentParser(
description='Utility for Adding randomly generated blocks to a database. Block properties are defined at the top.')
parser.add_argument("-p", "--path", help="Folder where data is located", required=True)
parser.add_argument("-d", "--database",
help="Name of the sqlite database file, under the folder of the --path argument.", required=True)
parser.add_argument("--from_date", help="Date of first day to import, as YYYYMMDD, cast as an int.",
required=True)
parser.add_argument("--to_date", help="Date of last day to import, as YYYYMMDD, cast as an int.",
required=True)
parser.add_argument("--create_tables", help="True if block related tables must be created", default=False)
parser.add_argument("--seed", help="Seed for random number generation", default=1984)
args = parser.parse_args()
conn = sqlite3.connect('%s/%s' % (args.path, args.database))
if args.create_tables:
create_tables(conn, ["BLOCKS", "BLOCK_DATA"])
cursor = conn.cursor()
cmd = "delete from BLOCKS"
cursor.execute(cmd)
cmd = "delete from BLOCK_DATA"
cursor.execute(cmd)
for date in range(int(args.from_date), int(args.to_date) + 1):
np.random.seed(int(args.seed))
populate_block_orders(conn, date)
conn.close()
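# Hedged invocation sketch (folder and database names are placeholders):
#
#   python add_random_blocks.py -p data -d dam.db --from_date 20180101 --to_date 20180107 --create_tables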
1654963
import collections
from contextlib import contextmanager
from functools import wraps
import io
from itertools import cycle
import logging
import os
import pprint
import re
import shutil
import socket
import subprocess
from sys import version, stderr
import sys
import threading
import time
import uuid
from warnings import catch_warnings
import webbrowser
from allure.common import AttachmentType
from allure.constants import Status, Label
from allure.structure import Environment, EnvParameter, TestLabel, Failure, Attach, TestSuite, TestStep
from allure.utils import now
import jprops
from lxml import etree
import py
from robot.api import logger
from robot.libraries.BuiltIn import BuiltIn
from robot.libraries.Process import Process
from robot.libraries.Screenshot import Screenshot
from robot.running.userkeyword import UserLibrary
from robot.version import get_version, get_full_version, get_interpreter
from six import text_type, iteritems
from common import AllureImpl
from constants import Robot, ROBOT_OUTPUT_FILES, SEVERITIES, STATUSSES
from structure import AllureProperties, TestCase # Overriding TestCase due to missing severity attribute.
from util_funcs import clear_directory, copy_dir_contents
from version import VERSION
# Some of the imports above are only used for debugging and are not needed by the application.
class AllureListener(object):
ROBOT_LISTENER_API_VERSION = 2
def __init__(self, allurePropPath=None, source='Listener'):
self.stack = []
self.testsuite = None
self.callstack = []
self.AllurePropPath = allurePropPath
        self.AllureIssueIdRegEx = ''
self.isFirstSuite = True
        # Setting this variable prevents the loading of a Library-added Listener.
        # In case the Listener is added via the command line, the Robot context is
        # not there yet and would cause an exception. Similar section in start_suite.
try:
            AllureListenerActive = BuiltIn().get_variable_value('${ALLURE}', False)
BuiltIn().set_global_variable('${ALLURE}', True)
except:
pass
def start_suitesetup(self, name, attributes):
start_test_attributes= {'critical': 'yes',
'doc': 'Test Suite Setup section',
'starttime': attributes['starttime'],
'tags': [],
'id': 's1-s1-t0',
'longname': BuiltIn().get_variable_value('${SUITE_NAME}'),
'template': ''
}
if len(str(start_test_attributes.get('doc'))) > 0:
description = str(start_test_attributes.get('doc'))
else:
description = name
test = TestCase(name=name,
description=description,
start=now(),
attachments=[],
labels=[],
# parameters=[],
steps=[])
self.stack.append(test)
return
def end_suitesetup(self, name, attributes):
end_test_attributes= {'critical': 'yes',
'doc': 'Test Suite Setup section',
'starttime': attributes['starttime'],
'endtime': attributes['endtime'],
'status': 'PASS',
'tags': [],
'id': 's1-s1-t0',
'longname': BuiltIn().get_variable_value('${SUITE_NAME}'),
'template': ''
}
test = self.stack.pop()
BuiltIn().run_keyword(name)
if end_test_attributes.get('status') == Robot.PASS:
test.status = Status.PASSED
elif end_test_attributes.get('status')==Robot.FAIL:
test.status = Status.FAILED
test.failure = Failure(message=end_test_attributes.get('message'), trace='')
        elif end_test_attributes.get('doc') != '':
test.description = attributes.get('doc')
if end_test_attributes['tags']:
for tag in end_test_attributes['tags']:
if re.search(self.AllureIssueIdRegEx, tag):
test.labels.append(TestLabel(
name=Label.ISSUE,
value=tag))
if tag.startswith('feature'):
test.labels.append(TestLabel(
name='feature',
value=tag.split(':')[-1]))
if tag.startswith('story'):
test.labels.append(TestLabel(
name='story',
value=tag.split(':')[-1]))
elif tag in SEVERITIES:
test.labels.append(TestLabel(
name='severity',
value=tag))
elif tag in STATUSSES:
test.status = tag # overwrites the actual test status with this value.
self.PabotPoolId = BuiltIn().get_variable_value('${PABOTEXECUTIONPOOLID}')
if(self.PabotPoolId is not None):
self.threadId = 'PabotPoolId-' + str(self.PabotPoolId)
else:
self.threadId = threading._get_ident()
test.labels.append(TestLabel(
name='thread',
value=str(self.threadId)))
self.testsuite.tests.append(test)
test.stop = now()
return test
def start_test(self, name, attributes):
if len(str(attributes.get('doc'))) > 0:
description = str(attributes.get('doc'))
else:
description = name
test = TestCase(name=name,
description=description,
start=now(),
attachments=[],
labels=[],
steps=[],
severity='normal')
self.stack.append(test)
return
def end_test(self, name, attributes):
        # logger.console('\nend_test: ['+name+']')
        # logger.console(attributes)
        # logger.console('    [stack length] ['+str(len(self.stack))+'] [testsuite length] ['+ str(len(self.testsuite.tests))+']')
test = self.stack.pop()
if attributes.get('status') == Robot.PASS:
test.status = Status.PASSED
elif attributes.get('status')==Robot.FAIL:
test.status = Status.FAILED
test.failure = Failure(message=attributes.get('message'), trace='')
        elif attributes.get('doc') != '':
test.description = attributes.get('doc')
if attributes['tags']:
for tag in attributes['tags']:
if re.search(self.AllureIssueIdRegEx, tag):
test.labels.append(TestLabel(
name=Label.ISSUE,
value=tag))
elif tag.startswith('feature'):
test.labels.append(TestLabel(
name='feature',
value=tag.split(':')[-1]))
elif tag.startswith('story'):
test.labels.append(TestLabel(
name='story',
value=tag.split(':')[-1]))
elif tag in SEVERITIES:
test.labels.append(TestLabel(
name='severity',
value=tag))
test.severity = tag
elif tag in STATUSSES:
test.status = tag # overwrites the actual test status with this value.
else:
test.labels.append(TestLabel(
name='tag',
value=tag))
self.PabotPoolId = BuiltIn().get_variable_value('${PABOTEXECUTIONPOOLID}')
if(self.PabotPoolId is not None):
self.threadId = 'PabotPoolId-' + str(self.PabotPoolId)
else:
self.threadId = threading._get_ident()
test.labels.append(TestLabel(
name='thread',
value=str(self.threadId)))
self.testsuite.tests.append(test)
test.stop = now()
return test
def start_suite(self, name, attributes):
self.SuitSrc = BuiltIn().get_variable_value('${SUITE_SOURCE}')
self.ExecDir = BuiltIn().get_variable_value('${EXECDIR}')
# Reading the Allure Properties file for the Issue Id regular expression
# for the Issues and the URL to where the Issues/Test Man links should go.
        if(self.AllurePropPath is None):
            self.AllurePropPath = self.ExecDir + '\\allure.properties'
        # Both branches of the original existence check did the same thing, so load unconditionally.
        self.AllureProperties = AllureProperties(self.AllurePropPath)
        self.AllureIssueIdRegEx = self.AllureProperties.get_property('allure.issues.id.pattern')
# Not using &{ALLURE} as this is throwing an error and ${ALLURE} gives the
# desired dictionary in Allure as well.
BuiltIn().set_global_variable('${ALLURE}', self.AllureProperties.get_properties())
# When running a Robot folder, the folder itself is also considered a Suite
# The full check depends on the availability of all the vars which are
# only available when a Robot file has started.
IsSuiteDirectory = os.path.isdir(self.SuitSrc)
if(not(IsSuiteDirectory)):
''' Check if class received Output Directory Path in the properties file. '''
if self.AllureProperties.get_property('allure.cli.logs.xml') is None:
''' No Path was provided, so using output dir with additional sub folder. '''
self.allurelogdir = BuiltIn().get_variable_value('${OUTPUT_DIR}') + "\\Allure"
else:
self.allurelogdir = self.AllureProperties.get_property('allure.cli.logs.xml')
self.AllureImplc = AllureImpl(self.allurelogdir)
''' Clear the directory but not if run in parallel mode in Pabot'''
PabotPoolId = BuiltIn().get_variable_value('${PABOTEXECUTIONPOOLID}')
try:
if(self.isFirstSuite == True
and self.AllureProperties.get_property('allure.cli.logs.xml.clear') == 'True'
and PabotPoolId is None):
clear_directory(self.AllureProperties.get_property('allure.cli.logs.xml'))
except Exception as e:
logger.console(pprint.pformat(e))
finally:
self.isFirstSuite = False
        if attributes.get('doc') != '':
description = attributes.get('doc')
else:
description = name
self.testsuite = TestSuite(name=name,
title=name,
description=description,
tests=[],
labels=[],
start=now())
return
def end_suite(self, name, attributes):
self.testsuite.stop = now()
logfilename = '%s-testsuite.xml' % uuid.uuid4()
# When running a folder, the folder itself is also considered a Suite
# The full check depends on the availability of all the vars which are
# only available when a Robot file has started.
IsSuiteDirectory = os.path.isdir(BuiltIn().get_variable_value("${SUITE_SOURCE}"))
if(not(IsSuiteDirectory)):
with self.AllureImplc._reportfile(logfilename) as f:
self.AllureImplc._write_xml(f, self.testsuite)
return
def start_keyword(self, name, attributes):
        # logger.console('\nstart_keyword: ['+name+']')
        # logger.console('    ['+attributes['type']+'] [stack length] ['+str(len(self.stack))+'] [testsuite length] ['+ str(len(self.testsuite.tests))+']')
if(hasattr(self, attributes.get('kwname').replace(" ", "_")) and callable(getattr(self, attributes.get('kwname').replace(" ", "_")))):
libraryMethodToCall = getattr(self, attributes.get('kwname').replace(" ", "_"))
result = libraryMethodToCall(name, attributes)
keyword = TestStep(name=name,
title=attributes.get('kwname'),
attachments=[],
steps=[],
start=now(),)
if self.stack:
self.stack.append(keyword)
return keyword
        if(attributes.get('type') == 'Keyword' or (attributes.get('type') == 'Teardown' and len(self.stack) != 0)):
keyword = TestStep(name=name,
title=attributes.get('kwname'),
attachments=[],
steps=[],
start=now(),)
if self.stack:
self.stack.append(keyword)
return keyword
"""
Processing the Suite Setup.
Although there is no test case yet, a virtual one is created to allow
for the inclusion of the keyword.
"""
if(attributes.get('type') == 'Setup' and len(self.stack) == 0):
self.start_suitesetup(name, attributes)
return
if(attributes.get('type') == 'Teardown' and len(self.stack) == 0):
self.start_suitesetup(name, attributes)
return
def end_keyword(self, name, attributes):
        # logger.console('\nend_keyword: ['+name+']')
        # logger.console('    ['+attributes['type']+'] [stack length] ['+str(len(self.stack))+'] [testsuite length] ['+ str(len(self.testsuite.tests))+']')
if len(self.stack) > 0:
            if(attributes.get('type') == 'Keyword' or (attributes.get('type') == 'Teardown' and isinstance(self.stack[-1], TestStep))):
step = self.stack.pop()
if(attributes.get('status') == 'FAIL'):
step.status = 'failed'
elif(attributes.get('status') == 'PASS'):
step.status = 'passed'
step.stop = now()
# Append the step to the previous item. This can be another step, or
# another keyword.
self.stack[-1].steps.append(step)
return
if(attributes.get('type') == 'Setup' and len(self.testsuite.tests) == 0):
self.end_suitesetup(name, attributes)
return
        if(attributes.get('type') == 'Teardown' and isinstance(self.stack[-1], TestCase)):
self.end_suitesetup(name, attributes)
return
return
def message(self, msg):
pass
def log_message(self, msg):
# logger.console(pprint.pformat(msg))
# logger.console(self.stack[-1].title)
# Check to see if there are any items to add the log message to
# this check is needed because otherwise Suite Setup may fail.
if len(self.stack) > 0:
if self.stack[-1].title == 'Capture Page Screenshot':
screenshot = re.search('[a-z]+-[a-z]+-[0-9]+.png',msg['message'])
if screenshot:
self.attach('{}'.format(screenshot.group(0)) , screenshot.group(0))
if(msg['html']=='yes'):
screenshot = re.search('[a-z]+-[a-z]+-[0-9]+.png',msg['message'])
kwname = '{}'.format(screenshot.group(0))
# logger.console('kwname: '+kwname)
else:
kwname = msg['message']
startKeywordArgs= {'args': [],
'assign': [],
'doc': '',
'kwname': kwname,
'libname': 'BuiltIn',
'starttime': now(),
'tags': [],
'type': 'Keyword'}
self.start_keyword('Log Message', startKeywordArgs)
endKeywordArgs= {'args': [],
'assign': [],
'doc': '',
'elapsedtime': 0,
'endtime': now(),
'kwname': kwname,
'libname': 'BuiltIn',
'starttime': now(),
'status': 'PASS',
'tags': [],
'type': 'Keyword'}
self.end_keyword('Log Message', endKeywordArgs)
return
def close(self):
IsSuiteDirectory = os.path.isdir(self.SuitSrc)
if(not(IsSuiteDirectory)):
self.save_environment()
# self.save_properties()
self.AllureProperties.save_properties()
if (self.AllureProperties.get_property('allure.cli.outputfiles') and self.PabotPoolId is None):
self.allure(self.AllureProperties)
return
# Helper functions
def save_environment(self):
environment = {}
environment['id'] = 'Robot Framework'
environment['name'] = socket.getfqdn()
environment['url']= 'http://'+socket.getfqdn()+':8000'
env_dict = (\
{'Robot Framework Full Version': get_full_version()},\
{'Robot Framework Version': get_version()},\
{'Interpreter': get_interpreter()},\
{'Python version': sys.version.split()[0]},\
{'Allure Adapter version': VERSION},\
{'Robot Framework CLI Arguments': sys.argv[1:]},\
{'Robot Framework Hostname': socket.getfqdn()},\
{'Robot Framework Platform': sys.platform}\
)
for key in env_dict:
self.AllureImplc.environment.update(key)
self.AllureImplc.logdir = self.AllureProperties.get_property('allure.cli.logs.xml')
self.AllureImplc.store_environment(environment)
def allure(self, AllureProps):
JAVA_PATH= AllureProps.get_property('allure.java.path')
ALLURE_HOME= '-Dallure.home='+AllureProps.get_property('allure.home')
JAVA_CLASSPATH= '-cp "'+ AllureProps.get_property('allure.java.classpath')+'"'
ALLURE_LOGFILE= AllureProps.get_property('allure.cli.logs.xml')
ALLURE_OUTPUT= '-o '+ AllureProps.get_property('allure.cli.logs.output')
JAVA_CLASS= 'ru.yandex.qatools.allure.CommandLine'
ALLURE_COMMAND= 'generate'
ALLURE_URL= AllureProps.get_property('allure.results.url')
allure_cmd = JAVA_PATH + ' ' + ALLURE_HOME + ' ' + JAVA_CLASSPATH + ' ' + JAVA_CLASS + ' ' + ALLURE_COMMAND + ' ' + ALLURE_LOGFILE + ' ' + ALLURE_OUTPUT
if(AllureProps.get_property('allure.cli.outputfiles')=='True'):
FNULL = open(os.devnull, 'w') #stdout=FNULL,
subprocess.Popen(allure_cmd, stderr=subprocess.STDOUT, shell=True).wait()
if(AllureProps.get_property('allure.results.browser.open')=='True'):
webbrowser.open(ALLURE_URL, new=0, autoraise=True)
def attach(self, title, contents, attach_type=AttachmentType.PNG):
"""
This functions created the attachments and append it to the test.
"""
# logger.console("attach-title: "+title)
contents = os.path.join(BuiltIn().get_variable_value('${OUTPUT_DIR}'), contents)
with open(contents, 'rb') as f:
file_contents = f.read()
attach = Attach(source=self.AllureImplc._save_attach(file_contents, attach_type),
title=title,
type=attach_type)
self.stack[-1].attachments.append(attach)
return
def Set_Output_Dir(self, name, attributes):
copy_dir_contents(self.AllureProperties.get_property('allure.cli.logs.xml'), attributes['args'][0])
self.AllureProperties.set_property('allure.cli.logs.xml', attributes['args'][0])
self.AllureImplc.logdir = attributes['args'][0]
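# Hedged example of the allure.properties keys this listener reads; the values
# below are placeholders, not taken from the original project:
#
#   allure.issues.id.pattern=ISSUE-\\d+
#   allure.cli.logs.xml=C:\\robot\\output\\Allure
#   allure.cli.logs.xml.clear=True
#   allure.cli.logs.output=C:\\robot\\output\\allure-report
#   allure.cli.outputfiles=True
#   allure.java.path=java
#   allure.home=C:\\allure
#   allure.java.classpath=C:\\allure\\lib\\*
#   allure.results.url=http://localhost:8000
#   allure.results.browser.open=False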
1654980
data = (
'syae', # 0x00
'syaeg', # 0x01
'syaegg', # 0x02
'syaegs', # 0x03
'syaen', # 0x04
'syaenj', # 0x05
'syaenh', # 0x06
'syaed', # 0x07
'syael', # 0x08
'syaelg', # 0x09
'syaelm', # 0x0a
'syaelb', # 0x0b
'syaels', # 0x0c
'syaelt', # 0x0d
'syaelp', # 0x0e
'syaelh', # 0x0f
'syaem', # 0x10
'syaeb', # 0x11
'syaebs', # 0x12
'syaes', # 0x13
'syaess', # 0x14
'syaeng', # 0x15
'syaej', # 0x16
'syaec', # 0x17
'syaek', # 0x18
'syaet', # 0x19
'syaep', # 0x1a
'syaeh', # 0x1b
'seo', # 0x1c
'seog', # 0x1d
'seogg', # 0x1e
'seogs', # 0x1f
'seon', # 0x20
'seonj', # 0x21
'seonh', # 0x22
'seod', # 0x23
'seol', # 0x24
'seolg', # 0x25
'seolm', # 0x26
'seolb', # 0x27
'seols', # 0x28
'seolt', # 0x29
'seolp', # 0x2a
'seolh', # 0x2b
'seom', # 0x2c
'seob', # 0x2d
'seobs', # 0x2e
'seos', # 0x2f
'seoss', # 0x30
'seong', # 0x31
'seoj', # 0x32
'seoc', # 0x33
'seok', # 0x34
'seot', # 0x35
'seop', # 0x36
'seoh', # 0x37
'se', # 0x38
'seg', # 0x39
'segg', # 0x3a
'segs', # 0x3b
'sen', # 0x3c
'senj', # 0x3d
'senh', # 0x3e
'sed', # 0x3f
'sel', # 0x40
'selg', # 0x41
'selm', # 0x42
'selb', # 0x43
'sels', # 0x44
'selt', # 0x45
'selp', # 0x46
'selh', # 0x47
'sem', # 0x48
'seb', # 0x49
'sebs', # 0x4a
'ses', # 0x4b
'sess', # 0x4c
'seng', # 0x4d
'sej', # 0x4e
'sec', # 0x4f
'sek', # 0x50
'set', # 0x51
'sep', # 0x52
'seh', # 0x53
'syeo', # 0x54
'syeog', # 0x55
'syeogg', # 0x56
'syeogs', # 0x57
'syeon', # 0x58
'syeonj', # 0x59
'syeonh', # 0x5a
'syeod', # 0x5b
'syeol', # 0x5c
'syeolg', # 0x5d
'syeolm', # 0x5e
'syeolb', # 0x5f
'syeols', # 0x60
'syeolt', # 0x61
'syeolp', # 0x62
'syeolh', # 0x63
'syeom', # 0x64
'syeob', # 0x65
'syeobs', # 0x66
'syeos', # 0x67
'syeoss', # 0x68
'syeong', # 0x69
'syeoj', # 0x6a
'syeoc', # 0x6b
'syeok', # 0x6c
'syeot', # 0x6d
'syeop', # 0x6e
'syeoh', # 0x6f
'sye', # 0x70
'syeg', # 0x71
'syegg', # 0x72
'syegs', # 0x73
'syen', # 0x74
'syenj', # 0x75
'syenh', # 0x76
'syed', # 0x77
'syel', # 0x78
'syelg', # 0x79
'syelm', # 0x7a
'syelb', # 0x7b
'syels', # 0x7c
'syelt', # 0x7d
'syelp', # 0x7e
'syelh', # 0x7f
'syem', # 0x80
'syeb', # 0x81
'syebs', # 0x82
'syes', # 0x83
'syess', # 0x84
'syeng', # 0x85
'syej', # 0x86
'syec', # 0x87
'syek', # 0x88
'syet', # 0x89
'syep', # 0x8a
'syeh', # 0x8b
'so', # 0x8c
'sog', # 0x8d
'sogg', # 0x8e
'sogs', # 0x8f
'son', # 0x90
'sonj', # 0x91
'sonh', # 0x92
'sod', # 0x93
'sol', # 0x94
'solg', # 0x95
'solm', # 0x96
'solb', # 0x97
'sols', # 0x98
'solt', # 0x99
'solp', # 0x9a
'solh', # 0x9b
'som', # 0x9c
'sob', # 0x9d
'sobs', # 0x9e
'sos', # 0x9f
'soss', # 0xa0
'song', # 0xa1
'soj', # 0xa2
'soc', # 0xa3
'sok', # 0xa4
'sot', # 0xa5
'sop', # 0xa6
'soh', # 0xa7
'swa', # 0xa8
'swag', # 0xa9
'swagg', # 0xaa
'swags', # 0xab
'swan', # 0xac
'swanj', # 0xad
'swanh', # 0xae
'swad', # 0xaf
'swal', # 0xb0
'swalg', # 0xb1
'swalm', # 0xb2
'swalb', # 0xb3
'swals', # 0xb4
'swalt', # 0xb5
'swalp', # 0xb6
'swalh', # 0xb7
'swam', # 0xb8
'swab', # 0xb9
'swabs', # 0xba
'swas', # 0xbb
'swass', # 0xbc
'swang', # 0xbd
'swaj', # 0xbe
'swac', # 0xbf
'swak', # 0xc0
'swat', # 0xc1
'swap', # 0xc2
'swah', # 0xc3
'swae', # 0xc4
'swaeg', # 0xc5
'swaegg', # 0xc6
'swaegs', # 0xc7
'swaen', # 0xc8
'swaenj', # 0xc9
'swaenh', # 0xca
'swaed', # 0xcb
'swael', # 0xcc
'swaelg', # 0xcd
'swaelm', # 0xce
'swaelb', # 0xcf
'swaels', # 0xd0
'swaelt', # 0xd1
'swaelp', # 0xd2
'swaelh', # 0xd3
'swaem', # 0xd4
'swaeb', # 0xd5
'swaebs', # 0xd6
'swaes', # 0xd7
'swaess', # 0xd8
'swaeng', # 0xd9
'swaej', # 0xda
'swaec', # 0xdb
'swaek', # 0xdc
'swaet', # 0xdd
'swaep', # 0xde
'swaeh', # 0xdf
'soe', # 0xe0
'soeg', # 0xe1
'soegg', # 0xe2
'soegs', # 0xe3
'soen', # 0xe4
'soenj', # 0xe5
'soenh', # 0xe6
'soed', # 0xe7
'soel', # 0xe8
'soelg', # 0xe9
'soelm', # 0xea
'soelb', # 0xeb
'soels', # 0xec
'soelt', # 0xed
'soelp', # 0xee
'soelh', # 0xef
'soem', # 0xf0
'soeb', # 0xf1
'soebs', # 0xf2
'soes', # 0xf3
'soess', # 0xf4
'soeng', # 0xf5
'soej', # 0xf6
'soec', # 0xf7
'soek', # 0xf8
'soet', # 0xf9
'soep', # 0xfa
'soeh', # 0xfb
'syo', # 0xfc
'syog', # 0xfd
'syogg', # 0xfe
'syogs', # 0xff
)
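# Hedged usage note, not part of the original file: 256-entry tables like this
# are typically indexed by the low byte of a Unicode code point in
# transliteration libraries (the consuming code is assumed), e.g.:
#
#   assert data[0x1c] == 'seo'
#   assert data[0xfc] == 'syo'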
1654988
import itertools as it, operator as op, functools as ft
from pathlib import Path
import unittest
from . import _common as c
@c.tb.u.attr_struct
class TestTripStop: keys = 'stop_id dts_arr dts_dep'
@c.tb.u.attr_struct
class TestFootpath: keys = 'src dst dt'
class SimpleTestCase(unittest.TestCase):
dt_ch = 2*60 # fixed time-delta overhead for changing trips (i.e. p->p footpaths)
def __init__(self, test_name, test_data):
self.test_name, self.test_data = test_name, test_data
setattr(self, test_name, self.run_test)
super(SimpleTestCase, self).__init__(test_name)
def init_router(self):
types = c.tb.t.public
trips, stops, footpaths = types.Trips(), types.Stops(), types.Footpaths()
tt = self.test_data.timetable or dict()
if not set(tt.keys()).difference(['trips', 'footpaths']):
tt_trips, tt_footpaths = (tt.get(k, list()) for k in ['trips', 'footpaths'])
else: tt_trips, tt_footpaths = self.test_data.timetable, list()
for trip_id, trip_data in tt_trips.items():
trip = types.Trip()
for stopidx, ts in enumerate(trip_data):
stop_id, dts_arr, dts_dep = c.struct_from_val(ts, TestTripStop, as_tuple=True)
if not dts_arr or dts_arr == 'x': dts_arr = dts_dep
if not dts_dep or dts_dep == 'x': dts_dep = dts_arr
dts_arr, dts_dep = map(c.tb.u.dts_parse, [dts_arr, dts_dep])
stop = stops.add(types.Stop(stop_id, stop_id, 0, 0))
trip.add(types.TripStop(trip, stopidx, stop, dts_arr, dts_dep))
trips.add(trip)
with footpaths.populate() as fp_add:
for spec in tt_footpaths:
src_id, dst_id, delta = c.struct_from_val(spec, TestFootpath, as_tuple=True)
src, dst = (stops.add(types.Stop(s, s, 0, 0)) for s in [src_id, dst_id])
fp_add(src, dst, delta * 60)
for stop in stops: fp_add(stop, stop, self.dt_ch)
timetable = types.Timetable(stops, footpaths, trips)
router = c.tb.engine.TBRoutingEngine(timetable, timer_func=c.tb.calc_timer)
checks = c.GraphAssertions(router.graph)
return timetable, router, checks
def run_test(self):
timetable, router, checks = self.init_router()
checks.assert_journey_components(self.test_data)
goal = c.struct_from_val(self.test_data.goal, c.TestGoal)
goal.dts_start = timetable.dts_parse(goal.dts_start)
goal.src, goal.dst = op.itemgetter(goal.src, goal.dst)(timetable.stops)
if not goal.dts_latest:
journeys = router.query_earliest_arrival(goal.src, goal.dst, goal.dts_start)
else:
goal.dts_latest = timetable.dts_parse(goal.dts_latest)
journeys = router.query_profile(goal.src, goal.dst, goal.dts_start, goal.dts_latest)
checks.assert_journey_results(self.test_data, journeys)
class SimpleGraphTests(unittest.TestSuite):
def __init__(self):
path_file = Path(__file__)
tests, tests_data = list(), c.load_test_data(
path_file.parent, path_file.stem, 'journey-planner-csa' )
for test_name, test_data in tests_data.items():
tests.append(SimpleTestCase(test_name, test_data))
super(SimpleGraphTests, self).__init__(tests)
def load_tests(loader, tests, pattern):
# XXX: because unittest in pypy3/3.3 doesn't have subTest ctx yet
return SimpleGraphTests()
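# Hedged sketch of the test_data shape this module consumes, inferred from the
# parsing above; the on-disk format used by c.load_test_data is assumed:
#
#   timetable:
#     trips:
#       tripA: [[stopA, x, '10:00'], [stopB, '10:10', '10:12']]  # stop_id, dts_arr, dts_dep ('x' = copy the other)
#     footpaths:
#       - [stopA, stopB, 5]                                      # src, dst, minutes
#   goal: {src: stopA, dst: stopB, dts_start: '09:50'}           # adding dts_latest switches to a profile query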
1654998
from keras import backend as K
import os
# Parameters
candle_lib = '/data/BIDS-HPC/public/candle/Candle/common'
def initialize_parameters():
print('Initializing parameters...')
# Obtain the path of the directory of this script
file_path = os.path.dirname(os.path.realpath(__file__))
# Import the CANDLE library
import sys
sys.path.append(candle_lib)
import candle_keras as candle
# Instantiate the candle.Benchmark class
mymodel_common = candle.Benchmark(file_path,os.getenv("DEFAULT_PARAMS_FILE"),'keras',prog='myprog',desc='My model')
    # Get a dictionary of the model hyperparameters
gParameters = candle.initialize_parameters(mymodel_common)
# Return the dictionary of the hyperparameters
return(gParameters)
def run(gParameters):
print('Running model...')
#### Begin model input ##########################################################################################
def get_model(model_json_fname,modelwtsfname):
# This is only for prediction
if os.path.isfile(model_json_fname):
# Model reconstruction from JSON file
with open(model_json_fname, 'r') as f:
model = model_from_json(f.read())
else:
model = get_unet()
#model.summary()
# Load weights into the new model
model.load_weights(modelwtsfname)
return model
def focal_loss(gamma=2., alpha=.25):
def focal_loss_fixed(y_true, y_pred):
pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
return -K.sum(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1))-K.sum((1-alpha) * K.pow( pt_0, gamma) * K.log(1. - pt_0))
return focal_loss_fixed
def jaccard_coef(y_true, y_pred):
smooth = 1.0
        intersection = K.sum(y_true * y_pred, axis=[0, -1, -2])  # original wrote [-0, -1, 2]; equivalent for 4D tensors
        sum_ = K.sum(y_true + y_pred, axis=[0, -1, -2])
jac = (intersection + smooth) / (sum_ - intersection + smooth)
return K.mean(jac)
def jaccard_coef_int(y_true, y_pred):
smooth = 1.0
y_pred_pos = K.round(K.clip(y_pred, 0, 1))
        intersection = K.sum(y_true * y_pred_pos, axis=[0, -1, -2])
        sum_ = K.sum(y_true + y_pred_pos, axis=[0, -1, -2])
jac = (intersection + smooth) / (sum_ - intersection + smooth)
return K.mean(jac)
def jaccard_coef_loss(y_true, y_pred):
        return -K.log(jaccard_coef(y_true, y_pred)) + binary_crossentropy(y_true, y_pred)  # K.binary_crossentropy takes (target, output)
def dice_coef_batch(y_true, y_pred):
smooth = 1.0
        intersection = K.sum(y_true * y_pred, axis=[0, -1, -2])
        sum_ = K.sum(y_true + y_pred, axis=[0, -1, -2])
        dice = ((2.0 * intersection) + smooth) / (sum_ + smooth)  # denominator is |A| + |B|, consistent with dice_coef below
return K.mean(dice)
def dice_coef(y_true, y_pred):
smooth = 1.0
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
dice_smooth = ((2. * intersection) + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
return (dice_smooth)
def dice_coef_loss(y_true, y_pred):
return -dice_coef(y_true, y_pred)
def dice_coef_batch_loss(y_true, y_pred):
return -dice_coef_batch(y_true, y_pred)
#Define the neural network
def get_unet():
droprate = 0.25
filt_size = 32
inputs = Input((None, None, 1))
conv1 = Conv2D(filt_size, (3, 3), activation='relu', padding='same')(inputs)
conv1 = Dropout(droprate)(conv1)
conv1 = Conv2D(filt_size, (3, 3), activation='relu', padding='same')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
filt_size = filt_size*2
conv2 = Conv2D(filt_size, (3, 3), activation='relu', padding='same')(pool1)
conv2 = Dropout(droprate)(conv2)
conv2 = Conv2D(filt_size, (3, 3), activation='relu', padding='same')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
filt_size = filt_size*2
conv3 = Conv2D(filt_size, (3, 3), activation='relu', padding='same')(pool2)
conv3 = Dropout(droprate)(conv3)
conv3 = Conv2D(filt_size, (3, 3), activation='relu', padding='same')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
filt_size = filt_size*2
conv4 = Conv2D(filt_size, (3, 3), activation='relu', padding='same')(pool3)
conv4 = Dropout(droprate)(conv4)
conv4 = Conv2D(filt_size, (3, 3), activation='relu', padding='same')(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
filt_size = filt_size*2
conv5 = Conv2D(filt_size, (3, 3), activation='relu', padding='same')(pool4)
conv5 = Dropout(droprate)(conv5)
conv5 = Conv2D(filt_size, (3, 3), activation='relu', padding='same')(conv5)
        filt_size = filt_size // 2  # integer division keeps Conv2D filter counts integral on Python 3
up6 = concatenate([Conv2DTranspose(filt_size, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)
conv6 = Conv2D(filt_size, (3, 3), activation='relu', padding='same')(up6)
conv6 = Dropout(droprate)(conv6)
conv6 = Conv2D(filt_size, (3, 3), activation='relu', padding='same')(conv6)
        filt_size = filt_size // 2
up7 = concatenate([Conv2DTranspose(filt_size, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)
conv7 = Conv2D(filt_size, (3, 3), activation='relu', padding='same')(up7)
conv7 = Dropout(droprate)(conv7)
conv7 = Conv2D(filt_size, (3, 3), activation='relu', padding='same')(conv7)
        filt_size = filt_size // 2
up8 = concatenate([Conv2DTranspose(filt_size, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)
conv8 = Conv2D(filt_size, (3, 3), activation='relu', padding='same')(up8)
conv8 = Dropout(droprate)(conv8)
conv8 = Conv2D(filt_size, (3, 3), activation='relu', padding='same')(conv8)
        filt_size = filt_size // 2
up9 = concatenate([Conv2DTranspose(filt_size, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)
conv9 = Conv2D(filt_size, (3, 3), activation='relu', padding='same')(up9)
conv9 = Dropout(droprate)(conv9)
conv9 = Conv2D(filt_size, (3, 3), activation='relu', padding='same')(conv9)
conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)
model = Model(inputs=[inputs], outputs=[conv10])
#model.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss, metrics=[dice_coef])
#model.compile(optimizer=Nadam(lr=1e-3), loss=dice_coef_loss, metrics=[dice_coef])
#model.compile(optimizer=Adadelta(), loss=dice_coef_loss, metrics=[dice_coef])
return model
def save_model_to_json(model,model_json_fname):
#model = unet.UResNet152(input_shape=(None, None, 3), classes=1,encoder_weights="imagenet11k")
#model = get_unet()
#model.summary()
# serialize model to JSON
model_json = model.to_json()
with open(model_json_fname, "w") as json_file:
json_file.write(model_json)
def preprocess_data(do_prediction,inputnpyfname,targetnpyfname,expandChannel,backbone):
# Preprocess the data (beyond what I already did before)
print('-'*30)
print('Loading and preprocessing data...')
print('-'*30)
# Load, normalize, and cast the data
imgs_input = ( np.load(inputnpyfname).astype('float32') / (2**16-1) * (2**8-1) ).astype('uint8')
print('Input images information:')
print(imgs_input.shape)
print(imgs_input.dtype)
hist,bins = np.histogram(imgs_input)
print(hist)
print(bins)
if not do_prediction:
imgs_mask_train = np.load(targetnpyfname).astype('uint8')
print('Input masks information:')
print(imgs_mask_train.shape)
print(imgs_mask_train.dtype)
hist,bins = np.histogram(imgs_mask_train)
print(hist)
print(bins)
# Make the grayscale images RGB since that's what the model expects apparently
if expandChannel:
imgs_input = np.stack((imgs_input,)*3, -1)
else:
imgs_input = np.expand_dims(imgs_input, 3)
print('New shape of input images:')
print(imgs_input.shape)
if not do_prediction:
imgs_mask_train = np.expand_dims(imgs_mask_train, 3)
print('New shape of masks:')
print(imgs_mask_train.shape)
# Preprocess as per https://github.com/qubvel/segmentation_models
preprocessing_fn = get_preprocessing(backbone)
imgs_input = preprocessing_fn(imgs_input)
# Return appropriate variables
if not do_prediction:
return(imgs_input,imgs_mask_train)
else:
return(imgs_input)
# Import relevant modules and functions
import sys
sys.path.append(gParameters['segmentation_models_repo'])
import numpy as np
from keras.models import Model
from keras.layers import Input, concatenate, Conv2D, MaxPooling2D, Conv2DTranspose, Dropout
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint,ReduceLROnPlateau,EarlyStopping,CSVLogger
from keras.layers.normalization import BatchNormalization
from keras.backend import binary_crossentropy
import keras
import random
import tensorflow as tf
from keras.models import model_from_json
from segmentation_models import Unet
from segmentation_models.backbones import get_preprocessing
K.set_image_data_format('channels_last') # TF dimension ordering in this code
# Basically constants
expandChannel = True
modelwtsfname = 'model_weights.h5'
model_json_fname = 'model.json'
csvfname = 'model.csv'
do_prediction = gParameters['predict']
if not do_prediction: # Train...
print('Training...')
# Parameters
inputnpyfname = gParameters['images']
labels = gParameters['labels']
initialize = gParameters['initialize']
backbone = gParameters['backbone']
encoder = gParameters['encoder']
lr = float(gParameters['lr'])
batch_size = gParameters['batch_size']
obj_return = gParameters['obj_return']
epochs = gParameters['epochs']
# Preprocess the data
imgs_train,imgs_mask_train = preprocess_data(do_prediction,inputnpyfname,labels,expandChannel,backbone)
# Load, save, and compile the model
model = Unet(backbone_name=backbone, encoder_weights=encoder)
save_model_to_json(model,model_json_fname)
model.compile(optimizer=Adam(lr=lr), loss='binary_crossentropy', metrics=['binary_crossentropy','mean_squared_error',dice_coef, dice_coef_batch, focal_loss()])
# Load previous weights for restarting, if desired and possible
if os.path.isfile(initialize):
print('-'*30)
print('Loading previous weights ...')
model.load_weights(initialize)
# Set up the training callback functions
model_checkpoint = ModelCheckpoint(modelwtsfname, monitor=obj_return, save_best_only=True)
reduce_lr = ReduceLROnPlateau(monitor=obj_return, factor=0.1,patience=100, min_lr=0.001,verbose=1)
model_es = EarlyStopping(monitor=obj_return, min_delta=0.00000001, patience=100, verbose=1, mode='auto')
csv_logger = CSVLogger(csvfname, append=True)
# Train the model
history_callback = model.fit(imgs_train, imgs_mask_train, batch_size=batch_size, epochs=epochs, verbose=2, shuffle=True, validation_split=0.10, callbacks=[model_checkpoint, reduce_lr, model_es, csv_logger])
print("Minimum validation loss:")
print(min(history_callback.history[obj_return]))
else: # ...or predict
print('Inferring...')
# Parameters
inputnpyfname = gParameters['images']
initialize = gParameters['initialize']
backbone = gParameters['backbone']
# lr = float(gParameters['lr']) # this isn't needed but we're keeping it for the U-Net, where it is "needed"
# Preprocess the data
imgs_infer = preprocess_data(do_prediction,inputnpyfname,'',expandChannel,backbone)
# Load the model
#model = get_model(model_json_fname,initialize)
model = get_model(os.path.dirname(initialize)+'/'+model_json_fname,initialize)
# Run inference
imgs_test_predict = model.predict(imgs_infer, batch_size=1, verbose=1)
# Save the predicted masks
np.save('mask_predictions.npy', np.squeeze(np.round(imgs_test_predict).astype('uint8')))
history_callback = None
#### End model input ############################################################################################
return(history_callback)
def main():
print('Running main program...')
gParameters = initialize_parameters()
run(gParameters)
if __name__ == '__main__':
main()
try:
K.clear_session()
except AttributeError:
pass
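# Hedged invocation sketch: initialize_parameters() reads the hyperparameter
# file path from the DEFAULT_PARAMS_FILE environment variable; the file and
# script names below are placeholders.
#
#   DEFAULT_PARAMS_FILE=default_params.txt python mymodel.py
#
# Required keys include 'predict', 'images', 'initialize', 'backbone' and
# 'segmentation_models_repo', plus 'labels', 'encoder', 'lr', 'batch_size',
# 'obj_return' and 'epochs' when training.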
1655032
from rest_framework import viewsets
from rest_framework.decorators import detail_route
from rest_framework.response import Response
from .models import CopyFileDriver
from .serializers import CopyFileDriverSerializer
class CopyFileDriverViewSet(viewsets.ModelViewSet):
queryset = CopyFileDriver.objects.all()
serializer_class = CopyFileDriverSerializer
@detail_route(methods=['GET'])
def test(self, request, pk=None):
# This is a test method in itself so has no unit test associated with it
fd = self.get_object()
int_dict = {
'project_identifier': 'PI12345',
'product_identifier': 'PRO12345',
'run_identifier': '12345-67890-12345-567-987',
}
for cpth in fd.locations.all():
print(cpth.copy_to_path(int_dict))
print(cpth.copy_from_path(int_dict))
return Response('')
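# Hedged usage note: registered with a DRF router, this detail route would be
# reachable at something like GET /<prefix>/<pk>/test/; the URL prefix depends
# on how the viewset is registered, which is not shown here.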
1655043
import tensorflow as tf
from extra_layers.Clip import Clip
from extra_layers.OutputSplit import OutputSplit
from extra_layers.Padding import ReflectPadding2D
from extra_layers.BinaryOp import Div
from extra_layers.UnaryOp import Sqrt
from extra_layers.UnaryOp import Swish
from extra_layers.Resize import Interp
from extra_layers.UnaryOp import DropConnect
extra_custom_objects = {'relu6': tf.nn.relu6,
'OutputSplit': OutputSplit, 'Clip': Clip, 'ReflectPadding2D': ReflectPadding2D,
'Div': Div, 'Sqrt': Sqrt, 'Interp': Interp, 'Swish': Swish, 'DropConnect': DropConnect}
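# Hedged usage sketch, not part of the original file (the model file name is a
# placeholder): dictionaries like this are passed to Keras model loading so the
# custom layers can be deserialized.
#
#   from tensorflow.keras.models import load_model
#   model = load_model('model.h5', custom_objects=extra_custom_objects)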
1655047
import os
import re
import nibabel as nib
import numpy as np
from glob import glob
from sys import argv
dirname=argv[1]
print(dirname)
### COLLECT ALL MNCs
all_fls = []
for dirs, things, fls in os.walk(dirname):
if len(fls) > 0:
for fl in fls:
all_fls.append(os.path.join(dirs,fl))
all_mncs = [x for x in all_fls if '.mnc' in x]
print('%s .mnc and .mnc.gz files found'%(len(all_mncs)))
### SEARCH TO SEE IF NIFTI VERSIONS ALREADY EXIST
already_done = []
for mnc in all_mncs:
print(mnc)
    flnm = re.sub(r'\.mnc(\.gz)?$', '', mnc)  # the original stripped '.mnc' first, leaving a stray '.gz'
print(flnm)
ni = glob('%s.ni*'%flnm)
if len(ni) > 0:
already_done.append(mnc)
print('%s mncs already have a nifti version. Skipping these files...'%(len(already_done)))
for x in already_done:
    all_mncs.remove(x)
print('the following files will be converted:')
for x in all_mncs:
    print(x)
### TRANSFORM FILES
for mnc in all_mncs:
    flnm = re.sub(r'\.mnc(\.gz)?$', '', mnc)
    new_nm = '%s.nii.gz' % flnm  # both branches of the original gz check produced the same name
print(new_nm)
img = nib.load(mnc)
data = img.get_data()
affine =img.affine
if len(data.shape) == 4 :
out = np.zeros( [ data.shape[1], data.shape[2], data.shape[3], data.shape[0] ] )
for t in range(data.shape[0]) :
out[:,:,:,t] = data[t,:,:,:]
else : out = data
nifti = nib.Nifti1Image(out, affine)
nifti.to_filename(new_nm)
print('converted %s to %s'%(mnc,new_nm))
#if ans:
# os.remove(mnc)
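# Hedged invocation sketch (the script name is a placeholder): the directory is
# walked recursively for .mnc / .mnc.gz files, as above.
#
#   python mnc_to_nifti.py /path/to/minc_data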
1655048
def could_be(original, another):
    """Return True if every whitespace-separated token of `another` occurs among `original`'s tokens."""
if not original.strip() or not another.strip():
return False
return set(another.split()).issubset(original.split())
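if __name__ == '__main__':
    # Hedged usage examples; the behaviour follows directly from the set check above.
    assert could_be('John Doe Smith', 'John Smith')      # tokens are a subset
    assert not could_be('John Smith', 'John Doe Smith')  # 'Doe' is missing
    assert not could_be('John Smith', '   ')             # blank strings never match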
1655049
import cv2
import numpy as np
image = cv2.imread('calvinHobbes.jpeg')
height, width = image.shape[:2]
quarter_height, quarter_width = height/4, width/4
T = np.float32([[1, 0, quarter_width], [0, 1, quarter_height]])
img_translation = cv2.warpAffine(image, T, (width, height))
cv2.imshow("Originalimage", image)
cv2.imshow('Translation', img_translation)
cv2.imwrite('Translation.jpg', img_translation)
cv2.waitKey()
cv2.destroyAllWindows()
# cv2.imshow("Nearest Neighbour", scaled)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# cv2.imshow("Bilinear", scaled)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# cv2.imshow("Bicubic", scaled)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
1655053
from fastai.basic_train import load_learner
import pandas as pd
from pydub import AudioSegment
from librosa import get_duration
from pathlib import Path
from numpy import floor
from audio.data import AudioConfig, SpectrogramConfig, AudioList
import os
import shutil
import tempfile
def load_model(mPath, mName="stg2-rn18.pkl"):
return load_learner(mPath, mName)
def get_wave_file(wav_file):
'''
Function to load a wav file
'''
return AudioSegment.from_wav(wav_file)
def export_wave_file(audio, begin, end, dest):
'''
Function to extract a smaller wav file based start and end duration information
'''
sub_audio = audio[begin * 1000:end * 1000]
sub_audio.export(dest, format="wav")
def extract_segments(audioPath, sampleDict, destnPath, suffix):
'''
    Function to extract segments given an audio folder path and proposal segments
'''
# Listing the local audio files
local_audio_files = str(audioPath) + '/'
for wav_file in sampleDict.keys():
audio_file = get_wave_file(local_audio_files + wav_file)
for begin_time, end_time in sampleDict[wav_file]:
output_file_name = wav_file.lower().replace(
'.wav', '') + '_' + str(begin_time) + '_' + str(
end_time) + suffix + '.wav'
output_file_path = destnPath + output_file_name
export_wave_file(audio_file, begin_time,
end_time, output_file_path)
class FastAIModel():
def __init__(self, model_path, model_name="stg2-rn18.pkl", threshold=0.5, min_num_positive_calls_threshold=3):
self.model = load_model(model_path, model_name)
self.threshold = threshold
self.min_num_positive_calls_threshold = min_num_positive_calls_threshold
def predict(self, wav_file_path):
'''
Function which generates local predictions using wavefile
'''
        # Creates a local directory to save the 2-second clips
# local_dir = "./fastai_dir/"
local_dir = tempfile.mkdtemp()+"/"
if os.path.exists(local_dir):
shutil.rmtree(local_dir, ignore_errors=False, onerror=None)
os.makedirs(local_dir)
else:
os.makedirs(local_dir)
# infer clip length
max_length = get_duration(filename=wav_file_path)
print(os.path.basename(wav_file_path))
print("Length of Audio Clip:{0}".format(max_length))
#max_length = 60
# Generating 2 sec proposal with 1 sec hop length
twoSecList = []
for i in range(int(floor(max_length)-1)):
twoSecList.append([i, i+2])
# Creating a proposal dictionary
two_sec_dict = {}
two_sec_dict[Path(wav_file_path).name] = twoSecList
# Creating 2 sec segments from the defined wavefile using proposals built above.
# "use_a_real_wavname.wav" will generate -> "use_a_real_wavname_1_3.wav", "use_a_real_wavname_2_4.wav" etc. files in fastai_dir folder
extract_segments(
str(Path(wav_file_path).parent),
two_sec_dict,
local_dir,
""
)
        # Defining the audio config needed to create mel spectrograms on the fly
config = AudioConfig(standardize=False,
sg_cfg=SpectrogramConfig(
f_min=0.0, # Minimum frequency to Display
f_max=10000, # Maximum Frequency to Display
hop_length=256,
n_fft=2560, # Number of Samples for Fourier
n_mels=256, # Mel bins
pad=0,
                                 to_db_scale=True,  # Converting to dB scale
                                 top_db=100,  # Top decibel cutoff
win_length=None,
n_mfcc=20)
)
config.duration = 4000 # 4 sec padding or snip
config.resample_to = 20000 # Every sample at 20000 frequency
config.downmix=True
# Creating a Audio DataLoader
test_data_folder = Path(local_dir)
tfms = None
test = AudioList.from_folder(
test_data_folder, config=config).split_none().label_empty()
testdb = test.transform(tfms).databunch(bs=32)
# Scoring each 2 sec clip
predictions = []
pathList = list(pd.Series(test_data_folder.ls()).astype('str'))
for item in testdb.x:
predictions.append(self.model.predict(item)[2][1])
# clean folder
shutil.rmtree(local_dir)
# Aggregating predictions
# Creating a DataFrame
prediction = pd.DataFrame({'FilePath': pathList, 'confidence': predictions})
# Converting prediction to float
prediction['confidence'] = prediction.confidence.astype(float)
# Extracting Starting time from file name
prediction['start_time_s'] = prediction.FilePath.apply(lambda x: int(x.split('_')[-2]))
# Sorting the file based on start_time_s
prediction = prediction.sort_values(
['start_time_s']).reset_index(drop=True)
# Rolling Window (to average at per second level)
submission = pd.DataFrame(
{
'wav_filename': Path(wav_file_path).name,
'duration_s': 1.0,
'confidence': list(prediction.rolling(2)['confidence'].mean().values)
}
).reset_index().rename(columns={'index': 'start_time_s'})
# Updating first row
submission.loc[0, 'confidence'] = prediction.confidence[0]
# Adding lastrow
lastLine = pd.DataFrame({
'wav_filename': Path(wav_file_path).name,
'start_time_s': [submission.start_time_s.max()+1],
'duration_s': 1.0,
'confidence': [prediction.confidence[prediction.shape[0]-1]]
})
submission = submission.append(lastLine, ignore_index=True)
submission = submission[['wav_filename', 'start_time_s', 'duration_s', 'confidence']]
# initialize output JSON
result_json = {}
result_json = dict(
submission=submission,
local_predictions=list((submission['confidence'] > self.threshold).astype(int)),
local_confidences=list(submission['confidence'])
)
result_json['global_prediction'] = int(sum(result_json["local_predictions"]) > self.min_num_positive_calls_threshold)
result_json['global_confidence'] = submission.loc[(submission['confidence'] > self.threshold), 'confidence'].mean()*100
if pd.isnull(result_json["global_confidence"]):
result_json["global_confidence"] = 0
return result_json
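# Hedged usage sketch (directory and file names are placeholders):
#
#   model = FastAIModel(model_path='models', model_name='stg2-rn18.pkl')
#   result = model.predict('recordings/example.wav')
#   print(result['global_prediction'], result['global_confidence'])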
1655061
import logging
import json
from datetime import datetime, timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from requests import (Request, Session)
_LOGGER = logging.getLogger(__name__)
from homeassistant.helpers.entity import Entity
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.util import Throttle
from homeassistant.const import (CONF_NAME)
CONF_STATION_ID = 'station_id'
DEFAULT_NAME = 'Cracow Air Quality'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_STATION_ID): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string
})
class AirQualityData(object):
def __init__(self, stationId):
self.stationId = stationId
self.table = {}
self.update = Throttle(timedelta(minutes=30))(self._update)
def _update(self):
try:
data_request = Request(
"POST",
"http://monitoring.krakow.pios.gov.pl/dane-pomiarowe/pobierz",
data=self._body()
).prepare()
_LOGGER.info("Downloading sensor inforamtion")
with Session() as sess:
response = sess.send(data_request, timeout=10)
data = json.loads(response.text)['data']
temp_table = {}
for metric in data['series']:
temp_table[metric['paramId']] = metric
self.table = temp_table
except Exception as e:
_LOGGER.error(e)
def get(self, key):
return self.table.get(key, {})
def _body(self):
return {
'query': json.dumps({
'measType': 'Auto',
'viewType': 'Station',
'viewTypeEntityId': self.stationId,
'channels': [49,54,61,57,211,53,50,55],
'dateRange': 'Day',
'date': datetime.now().strftime("%d.%m.%Y")
})
}
class AirCracowQualitySensor(Entity):
def __init__(self, data, metric_type, name):
self.metric_type = metric_type
self.data = data
self._friendly_name = self.get('paramLabel')
self._name = "{}_{}".format(name,metric_type)
self._state = 0
self._unit = None
@property
def friendly_name(self):
return self._friendly_name
@property
def name(self):
return self._name
@property
def state(self):
return self._state
@property
def unit_of_measurement(self):
return self._unit
@property
def icon(self):
return "mdi:skull"
def get(self, key):
return self.data.get(self.metric_type).get(key)
def update(self):
try:
self.data.update()
self._unit = self.get('unit')
self._state = self.get('data')[-1][1]
except Exception as e:
_LOGGER.error("Could not update sensor!")
_LOGGER.error(e)
def setup_platform(hass, config, add_devices, discovery_info=None):
stationId = config.get(CONF_STATION_ID)
name = config.get(CONF_NAME)
data = AirQualityData(stationId)
data.update()
sensors = []
for metric_type in data.table:
_LOGGER.info("Adding sensor: {}".format(metric_type))
sensors.append(AirCracowQualitySensor(data, metric_type, name))
add_devices(sensors, True)
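# Hedged configuration.yaml sketch: the platform name is an assumption; only
# station_id (required) and name (optional) come from PLATFORM_SCHEMA above.
#
#   sensor:
#     - platform: cracow_air_quality
#       station_id: "100"
#       name: Cracow Air Quality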
1655086
import jsonschema
import pytest
from barrage import config, defaults as d
def test_merge_defaults_dataset():
cfg = {}
result = config._merge_defaults(cfg.copy())
assert isinstance(result, dict)
assert result["dataset"]["transformer"] == d.TRANSFORMER
assert result["dataset"]["augmentor"] == d.AUGMENTOR
cfg = {
"dataset": {
"transformer": {"import": "IdentityTransformer"},
"augmentor": {"import": "unit-test", "params": {"unit": "test"}},
}
}
result = config._merge_defaults(cfg.copy())
assert result["dataset"] == cfg["dataset"]
cfg = {"dataset": []}
with pytest.raises(jsonschema.ValidationError):
config._merge_defaults(cfg)
def test_merge_defaults_solver():
cfg = {}
result = config._merge_defaults(cfg.copy())
assert isinstance(result, dict)
assert result["solver"]["batch_size"] == d.BATCH_SIZE
assert result["solver"]["epochs"] == d.EPOCHS
assert result["solver"]["optimizer"] == d.OPTIMIZER
cfg = {"solver": {"batch_size": 42, "epochs": 7, "optimizer": {"import": "SGD"}}}
result = config._merge_defaults(cfg.copy())
assert result["solver"] == cfg["solver"]
cfg = {"solver": []}
with pytest.raises(jsonschema.ValidationError):
config._merge_defaults(cfg)
def test_merge_defaults_services():
cfg = {}
result = config._merge_defaults(cfg.copy())
assert isinstance(result, dict)
assert result["services"]["best_checkpoint"] == d.BEST_CHECKPOINT
assert result["services"]["tensorboard"] == d.TENSORBOARD
assert result["services"]["train_early_stopping"] == d.TRAIN_EARLY_STOPPING
assert (
result["services"]["validation_early_stopping"] == d.VALIDATION_EARLY_STOPPING
)
cfg = {
"services": {
"best_checkpoint": {"monitor": "val_unit_test", "mode": "min"},
"tensorboard": {"batch_size": 42},
"train_early_stopping": {
"monitor": "unit_test",
"mode": "min",
"min_delta": 1e-7,
"patience": 42,
},
"validation_early_stopping": {
"monitor": "val_unit_test",
"mode": "max",
"min_delta": 1e-7,
"patience": 42,
},
}
}
result = config._merge_defaults(cfg.copy())
assert result["services"] == cfg["services"]
cfg = {"services": []}
with pytest.raises(jsonschema.ValidationError):
config._merge_defaults(cfg)
@pytest.fixture
def base_cfg():
return {
"dataset": {
"loader": {"import": "my_loader", "params": {"unit": "test"}},
"transformer": {"import": "my_tr", "params": {"unit": "test"}},
"augmentor": [
{"import": "my_aug_1", "params": {"unit": "test"}},
{"import": "my_aug_2"},
],
},
"model": {
"network": {"import": "my_net", "params": {"unit": "test"}},
"outputs": [
{
"name": "classification",
"loss": {"import": "crossentropy", "params": {"name": "ce"}},
"loss_weight": 1,
},
{
"name": "regression",
"loss": {"import": "mse"},
"loss_weight": 1,
"sample_weight_mode": "temporal",
"metrics": [{"import": "mse"}, {"import": "mae"}],
},
],
},
"solver": {
"optimizer": {
"import": "Adam",
"learning_rate": 1e-3,
"params": {"beta1": 0.9},
},
"learning_rate_reducer": {
"monitor": "val_loss",
"mode": "min",
"patience": 5,
"factor": 0.07,
},
"batch_size": 32,
"epochs": 10,
},
"services": {
"best_checkpoint": {"monitor": "val_loss", "mode": "min"},
"tensorboard": {},
"train_early_stopping": {
"monitor": "loss",
"mode": "min",
"patience": 2,
"min_delta": 1e-2,
"verbose": 0,
},
"validation_early_stopping": {
"monitor": "val_loss",
"mode": "min",
"patience": 2,
"min_delta": 1e-2,
},
},
}
def _test_validate_schema_pass_fail(cfg, result):
"""Helper function to test if config should pass or fail."""
if result:
config._validate_schema(cfg)
else:
with pytest.raises(jsonschema.ValidationError):
config._validate_schema(cfg)
def test_base_cfg(base_cfg):
cfg_1 = base_cfg.copy()
_test_validate_schema_pass_fail(base_cfg, True)
cfg_2 = base_cfg.copy()
assert cfg_1 == cfg_2
@pytest.mark.parametrize("section", ["dataset", "model", "solver", "services"])
def test_validate_schema_missing_sections(base_cfg, section):
del base_cfg[section]
_test_validate_schema_pass_fail(base_cfg, False)
def test_validate_schema_extra_sections(base_cfg):
base_cfg["unit"] = "test"
_test_validate_schema_pass_fail(base_cfg, False)
def set_nested(d, lst, val):
    """Set the value at the nested key path `lst` in `d`, or delete the key when val is None."""
    from functools import reduce
    import operator
    loc = reduce(operator.getitem, lst[:-1], d)
if val is not None:
loc[lst[-1]] = val
else:
del loc[lst[-1]]
return d
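# A quick illustration of how set_nested drives the parametrized cases below
# (not part of the original tests): a non-None val assigns at the key path,
# while val=None deletes the final key.
#   cfg = {"a": {"b": 1}}
#   set_nested(cfg, ["a", "b"], 2)     # -> {"a": {"b": 2}}
#   set_nested(cfg, ["a", "b"], None)  # -> {"a": {}}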
@pytest.mark.parametrize(
"lst,val,result",
[
(["loader"], "my_loader", False),
(["loader"], None, False),
(["loader", "params"], None, True),
(["loader", "import"], None, False),
(["loader"], {"import": "my_loader", "params": {"unit": "test"}}, True),
(["transformer"], "my_tr", False),
(["transformer"], None, False),
(["transformer", "params"], None, True),
(["transformer", "import"], None, False),
(["transformer"], {"import": "my_tr", "params": {"unit": "test"}}, True),
(["augmentor"], [], True),
(["augmentor"], False, False),
(["augmentor"], [{"import": "my_aug"}], True),
(["augmentor"], [{"import": "my_aug1"}, {"import": "my_aug2"}], True),
(["augmentor"], [{"import": "my_aug", "params": {"unit": "test"}}], True),
(["sample_count"], "sample_count", True),
(["sample_count"], 1, False),
(["seed"], "one", False),
(["seed"], 32, True),
(["seed"], -1, False),
(["extra"], False, False),
(["extra"], [], False),
],
)
def test_validate_schema_dataset(base_cfg, lst, val, result):
base_cfg["dataset"] = set_nested(base_cfg["dataset"], lst, val)
_test_validate_schema_pass_fail(base_cfg, result)
@pytest.mark.parametrize(
"lst,val,result",
[
(["network"], "my_net", False),
(["network"], None, False),
(["network", "params"], None, True),
(["network", "import"], None, False),
(["network"], {"import": "my_net", "params": {"unit": "test"}}, True),
(["outputs"], "classification", False),
(["outputs"], [], False),
(["outputs"], None, False),
(["outputs"], [{"name": 42, "loss": {"import": "mse"}}], False),
(["outputs"], [{"loss": {"import": "mse"}}], False),
(["outputs"], [{"name": "test", "loss": "mse"}], False),
(["outputs"], [{"name": "test", "loss": {"import": "mse"}}], True),
(["outputs", 0, "loss_weight"], -1, False),
(["outputs", 0, "loss_weight"], "one", False),
(["outputs", 0, "loss_weight"], 7, True),
(["outputs", 0, "sample_weight_mode"], -1, False),
(["outputs", 0, "sample_weight_mode"], "temporal", True),
(["outputs", 0, "sample_weight_mode"], 7, False),
(["outputs", 0, "metrics"], ["mse", "mse"], False),
(["outputs", 0, "metrics"], ["mse", {"import": "mse"}], False),
(["outputs", 0, "name"], None, False),
(["outputs", 0, "name"], 42, False),
(["outputs", 0, "name"], "regression", False),
(["outputs", 1, "name"], "classification", False),
(["outputs", 0, "loss_weight"], None, False),
(["outputs", 1, "loss_weight"], None, False),
],
)
def test_validate_schema_model(base_cfg, lst, val, result):
base_cfg["model"] = set_nested(base_cfg["model"], lst, val)
_test_validate_schema_pass_fail(base_cfg, result)
@pytest.mark.parametrize(
"lst,val,result",
[
(["batch_size"], [32, 64, 128], False),
(["batch_size"], "32", False),
(["batch_size"], 7.13, False),
(["batch_size"], -1, False),
(["batch_size"], 32, True),
(["batch_size"], 64, True),
(["batch_size"], 13, True),
(["batch_size"], 67, True),
(["epochs"], [32, 64, 128], False),
(["epochs"], "32", False),
(["epochs"], 7.13, False),
(["epochs"], -1, False),
(["epochs"], 32, True),
(["epochs"], 64, True),
(["epochs"], 13, True),
(["epochs"], 67, True),
(["steps"], [32, 64, 128], False),
(["steps"], "32", False),
(["steps"], 7.13, False),
(["steps"], -1, False),
(["steps"], 32, True),
(["steps"], 64, True),
(["steps"], 13, True),
(["steps"], 67, True),
(["optimizer"], {"import": "Adam"}, False),
(["optimizer"], {"import": "Adam", "lr": 1e-5}, False),
(["optimizer"], {"import": "Adam", "learning_rate": 0}, False),
(["optimizer", "learning_rate"], {"import": "decay"}, True),
(["optimizer", "learning_rate"], {"import": "decay", "params": {"a": 1}}, True),
(["optimizer", "learning_rate"], {"import": "decay", "a": 1}, False),
(["optimizer", "learning_rate"], {"params": {"a": 1}}, False),
(["optimizer", "beta"], 0.9, False),
(["optimizer"], {"import": "RMSProp", "learning_rate": 1e-1}, True),
(["learning_rate_reducer", "monitor"], None, False),
(["learning_rate_reducer", "monitor"], 42, False),
(["learning_rate_reducer", "monitor"], "loss", True),
(["learning_rate_reducer", "mode"], None, False),
(["learning_rate_reducer", "mode"], 42, False),
(["learning_rate_reducer", "mode"], "auto", False),
(["learning_rate_reducer", "mode"], "min", True),
(["learning_rate_reducer", "mode"], "max", True),
(["learning_rate_reducer", "patience"], None, False),
(["learning_rate_reducer", "patience"], "zero", False),
(["learning_rate_reducer", "patience"], -1, False),
(["learning_rate_reducer", "patience"], 1.5, False),
(["learning_rate_reducer", "patience"], 7, True),
(["learning_rate_reducer", "factor"], None, False),
(["learning_rate_reducer", "factor"], "zero", False),
(["learning_rate_reducer", "factor"], -1, False),
(["learning_rate_reducer", "factor"], 0.07, True),
(["learning_rate_reducer", "cooldown"], 5, True),
(["extra_param"], 42, False),
(["extra_param"], "7", False),
],
)
def test_validate_schema_solver(base_cfg, lst, val, result):
base_cfg["solver"] = set_nested(base_cfg["solver"], lst, val)
_test_validate_schema_pass_fail(base_cfg, result)
@pytest.mark.parametrize(
"lst,val,result",
[
(["best_checkpoint"], [], False),
(["best_checkpoint", "mode"], None, False),
(["best_checkpoint", "mode"], 42, False),
(["best_checkpoint", "monitor"], None, False),
(["best_checkpoint", "monitor"], 42, False),
(["best_checkpoint", "extra"], "param", False),
(["best_checkpoint"], {"monitor": "val_acc", "mode": "max"}, True),
(["best_checkpoint"], {"monitor": "val_regret", "mode": "min"}, True),
(["tensorboard"], [], False),
(["tensorboard"], {"batch_size": 42}, True),
(["train_early_stopping", "monitor"], None, False),
(["train_early_stopping", "monitor"], 42, False),
(["train_early_stopping", "monitor"], "loss", True),
(["train_early_stopping", "mode"], None, False),
(["train_early_stopping", "mode"], 42, False),
(["train_early_stopping", "mode"], "auto", False),
(["train_early_stopping", "mode"], "min", True),
(["train_early_stopping", "mode"], "max", True),
(["train_early_stopping", "patience"], None, False),
(["train_early_stopping", "patience"], "zero", False),
(["train_early_stopping", "patience"], -1, False),
(["train_early_stopping", "patience"], 1.5, False),
(["train_early_stopping", "patience"], 7, True),
(["train_early_stopping", "min_delta"], None, False),
(["train_early_stopping", "min_delta"], "zero", False),
(["train_early_stopping", "min_delta"], -1, False),
(["train_early_stopping", "min-delta"], 0.07, True),
(["validation_early_stopping", "monitor"], None, False),
(["validation_early_stopping", "monitor"], 42, False),
(["validation_early_stopping", "monitor"], "loss", True),
(["validation_early_stopping", "mode"], None, False),
(["validation_early_stopping", "mode"], 42, False),
(["validation_early_stopping", "mode"], "auto", False),
(["validation_early_stopping", "mode"], "min", True),
(["validation_early_stopping", "mode"], "max", True),
(["validation_early_stopping", "patience"], None, False),
(["validation_early_stopping", "patience"], "zero", False),
(["validation_early_stopping", "patience"], -1, False),
(["validation_early_stopping", "patience"], 1.5, False),
(["validation_early_stopping", "patience"], 7, True),
(["validation_early_stopping", "min_delta"], None, False),
(["validation_early_stopping", "min_delta"], "zero", False),
(["validation_early_stopping", "min_delta"], -1, False),
(["validation_early_stopping", "min-delta"], 0.07, True),
],
)
def test_validate_schema_services(base_cfg, lst, val, result):
base_cfg["services"] = set_nested(base_cfg["services"], lst, val)
_test_validate_schema_pass_fail(base_cfg, result)
def test_render_params():
cfg = {
"hello": "{{param_1}}",
"fizz": {"buzz": "{{param_2}}", "life": "{{param_3}}"},
"count": "{{param_4}}",
"unit": "{{param_5}}",
}
params = {
"param_1": "world",
"param_2": "fizzbuzz",
"param_3": 42,
"param_4": [1, 2, 3],
"extra": 7,
}
expected = {
"hello": "world",
"fizz": {"buzz": "fizzbuzz", "life": 42},
"unit": "{{param_5}}",
"count": [1, 2, 3],
}
assert config._render_params(cfg, params) == expected
cfg = {"hello": "world"}
params = {"param_1": "world", "param_2": "fizzbuzz", "param_3": 42}
expected = {"hello": "world"}
assert config._render_params(cfg, params) == expected
|
1655125
|
from django.contrib.auth.models import User
from restless.dj import DjangoResource
from restless.preparers import FieldsPreparer
from posts.models import Post
class PostResource(DjangoResource):
preparer = FieldsPreparer(fields={
'id': 'id',
'title': 'title',
'author': 'user.username',
'body': 'content',
'posted_on': 'posted_on',
})
def list(self):
return Post.objects.all()
def detail(self, pk):
return Post.objects.get(id=pk)
def create(self):
return Post.objects.create(
title=self.data['title'],
user=User.objects.get(username=self.data['author']),
content=self.data['body']
)
def update(self, pk):
try:
post = Post.objects.get(id=pk)
except Post.DoesNotExist:
post = Post()
post.title = self.data['title']
post.user = User.objects.get(username=self.data['author'])
post.content = self.data['body']
post.save()
return post
def delete(self, pk):
Post.objects.get(id=pk).delete()
|
1655130
|
from .keys import (BadSignatureError, BadPrefixError,
create_keypair, SigningKey, VerifyingKey,
remove_prefix, to_ascii, from_ascii)
(BadSignatureError, BadPrefixError,
create_keypair, SigningKey, VerifyingKey,
remove_prefix, to_ascii, from_ascii) # hush pyflakes
from ._version import get_versions
__version__ = str(get_versions()['version'])
del get_versions
|
1655131
|
from typing import TypeVar, List, Callable # noqa: F401
from functools import reduce
from operator import add
T = TypeVar('T')
S = TypeVar('S')
def flatten(l):
# type: (List[List[T]]) -> List[T]
return reduce(add, l, [])
def flat_map(f, l):
# type: (Callable[[S], List[T]], List[S]) -> List[T]
return flatten([f(x) for x in l])
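# Illustrative usage (not part of the original module):
#   flatten([[1, 2], [3]])               # -> [1, 2, 3]
#   flat_map(lambda x: [x, -x], [1, 2])  # -> [1, -1, 2, -2]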
|
1655135
|
from math import ceil
from typing import Optional
from typing import Union
import torch
import torch.nn.functional as fn
from torch import Tensor
from torch.distributions.normal import Normal
from torch.distributions.utils import broadcast_all
from pfhedge._utils.typing import TensorOrScalar
def european_payoff(input: Tensor, call: bool = True, strike: float = 1.0) -> Tensor:
"""Returns the payoff of a European option.
Args:
input (torch.Tensor): The input tensor representing the price trajectory.
call (bool, default=True): Specifies whether the option is call or put.
strike (float, default=1.0): The strike price of the option.
Shape:
- input: :math:`(*, T)` where
:math:`T` is the number of time steps and
:math:`*` means any number of additional dimensions.
- output: :math:`(*)`
Returns:
torch.Tensor
"""
if call:
return fn.relu(input[..., -1] - strike)
else:
return fn.relu(strike - input[..., -1])
def lookback_payoff(input: Tensor, call: bool = True, strike: float = 1.0) -> Tensor:
"""Returns the payoff of a lookback option with a fixed strike.
Args:
input (torch.Tensor): The input tensor representing the price trajectory.
call (bool, default=True): Specifies whether the option is call or put.
strike (float, default=1.0): The strike price of the option.
Shape:
- input: :math:`(*, T)` where
:math:`T` is the number of time steps and
:math:`*` means any number of additional dimensions.
- output: :math:`(*)`
Returns:
torch.Tensor
"""
if call:
return fn.relu(input.max(dim=-1).values - strike)
else:
return fn.relu(strike - input.min(dim=-1).values)
def american_binary_payoff(
input: Tensor, call: bool = True, strike: float = 1.0
) -> Tensor:
"""Returns the payoff of an American binary option.
Args:
input (torch.Tensor): The input tensor representing the price trajectory.
call (bool, default=True): Specifies whether the option is call or put.
strike (float, default=1.0): The strike price of the option.
Shape:
- input: :math:`(*, T)` where
:math:`T` is the number of time steps and
:math:`*` means any number of additional dimensions.
- output: :math:`(*)`
Returns:
torch.Tensor
"""
if call:
return (input.max(dim=-1).values >= strike).to(input)
else:
return (input.min(dim=-1).values <= strike).to(input)
def european_binary_payoff(
input: Tensor, call: bool = True, strike: float = 1.0
) -> Tensor:
"""Returns the payoff of a European binary option.
Args:
input (torch.Tensor): The input tensor representing the price trajectory.
call (bool, default=True): Specifies whether the option is call or put.
strike (float, default=1.0): The strike price of the option.
Shape:
- input: :math:`(*, T)` where
:math:`T` is the number of time steps and
:math:`*` means any number of additional dimensions.
- output: :math:`(*)`
Returns:
torch.Tensor
"""
if call:
return (input[..., -1] >= strike).to(input)
else:
return (input[..., -1] <= strike).to(input)
def exp_utility(input: Tensor, a: float = 1.0) -> Tensor:
r"""Applies an exponential utility function.
An exponential utility function is defined as:
.. math::
u(x) = -\exp(-a x) \,.
Args:
input (torch.Tensor): The input tensor.
a (float, default=1.0): The risk aversion coefficient of the exponential
utility.
Returns:
torch.Tensor
"""
return -(-a * input).exp()
def isoelastic_utility(input: Tensor, a: float) -> Tensor:
r"""Applies an isoelastic utility function.
An isoelastic utility function is defined as:
.. math::
u(x) = \begin{cases}
x^{1 - a} & a \neq 1 \\
\log{x} & a = 1
\end{cases} \,.
Args:
input (torch.Tensor): The input tensor.
a (float): Relative risk aversion coefficient of the isoelastic
utility.
Returns:
torch.Tensor
"""
if a == 1.0:
return input.log()
else:
return input.pow(1.0 - a)
def entropic_risk_measure(input: Tensor, a: float = 1.0) -> Tensor:
"""Returns the entropic risk measure.
See :class:`pfhedge.nn.EntropicRiskMeasure` for details.
"""
return (-exp_utility(input, a=a).mean(0)).log() / a
def topp(input: Tensor, p: float, dim: Optional[int] = None, largest: bool = True):
"""Returns the largest :math:`p * N` elements of the given input tensor,
where :math:`N` stands for the total number of elements in the input tensor.
If ``dim`` is not given, the last dimension of the ``input`` is chosen.
If ``largest`` is ``False`` then the smallest elements are returned.
A namedtuple of ``(values, indices)`` is returned, where the ``indices``
are the indices of the elements in the original ``input`` tensor.
Args:
input (torch.Tensor): The input tensor.
p (float): The quantile level.
dim (int, optional): The dimension to sort along.
largest (bool, default=True): Controls whether to return largest or smallest
elements.
Returns:
torch.Tensor
Examples:
>>> from pfhedge.nn.functional import topp
>>>
>>> input = torch.arange(1.0, 6.0)
>>> input
tensor([1., 2., 3., 4., 5.])
>>> topp(input, 3 / 5)
torch.return_types.topk(
values=tensor([5., 4., 3.]),
indices=tensor([4, 3, 2]))
"""
if dim is None:
return input.topk(ceil(p * input.numel()), largest=largest)
else:
return input.topk(ceil(p * input.size(dim)), dim=dim, largest=largest)
def expected_shortfall(input: Tensor, p: float, dim: Optional[int] = None) -> Tensor:
"""Returns the expected shortfall of the given input tensor.
Args:
input (torch.Tensor): The input tensor.
p (float): The quantile level.
dim (int, optional): The dimension to sort along.
Returns:
torch.Tensor
Examples:
>>> from pfhedge.nn.functional import expected_shortfall
>>>
>>> input = -torch.arange(10.0)
>>> input
tensor([-0., -1., -2., -3., -4., -5., -6., -7., -8., -9.])
>>> expected_shortfall(input, 0.3)
tensor(8.)
"""
if dim is None:
return -topp(input, p=p, largest=False).values.mean()
else:
return -topp(input, p=p, largest=False, dim=dim).values.mean(dim=dim)
def _min_values(input: Tensor, dim: Optional[int] = None) -> Tensor:
return input.min() if dim is None else input.min(dim=dim).values
def _max_values(input: Tensor, dim: Optional[int] = None) -> Tensor:
return input.max() if dim is None else input.max(dim=dim).values
def value_at_risk(input: Tensor, p: float, dim: Optional[int] = None) -> Tensor:
"""Returns the value at risk of the given input tensor.
Note:
If :math:`p \leq 1 / N`` with :math:`N` being the number of elements to sort,
returns the smallest element in the tensor.
If :math:`p > 1 - 1 / N``, returns the largest element in the tensor.
Args:
input (torch.Tensor): The input tensor.
p (float): The quantile level.
dim (int, optional): The dimension to sort along.
Returns:
torch.Tensor
Examples:
>>> from pfhedge.nn.functional import value_at_risk
>>>
>>> input = -torch.arange(10.0)
>>> input
tensor([-0., -1., -2., -3., -4., -5., -6., -7., -8., -9.])
>>> value_at_risk(input, 0.3)
tensor(-7.)
"""
n = input.numel() if dim is None else input.size(dim)
if p <= 1 / n:
output = _min_values(input, dim=dim)
elif p > 1 - 1 / n:
output = _max_values(input, dim=dim)
else:
q = (p - (1 / n)) / (1 - (1 / n))
output = input.quantile(q, dim=dim)
return output
def leaky_clamp(
input: Tensor,
min: Optional[Tensor] = None,
max: Optional[Tensor] = None,
clamped_slope: float = 0.01,
inverted_output: str = "mean",
) -> Tensor:
r"""Leakily clamp all elements in ``input`` into the range :math:`[\min, \max]`.
See :class:`pfhedge.nn.LeakyClamp` for details.
"""
x = input
if min is not None:
min = torch.as_tensor(min).to(x)
x = x.maximum(min + clamped_slope * (x - min))
if max is not None:
max = torch.as_tensor(max).to(x)
x = x.minimum(max + clamped_slope * (x - max))
if min is not None and max is not None:
if inverted_output == "mean":
y = (min + max) / 2
elif inverted_output == "max":
y = max
else:
raise ValueError("inverted_output must be 'mean' or 'max'.")
x = x.where(min <= max, y)
return x
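# Illustrative sketch (not part of pfhedge): with the default slope 0.01,
# out-of-range values leak slightly past the bounds.
#   x = torch.tensor([-1.0, 0.5, 2.0])
#   leaky_clamp(x, min=0.0, max=1.0)  # -> tensor([-0.0100, 0.5000, 1.0100])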
def clamp(
input: Tensor,
min: Optional[Tensor] = None,
max: Optional[Tensor] = None,
inverted_output: str = "mean",
) -> Tensor:
r"""Clamp all elements in ``input`` into the range :math:`[\min, \max]`.
See :class:`pfhedge.nn.Clamp` for details.
"""
if inverted_output == "mean":
output = leaky_clamp(input, min, max, clamped_slope=0.0, inverted_output="mean")
elif inverted_output == "max":
output = torch.clamp(input, min, max)
else:
raise ValueError("inverted_output must be 'mean' or 'max'.")
return output
def realized_variance(input: Tensor, dt: TensorOrScalar) -> Tensor:
r"""Returns the realized variance of the price.
Realized variance :math:`\sigma^2` of the stock price :math:`S` is defined as:
.. math::
\sigma^2 = \frac{1}{T - 1} \sum_{i = 1}^{T - 1}
\frac{1}{dt} \log(S_{i + 1} / S_i)^2
where :math:`T` is the number of time steps.
Note:
The mean of log return is assumed to be zero.
Args:
input (torch.Tensor): The input tensor.
dt (torch.Tensor or float): The intervals of the time steps.
Shape:
- input: :math:`(*, T)` where
:math:`T` stands for the number of time steps and
:math:`*` means any number of additional dimensions.
- output: :math:`(*)`
Returns:
torch.Tensor
"""
return input.log().diff(dim=-1).square().mean(dim=-1) / dt
def realized_volatility(input: Tensor, dt: Union[Tensor, float]) -> Tensor:
"""Returns the realized volatility of the price.
It is square root of :func:`realized_variance`.
Args:
input (torch.Tensor): The input tensor.
dt (torch.Tensor or float): The intervals of the time steps.
Shape:
- input: :math:`(*, T)` where
:math:`T` stands for the number of time steps and
:math:`*` means any number of additional dimensions.
- output: :math:`(*)`
Returns:
torch.Tensor
"""
return realized_variance(input, dt=dt).sqrt()
def terminal_value(
spot: Tensor,
unit: Tensor,
cost: float = 0.0,
payoff: Optional[Tensor] = None,
deduct_first_cost: bool = True,
) -> Tensor:
r"""Returns the terminal portfolio value.
The terminal value of a hedger's portfolio is given by
.. math::
\text{PL}(Z, \delta, S) =
- Z
            + \sum_{i = 0}^{T - 2} \delta_{i} (S_{i + 1} - S_{i})
- c \sum_{i = 0}^{T - 1} |\delta_{i} - \delta_{i - 1}| S_{i}
where :math:`Z` is the payoff of the derivative, :math:`T` is the number of
time steps, :math:`S` is the spot price, :math:`\delta` is the signed number
of shares held at each time step.
    We define :math:`\delta_{-1} = 0` for notational convenience.
A hedger sells the derivative to its customer and
obliges to settle the payoff at maturity.
The dealer hedges the risk of this liability
by trading the underlying instrument of the derivative.
The resulting profit and loss is obtained by adding up the payoff to the
customer, capital gains from the underlying asset, and the transaction cost.
References:
        - Buehler, H., Gonon, L., Teichmann, J. and Wood, B., 2019.
Deep hedging. Quantitative Finance, 19(8), pp.1271-1291.
[arXiv:`1802.03042 <https://arxiv.org/abs/1802.03042>`_ [q-fin]]
Args:
spot (torch.Tensor): The spot price of the underlying asset :math:`S`.
unit (torch.Tensor): The signed number of shares of the underlying asset
:math:`\delta`.
cost (float, default=0.0): The proportional transaction cost rate of
the underlying asset :math:`c`.
payoff (torch.Tensor, optional): The payoff of the derivative :math:`Z`.
deduct_first_cost (bool, default=True): Whether to deduct the transaction
cost of the stock at the first time step.
            If ``False``, :math:`- c |\delta_0| S_0` is omitted from the above
            equation of the terminal value.
Shape:
- spot: :math:`(N, *, T)` where
:math:`T` is the number of time steps and
:math:`*` means any number of additional dimensions.
- unit: :math:`(N, *, T)`
- payoff: :math:`(N, *)`
- output: :math:`(N, *)`.
Returns:
torch.Tensor
"""
if spot.size() != unit.size():
raise RuntimeError(f"unmatched sizes: spot {spot.size()}, unit {unit.size()}")
if payoff is not None and spot.size()[:-1] != payoff.size():
raise RuntimeError(
f"unmatched sizes: spot {spot.size()}, payoff {payoff.size()}"
)
value = unit[..., :-1].mul(spot.diff(dim=-1)).sum(-1)
value += -cost * unit.diff(dim=-1).abs().mul(spot[..., 1:]).sum(-1)
if payoff is not None:
value -= payoff
if deduct_first_cost:
value -= cost * unit[..., 0].abs() * spot[..., 0]
return value
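# Illustrative sketch (not part of pfhedge): with zero cost and no payoff,
# holding one share over the first two steps earns the spot increments.
#   spot = torch.tensor([[1.0, 1.1, 1.2]])
#   unit = torch.tensor([[1.0, 1.0, 0.0]])
#   terminal_value(spot, unit)  # -> tensor([0.2000]) (up to float rounding)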
def ncdf(input: Tensor) -> Tensor:
r"""Returns a new tensor with the normal cumulative distribution function.
.. math::
\text{ncdf}(x) =
\int_{-\infty}^x
\frac{1}{\sqrt{2 \pi}} e^{-\frac{y^2}{2}} dy
Args:
input (torch.Tensor): The input tensor.
Returns:
torch.Tensor
Examples:
>>> from pfhedge.nn.functional import ncdf
>>>
>>> input = torch.tensor([-1.0, 0.0, 10.0])
>>> ncdf(input)
tensor([0.1587, 0.5000, 1.0000])
"""
return Normal(0.0, 1.0).cdf(input)
def npdf(input: Tensor) -> Tensor:
r"""Returns a new tensor with the normal distribution function.
.. math::
\text{npdf}(x) = \frac{1}{\sqrt{2 \pi}} e^{-\frac{x^2}{2}}
Args:
input (torch.Tensor): The input tensor.
Returns:
torch.Tensor
Examples:
>>> from pfhedge.nn.functional import npdf
>>>
>>> input = torch.tensor([-1.0, 0.0, 10.0])
>>> npdf(input)
tensor([2.4197e-01, 3.9894e-01, 7.6946e-23])
"""
return Normal(0.0, 1.0).log_prob(input).exp()
def d1(log_moneyness: Tensor, time_to_maturity: Tensor, volatility: Tensor) -> Tensor:
r"""Returns :math:`d_1` in the Black-Scholes formula.
.. math::
d_1 = \frac{s + \frac12 \sigma^2 t}{\sigma \sqrt{t}}
where
:math:`s` is the log moneyness,
:math:`t` is the time to maturity, and
:math:`\sigma` is the volatility.
Note:
Risk-free rate is set to zero.
Args:
log_moneyness (torch.Tensor or float): Log moneyness of the underlying asset.
time_to_maturity (torch.Tensor or float): Time to maturity of the derivative.
volatility (torch.Tensor or float): Volatility of the underlying asset.
Returns:
torch.Tensor
"""
s, t, v = broadcast_all(log_moneyness, time_to_maturity, volatility)
return (s + (v.square() / 2) * t).div(v * t.sqrt())
def d2(log_moneyness: Tensor, time_to_maturity: Tensor, volatility: Tensor) -> Tensor:
r"""Returns :math:`d_2` in the Black-Scholes formula.
.. math::
d_2 = \frac{s - \frac12 \sigma^2 t}{\sigma \sqrt{t}}
where
:math:`s` is the log moneyness,
:math:`t` is the time to maturity, and
:math:`\sigma` is the volatility.
Note:
Risk-free rate is set to zero.
Args:
log_moneyness (torch.Tensor or float): Log moneyness of the underlying asset.
time_to_maturity (torch.Tensor or float): Time to maturity of the derivative.
volatility (torch.Tensor or float): Volatility of the underlying asset.
Returns:
torch.Tensor
"""
s, t, v = broadcast_all(log_moneyness, time_to_maturity, volatility)
return (s - (v.square() / 2) * t).div(v * t.sqrt())
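# A handy sanity check (not part of pfhedge): at the money (s = 0),
# d1 = sigma * sqrt(t) / 2 and d2 = -d1.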
def ww_width(
gamma: Tensor, spot: Tensor, cost: TensorOrScalar, a: TensorOrScalar = 1.0
) -> Tensor:
r"""Returns half-width of the no-transaction band for
Whalley-Wilmott's hedging strategy.
See :class:`pfhedge.nn.WhalleyWilmott` for details.
Args:
        gamma (torch.Tensor): The gamma of the derivative.
spot (torch.Tensor): The spot price of the underlier.
cost (torch.Tensor or float): The cost rate of the underlier.
a (torch.Tensor or float, default=1.0): Risk aversion parameter in exponential utility.
Returns:
torch.Tensor
"""
return (cost * (3 / 2) * gamma.square() * spot / a).pow(1 / 3)
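# Illustrative usage (not part of pfhedge; values are arbitrary):
#   gamma = torch.tensor(0.01)
#   spot = torch.tensor(100.0)
#   ww_width(gamma, spot, cost=1e-4)  # half-width of the no-transaction band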
|
1655153
|
from __future__ import division, print_function
__author__ = 'saeedamen' # <NAME> / <EMAIL>
#
# Copyright 2019 Cuemacro
#
# See the License for the specific language governing permissions and limitations under the License.
#
from flask import Flask, request, jsonify
# hack to get Flask RestPlus to work!
import werkzeug
werkzeug.cached_property = werkzeug.utils.cached_property
from flask_restplus import Resource, Api
import json
from plotly.utils import PlotlyJSONEncoder
from tcapy.analysis.tcaengine import TCAEngineImpl
from tcapy.analysis.tcarequest import TCARequest
from tcapy.util.fxconv import FXConv
from tcapy.util.loggermanager import LoggerManager
from tcapy.analysis.algos.metric import *
from tcapy.analysis.algos.resultsform import *
from tcapy.data.databasesource import DatabaseSourceCSVBinary
from collections import OrderedDict
# `constants` and `UtilFunc` are referenced below; these import paths are
# assumptions about tcapy's layout and may need adjusting.
from tcapy.conf.constants import Constants
from tcapy.util.utilfunc import UtilFunc
constants = Constants()
tca_engine = TCAEngineImpl()
application = Flask(__name__)
api = Api(application,
version='0.1',
title='tcapy API',
description='This is the API for tcapy',
)
@api.route('/tca_computation')
class TCAComputation(Resource):
def get(self):
logger = LoggerManager.getLogger(__name__)
if request.content_type == 'application/json':
json_input = request.json
if 'trade_df' in json_input.keys() and 'username' in json_input.keys() and 'password' in json_input.keys():
username = json_input['username']
# TODO check passwords
password = json_input['password']
logger.info("Received API request from user: " + username)
trade_df = json_input['trade_df']
# assume that the user uploaded a binary CSV file/JSON
trade_df = DatabaseSourceCSVBinary(trade_data_database_csv=trade_df).fetch_trade_order_data()
data_frame_trade_order_mapping = OrderedDict([('trade_df', trade_df)])
                start_date = trade_df.index[0]
                finish_date = trade_df.index[-1]
ticker_val = FXConv().correct_unique_notation_list(trade_df['ticker'].unique().tolist())
                if 'ticker' in json_input.keys():
                    ticker_val = list(set(ticker_val).intersection(set(json_input['ticker'])))
                # filter out unknown tickers without mutating the list mid-iteration
                ticker_val = [t for t in ticker_val
                              if t in constants.available_tickers_dictionary['All']]
metric_val = 'slippage'
results_form = [
# show the distribution of the selected metric for trades weighted by notional
# aggregated by ticker and then by venue
DistResultsForm(market_trade_order_list=['trade_df'], metric_name=metric_val,
aggregate_by_field=['ticker', 'broker_id', 'venue'],
weighting_field='executed_notional_in_reporting_currency'),
# display the timeline of metrics average by day (and weighted by notional)
TimelineResultsForm(market_trade_order_list=['trade_df'], by_date='date',
metric_name=metric_val,
aggregation_metric='mean',
aggregate_by_field=['ticker'], scalar=10000.0,
weighting_field='executed_notional_in_reporting_currency'),
# display a bar chart showing the average metric weighted by notional and aggregated by ticker
# venue
BarResultsForm(market_trade_order_list=['trade_df'],
metric_name=metric_val,
aggregation_metric='mean',
aggregate_by_field=['ticker', 'venue', 'broker_id'], scalar=10000.0,
weighting_field='executed_notional_in_reporting_currency'),
# create a table the markout of every trade
TableResultsForm(market_trade_order_list=['trade_df'], metric_name='markout', filter_by='all',
replace_text={'markout_': '', 'executed_notional': 'exec not',
'notional_currency': 'exec not cur'},
keep_fields=['executed_notional', 'side', 'notional_currency'],
scalar={'all': 10000.0, 'exclude': ['executed_notional', 'side']},
round_figures_by={'all': 2, 'executed_notional': 0, 'side': 0},
weighting_field='executed_notional')
]
tca_request=TCARequest(start_date=start_date, finish_date=finish_date, ticker=ticker_val,
tca_type='aggregated', summary_display='candlestick',
market_data_store='arctic-ncfx', trade_data_store='dataframe',
trade_order_mapping=data_frame_trade_order_mapping,
metric_calcs=[MetricSlippage(), MetricMarkout(trade_order_list=['trade_df'])],
results_form=results_form, dummy_market=True, use_multithreading=True)
try:
dict_of_df = tca_engine.calculate_tca(tca_request)
dict_of_df = UtilFunc().convert_dict_of_dataframe_to_json(dict_of_df)
except Exception as e:
logger.error("Failed to complete request for user: " + username + " - " + str(e))
return "Failed to complete request"
logger.info("Completed API request from user: " + username)
else:
return 'Missing fields in request'
return jsonify({'response': 200, 'results': dict_of_df})
else:
return "Unsupported media type, only accepts JSON"
##except Exception as e:
## return {'Encountered exception during calculation: ' + str(e)}
if __name__ == '__main__':
application.run(debug=True, port=9000)
|
1655174
|
import torch
from .batch import BatchMeter
class CategoricalAccuracy(BatchMeter):
""" Accuracy on categorical targets
"""
name = 'acc'
DEFAULT_MODE = 'max'
    INVALID_BATCH_DIMENSION_MESSAGE = (
        'Expected a 2-d scores tensor and a 1-d targets tensor with matching batch size'
    )
INVALID_INPUT_TYPE_MESSAGE = (
'Expected types (Tensor, LongTensor) as inputs'
)
def __init__(self, k=1, aggregator=None):
super(CategoricalAccuracy, self).__init__(aggregator=aggregator)
self.k = k
def check_tensors(self, a, b):
if not torch.is_tensor(a):
raise TypeError(self.INVALID_INPUT_TYPE_MESSAGE)
if (not isinstance(b, torch.LongTensor) and
not isinstance(b, torch.cuda.LongTensor)):
raise TypeError(self.INVALID_INPUT_TYPE_MESSAGE)
if len(a.size()) != 2 or len(b.size()) != 1 or len(b) != a.size()[0]:
raise ValueError(self.INVALID_BATCH_DIMENSION_MESSAGE)
def _get_result(self, a, b):
return torch.sum((a.topk(k=self.k, dim=1)[1] ==
b.unsqueeze(-1)).float(), dim=1)
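# Illustrative usage (assumes the aggregator supplied by BatchMeter combines
# these per-sample results; not part of the original module):
#   meter = CategoricalAccuracy(k=1)
#   scores = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
#   targets = torch.tensor([1, 1])
#   meter._get_result(scores, targets)  # -> tensor([1., 0.])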
|
1655177
|
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
urlpatterns = [
url(r'', include('osmaxx.excerptexport.urls', namespace='excerptexport')),
url(r'^admin/django-rq/', include('django_rq.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'', include('social_django.urls', namespace='social')),
url(r'^version/', include('osmaxx.version.urls', namespace='version')),
# browsable REST API
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^api/', include('osmaxx.excerptexport.rest_api.urls', namespace='excerptexport_api')),
url(r'^job_progress/', include('osmaxx.job_progress.urls', namespace='job_progress')),
url(r'^pages/', include('osmaxx.core.urls', namespace='pages')),
url(r'^profile/', include('osmaxx.profile.urls', namespace='profile')),
] + \
static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT, show_indexes=True) + \
static(settings.STATIC_URL, document_root=settings.STATIC_ROOT, show_indexes=True)
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
|
1655180
|
import os
import numpy as np
import astrodash
directoryPath = '/Users/dmuthukrishna/Documents/OzDES_data/ATEL_9742_Run26'
atel9742 = [
('DES16E1ciy_E1_combined_161101_v10_b00.dat', 0.174),
('DES16S1cps_S1_combined_161101_v10_b00.dat', 0.274),
('DES16E2crb_E2_combined_161102_v10_b00.dat', 0.229),
('DES16E2clk_E2_combined_161102_v10_b00.dat', 0.367),
('DES16E2cqq_E2_combined_161102_v10_b00.dat', 0.426),
('DES16X2ceg_X2_combined_161103_v10_b00.dat', 0.335),
('DES16X2bkr_X2_combined_161103_v10_b00.dat', 0.159),
('DES16X2crr_X2_combined_161103_v10_b00.dat', 0.312),
('DES16X2cpn_X2_combined_161103_v10_b00.dat', 0.28),
('DES16X2bvf_X2_combined_161103_v10_b00.dat', 0.135),
('DES16C1cbg_C1_combined_161103_v10_b00.dat', 0.111),
('DES16C2cbv_C2_combined_161103_v10_b00.dat', 0.109),
('DES16C1bnt_C1_combined_161103_v10_b00.dat', 0.351),
('DES16C3at_C3_combined_161031_v10_b00.dat', 0.217),
('DES16X3cpl_X3_combined_161031_v10_b00.dat', 0.205),
('DES16E2cjg_E2_combined_161102_v10_b00.dat', 0.48),
('DES16X2crt_X2_combined_161103_v10_b00.dat', 0.57)]
filenames = [os.path.join(directoryPath, i[0]) for i in atel9742]
knownRedshifts = [i[1] for i in atel9742]
classification = astrodash.Classify(filenames, knownRedshifts)
bestFits, bestTypes = classification.list_best_matches(n=1)
print(bestFits)
np.savetxt('Run26_fits.txt', bestFits, fmt='%s')
# classification.plot_with_gui(indexToPlot=1)
|
1655225
|
from typing import Dict, NewType, Any, Union, Optional, List
from typing_extensions import (
TypedDict
)
SystemId = int
Identifier = Union[str, int]
ParameterId = Identifier
Parameter = Dict[str, Any]
StatusItemIcon = Dict[str, Any]
ParameterSet = Dict[ParameterId, Parameter]
SecurityLevel = Dict[str, Any]
ConnectionStatus = Dict[str, Any]
Address = Dict[str, Any]
class System(TypedDict):
systemId: int
name: str
productName: str
productImage: str
securityLevel: SecurityLevel
serialNumber: str
lastActivityDate: str
connectionStatus: ConnectionStatus
address: Optional[Address]
hasAlarmed: bool
class SystemUnit(TypedDict):
systemUnitId: int
name: str
shortName: str
product: str
softwareVersion: str
class SmartHomeSystem(TypedDict):
name: str
class Thermostat(TypedDict):
smartHomeSystem: SmartHomeSystem
name: str
climateSystems: Optional[List[int]]
currentTemperature: Optional[str]
targetTemperature: Optional[str]
class SetThermostatModel(TypedDict):
externalId: int
name: str
actualTemp: Optional[int] # Multiplied by 10
targetTemp: Optional[int] # Multiplied by 10
valvePosition: Optional[int]
climateSystems: Optional[List[int]]
class ParameterType(TypedDict, total=False):
parameterId: int
Name: str
title: str
designation: str
unit: str
displayValue: str
rawValue: int
## Extension by library
value: Union[str, float, None]
class CategoryType(TypedDict, total=False):
categoryId: int
name: str
parameters: List[ParameterType]
|
1655226
|
import torch
import pandas as pd
import numpy as np
import pickle as pkl
import yaml
import cv2
from io import BytesIO
from .utils import make_detections_from_segmentation
from .datasets_cfg import make_urdf_dataset
from pathlib import Path
import torch.multiprocessing
torch.multiprocessing.set_sharing_strategy('file_system')
class SyntheticSceneDataset:
def __init__(self, ds_dir, train=True):
self.ds_dir = Path(ds_dir)
assert self.ds_dir.exists()
        keys_path = self.ds_dir / (('train' if train else 'val') + '_keys.pkl')
        keys = pkl.loads(keys_path.read_bytes())
        self.cfg = yaml.load((self.ds_dir / 'config.yaml').read_text(), Loader=yaml.FullLoader)
self.object_set = self.cfg.scene_kwargs['urdf_ds']
self.keys = keys
urdf_ds_name = self.cfg.scene_kwargs['urdf_ds']
urdf_ds = make_urdf_dataset(urdf_ds_name)
self.all_labels = [obj['label'] for _, obj in urdf_ds.index.iterrows()]
self.frame_index = pd.DataFrame(dict(scene_id=np.arange(len(keys)), view_id=np.arange(len(keys))))
def __len__(self):
return len(self.frame_index)
@staticmethod
def _deserialize_im_cv2(im_buf, rgb=True):
stream = BytesIO(im_buf)
stream.seek(0)
file_bytes = np.asarray(bytearray(stream.read()), dtype=np.uint8)
if rgb:
im = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
im = im[..., [2, 1, 0]]
else:
im = cv2.imdecode(file_bytes, cv2.IMREAD_UNCHANGED).astype(np.uint8)
return torch.as_tensor(im)
def __getitem__(self, idx):
key = self.keys[idx]
pkl_path = (self.ds_dir / 'dumps' / key).with_suffix('.pkl')
dic = pkl.loads(pkl_path.read_bytes())
cam = dic['camera']
rgb = self._deserialize_im_cv2(cam['rgb'])
mask = self._deserialize_im_cv2(cam['mask'], rgb=False)
cam = {k: v for k, v in cam.items() if k not in {'rgb', 'mask'}}
objects = dic['objects']
dets_gt = make_detections_from_segmentation(torch.as_tensor(mask).unsqueeze(0))[0]
mask_uniqs = set(np.unique(mask[mask > 0]))
for obj in objects:
if obj['id_in_segm'] in mask_uniqs:
obj['bbox'] = dets_gt[obj['id_in_segm']].numpy()
state = dict(camera=cam, objects=objects, frame_info=self.frame_index.iloc[idx].to_dict())
return rgb, mask, state
|
1655310
|
from app.HTMLScraper import SiteExplorer
import argparse
CLASSES = {
'textblock': ['.jumbotron', '.footer-single', '.timeline-panel', '.post-preview', '.masthead-content',
'.intro-text', '.p-5', '.lead', '.blockquote', '.blog-post', '.featurette-heading', '.article', 'p',
'h1', 'h2', 'h3', 'h4', 'h5', 'h6', '.review-text-sub-contents', ".a-span1",
".nav_a", "reviewText",
".a-text-bold", ".a-text-normal", ".s-item__title", "s-item__price", ".s-item__shipping",
".s-item__hotness", ".b-textlink", '.a-link-normal'
".b-pageheader__text", ".SECONDARY_INFO"],
'section': ['.section', '.container', '.page-section', '.container-fluid', '.history-wrapper', 'section', 'footer',
'header', '.header', '.rhf-border', '.a-section', '.ws-widget-container', '.feature', '.a-container'],
'button': ['.btn', '.about__social', '.btn-group', '.social-media-link', '.social-info', '.social-buttons',
'.social', '.social-icons', '.social-profile', '.social', '.facebook', '.twitter', '.instagram',
'.google',
".w5_btn_label", ".a-button-input", ".a-button-inner", ".a-button-text", ".a-box-inner", ".ghspr"
],
'form': ['.form-group', '.input-group', '.subscribe', '.form-control'],
}
def run_pipeline(sites, index, output_folder):
    with open(sites, 'r') as f:
        sites = [site.strip() for site in f.readlines()]
for counter, site in enumerate(sites[index:]):
site_url = site.strip()
print(f'{counter + index}/{len(sites)} Current website: {site_url}')
se = SiteExplorer(site_url, output_folder)
for k in CLASSES:
v = ", ".join(CLASSES[k])
se.detect_clickable_elements(k, v)
se.export_json()
if __name__ == "__main__":
argparser = argparse.ArgumentParser()
argparser.add_argument("--website-list", help="Webiste list from which to start scraping")
argparser.add_argument("--start-index", help="Start index in the list. This is used for resuming processses",
default=0)
argparser.add_argument("--output-folder", help="Output folder for the scraping", default='parsed_websites')
args = argparser.parse_args()
run_pipeline(args.website_list, int(args.start_index), args.output_folder)
|
1655330
|
import pickle
from ..data import Dataset
def GMM2d(path='./data'):
with open('{}/GMMs/gmm-2d-syn-set.pkl'.format(path), 'rb') as f:
raw_dataset = pickle.load(f)
return Dataset(raw_dataset['x'], raw_dataset['y'])
|
1655337
|
import io
import functools
import PIL.Image
import numpy as np
from fastapi import FastAPI
from pydantic import BaseModel
from fastapi.responses import JSONResponse
from starlette.responses import StreamingResponse
import uvicorn
from fastapi.middleware.cors import CORSMiddleware
from sm.browser import utils
from sm.browser.main import preprocess_dataset_peaks, DatasetBrowser
app = FastAPI()
origins = ["*"]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
@functools.lru_cache(maxsize=128)
def load_dataset_browser(s3_path: str):
return DatasetBrowser(s3_path)
class DatasetPreprocessItem(BaseModel):
s3_path: str
@app.post("/preprocess")
async def preprocess(item: DatasetPreprocessItem):
preprocess_dataset_peaks(item.s3_path)
return {"status": "ok"}
class PixelSearchItem(BaseModel):
s3_path: str
x: int
y: int
class MzSearchItem(BaseModel):
s3_path: str
mz: float
ppm: int = 3
class PngStreamingResponse(StreamingResponse):
media_type = "image/png"
@app.post("/search", response_class=PngStreamingResponse)
async def perform_search(item: MzSearchItem):
dataset_browser = load_dataset_browser(item.s3_path)
mz_lo, mz_hi = utils.mz_ppm_bin(mz=item.mz, ppm=item.ppm)
rgba_array = dataset_browser.search(mz_lo, mz_hi)
image = PIL.Image.fromarray((rgba_array * 255).astype(np.uint8), mode="RGBA")
fp = io.BytesIO()
image.save(fp, format="PNG")
fp.seek(0)
return PngStreamingResponse(fp)
@app.post("/search_pixel")
async def perform_search_pixel(item: PixelSearchItem):
dataset_browser = load_dataset_browser(item.s3_path)
return JSONResponse(dataset_browser.search_pixel(item.x, item.y))
if __name__ == "__main__":
uvicorn.run(app)
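# Illustrative request (assumes the default uvicorn port 8000 and a valid
# s3_path; not part of the original module):
#   curl -X POST http://localhost:8000/search \
#        -H 'Content-Type: application/json' \
#        -d '{"s3_path": "s3://bucket/ds", "mz": 500.0, "ppm": 3}' -o ion.png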
|
1655340
|
from dota2.features.player import Player
from dota2.features.match import Match
from dota2.features.lobby import Lobby
from dota2.features.chat import ChatBase
from dota2.features.sharedobjects import SOBase
from dota2.features.party import Party
class FeatureBase(Player, Match, Lobby, Party, ChatBase, SOBase):
"""
    This object is used to add all high level functionality to Dota2Client.
    The features are separated into submodules, each with a single class.
"""
pass
|
1655393
|
classes = {}
def get_class(obj):
return classes.get(name_key(obj.__class__.__name__))
def get_class_by_name(name):
return classes.get(name_key(name))
def setup_class(cls):
classes[name_key(cls.name)] = cls
def name_key(name):
return name.lower().replace("_", "")
class Class:
def __init__(self, typeObj, fields, methods):
self.typeObj = typeObj
self.name = typeObj.__name__
self.fields = {}
for field in fields:
field.cls = self
self.fields[name_key(field.name)] = field
self.methods = {}
for method in methods:
method.cls = self
self.methods[name_key(method.name)] = method
def get_field(self, name):
return self.fields.get(name_key(name))
def get_method(self, name):
return self.methods.get(name_key(name))
def get_fields(self):
return self.fields.values()
def get_methods(self):
return self.methods.values()
class Field:
def __init__(self, name, is_static, type):
self.cls = None
self.name = name
self.is_static = is_static
self.type = type
def get_value(self, obj):
realObj = self.cls.typeObj if self.is_static else obj
return getattr(realObj, self.name)
def set_value(self, obj, value):
realObj = self.cls.typeObj if self.is_static else obj
setattr(realObj, self.name, value)
class Method:
def __init__(self, name, is_static, return_type, args):
self.cls = None
self.name = name
self.is_static = is_static
self.return_type = return_type
self.args = args
def call(self, obj, args):
realObj = self.cls.typeObj if self.is_static else obj
method = getattr(realObj, self.name)
return method(*args)
class MethodArgument:
def __init__(self, name, type):
self.name = name
self.type = type
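# Illustrative sketch (hypothetical Point class; not part of the original):
#   class Point:
#       x = 0
#   setup_class(Class(Point, [Field('x', True, int)], []))
#   get_class_by_name('point').get_field('x').get_value(None)  # -> 0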
|
1655424
|
from __future__ import print_function
import cw
import bluelet
class Master(object):
def __init__(self):
self.queued_tasks = [] # (TaskMessage, client connection) pairs
self.idle_workers = [] # connections
self.active_tasks = {} # {jobid: client connection}
self.connections = set() # all connections (client + worker)
def _show_workers(self):
print('workers:', len(self.idle_workers) + len(self.active_tasks))
def communicate(self, conn):
self.connections.add(conn)
while True:
# Read a message on the socket.
msg = yield cw._readmsg(conn)
if msg is None:
break
if isinstance(msg, cw.TaskMessage):
self.queued_tasks.append((msg, conn))
elif isinstance(msg, cw.ResultMessage):
client = self.active_tasks.pop(msg.jobid)
self.idle_workers.append(conn)
if client in self.connections:
# Ensure client has not disappeared.
yield cw._sendmsg(client, msg)
elif isinstance(msg, cw.WorkerRegisterMessage):
self.idle_workers.append(conn)
self._show_workers()
elif isinstance(msg, cw.WorkerDepartMessage):
self.idle_workers.remove(conn)
self._show_workers()
else:
assert False
# Dispatch as many waiting tasks as we can.
while self.queued_tasks and self.idle_workers:
task_message, client = self.queued_tasks.pop(0)
if client in self.connections:
worker = self.idle_workers.pop(0)
self.active_tasks[task_message.jobid] = client
yield cw._sendmsg(worker, task_message)
self.connections.remove(conn)
def run(self):
bluelet.run(bluelet.server('', cw.PORT, self.communicate))
if __name__ == '__main__':
Master().run()
|
1655425
|
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import numpy as np
import matplotlib.pyplot as plt
# Hyper parameters
n_epochs = 5
num_classes = 10
batch_size = 100
learning_rate = 0.001
interval = 100
# MNIST dataset
train_dataset = torchvision.datasets.MNIST(root='../../../_data', train=True, transform=transforms.ToTensor(), download=True)
test_dataset = torchvision.datasets.MNIST(root='../../../_data', train=False, transform=transforms.ToTensor(), download=True)
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)
def display_multiple_img(images, predictions, rows=1, cols=1):
figure, ax = plt.subplots(nrows=rows, ncols=cols)
for i, image in enumerate(images):
ax[i].imshow(image[0])
ax[i].set_title(f"{predictions[i]}")
ax[i].set_axis_off()
plt.tight_layout()
plt.show()
# Convolutional neural network (two convolutional layers)
class ConvNet(nn.Module):
def __init__(self, num_classes=10):
super(ConvNet, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(16),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2))
self.layer2 = nn.Sequential(
nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2))
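        # Two stride-2 max-pools shrink 28x28 MNIST inputs to 7x7, with 32
        # channels after layer2, hence the 7*7*32 input features below.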
self.fc = nn.Linear(7*7*32, num_classes)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = out.reshape(out.size(0), -1)
out = self.fc(out)
return out
model = ConvNet(num_classes)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
for epoch in range(n_epochs):
loss_batch = 0
for i, (images, labels) in enumerate(train_loader):
        # (move images and labels to a device here when training on a GPU)
# Forward pass
outputs = model(images)
loss = criterion(outputs, labels)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_batch += loss.item()
print (f'\rEpoch [{epoch+1}/{n_epochs}], loss: {loss_batch}', end="")
# get accuracy
model.eval()  # BatchNorm layers should run in evaluation mode
correct = 0
total = 0
with torch.no_grad():
    for images, labels in test_loader:
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Test Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))
# result network
for images, labels in test_loader:
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
maximum = min(10, len(images))
display_multiple_img(images[:maximum], predicted[:maximum], 1, maximum)
break
|
1655460
|
import inspect
from abc import ABCMeta, abstractmethod
class BaseStorageBackend(metaclass=ABCMeta):
"""Abstract class of storage backends.
All backends need to implement two apis: `get()` and `get_text()`.
`get()` reads the file as a byte stream and `get_text()` reads the file
as texts.
"""
@abstractmethod
def get(self, filepath):
pass
@abstractmethod
def get_text(self, filepath):
pass
class CephBackend(BaseStorageBackend):
"""Ceph storage backend."""
def __init__(self):
try:
import ceph
except ImportError:
raise ImportError('Please install ceph to enable CephBackend.')
self._client = ceph.S3Client()
def get(self, filepath):
value = self._client.Get(filepath)
value_buf = memoryview(value)
return value_buf
def get_text(self, filepath):
raise NotImplementedError
class MemcachedBackend(BaseStorageBackend):
"""Memcached storage backend.
Attributes:
server_list_cfg (str): Config file for memcached server list.
client_cfg (str): Config file for memcached client.
sys_path (str | None): Additional path to be appended to `sys.path`.
Default: None.
"""
def __init__(self,
server_list_cfg="/mnt/lustre/share/memcached_client/server_list.conf",
client_cfg="/mnt/lustre/share/memcached_client/client.conf",
sys_path=None):
if sys_path is not None:
import sys
sys.path.append(sys_path)
try:
import mc
except ImportError:
raise ImportError(
'Please install memcached to enable MemcachedBackend.')
self.server_list_cfg = server_list_cfg
self.client_cfg = client_cfg
self._client = mc.MemcachedClient.GetInstance(self.server_list_cfg,
self.client_cfg)
        # mc.pyvector serves as a pointer to a memory cache buffer
self._mc_buffer = mc.pyvector()
def get(self, filepath):
import mc
self._client.Get(filepath, self._mc_buffer)
value_buf = mc.ConvertBuffer(self._mc_buffer)
return value_buf
def get_text(self, filepath):
raise NotImplementedError
class HardDiskBackend(BaseStorageBackend):
"""Raw hard disks storage backend."""
def get(self, filepath):
with open(filepath, 'rb') as f:
value_buf = f.read()
return value_buf
def get_text(self, filepath):
with open(filepath, 'r') as f:
value_buf = f.read()
return value_buf
class FileClient(object):
"""A general file client to access files in different backend.
    The client loads a file or text from a specified backend path and
    returns it as a binary file. It can also register other backend
    accessors with a given name and backend class.
Attributes:
backend (str): The storage backend type. Options are "disk", "ceph" and
"memcached".
client (:obj:`BaseStorageBackend`): The backend object.
"""
_backends = {
'disk': HardDiskBackend,
'ceph': CephBackend,
'memcached': MemcachedBackend
}
def __init__(self, backend='disk', **kwargs):
if backend not in self._backends:
raise ValueError(
'Backend {} is not supported. Currently supported ones are {}'.
format(backend, list(self._backends.keys())))
self.backend = backend
self.client = self._backends[backend](**kwargs)
@classmethod
def register_backend(cls, name, backend):
if not inspect.isclass(backend):
raise TypeError('backend should be a class but got {}'.format(
type(backend)))
if not issubclass(backend, BaseStorageBackend):
raise TypeError(
'backend {} is not a subclass of BaseStorageBackend'.format(
backend))
if not callable(getattr(backend, 'get', None)):
raise AssertionError('backend must implement `get()`')
cls._backends[name] = backend
def get(self, filepath):
return self.client.get(filepath)
def get_text(self, filepath):
return self.client.get_text(filepath)
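# Illustrative usage (not part of the original module):
#   client = FileClient(backend='disk')
#   raw = client.get('/path/to/file.bin')        # bytes
#   text = client.get_text('/path/to/file.txt')  # str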
|
1655492
|
def batch(iterable, n=1):
size = len(iterable)
for ndx in range(0, size, n):
yield iterable[ndx:min(ndx + n, size)]
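# Illustrative usage (not part of the original):
#   list(batch([1, 2, 3, 4, 5], n=2))  # -> [[1, 2], [3, 4], [5]]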
|
1655531
|
from rest_framework import pagination
class UserPagination(pagination.PageNumberPagination):
page_size = 1000
|
1655579
|
def matching_parens(a, b):
return (
(a == '(' and b == ')') or
(a == '{' and b == '}') or
(a == '[' and b == ']')
)
def has_valid_parens(text):
stack = []
for c in text:
if c in ['(', '[', '{']:
stack.append(c)
elif c in [')', ']', '}']:
closing_parens = c
try:
opening_parens = stack.pop()
except IndexError:
return False
if not matching_parens(opening_parens, closing_parens):
return False
return len(stack) == 0
def test_paren():
assert not has_valid_parens('([')
assert has_valid_parens('([{}])')
assert has_valid_parens('(([]){})')
assert not has_valid_parens('([)')
assert not has_valid_parens('((((([{()}[]]]{{{[]}}})))))')
|
1655649
|
from fabric.api import cd, run
app_name = "flask-api"
def deploy():
run("rm -fr %s" % app_name)
run("mkdir %s" % app_name)
with cd("%s" % app_name):
run("virtualenv %s-env" % app_name)
run("source %s-env/bin/activate" % app_name)
run("sudo pip install supervisor gunicorn")
checkoutGitRepo("https://github.com/tomaszguzialek/flask-api.git")
run("sudo pip install -r requirements.txt")
run("echo \"[supervisord]\n# Empty\n\n[program:flask-api]\ncommand=gunicorn -b 0.0.0.0:8000 src.runner:app\" > supervisord.conf")
# Supervisord searches for supervisord.conf in current working directory
run("supervisord")
def checkoutGitRepo(url):
run("git init")
run("git remote add origin %s" % url)
run("git fetch")
run("git checkout -t origin/master")
|
1655669
|
ReferenceArchitectureSpec = {
'EDB-RA-1': {
'pg_count': 1,
'pem_server': True,
'barman': True,
'barman_server_count': 1,
'bdr_server_count': 0,
'bdr_witness_count': 0,
'pooler_count': 0,
'pooler_type': None,
'pooler_local': False,
'efm': False,
'replication_type': None,
'dbt2': False,
'dbt2_client': False,
'dbt2_client_count': 0,
'dbt2_driver': False,
'dbt2_driver_count': 0,
'hammerdb': False,
'hammerdb_server': False
},
'EDB-RA-2': {
'pg_count': 3,
'pem_server': True,
'barman': True,
'barman_server_count': 1,
'bdr_server_count': 0,
'bdr_witness_count': 0,
'pooler_count': 0,
'pooler_type': None,
'pooler_local': False,
'efm': True,
'replication_type': "synchronous",
'dbt2': False,
'dbt2_client': False,
'dbt2_client_count': 0,
'dbt2_driver': False,
'dbt2_driver_count': 0,
'hammerdb': False,
'hammerdb_server': False
},
'EDB-RA-3': {
'pg_count': 3,
'pem_server': True,
'barman': True,
'barman_server_count': 1,
'bdr_server_count': 0,
'bdr_witness_count': 0,
'pooler_count': 3,
'pooler_type': "pgpool2",
'pooler_local': False,
'efm': True,
'replication_type': "synchronous",
'dbt2': False,
'dbt2_client': False,
'dbt2_client_count': 0,
'dbt2_driver': False,
'dbt2_driver_count': 0,
'hammerdb': False,
'hammerdb_server': False
},
'HammerDB-DBaaS': {
'pg_count': 0,
'pem_server': False,
'barman': False,
'barman_server_count': 0,
'bdr_server_count': 0,
'bdr_witness_count': 0,
'pooler_count': 0,
'pooler_type': None,
'pooler_local': False,
'efm': False,
'replication_type': None,
'dbt2': False,
'dbt2_client': False,
'dbt2_client_count': 0,
'dbt2_driver': False,
'dbt2_driver_count': 0,
'hammerdb': False,
'hammerdb_server': True
},
'HammerDB-TPROC-C': {
'pg_count': 1,
'pem_server': False,
'barman': False,
'barman_server_count': 0,
'bdr_server_count': 0,
'bdr_witness_count': 0,
'pooler_count': 0,
'pooler_type': None,
'pooler_local': False,
'efm': False,
'replication_type': None,
'dbt2': False,
'dbt2_client': False,
'dbt2_client_count': 0,
'dbt2_driver': False,
'dbt2_driver_count': 0,
'hammerdb': True,
'hammerdb_server': True
},
'EDB-RA': {
'pg_count': 3,
'pem_server': True,
'barman': True,
'barman_server_count': 1,
'bdr_server_count': 0,
'bdr_witness_count': 0,
'pooler_count': 0,
'pooler_type': None,
'pooler_local': False,
'efm': True,
'replication_type': "synchronous",
'dbt2': False,
'dbt2_client': False,
'dbt2_client_count': 0,
'dbt2_driver': False,
'dbt2_driver_count': 0,
'hammerdb': False,
'hammerdb_server': False
},
'EDB-Always-On-Platinum': {
'pg_count': 0,
'pem_server': True,
'barman': True,
'barman_server_count': 2,
'bdr_server_count': 6,
'bdr_witness_count': 1,
'pooler_count': 4,
'pooler_type': "pgbouncer",
'pooler_local': False,
'efm': True,
'replication_type': "synchronous",
'dbt2': False,
'dbt2_client': False,
'dbt2_client_count': 0,
'dbt2_driver': False,
'dbt2_driver_count': 0,
'hammerdb': False,
'hammerdb_server': False
},
'EDB-Always-On-Silver': {
'pg_count': 0,
'pem_server': True,
'barman': True,
'barman_server_count': 1,
'bdr_server_count': 3,
'bdr_witness_count': 0,
'pooler_count': 2,
'pooler_type': "pgbouncer",
'pooler_local': False,
'efm': False,
'replication_type': "synchronous",
'dbt2': False,
'dbt2_client': False,
'dbt2_client_count': 0,
'dbt2_driver': False,
'dbt2_driver_count': 0,
'hammerdb': False,
'hammerdb_server': False
}
}
|
1655691
|
from helper import *
def doTest():
equal(getFixed('.test {margin:0 0 0 0}', 'margin'), '0', 'margin is fixed')
equal(getFixed('.test {margin:0 auto 0 auto}', 'margin'), '0 auto', 'margin 2 is fixed')
equal(getFixed('.test {margin:auto 0 0 auto}', 'margin'), 'auto 0 0 auto', 'margin 3 is fixed')
equal(getFixed('.test {margin:0 0}', 'margin'), '0', 'margin 4 is fixed')
equal(getFixed('.test {margin:0px 0}', 'margin'), '0', 'margin 5 is fixed')
equal(getFixed('.test {margin:0px 1px 0px 1px}', 'margin'), '0 1px', 'margin 6 is fixed')
equal(getFixed('.test {margin:0px auto 0px auto}', 'margin'), '0 auto', 'margin 7 is fixed')
equal(getFixed('.test {margin:50px auto 0 auto}', 'margin'), '50px auto 0', 'margin 8 is fixed')
equal(getFixed('.test {margin: -15px -15px 0 -15px}', 'margin'), '-15px -15px 0', 'margin 9 is fixed')
|
1655715
|
import sys
import csv
import os.path as path
base_directory = path.dirname(path.dirname(path.abspath(__file__)))
sys.path.append(path.join(base_directory, 'engine'))
from deparse import feature_string
from segment import Segment
def test_feature_string():
segment = Segment(['consonantal', 'voice', 'labial'],
['syllabic', 'stress', 'long', 'sonorant', 'continuant', 'delayedrelease', 'approximant', 'tap', 'trill', 'nasal', 'spreadglottis', 'constrictedglottis', 'round', 'labiodental', 'coronal', 'lateral', 'dorsal'])
assert feature_string(segment) == '---+-------+--+---000--00000'
|
1655725
|
import collections.abc
import itertools
Seq = collections.abc.Sequence
_chain = itertools.chain
_repeat = itertools.repeat
_islice = itertools.islice
# `Union` (the tagged-union base class) and `Nil` (the empty-list singleton,
# presumably defined later in the original module) are referenced below; this
# relative import path is an assumption about the package layout.
from .union import Union
# noinspection PyMethodParameters,PyMethodFirstArgAssignment
class List(Union):
"""
An immutable singly-linked list.
"""
@property
def uncons(self):
"""
Tuple with (head, tail) of cons cell.
"""
if self:
return self.head, self.tail
else:
raise ValueError("cannot desconstruct empty list.")
# Generic implementations
__contains__ = Seq.__contains__
__reversed__ = Seq.__reversed__
index = Seq.index
count = Seq.count
@classmethod
def __union_constructor__(cls, seq=(), tail=None):
"""
Examples:
>>> List([1, 2, 3])
List([1, 2, 3])
"""
if isinstance(seq, List):
return seq
# Efficient non-recursive list constructor
set_head = Cons.head.__set__
set_tail = Cons.tail.__set__
new = object.__new__
cons = Cons
it = iter(seq)
try:
result = last = new(cons)
set_head(last, next(it))
except StopIteration:
return tail or Nil
for x in it:
cell = new(cons)
set_head(cell, x)
set_tail(last, cell)
last = cell
set_tail(last, tail or Nil)
return result
# Methods
def __iter__(lst): # noqa: N805
while lst is not Nil:
yield lst.head
lst = lst.tail
def __len__(lst): # noqa: N805
return sum(1 for _ in lst)
def __getitem__(self, idx): # noqa: N805
if idx < 0:
raise IndexError("negative indexes are not supported")
for i, x in enumerate(self):
if i == idx:
return x
else:
raise IndexError(idx)
def __repr__(self):
data = ", ".join(map(str, self))
return "List([{}])".format(data)
# Concatenation
def __add__(self, other):
if isinstance(other, List):
cons = Cons
for x in reversed(list(self)):
other = cons(x, other)
return other
return NotImplemented
def __mul__(self, other):
if isinstance(other, int):
if other == 0:
return Nil
elif other == 1:
return self
elif other < 0:
raise ValueError("negative numbers")
return List(_chain.from_iterable(_repeat(self, other - 1)), tail=self)
return NotImplemented
__rmul__ = __mul__
# Lexicographical comparisons
def __eq__(a, b):
nil = Nil
if isinstance(b, List):
while a is not nil and b is not nil:
if a is b:
return True
elif a.head != b.head:
return False
a, b = a.tail, b.tail
return a is b
return NotImplemented
def __gt__(a, b):
nil = Nil
if isinstance(b, List):
while a is not nil and b is not nil:
if a is b:
return False
elif a.head == b.head:
a, b = a.tail, b.tail
else:
return a.head > b.head
return a is not Nil
return NotImplemented
def __ge__(a, b):
nil = Nil
if isinstance(b, List):
while a is not nil and b is not nil:
if a is b:
return True
elif a.head == b.head:
a, b = a.tail, b.tail
else:
return a.head > b.head
            return b is nil  # a >= b exactly when b is a (possibly equal) prefix of a
return NotImplemented
#
# Inserting and removing elements
#
def cons(self, x):
"""
Adds an element to the beginning of the list.
"""
return Cons(x, self)
def take(self, n): # noqa: N805
"""
Return a list with at most n elements taken from the beginning of the
list.
"""
return List(_islice(self, n))
def drop(lst, n): # noqa: N805
"""
Return a list that removes at most n elements from the beginning of the
list.
"""
for _ in range(n):
try:
lst = lst.tail
except AttributeError:
break
return lst
#
# Reorganizing the list
#
def reversed(lst) -> "List": # noqa: N805
"""
Reversed copy of the list.
"""
acc = Nil
while lst:
x, lst = lst.uncons
acc = acc.cons(x)
return acc
def partition(lst, pred):
"""
Separate list on predicate.
"""
start = []
append = start.append
while lst:
x, lst_ = lst.uncons
if pred(x):
break
lst = lst_
append(x)
return List(start), lst
#
# Monadic interface
#
def map(self, func):
"""
Maps function into list.
"""
return List(map(func, self))
def map_bound(self, func):
"""
Maps a function that return sequences into the list and flatten all
intermediate results.
"""
def iter_all():
for x in self:
yield from func(x)
return List(iter_all())
class Cons(List):
"""
A link in the linked list.
"""
head: object
tail: List
__bool__ = lambda lst: True
class Nil(List):
"""
Terminal element in a linked list.
Represents an empty list.
"""
__bool__ = lambda x: False
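# Illustrative round trips, assuming the Union base machinery routes
# construction through __union_constructor__ (as the class docstring example
# already does):
#
# >>> xs = List([1, 2, 3])
# >>> xs.cons(0)
# List([0, 1, 2, 3])
# >>> xs.take(2)
# List([1, 2])
# >>> xs.drop(1)
# List([2, 3])
# >>> xs.reversed()
# List([3, 2, 1])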
|
1655741
|
import logging
import flask_profiler
logger = logging.getLogger(__name__)
def setup_profiler(app):
profiler = app.config['POLYSWARMD'].profiler
if not profiler.enabled:
return
if profiler.db_uri is None:
logger.error('Profiler enabled but no db configured')
return
app.config['flask_profiler'] = {
'enabled': True,
'measurement': True,
'gui': False,
'storage': {
'engine': 'sqlalchemy',
'db_url': profiler.db_uri,
},
}
flask_profiler.init_app(app)
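# Usage sketch (hypothetical wiring; assumes app.config['POLYSWARMD'] carries
# a config object exposing profiler.enabled and profiler.db_uri, as read above):
#
# app = Flask(__name__)
# app.config['POLYSWARMD'] = load_config()
# setup_profiler(app)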
|
1655751
|
import conans
class PMM(conans.ConanFile):
name = 'pmm'
version = '1.5.1'
settings = None
exports_sources = '*'
generators = 'cmake'
# build_requires = (
# 'libman-generator/[*]@vector-of-bool/test'
# )
# generators = 'cmake', 'LibMan'
def build(self):
cmake = conans.CMake(self)
cmake.configure()
cmake.build()
|
1655762
|
from pymacaroons.binders import *
from pymacaroons.utils import *
class HashSignaturesBinder1(HashSignaturesBinder):
def __init__(self, root):
super(HashSignaturesBinder1, self).__init__(
root, truncate_or_pad(b'12345')
)
class HashSignaturesBinder2(HashSignaturesBinder):
def __init__(self, root):
super(HashSignaturesBinder2, self).__init__(
root, truncate_or_pad(b'56789')
)
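# Usage sketch (assumption: these binders expose the pymacaroons BaseBinder
# interface, where bind() ties a discharge macaroon's signature to the root):
#
# bound_discharge = HashSignaturesBinder1(root_macaroon).bind(discharge_macaroon)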
|
1655826
|
from django.core.exceptions import ImproperlyConfigured
import pytest
from pretix_eth.network.tokens import IToken
from pretix_eth.network import helpers
def create_token():
class Test(IToken):
NETWORK_IDENTIFIER = "Test"
NETWORK_VERBOSE_NAME = "Test Network"
TOKEN_SYMBOL = "T"
CHAIN_ID = 1
return Test()
def test_token_get_ticket_price_in_token_is_correct():
test_token: IToken = create_token()
price_in_token_weis = test_token.get_ticket_price_in_token(10, {"T_RATE": 1000})
# price of 1 T token = 1000. So 10$ = 0.001 T tokens or 10^16.
assert price_in_token_weis == 10000000000000000
def test_token_get_price_in_token_gives_error_if_no_rate_given():
test_token: IToken = create_token()
with pytest.raises(ImproperlyConfigured) as execinfo:
test_token.get_ticket_price_in_token(10, {"ANOTHER_TOKEN_RATE": 1000})
assert (
execinfo.value.args[0]
== "Token Symbol not defined in TOKEN_RATES admin settings: T"
)
# TODO Test IToken.is_allowed()
def test_make_erc_681_url_for_native_asset():
to_address = "0xtest1"
payment_amount = "10"
chain_id = 3
erc681_url = helpers.make_erc_681_url(to_address, payment_amount, chain_id)
assert erc681_url == f"ethereum:{to_address}@{chain_id}?value={payment_amount}"
def test_make_erc_681_url_for_token():
to_address = "0xtest1"
payment_amount = "10"
chain_id = 1
is_token = True
token_address = "0xtoken"
erc681_url = helpers.make_erc_681_url(
to_address, payment_amount, chain_id, is_token, token_address
)
assert (
erc681_url
== f"ethereum:{token_address}/transfer?address={to_address}&uint256={payment_amount}"
)
def test_make_erc_681_url_for_token_fails_if_no_addres_specified():
to_address = "0xtest1"
payment_amount = "10"
chain_id = 3
is_token = True
token_address = None
with pytest.raises(ValueError) as execinfo:
helpers.make_erc_681_url(
to_address, payment_amount, chain_id, is_token, token_address
)
assert (
execinfo.value.args[0]
== "if is_token is true, then you must pass contract address of the token."
)
# helpers.uniswap, web3modal-checkout links.
|
1655846
|
from aws_cdk import (
core,
aws_lambda,
aws_ec2 as ec2,
aws_cloudwatch as cloudwatch,
aws_logs as logs,
)
from . import names
class ZipFunction(core.Construct):
def __init__(
self,
scope: core.Construct,
id: str,
name,
lambda_code_bucket,
environment,
timeout=30,
memory_size=128,
vpc_id=None,
security_group_id=None,
handler=None,
):
super().__init__(scope, id)
self.stack_name = core.Stack.of(self).stack_name
environment = {
key: str(val) for key, val in environment.items() if val
}
if not handler:
handler = f"{name}.handler"
function_props = {
"function_name": f"{self.stack_name}-{name}",
"runtime": aws_lambda.Runtime.PYTHON_3_8,
"code": aws_lambda.Code.from_bucket(
bucket=lambda_code_bucket, key=f"{self.stack_name}/{name}.zip"
),
"handler": handler,
"timeout": core.Duration.seconds(timeout),
"memory_size": memory_size,
"environment": environment,
"log_retention": logs.RetentionDays.SIX_MONTHS,
}
if vpc_id and security_group_id:
opencast_vpc = ec2.Vpc.from_lookup(
self, "OpencastVpc", vpc_id=vpc_id
)
opencast_security_group = ec2.SecurityGroup.from_security_group_id(
self,
"OpencastSecurityGroup",
security_group_id=security_group_id,
)
function_props.update(
{
"vpc": opencast_vpc,
"security_groups": [opencast_security_group],
}
)
self.function = aws_lambda.Function(self, "function", **function_props)
self.alias = aws_lambda.Alias(
self,
"alias",
version=self.function.add_version("$LATEST"),
description="initial release",
alias_name=names.LAMBDA_RELEASE_ALIAS,
)
def add_monitoring(self, monitoring):
errors_alarm = cloudwatch.Alarm(
self,
"ErrorsAlarm",
metric=self.function.metric_errors(),
alarm_name=f"{self.function.function_name}-errors",
statistic="sum",
comparison_operator=cloudwatch.ComparisonOperator.GREATER_THAN_OR_EQUAL_TO_THRESHOLD,
threshold=1,
period=core.Duration.minutes(1),
evaluation_periods=1,
)
monitoring.add_alarm_action(errors_alarm)
class ZipOnDemandFunction(ZipFunction):
pass
class ZipWebhookFunction(ZipFunction):
def add_monitoring(self, monitoring):
super().add_monitoring(monitoring)
logs.MetricFilter(
self,
"RecordingCompletedLogMetric",
log_group=self.function.log_group,
metric_name="RecordingCompleted",
metric_value="1",
metric_namespace=monitoring.custom_metric_namespace,
filter_pattern=logs.FilterPattern.all(
logs.JsonPattern(
'$.message.payload.status = "RECORDING_MEETING_COMPLETED"'
)
),
)
logs.MetricFilter(
self,
"MeetingStartedLogMetric",
log_group=self.function.log_group,
metric_name="MeetingStarted",
metric_value="1",
metric_namespace=monitoring.custom_metric_namespace,
filter_pattern=logs.FilterPattern.all(
                logs.JsonPattern('$.message.payload.status = "STARTED"')
),
)
logs.MetricFilter(
self,
"MeetingEndedLogMetric",
log_group=self.function.log_group,
metric_name="MeetingEnded",
metric_value="1",
metric_namespace=monitoring.custom_metric_namespace,
filter_pattern=logs.FilterPattern.all(
                logs.JsonPattern('$.message.payload.status = "ENDED"')
),
)
class ZipDownloaderFunction(ZipFunction):
def add_monitoring(self, monitoring):
super().add_monitoring(monitoring)
invocations_alarm = cloudwatch.Alarm(
self,
"InvocationsAlarm",
metric=self.function.metric_invocations(),
alarm_name=f"{self.function.function_name}-invocations",
statistic="sum",
comparison_operator=cloudwatch.ComparisonOperator.LESS_THAN_THRESHOLD,
threshold=1,
period=core.Duration.minutes(1440),
evaluation_periods=1,
)
monitoring.add_alarm_action(invocations_alarm)
logs.MetricFilter(
self,
"RecordingDurationLogMetric",
log_group=self.function.log_group,
metric_name="RecordingDuration",
metric_value="$.message.duration",
metric_namespace=monitoring.custom_metric_namespace,
filter_pattern=logs.FilterPattern.all(
logs.JsonPattern("$.message.duration > 0")
),
)
logs.MetricFilter(
self,
"RecordingSkippedLogMetric",
log_group=self.function.log_group,
metric_name="SkippedForDuration",
metric_value="1",
metric_namespace=monitoring.custom_metric_namespace,
filter_pattern=logs.FilterPattern.literal("Skipping"),
)
class ZipUploaderFunction(ZipFunction):
def add_monitoring(self, monitoring):
super().add_monitoring(monitoring)
logs.MetricFilter(
self,
"MinutesInPipelineLogMetric",
log_group=self.function.log_group,
metric_name="MinutesInPipeline",
metric_value="$.message.minutes_in_pipeline",
metric_namespace=monitoring.custom_metric_namespace,
filter_pattern=logs.FilterPattern.all(
logs.JsonPattern("$.message.minutes_in_pipeline > 0")
),
)
logs.MetricFilter(
self,
"WorkflowInitiatedLogMetric",
log_group=self.function.log_group,
metric_name="WorkflowInitiated",
metric_value="1",
metric_namespace=monitoring.custom_metric_namespace,
filter_pattern=logs.FilterPattern.literal("Workflow"),
)
class ZipOpCountsFunction(ZipFunction):
pass
class ZipLogNotificationsFunction(ZipFunction):
pass
class ZipScheduleUpdateFunction(ZipFunction):
pass
class ZipStatusQueryFunction(ZipFunction):
pass
class ZipSlackQueryFunction(ZipFunction):
pass
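# Usage sketch (hypothetical stack wiring; ``code_bucket`` and ``monitoring``
# are assumed to be constructed elsewhere in the stack):
#
# fn = ZipUploaderFunction(
#     self, "Uploader",
#     name="uploader",
#     lambda_code_bucket=code_bucket,
#     environment={"LOG_LEVEL": "INFO"},
# )
# fn.add_monitoring(monitoring)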
|
1655872
|
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import onqg.dataset.Constants as Constants
from onqg.models.modules.Attention import ConcatAttention
from onqg.models.modules.MaxOut import MaxOut
from onqg.models.modules.DecAssist import StackedRNN, DecInit
class RNNDecoder(nn.Module):
"""
Input: (1) inputs['tgt_seq']
(2) inputs['src_seq']
(3) inputs['src_indexes']
(4) inputs['enc_output']
(5) inputs['hidden']
(6) inputs['feat_seqs']
Output: (1) rst['pred']
(2) rst['attn']
(3) rst['context']
(4) rst['copy_pred']; rst['copy_gate']
(5) rst['coverage_pred']
"""
def __init__(self, n_vocab, ans_n_vocab, d_word_vec, d_model, n_layer,
rnn, d_k, feat_vocab, d_feat_vec, d_enc_model, n_enc_layer,
input_feed, copy, answer, separate, coverage, layer_attn,
maxout_pool_size, dropout, device=None, encoder_word_emb=None):
self.name = 'rnn'
super(RNNDecoder, self).__init__()
self.n_layer = n_layer
self.layer_attn = layer_attn
self.separate = separate
self.coverage = coverage
self.copy = copy
self.maxout_pool_size = maxout_pool_size
self.n_vocab_size = n_vocab
input_size = d_word_vec
self.input_feed = input_feed
if input_feed:
input_size += d_enc_model
self.ans_emb_weight = encoder_word_emb
self.answer = answer
tmp_in = d_word_vec if answer else d_enc_model
self.decInit = DecInit(d_enc=tmp_in, d_dec=d_model, n_enc_layer=n_enc_layer)
        self.feature = bool(feat_vocab)
if self.feature:
self.feat_embs = nn.ModuleList([
nn.Embedding(n_f_vocab, d_feat_vec, padding_idx=Constants.PAD) for n_f_vocab in feat_vocab
])
# input_size += len(feat_vocab) * d_feat_vec # PS: only for test !!!
feat_size = len(feat_vocab) * d_feat_vec if self.feature else 0
self.d_enc_model = d_enc_model
self.word_emb_type = ans_n_vocab == n_vocab
self.word_emb = nn.Embedding(n_vocab, d_word_vec, padding_idx=Constants.PAD)
self.rnn = StackedRNN(n_layer, input_size, d_model, dropout, rnn=rnn)
self.attn = ConcatAttention(d_enc_model + feat_size, d_model, d_k, coverage)
self.readout = nn.Linear((d_word_vec + d_model + self.d_enc_model), d_model)
self.maxout = MaxOut(maxout_pool_size)
if copy:
self.copy_switch = nn.Linear(d_enc_model + d_model, 1)
self.hidden_size = d_model
self.dropout = nn.Dropout(dropout)
self.device = device
@classmethod
def from_opt(cls, opt):
return cls(opt['n_vocab'], opt['ans_n_vocab'], opt['d_word_vec'], opt['d_model'], opt['n_layer'],
opt['rnn'], opt['d_k'], opt['feat_vocab'], opt['d_feat_vec'],
opt['d_enc_model'], opt['n_enc_layer'], opt['input_feed'], opt['copy'], opt['answer'], opt['separate'],
opt['coverage'], opt['layer_attn'], opt['maxout_pool_size'], opt['dropout'],
opt['device'], opt['encoder_word_emb'])
def attn_init(self, context):
if isinstance(context, list):
context = context[-1]
if isinstance(context, tuple):
context = torch.cat(context, dim=-1)
batch_size = context.size(0)
hidden_sizes = (batch_size, self.d_enc_model)
return Variable(context.data.new(*hidden_sizes).zero_(), requires_grad=False)
def forward(self, inputs, max_length=300, rl_type='', generator=None):
tgt_seq, src_seq, src_indexes = inputs['tgt_seq'], inputs['src_seq'], inputs['src_indexes']
if self.answer:
ans_seq = inputs['ans_seq']
enc_output, hidden, feat_seqs = inputs['enc_output'], inputs['hidden'], inputs['feat_seqs']
src_pad_mask = Variable(src_seq.data.eq(50256).float(), requires_grad=False, volatile=False) # TODO: fix this magic number later
if self.layer_attn:
n_enc_layer = len(enc_output)
src_pad_mask = src_pad_mask.repeat(1, n_enc_layer)
enc_output = torch.cat(enc_output, dim=1)
feat_inputs = None
if self.feature:
feat_inputs = [feat_emb(feat_seq) for feat_seq, feat_emb in zip(feat_seqs, self.feat_embs)]
feat_inputs = torch.cat(feat_inputs, dim=2)
if self.layer_attn:
feat_inputs = feat_inputs.repeat(1, n_enc_layer, 1)
# enc_output = torch.cat((enc_output, feat_inputs), dim=2) # PS: only for test !!!
cur_context = self.attn_init(enc_output)
if self.answer:
ans_words = torch.sum(F.embedding(ans_seq, self.ans_emb_weight), dim=1)
hidden = self.decInit(ans_words).unsqueeze(0)
else:
hidden = self.decInit(hidden).unsqueeze(0)
self.attn.apply_mask(src_pad_mask)
if rl_type:
return self.rl_forward(rl_type, generator, tgt_seq, cur_context, hidden, enc_output,
feat_inputs, src_indexes)
else:
            return self.nll_forward(tgt_seq, cur_context, hidden, enc_output, feat_inputs, src_indexes, max_length)
    def nll_forward(self, tgt_seq, cur_context, hidden, enc_output, feat_inputs, src_indexes, max_length=300):
        # max_length is threaded through from forward(); the original read it here without receiving it.
tmp_context, tmp_coverage = None, None
dec_outputs, coverage_output, copy_output, copy_gate_output = [], [], [], []
dec_input = self.word_emb(tgt_seq)
dec_input = dec_input.transpose(0, 1)
for seq_idx, dec_input_emb in enumerate(dec_input.split(1)):
dec_input_emb = dec_input_emb.squeeze(0)
raw_dec_input_emb = dec_input_emb
if self.input_feed:
dec_input_emb = torch.cat((dec_input_emb, cur_context), dim=1)
dec_output, hidden = self.rnn(dec_input_emb, hidden)
if self.coverage:
if tmp_coverage is None:
tmp_coverage = Variable(torch.zeros((enc_output.size(0), enc_output.size(1))))
if self.device:
tmp_coverage = tmp_coverage.to(self.device)
cur_context, attn, tmp_context, next_coverage = self.attn(dec_output, enc_output, precompute=tmp_context,
coverage=tmp_coverage, feat_inputs=feat_inputs,
feature=self.feature)
avg_tmp_coverage = tmp_coverage / max(1, seq_idx)
coverage_loss = torch.sum(torch.min(attn, avg_tmp_coverage), dim=1)
tmp_coverage = next_coverage
coverage_output.append(coverage_loss)
else:
cur_context, attn, tmp_context = self.attn(dec_output, enc_output, precompute=tmp_context,
feat_inputs=feat_inputs, feature=self.feature)
if self.copy:
copy_prob = self.copy_switch(torch.cat((dec_output, cur_context), dim=1))
copy_prob = torch.sigmoid(copy_prob)
if self.layer_attn:
attn = attn.view(attn.size(0), len(enc_output), -1)
attn = attn.sum(1)
if self.separate:
out = torch.zeros([len(attn), max_length], device=self.device if self.device else None)
for i in range(len(attn)):
data_length = src_indexes[i]
out[i].narrow(0, 1, data_length - 1).copy_(attn[i][1:src_indexes[i]])
attn = out
norm_term = attn.sum(1, keepdim=True)
attn = attn / norm_term
copy_output.append(attn)
copy_gate_output.append(copy_prob)
readout = self.readout(torch.cat((raw_dec_input_emb, dec_output, cur_context), dim=1))
maxout = self.maxout(readout)
output = self.dropout(maxout)
dec_outputs.append(output)
dec_output = torch.stack(dec_outputs).transpose(0, 1)
rst = {}
rst['pred'], rst['attn'], rst['context'] = dec_output, attn, cur_context
if self.copy:
copy_output = torch.stack(copy_output).transpose(0, 1)
copy_gate_output = torch.stack(copy_gate_output).transpose(0, 1)
rst['copy_pred'], rst['copy_gate'] = copy_output, copy_gate_output
if self.coverage:
coverage_output = torch.stack(coverage_output).transpose(0, 1)
rst['coverage_pred'] = coverage_output
return rst
def rl_forward(self, rl_type, generator, tgt_seq, cur_context, hidden, enc_output,
feat_inputs, src_indexes):
tmp_context, tmp_coverage, seq_idx = None, None, 0
dec_outputs, coverage_output, copy_output, copy_gate_output = [], [], [], []
max_length, input_seq = tgt_seq.size(-1), tgt_seq.transpose(0, 1)[0]
rand_input_seq = input_seq.clone().detach()
decoded_text, rand_decoded_text = [], []
init_tokens = torch.zeros(input_seq.size(), device=input_seq.device).long()
rand_tokens = torch.zeros(input_seq.size(), device=input_seq.device).long()
rand_choice_list = [0, 102] + [idd for idd in range(1001, self.n_vocab_size)]
for i in range(max_length):
decoded_text.append(input_seq.long())
rand_decoded_text.append(rand_input_seq.long())
dec_input_emb = self.word_emb(input_seq.long())
raw_dec_input_emb = dec_input_emb
if self.input_feed:
dec_input_emb = torch.cat((dec_input_emb, cur_context), dim=1)
dec_output, hidden = self.rnn(dec_input_emb, hidden)
if self.coverage:
if tmp_coverage is None:
tmp_coverage = Variable(torch.zeros((enc_output.size(0), enc_output.size(1))))
if self.device:
tmp_coverage = tmp_coverage.to(self.device)
cur_context, attn, tmp_context, next_coverage = self.attn(dec_output, enc_output, precompute=tmp_context,
coverage=tmp_coverage, feat_inputs=feat_inputs,
feature=self.feature)
avg_tmp_coverage = tmp_coverage / max(1, seq_idx)
coverage_loss = torch.sum(torch.min(attn, avg_tmp_coverage), dim=1)
tmp_coverage = next_coverage
coverage_output.append(coverage_loss)
else:
cur_context, attn, tmp_context = self.attn(dec_output, enc_output, precompute=tmp_context,
feat_inputs=feat_inputs, feature=self.feature)
if self.copy:
copy_prob = self.copy_switch(torch.cat((dec_output, cur_context), dim=1))
copy_prob = torch.sigmoid(copy_prob)
if self.layer_attn:
attn = attn.view(attn.size(0), len(enc_output), -1)
attn = attn.sum(1)
if self.separate:
out = torch.zeros([len(attn), max_length], device=self.device if self.device else None)
for i in range(len(attn)):
data_length = src_indexes[i]
out[i].narrow(0, 1, data_length - 1).copy_(attn[i][1:src_indexes[i]])
attn = out
norm_term = attn.sum(1, keepdim=True)
attn = attn / norm_term
copy_output.append(attn)
copy_gate_output.append(copy_prob)
readout = self.readout(torch.cat((raw_dec_input_emb, dec_output, cur_context), dim=1))
maxout = self.maxout(readout)
output = self.dropout(maxout)
dec_outputs.append(output)
paddings = (input_seq.eq(Constants.PAD).float() + input_seq.eq(102).float()).eq(0).float() # TODO magic number [SEP]
rand_paddings = (rand_input_seq.eq(Constants.PAD).float() + rand_input_seq.eq(102).float()).eq(0).float()
##=== next token predict ===##
token_dict = F.softmax(generator(output), dim=-1)
for b in range(input_seq.size(0)):
## sampling strategy 1
selected_idx = token_dict[b].multinomial(1, replacement=False).view(-1).data[0]
## sampling strategy 2
# topk = torch.topk(token_dict[b], k=5, dim=-1) # TODO magic number
# selected_idx = topk[1][random.choice(range(5))].data
init_tokens[b] = selected_idx.item()
rand_tokens[b] = random.choice(rand_choice_list)
input_seq = torch.where(paddings > 0, init_tokens, paddings.long())
rand_input_seq = torch.where(rand_paddings > 0, rand_tokens, rand_paddings.long())
seq_idx += 1
decoded_text.append(input_seq)
rand_decoded_text.append(rand_input_seq)
dec_output = torch.stack(dec_outputs).transpose(0, 1)
rst = {}
rst['pred'], rst['attn'], rst['context'] = dec_output, attn, cur_context
rst['decoded_text'] = torch.stack(decoded_text).transpose(0, 1)
rst['rand_decoded_text'] = torch.stack(rand_decoded_text).transpose(0, 1)
if self.copy:
copy_output = torch.stack(copy_output).transpose(0, 1)
copy_gate_output = torch.stack(copy_gate_output).transpose(0, 1)
rst['copy_pred'], rst['copy_gate'] = copy_output, copy_gate_output
if self.coverage:
coverage_output = torch.stack(coverage_output).transpose(0, 1)
rst['coverage_pred'] = coverage_output
return rst
|
1655933
|
from pathlib import Path
from .text_files import write_text_files
from .xlsx import write_xlsx_files
from .xlsx.templates.submission import SubmissionDocumentParams
from .xlsx.templates.dataset_description import DatasetDescriptionParams
from .xlsx.templates.code_description import CodeDescriptionParams
def write_sds_directory_content(
base_path: Path,
submission_params: SubmissionDocumentParams,
dataset_description_params: DatasetDescriptionParams,
code_description_params: CodeDescriptionParams,
) -> None:
write_text_files(base_path=base_path)
write_xlsx_files(
base_path=base_path,
submission_params=submission_params,
dataset_description_params=dataset_description_params,
code_description_params=code_description_params,
)
__all__ = ["write_sds_directory_content"]
|
1655942
|
MODE_ZWSP = 0
MODE_FULL = 1
ZERO_WIDTH_SPACE = '\u200b'
ZERO_WIDTH_NON_JOINER = '\u200c'
ZERO_WIDTH_JOINER = '\u200d'
LEFT_TO_RIGHT_MARK = '\u200e'
RIGHT_TO_LEFT_MARK = '\u200f'
list_ZWSP = [
ZERO_WIDTH_SPACE,
ZERO_WIDTH_NON_JOINER,
ZERO_WIDTH_JOINER,
]
list_FULL = [
ZERO_WIDTH_SPACE,
ZERO_WIDTH_NON_JOINER,
ZERO_WIDTH_JOINER,
LEFT_TO_RIGHT_MARK,
RIGHT_TO_LEFT_MARK,
]
def get_padding_length(mode):
return 11 if mode == MODE_ZWSP else 7 # Keep padding as small as possible
def to_base(num, b, numerals='0123456789abcdefghijklmnopqrstuvwxyz'):
"""
Python implementation of number.toString(radix)
Thanks to jellyfishtree from https://stackoverflow.com/a/2267428
"""
return ((num == 0) and numerals[0]) or (to_base(num // b, b, numerals).lstrip(numerals[0]) + numerals[num % b])
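# For example, to_base(65, 5) == '230' (2*25 + 3*5 + 0), so 'A' (ord 65)
# becomes the 7-digit code '0000230' in MODE_FULL before each digit is
# mapped onto the 5-character zero-width alphabet.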
def encode(message, mode=MODE_FULL):
if not isinstance(message, str):
raise TypeError('Cannot encode {0}'.format(type(message).__name__))
alphabet = list_ZWSP if mode == MODE_ZWSP else list_FULL
padding = get_padding_length(mode)
encoded = ''
if (len(message) == 0):
return ''
for message_char in message:
code = '{0}{1}'.format('0' * padding, int(str(to_base(ord(message_char), len(alphabet)))))
code = code[len(code) - padding:]
for code_char in code:
index = int(code_char)
encoded = encoded + alphabet[index]
return encoded
def decode(message, mode=MODE_FULL):
if not isinstance(message, str):
raise TypeError('Cannot decode {0}'.format(type(message).__name__))
alphabet = list_ZWSP if mode == MODE_ZWSP else list_FULL
padding = get_padding_length(mode)
encoded = ''
decoded = ''
for message_char in message:
if message_char in alphabet:
encoded = encoded + str(alphabet.index(message_char))
if (len(encoded) % padding != 0):
raise TypeError('Unknown encoding detected!')
cur_encoded_char = ''
for index, encoded_char in enumerate(encoded):
cur_encoded_char = cur_encoded_char + encoded_char
if index > 0 and (index + 1) % padding == 0:
decoded = decoded + chr(int(cur_encoded_char, len(alphabet)))
cur_encoded_char = ''
return decoded
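# Round-trip sketch: each character costs `padding` zero-width code points,
# so 'hi' encodes to 14 invisible characters in MODE_FULL.
#
# hidden = encode('hi')
# assert len(hidden) == 14 and decode(hidden) == 'hi'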
|
1655960
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
with open("requirements.txt") as fh:
requirements = fh.readlines()
setuptools.setup(
name="tiara",
version="1.0.2",
description="A tool for classifying metagenomic data",
author="<NAME> and <NAME>",
author_email="<EMAIL>",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://ibe-uw.github.io/tiara/",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.7",
keywords="machine-learning computational-biology",
install_requires=requirements,
include_package_data=True,
entry_points={
"console_scripts": ["tiara=tiara.main:main", "tiara-test=tiara.test.test:test",]
},
)
|
1656013
|
from collections import OrderedDict
import logging
import os
import pandas as pd
import seaborn as sns
import torch
log = logging.getLogger('main')
class ResultPack(object):
    def __init__(self, exp_name, columns=None, metadata=None):
        self.exp_name = exp_name.split('/')[-1]  # get basename
        self.metadata = metadata if metadata is not None else {}  # avoid a shared mutable default
if columns is not None:
self.initialize(columns)
else:
self.columns = None
self.df = None
def __repr__(self):
return f"ResultPack: \n{self.df.__repr__()}"
def __getstate__(self):
return {
'exp_name': self.exp_name,
'columns': self.columns,
'df': self.df,
'metadata': self.metadata,
}
def __setstate__(self, state):
self.exp_name = state['exp_name']
self.columns = state['columns']
self.df = state['df']
self.metadata = state['metadata']
def initialize(self, columns):
self.columns = columns
# attach exp_name for the case where df is merged from different experiments
self.df = pd.DataFrame(columns=['exp_name'] + list(columns))
def new(self, exp_name=None):
return ResultPack(exp_name or self.exp_name, self.columns)
def append(self, epoch, **kwargs):
if self.columns is None:
assert self.df is None
self.initialize(tuple(kwargs.keys()))
else:
assert set(kwargs.keys()) == set(self.columns), f"expected keys: {self.columns}"
# convert a tensor to a float type if there is
args = [kwargs[key].item()
if isinstance(kwargs[key], torch.Tensor) else kwargs[key]
for key in self.columns]
self.df.loc[epoch] = [self.exp_name] + args
return self
def get_the_latest_row(self, **kwargs):
ret = self.df.tail(1).rename_axis('epochs').reset_index().to_dict()
ret.update(kwargs)
return ret
def tail(self, n):
pack_ret = self.new()
pack_ret.df = self.df.tail(n)
return pack_ret
def get_latest_by_key(self, key):
return self.df.tail(1).rename_axis('epochs').reset_index().to_dict()[key][0]
def get_latest_by_keys(self, keys):
dict_latest = self.df.tail(1).rename_axis(
'epochs').reset_index().to_dict()
dict_ret = OrderedDict()
try:
for key in keys:
dict_ret[key] = dict_latest[key][0]
except KeyError:
raise KeyError(
f"Can't find column '{key}'. Available columns: {self.columns}")
return dict_ret
def get_latest_columns(self):
return self.get_latest_by_keys(self.columns)
def save_as_csv(self, save_dir, name):
save_dir = os.path.join(save_dir, f"{name}.csv")
self.df.rename_axis('epochs').to_csv(save_dir)
log.info(f"Result pack has been saved as a CSV file to: {save_dir}")
@staticmethod
def concat(exp_name, result_packs):
assert isinstance(result_packs, (list, tuple))
assert isinstance(result_packs[0], ResultPack)
pack_ret = result_packs[0].new(exp_name=exp_name)
pack_ret.df = pd.concat([result_pack.df for result_pack in result_packs])
return pack_ret
def save_as_plot(self, save_dir, title=""):
sns.set_theme(style="darkgrid")
data = self.df.rename_axis('epochs')
for column in self.columns:
plot = sns.lineplot(data=data, x="epochs", y=column, hue='exp_name',
palette='pastel', legend="full")
save_dir_ = os.path.join(save_dir, f'{column}.png')
plot.get_figure().savefig(save_dir_)
plot.get_figure().clf()
log.info(f"Result pack has been saved as a figure to: {save_dir_}")
|
1656028
|
import typing
import asyncio
from datetime import datetime, timedelta
import discord
from discord.ext import commands
from nerdlandbot.translations.Translations import get_text as translate
from nerdlandbot.helpers.TranslationHelper import get_culture_from_context as culture
from nerdlandbot.helpers.channel import get_channel
from nerdlandbot.commands.GuildData import get_all_guilds_data, get_guild_data, GuildData
class Kerk(commands.Cog, name="Church"):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name="16uchurch", hidden=True)
async def cmd_kerk16(self, ctx: commands.Context, mention:typing.Optional[str] = None, *, message:typing.Optional[str] = None):
guild_data = await get_guild_data(ctx.message.guild.id)
lang = await culture(ctx)
temp_date = datetime.now()
# If it's past 16:00 it will be scheduled for the next day
if temp_date.hour >= 16:
church_day = (temp_date + timedelta(days=1)).day
else:
church_day = temp_date.day
# If the user doesn't mention someone tell him to do so
if mention is None:
msg = translate("church_no_mention", lang)
return await ctx.send(msg)
# Adding a church_event to the guild data.
await guild_data.set_church_event(ctx.author.id, mention, church_day, lang, message)
msg = translate("church_event_success", lang)
return await ctx.send(msg)
@commands.command(name="church_channel", hidden=True)
async def cmd_set_kerk(self, ctx: commands.Context, *, channel_id:typing.Optional[str] = None):
guild_data = await get_guild_data(ctx.message.guild.id)
lang = await culture(ctx)
# Error if not admin
if not guild_data.user_is_admin(ctx.author):
gif = translate("not_admin_gif", lang)
return await ctx.send(gif)
# Give error if the channel is a voice channel
channel = get_channel(ctx,channel_id)
if isinstance(channel, discord.VoiceChannel):
return await ctx.send(translate("channel_is_voice", lang))
if not channel:
return await ctx.send(translate("channel_nonexistant", lang))
        if await guild_data.update_church_channel(channel_id):
msg = translate("church_channel_success", lang).format(channel)
else:
msg = translate("church_channel_error", lang)
return await ctx.send(msg)
def setup(bot: commands.Bot):
bot.add_cog(Kerk(bot))
|
1656034
|
def get_variables(m, n):
candidates = list()
for a in range(m // 2 + 1):
b = m - a
if a ^ b == n:
candidates.append((a, b))
return candidates
# Tests
assert get_variables(100, 4) == [(48, 52)]
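# Why a solution can fail to exist: a + b = (a ^ b) + 2 * (a & b), so m - n
# must be non-negative and even. E.g. m=5, n=2 gives m - n = 3:
assert get_variables(5, 2) == []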
|
1656079
|
from QuickPotato.harness.results import BoundariesTestEvidence
from functools import wraps
from datetime import datetime
def save_boundary_evidence(fnc):
"""
Parameters
----------
fnc
Returns
-------
"""
@wraps(fnc)
def encapsulated_function(*args, **kwargs):
"""
Parameters
----------
args
kwargs
Returns
-------
"""
evidence = BoundariesTestEvidence()
evidence.test_id = kwargs["test_id"]
evidence.database_name = kwargs["database_name"]
evidence.test_case_name = kwargs["test_case_name"]
evidence.epoch_timestamp = datetime.now().timestamp()
evidence.human_timestamp = datetime.now()
evidence.verification_name = kwargs["validation_name"]
evidence.value = float(kwargs["value"])
evidence.boundary = float(kwargs["boundary"])
# Scrub unused meta data
del kwargs["test_id"]
del kwargs["test_case_name"]
del kwargs["validation_name"]
del kwargs["database_name"]
evidence.status = fnc(*args, **kwargs)
evidence.save()
return evidence.status
return encapsulated_function
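# Illustrative use (hypothetical check function; the keyword arguments carry
# the metadata the wrapper consumes before delegating the actual comparison):
#
# @save_boundary_evidence
# def check_max_response_time(value, boundary):
#     return value <= boundary
#
# passed = check_max_response_time(
#     test_id="abc123", database_name="demo", test_case_name="login",
#     validation_name="max_response_time", value=0.42, boundary=1.0,
# )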
|
1656131
|
import numpy as np
import h5py
import sys
import logging
sys.path.append('..')
# Neural network stuff
from fielutil import load_verbatimnet
from demo_pipeline.featextractor import extract_imfeats
# Logging
# logging.getLogger('featextractor').setLevel(logging.DEBUG)
shingle_dims=(120,120)
# ### Parameters
# Do you want to load the features in? Or save them to a file?
load_features = False
# All the images that you require extraction should be in this HDF5 file
# hdf5images='icdar13data/benchmarking-processed/icdar_be.hdf5'
# hdf5images = 'icdar13data/experimental-processed/icdar13_ex.hdf5'
# hdf5images='nmecdata/nmec_scaled_flat.hdf5'
hdf5images='/fileserver/nmec-handwriting/flat_nmec_cropped_bin_uint8.hdf5'
# This is the file that you will load the features from or save the features to
# featurefile = 'icdar13data/benchmarking-processed/icdar13be_fiel657.npy'
# featurefile = 'icdar13data/experimental-processed/icdar13ex_fiel657.npy'
# featurefile = '/fileserver/nmec-handwriting/nmec_bw.deNN_fiel657.step5_noE.npy'
# featurefile = '/fileserver/nmec-handwriting/nmec_bw_crop.deNN_fiel657.step5_250.npy'
featurefile = 'nmec_bw_crop.fiel657_120.step20_250.npy'
# This is the neural networks and parameters you are deciding to use
# paramsfile = '/fileserver/iam/iam-processed/models/fiel_657.hdf5'
paramsfile = 'fielnet120-nmec.hdf5'
# ### Full image HDF5 file
#
# Each entry in the HDF5 file is a full image/form/document
labels = h5py.File(hdf5images).keys()
# ### Load feature extractor neural network
vnet = load_verbatimnet( 'fc7', input_shape=(1,)+shingle_dims, paramsfile=paramsfile )
vnet.compile(loss='mse', optimizer='sgd')
print "Finished loading neural network in and compilation"
# ### Image features
#
# Currently taken as averages of all shard features in the image. You can either load them or extract everything manually, depending on if you have the .npy array.
if load_features:
print "Loading features in from "+featurefile
imfeats = np.load(featurefile)
print "Loaded features"
else:
print "Begin extracting features from "+hdf5images
imfeats = extract_imfeats( hdf5images, vnet, shingle_dims=shingle_dims, steps=(20,20), compthresh=250 )
print h5py.File(hdf5images).keys()
np.save( featurefile, imfeats )
# ### Build classifier
imfeats = ( imfeats.T / np.linalg.norm( imfeats, axis=1 ) ).T
F = imfeats.dot(imfeats.T)
np.fill_diagonal( F , -1 )
# ### Evaluate classifier on HDF5 file (ICDAR 2013)
# Top k (soft criteria)
k = 10
# Max top (hard criteria)
maxtop = 3
# Number of examples per image
g = 8
# Run through the adjacency matrix
softcorrect = 0
hardcorrect = 0
totalnum = 0
for j, i in enumerate(F):
    # NOTE: the original condition was (g+1)%8 == 0, which never fires since
    # g == 8; (j+1)%8 == 0 (skip every 8th row) is the presumed intent.
    if (j + 1) % 8 == 0:
        continue
topk = i.argsort()[-k:]
# Soft criteria
if j/g in topk/g:
softcorrect += 1
totalnum +=1
# Hard criteria
hardindivid = sum([1 for jj in (j/g == topk[-maxtop:]/g) if jj])
if hardindivid == maxtop:
hardcorrect += 1
# Print out results
print "Top %d (soft criteria) = %f" %( k, (softcorrect+0.0) / totalnum )
print "Top %d (hard criteria) = %f" %( k, (hardcorrect+0.0) / totalnum / maxtop )
|
1656165
|
import numpy as np
import pandas as pd
import anndata as ad
import squidpy as sq
import eggplant as eg
from scipy.spatial.distance import cdist
import torch as t
import unittest
import gpytorch as gp
from . import utils as ut
class GetLandmarkDistance(unittest.TestCase):
def test_default_wo_ref(
self,
):
adata = ut.create_adata()
eg.pp.get_landmark_distance(adata)
def test_standard_ref(
self,
):
adata = ut.create_adata()
reference_input = ut.create_model_input()
ref = eg.m.Reference(
domain=reference_input["domain"],
landmarks=pd.DataFrame(reference_input["landmarks"]),
meta=reference_input["meta"],
)
eg.pp.get_landmark_distance(
adata,
reference=ref,
)
def test_np_ref(
self,
):
adata = ut.create_adata()
reference_input = ut.create_model_input()
eg.pp.get_landmark_distance(
adata,
reference=reference_input["landmarks"].numpy(),
)
class ReferenceToGrid(unittest.TestCase):
def test_default_bw_image(
self,
):
side_size = 500
ref_img, counts = ut.create_image(
color=False, side_size=side_size, return_counts=True
)
ref_crd, mta = eg.pp.reference_to_grid(
ref_img,
n_approx_points=int(side_size**2),
n_regions=1,
background_color="black",
)
def test_default_color_image(
self,
):
side_size = 32
ref_img, counts = ut.create_image(
color=True,
side_size=side_size,
return_counts=True,
)
ref_crd, mta = eg.pp.reference_to_grid(
ref_img,
n_approx_points=int(side_size**2),
n_regions=3,
background_color="black",
)
_, mta_counts = np.unique(mta, return_counts=True)
obs_prop = np.sort(mta_counts / sum(mta_counts))
true_prop = np.sort(counts / sum(counts))
for ii in range(3):
self.assertAlmostEqual(
obs_prop[ii],
true_prop[ii],
places=0,
)
class MatchScales(unittest.TestCase):
def test_default(
self,
):
adata = ut.create_adata()
reference_input = ut.create_model_input()
ref = eg.m.Reference(
domain=reference_input["domain"],
landmarks=reference_input["landmarks"],
meta=reference_input["meta"],
)
eg.pp.match_scales(adata, ref)
del adata.uns["spatial"]
eg.pp.match_scales(adata, ref)
def test_pd_lmk_obs(
self,
):
adata = ut.create_adata()
adata.uns["curated_landmarks"] = pd.DataFrame(adata.uns["curated_landmarks"])
reference_input = ut.create_model_input()
ref = eg.m.Reference(
domain=reference_input["domain"],
landmarks=pd.DataFrame(reference_input["landmarks"]),
meta=reference_input["meta"],
)
eg.pp.match_scales(adata, ref)
def test_not_implemented_lmk_obs(
self,
):
adata = ut.create_adata()
adata.uns["curated_landmarks"] = 0
reference_input = ut.create_model_input()
ref = eg.m.Reference(
domain=reference_input["domain"],
landmarks=pd.DataFrame(reference_input["landmarks"]),
meta=reference_input["meta"],
)
self.assertRaises(
NotImplementedError,
eg.pp.match_scales,
adata,
ref,
)
def test_no_landmarks(
self,
):
adata = ut.create_adata()
del adata.uns["curated_landmarks"]
reference_input = ut.create_model_input()
ref = eg.m.Reference(
domain=reference_input["domain"],
landmarks=pd.DataFrame(reference_input["landmarks"]),
meta=reference_input["meta"],
)
self.assertRaises(
Exception,
eg.pp.match_scales,
adata,
ref,
)
def test_ref_pd(
self,
):
adata = ut.create_adata()
reference_input = ut.create_model_input()
eg.pp.match_scales(adata, pd.DataFrame(reference_input["landmarks"].numpy()))
def test_ref_np(
self,
):
adata = ut.create_adata()
reference_input = ut.create_model_input()
eg.pp.match_scales(adata, reference_input["landmarks"].numpy())
def test_ref_not_implemented(
self,
):
adata = ut.create_adata()
self.assertRaises(
NotImplementedError,
eg.pp.match_scales,
adata,
4,
)
class Normalization(unittest.TestCase):
def test_default(
self,
):
adata = ut.create_adata()
eg.pp.default_normalization(adata)
def test_custom(
self,
):
adata = ut.create_adata()
eg.pp.default_normalization(
adata,
min_cells=0.1,
total_counts=1e3,
exclude_highly_expressed=True,
)
class JoinAdatas(unittest.TestCase):
def test_default(
self,
):
adata_1 = ut.create_adata(n_features=3, n_obs=4)[0:4, :]
adata_2 = ut.create_adata(n_features=2, n_obs=4)[0:3, :]
adata_1.obs.index = ["A1", "A2", "A3", "A4"]
adata_2.obs.index = ["B1", "B2", "B3"]
adata_1.var.index = ["fA1", "fA2", "fC1"]
adata_2.var.index = ["fB1", "fC1"]
new_adata = eg.pp.join_adatas((adata_1, adata_2))
n_nas = np.isnan(new_adata.X).sum()
self.assertEqual(n_nas, 0)
new_var_index_true = pd.Index(["fA1", "fA2", "fC1", "fB1", "fB1"])
new_obs_index_true = ["A1", "A2", "A3", "A4"] + ["B1", "B2", "B3"]
self.assertTrue(all([x in new_var_index_true for x in new_adata.var.index]))
self.assertTrue(all([x in new_obs_index_true for x in new_adata.obs.index]))
class SpatialSmoothing(unittest.TestCase):
def test_default(
self,
):
adata = ut.create_adata()
eg.pp.spatial_smoothing(adata)
def test_custom_structured(
self,
):
adata = ut.create_adata()
adata.obsm["test"] = adata.obsm["spatial"].copy()
del adata.obsm["spatial"]
eg.pp.spatial_smoothing(
adata,
spatial_key="test",
coord_type="generic",
n_neigh=6,
sigma=20,
)
def test_custom_random(
self,
):
adata = ut.create_adata()
adata.obsm["spatial"] = np.random.uniform(0, 1, size=(adata.shape[0], 2))
eg.pp.spatial_smoothing(
adata,
spatial_key="spatial",
coord_type="generic",
)
if __name__ == "__main__":
unittest.main()
|
1656174
|
import pandas as pd
from marcottievents.models.common.enums import (ConfederationType, ActionType, ModifierType,
ModifierCategoryType, NameOrderType, PositionType,
GroupRoundType, KnockoutRoundType, SurfaceType)
from marcottievents.models.common.suppliers import (MatchEventMap, MatchMap, CompetitionMap,
VenueMap, PositionMap, PlayerMap, ManagerMap,
RefereeMap)
from marcottievents.models.common.overview import Countries, Timezones, Competitions, Seasons, Venues, Surfaces
from marcottievents.models.common.personnel import Players, Managers, Referees
from marcottievents.models.club import Clubs, ClubLeagueMatches, ClubMap
from .workflows import WorkflowBase
class MarcottiTransform(WorkflowBase):
"""
Transform and validate extracted data.
"""
@staticmethod
def suppliers(data_frame):
return data_frame
@staticmethod
def years(data_frame):
return data_frame
@staticmethod
def seasons(data_frame):
return data_frame
def competitions(self, data_frame):
if 'country' in data_frame.columns:
transformed_field = 'country'
lambdafunc = lambda x: pd.Series(self.get_id(Countries, name=x[transformed_field]))
id_frame = data_frame.apply(lambdafunc, axis=1)
id_frame.columns = ['country_id']
elif 'confed' in data_frame.columns:
transformed_field = 'confed'
lambdafunc = lambda x: pd.Series(ConfederationType.from_string(x[transformed_field]))
id_frame = data_frame.apply(lambdafunc, axis=1)
id_frame.columns = ['confederation']
else:
raise KeyError("Cannot insert Competition record: No Country or Confederation data present")
return data_frame.join(id_frame).drop(transformed_field, axis=1)
def countries(self, data_frame):
lambdafunc = lambda x: pd.Series(ConfederationType.from_string(x['confed']))
id_frame = data_frame.apply(lambdafunc, axis=1)
id_frame.columns = ['confederation']
joined_frame = data_frame.join(id_frame).drop('confed', axis=1)
return joined_frame
def clubs(self, data_frame):
if 'country' in data_frame.columns:
lambdafunc = lambda x: pd.Series(self.get_id(Countries, name=x['country']))
id_frame = data_frame.apply(lambdafunc, axis=1)
id_frame.columns = ['country_id']
else:
raise KeyError("Cannot insert Club record: No Country data present")
return data_frame.join(id_frame)
def venues(self, data_frame):
lambdafunc = lambda x: pd.Series([
self.get_id(Countries, name=x['country']),
self.get_id(Timezones, name=x['timezone']),
self.get_id(Surfaces, description=x['surface']),
self.make_date_object(x['config_date'])
])
ids_frame = data_frame.apply(lambdafunc, axis=1)
ids_frame.columns = ['country_id', 'timezone_id', 'surface_id', 'eff_date']
joined_frame = data_frame.join(ids_frame).drop(['country', 'timezone', 'surface', 'config_date'], axis=1)
new_frame = joined_frame.where((pd.notnull(joined_frame)), None)
return new_frame
def timezones(self, data_frame):
lambdafunc = lambda x: pd.Series(ConfederationType.from_string(x['confed']))
id_frame = data_frame.apply(lambdafunc, axis=1)
id_frame.columns = ['confederation']
joined_frame = data_frame.join(id_frame).drop('confed', axis=1)
return joined_frame
def positions(self, data_frame):
lambdafunc = lambda x: pd.Series(PositionType.from_string(x['position_type']))
id_frame = data_frame.apply(lambdafunc, axis=1)
id_frame.columns = ['type']
joined_frame = data_frame.join(id_frame).drop('position_type', axis=1)
return joined_frame
def surfaces(self, data_frame):
lambdafunc = lambda x: pd.Series(SurfaceType.from_string(x['surface_type']))
id_frame = data_frame.apply(lambdafunc, axis=1)
id_frame.columns = ['type']
joined_frame = data_frame.join(id_frame).drop('surface_type', axis=1)
return joined_frame
def players(self, data_frame):
lambdafunc = lambda x: pd.Series([
self.make_date_object(x['dob']),
NameOrderType.from_string(x['name_order'] or 'Western'),
self.get_id(Countries, name=x['country']),
self.get_id(PositionMap, remote_id=x['remote_position_id'], supplier_id=self.supplier_id)
])
ids_frame = data_frame.apply(lambdafunc, axis=1)
ids_frame.columns = ['birth_date', 'order', 'country_id', 'position_id']
joined_frame = data_frame.join(ids_frame).drop(
['dob', 'name_order', 'country', 'remote_position_id'], axis=1)
return joined_frame
def managers(self, data_frame):
lambdafunc = lambda x: pd.Series([
self.make_date_object(x['dob']),
NameOrderType.from_string(x['name_order'] or 'Western'),
self.get_id(Countries, name=x['country'])
])
ids_frame = data_frame.apply(lambdafunc, axis=1)
ids_frame.columns = ['birth_date', 'order', 'country_id']
joined_frame = data_frame.join(ids_frame).drop(['dob', 'name_order', 'country'], axis=1)
return joined_frame
def referees(self, data_frame):
lambdafunc = lambda x: pd.Series([
self.make_date_object(x['dob']),
NameOrderType.from_string(x['name_order'] or 'Western'),
self.get_id(Countries, name=x['country'])
])
ids_frame = data_frame.apply(lambdafunc, axis=1)
ids_frame.columns = ['birth_date', 'order', 'country_id']
joined_frame = data_frame.join(ids_frame).drop(['dob', 'name_order', 'country'], axis=1)
return joined_frame
def league_matches(self, data_frame):
lambdafunc = lambda x: pd.Series([
self.get_id(Competitions, name=x['competition']),
self.get_id(Seasons, name=x['season']),
self.get_id(Venues, name=x['venue']),
self.get_id(Clubs, name=x['home_team']),
self.get_id(Clubs, name=x['away_team']),
self.get_id(Managers, full_name=x['home_manager']),
self.get_id(Managers, full_name=x['away_manager']),
self.get_id(Referees, full_name=x['referee'])
])
ids_frame = data_frame.apply(lambdafunc, axis=1)
ids_frame.columns = ['competition_id', 'season_id', 'venue_id', 'home_team_id', 'away_team_id',
'home_manager_id', 'away_manager_id', 'referee_id']
return data_frame.join(ids_frame)
def match_lineups(self, data_frame):
lambdafunc = lambda x: pd.Series([
self.get_id(ClubLeagueMatches,
competition_id=self.get_id(Competitions, name=x['competition']),
season_id=self.get_id(Seasons, name=x['season']),
matchday=x['matchday'],
home_team_id=self.get_id(Clubs, name=x['home_team']),
away_team_id=self.get_id(Clubs, name=x['away_team'])),
self.get_id(Clubs, name=x['player_team']),
self.get_id(Players, full_name=x['player_name'])
])
ids_frame = data_frame.apply(lambdafunc, axis=1)
ids_frame.columns = ['match_id', 'team_id', 'player_id']
return data_frame.join(ids_frame)
def modifiers(self, data_frame):
lambdafunc = lambda x: pd.Series([
ModifierType.from_string(x['modifier']),
ModifierCategoryType.from_string(x['modifier_category'])
])
id_frame = data_frame.apply(lambdafunc, axis=1)
id_frame.columns = ['type', 'category']
joined_frame = data_frame.join(id_frame).drop(["modifier", "modifier_category"], axis=1)
return joined_frame
class MarcottiEventTransform(MarcottiTransform):
def league_matches(self, data_frame):
lambdafunc = lambda x: pd.Series([
self.get_id(CompetitionMap, remote_id=x['remote_competition_id'], supplier_id=self.supplier_id),
self.get_id(Seasons, name=x['season_name']),
self.get_id(VenueMap, remote_id=x['remote_venue_id'], supplier_id=self.supplier_id),
self.get_id(ClubMap, remote_id=x['remote_home_team_id'], supplier_id=self.supplier_id),
self.get_id(ClubMap, remote_id=x['remote_away_team_id'], supplier_id=self.supplier_id),
self.get_id(ManagerMap, remote_id=x['remote_home_manager_id'], supplier_id=self.supplier_id),
self.get_id(ManagerMap, remote_id=x['remote_away_manager_id'], supplier_id=self.supplier_id),
self.get_id(RefereeMap, remote_id=x['remote_referee_id'], supplier_id=self.supplier_id),
self.make_date_object(x['date'])
])
ids_frame = data_frame.apply(lambdafunc, axis=1)
ids_frame.columns = ['competition_id', 'season_id', 'venue_id', 'home_team_id', 'away_team_id',
'home_manager_id', 'away_manager_id', 'referee_id', 'match_date']
joined_frame = data_frame.join(ids_frame).drop(['season_name', 'date'], axis=1)
return joined_frame
def knockout_matches(self, data_frame):
lambdafunc = lambda x: pd.Series([
self.get_id(CompetitionMap, remote_id=x['remote_competition_id'], supplier_id=self.supplier_id),
self.get_id(Seasons, name=x['season_name']),
self.get_id(VenueMap, remote_id=x['remote_venue_id'], supplier_id=self.supplier_id),
self.get_id(ClubMap, remote_id=x['remote_home_team_id'], supplier_id=self.supplier_id),
self.get_id(ClubMap, remote_id=x['remote_away_team_id'], supplier_id=self.supplier_id),
self.get_id(ManagerMap, remote_id=x['remote_home_manager_id'], supplier_id=self.supplier_id),
self.get_id(ManagerMap, remote_id=x['remote_away_manager_id'], supplier_id=self.supplier_id),
self.get_id(RefereeMap, remote_id=x['remote_referee_id'], supplier_id=self.supplier_id),
KnockoutRoundType.from_string(x['round']),
self.make_date_object(x['date'])
])
ids_frame = data_frame.apply(lambdafunc, axis=1)
ids_frame.columns = ['competition_id', 'season_id', 'venue_id', 'home_team_id', 'away_team_id',
'home_manager_id', 'away_manager_id', 'referee_id', 'ko_round', 'match_date']
joined_frame = data_frame.join(ids_frame).drop(['season_name', 'date', 'round'], axis=1)
return joined_frame
def group_matches(self, data_frame):
lambdafunc = lambda x: pd.Series([
self.get_id(CompetitionMap, remote_id=x['remote_competition_id'], supplier_id=self.supplier_id),
self.get_id(Seasons, name=x['season_name']),
self.get_id(VenueMap, remote_id=x['remote_venue_id'], supplier_id=self.supplier_id),
self.get_id(ClubMap, remote_id=x['remote_home_team_id'], supplier_id=self.supplier_id),
self.get_id(ClubMap, remote_id=x['remote_away_team_id'], supplier_id=self.supplier_id),
self.get_id(ManagerMap, remote_id=x['remote_home_manager_id'], supplier_id=self.supplier_id),
self.get_id(ManagerMap, remote_id=x['remote_away_manager_id'], supplier_id=self.supplier_id),
self.get_id(RefereeMap, remote_id=x['remote_referee_id'], supplier_id=self.supplier_id),
GroupRoundType.from_string(x['round']),
self.make_date_object(x['date'])
])
ids_frame = data_frame.apply(lambdafunc, axis=1)
ids_frame.columns = ['competition_id', 'season_id', 'venue_id', 'home_team_id', 'away_team_id',
'home_manager_id', 'away_manager_id', 'referee_id', 'group_round', 'match_date']
joined_frame = data_frame.join(ids_frame).drop(['season_name', 'date', 'round'], axis=1)
return joined_frame
def match_lineups(self, data_frame):
lambdafunc = lambda x: pd.Series([
self.get_id(MatchMap, remote_id=x['remote_match_id'], supplier_id=self.supplier_id),
self.get_id(PlayerMap, remote_id=x['remote_player_id'], supplier_id=self.supplier_id),
self.get_id(ClubMap, remote_id=x['remote_team_id'], supplier_id=self.supplier_id),
self.get_id(PositionMap, remote_id=x['remote_position_id'], supplier_id=self.supplier_id)
])
ids_frame = data_frame.apply(lambdafunc, axis=1)
ids_frame.columns = ['match_id', 'player_id', 'team_id', 'position_id']
return data_frame.join(ids_frame).drop(['remote_match_id', 'remote_player_id',
'remote_team_id', 'remote_position_id'], axis=1)
def events(self, data_frame):
lambdafunc = lambda x: pd.Series([
self.get_id(MatchMap, remote_id=x['remote_match_id'], supplier_id=self.supplier_id),
self.get_id(ClubMap, remote_id=x['remote_team_id'], supplier_id=self.supplier_id)
])
ids_frame = data_frame.apply(lambdafunc, axis=1)
ids_frame.columns = ['match_id', 'team_id']
joined_frame = data_frame.join(ids_frame).drop(['remote_match_id', 'remote_team_id'], axis=1)
new_frame = joined_frame.where((pd.notnull(joined_frame)), None)
return new_frame
def actions(self, data_frame):
match_event_dict = {rec.remote_id: rec.id for rec in
self.session.query(MatchEventMap).filter_by(supplier_id=self.supplier_id)}
match_map_dict = {rec.remote_id: rec.id for rec in
self.session.query(MatchMap).filter_by(supplier_id=self.supplier_id)}
player_map_dict = {rec.remote_id: rec.id for rec in
self.session.query(PlayerMap).filter_by(supplier_id=self.supplier_id)}
lambdafunc = lambda x: pd.Series([
match_event_dict.get(x['remote_event_id'], None),
match_map_dict.get(x['remote_match_id'], None),
player_map_dict.get(x['remote_player_id'], None),
ActionType.from_string(x['action_type']),
])
ids_frame = data_frame.apply(lambdafunc, axis=1)
ids_frame.columns = ['event_id', 'match_id', 'player_id', 'type']
joined_frame = data_frame.join(ids_frame).drop(['remote_event_id', 'remote_match_id',
'remote_player_id', 'action_type'], axis=1)
new_frame = joined_frame.where((pd.notnull(joined_frame)), None)
return new_frame
|
1656181
|
import re
import glob
from json import dumps
from os.path import curdir, abspath, join, splitext, isfile
from os import walk
rfc_2119_keywords_regexes = [
r"MUST",
r"REQUIRED",
r"SHALL",
r"MUST NOT",
r"SHALL NOT",
r"SHOULD",
r"RECOMMENDED",
r"SHOULD NOT",
r"NOT RECOMMENDED",
r"MAY",
r"OPTIONAL",
]
def get_ignored_path_globs(root):
fileName = join(root, ".specignore")
if not isfile(fileName):
return []
with open(fileName, 'r') as f:
# trim whitespace
globs = [line.strip() for line in f.readlines()]
# remove empty lines
globs = [g for g in globs if g]
# remove comments
globs = [g for g in globs if not g.startswith('#')]
return globs
def get_ignored_paths(root):
globs = get_ignored_path_globs(root)
globbed_paths = set()
ignored_files = set()
for g in globs:
globbed_paths.update(glob.glob(g, recursive=True))
for p in globbed_paths:
if isfile(p):
ignored_files.add(join(root, p))
else:
ignored_files.update(glob.glob(join(root, p, "**/*.md"), recursive=True))
return ignored_files
def find_markdown_file_paths(root):
'Finds the .md files in the root provided.'
markdown_file_paths = []
ignored_paths = get_ignored_paths(root)
    for root_path, _, file_paths in walk(root):
for file_path in file_paths:
absolute_file_path = join(root_path, file_path)
if absolute_file_path in ignored_paths:
continue
_, file_extension = splitext(absolute_file_path)
if file_extension == ".md":
markdown_file_paths.append(absolute_file_path)
return markdown_file_paths
def clean_content(content):
'Transmutes markdown content to plain text'
lines = content.splitlines()
content = '\n'.join([x for x in lines if x.strip() != '' and x.strip().startswith('>')])
for rfc_2119_keyword_regex in rfc_2119_keywords_regexes:
content = re.sub(
f"\\*\\*{rfc_2119_keyword_regex}\\*\\*",
rfc_2119_keyword_regex,
content
)
return re.sub(r"\n?>\s*", " ", content.strip()).strip()
def find_rfc_2119_keyword(content):
'Returns the RFC2119 keyword, if present'
for rfc_2119_keyword_regex in rfc_2119_keywords_regexes:
if re.search(
f"\\*\\*{rfc_2119_keyword_regex}\\*\\*", content
) is not None:
return rfc_2119_keyword_regex
def parsed_content_to_hierarchy(parsed_content):
'Turns a bunch of headline & content pairings into a tree of requirements'
content_tree = []
headline_stack = []
node = lambda l,h,c: {'level': l, 'headline': h, 'content': c, 'children': []}
for level, headline, content in parsed_content:
try:
if len(headline_stack) == 0: # top-most node
cur = node(level, headline, content)
content_tree.append(cur)
headline_stack.insert(0, [level, headline, cur])
            elif len(headline_stack[0][0]) >= len(level):  # sibling or parent node
                if len(headline_stack[0][0]) > len(level):  # new heading is shallower, so close the deeper node too
                    headline_stack.pop(0)
headline_stack.pop(0)
if len(headline_stack) == 0:
parent = content_tree
else:
parent = headline_stack[0][2]['children']
cur = node(level, headline, content)
parent.append(cur)
headline_stack.insert(0, [level, headline, cur])
elif len(level) > len(headline_stack[0][0]): # child node
# TODO: emit warning if headlines are too deep
cur = node(level, headline, content)
parent = headline_stack[0][2]
parent['children'].append(cur)
headline_stack.insert(0, [level, headline, cur])
else:
headline_stack.pop(0)
except Exception as k:
            print(k)
# Specify a root so we know that everything is a node all the way down.
root = node(0, '', '')
root['children'] = content_tree
return content_tree_to_spec(root)
def gen_node(ct):
'given a content node, turn it into a requirements node'
headline = ct['headline']
content = ct['content']
keyword = find_rfc_2119_keyword(content)
req_group = re.search(r'(?P<req>(requirement|condition)[^\n]+)', headline, re.IGNORECASE)
if req_group is None:
return None
_id = req_group.groups()[0]
return {
'id': _id,
'clean id': re.sub(r"[^\w]", "_", _id.lower()),
'content': clean_content(content),
'RFC 2119 keyword': keyword,
'children': [],
}
def content_tree_to_spec(ct):
current = gen_node(ct)
children_grouped = [content_tree_to_spec(x) for x in ct['children']]
# Filter out potential None entries.
children = []
for _iter in children_grouped:
        # Each entry may be None (skip it), a dict (append it), or a list
        # (merge it into children).
if _iter is None:
continue
        if isinstance(_iter, list):
children.extend(_iter)
else:
children.append(_iter)
if current is None:
if len(children) > 0:
return children
return
else:
current['children'] = children
return current
def parse(markdown_file_path):
with open(markdown_file_path, "r") as markdown_file:
content_finder = re.compile(r'^(?P<level>#+)(?P<headline>[^\n]+)(?P<rest>[^#]*)', re.MULTILINE)
parsed = content_finder.findall(markdown_file.read())
        return parsed_content_to_hierarchy(parsed)
def write_json_specifications(requirements):
for md_absolute_file_path, requirement_sections in requirements.items():
with open(
"".join([splitext(md_absolute_file_path)[0], ".json"]), "w"
) as json_file:
json_file.write(dumps(requirement_sections, indent=4))
if __name__ == "__main__":
    requirements = {}
    for markdown_file_path in find_markdown_file_paths(abspath(curdir)):
        result = parse(markdown_file_path)
        if result:
            requirements[markdown_file_path] = result
    write_json_specifications(requirements)
|
1656218
|
testinfra_hosts = ['instance-3', 'instance-4']
def test_node_is_worker(docker_info):
assert "Is Manager: false" in docker_info
|
1656266
|
from rlbench.backend.task import Task
from typing import List
from pyrep.objects.shape import Shape
from pyrep.objects.proximity_sensor import ProximitySensor
from rlbench.backend.conditions import GraspedCondition, DetectedCondition
class ScoopWithSpatula(Task):
def init_task(self) -> None:
spatula = Shape('scoop_with_spatula_spatula')
self.register_graspable_objects([spatula])
self.register_success_conditions([
DetectedCondition(Shape('Cuboid'), ProximitySensor('success')),
GraspedCondition(self.robot.gripper, spatula)
])
def init_episode(self, index: int) -> List[str]:
return ['scoop up the cube and lift it with the spatula',
'scoop up the block and lift it with the spatula',
'use the spatula to scoop the cube and lift it',
'use the spatula to scoop the block and lift it',
'pick up the cube using the spatula',
'pick up the block using the spatula']
def variation_count(self) -> int:
return 1
|
1656282
|
from abc import ABC, abstractmethod
import numpy as np
import logging
from ..ring_buffer import RingBuffer
pmm_logger = None
class BaseTrailingIndicator(ABC):
@classmethod
def logger(cls):
global pmm_logger
if pmm_logger is None:
pmm_logger = logging.getLogger(__name__)
return pmm_logger
def __init__(self, sampling_length: int = 30, processing_length: int = 15):
self._sampling_length = sampling_length
self._sampling_buffer = RingBuffer(sampling_length)
self._processing_length = processing_length
self._processing_buffer = RingBuffer(processing_length)
def add_sample(self, value: float):
self._sampling_buffer.add_value(value)
indicator_value = self._indicator_calculation()
self._processing_buffer.add_value(indicator_value)
@abstractmethod
def _indicator_calculation(self) -> float:
raise NotImplementedError
def _processing_calculation(self) -> float:
"""
Processing of the processing buffer to return final value.
Default behavior is buffer average
"""
return np.mean(self._processing_buffer.get_as_numpy_array())
@property
def current_value(self) -> float:
return self._processing_calculation()
@property
def is_sampling_buffer_full(self) -> bool:
return self._sampling_buffer.is_full
@property
def is_processing_buffer_full(self) -> bool:
return self._processing_buffer.is_full
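# A minimal sketch (not part of the original module) of a concrete indicator:
# `_indicator_calculation` reduces the sampling buffer, and `current_value`
# then averages the processing buffer. The class name is hypothetical.
class MovingAverageIndicator(BaseTrailingIndicator):
    def _indicator_calculation(self) -> float:
        # Average of the raw samples collected so far.
        return np.mean(self._sampling_buffer.get_as_numpy_array())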
|
1656299
|
import pika
import requests
class ExchangeReceiver(object):
def __init__(self, username, password, host, port, exchange, exchange_type, service, service_name, logger):
self.service_worker = service
self.service_name = service_name
self.exchange = exchange
self.logger = logger
credentials = pika.PlainCredentials(username, password)
connection = pika.BlockingConnection(pika.ConnectionParameters(host=host,
port=port,
credentials=credentials))
channel = connection.channel()
channel.exchange_declare(exchange=self.exchange, exchange_type=exchange_type)
result = channel.queue_declare(queue='', exclusive=True)
queue_name = result.method.queue
channel.queue_bind(exchange=self.exchange, queue=queue_name)
channel.basic_consume(queue=queue_name, on_message_callback=self.on_request, auto_ack=True)
print("Awaiting requests from [x] " + self.exchange + " [x]")
channel.start_consuming()
def on_request(self, ch, method, props, body):
service_instance = self.service_worker()
if self.logger is not None:
params = {"correlation_id": '-',
"queue_name": self.exchange,
"service_name": self.service_name,
"task_type": 'start'
}
try:
requests.post(self.logger, json=params)
            except requests.exceptions.RequestException as e:
                print('Logger service is not available:', e)
response, task_type = service_instance.call(body)
if self.logger is not None:
params = {"correlation_id": '-',
"queue_name": self.exchange,
"service_name": self.service_name,
"task_type": 'end'
}
try:
requests.post(self.logger, json=params)
            except requests.exceptions.RequestException as e:
                print('Logger service is not available:', e)
print('Processed request:', task_type)
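# Hedged usage sketch (all names below are hypothetical, not from this
# module): the constructor blocks on start_consuming(), so it is typically
# the last call in a worker process. `service` must be a class whose
# instances expose call(body) -> (response, task_type).
#
# class EchoService:
#     def call(self, body):
#         return body, 'echo'
#
# ExchangeReceiver(username='guest', password='guest', host='localhost',
#                  port=5672, exchange='tasks', exchange_type='fanout',
#                  service=EchoService, service_name='echo-service',
#                  logger=None)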
|
1656346
|
import requests
r = requests.post('INVOKE_URL/STAGE/RESOURCE_NAME',
headers={'x-api-key': 'API KEY'},
json={'test':'test'})
print(r.text)
|
1656419
|
import os
import logging
logger = logging.getLogger(__name__)
import numpy as np
import astropy.io.fits as fits
import matplotlib.pyplot as plt
from ...echelle.imageproc import combine_images
from ...echelle.trace import find_apertures, load_aperture_set
from .common import (get_bias, get_mask, correct_overscan,
fix_badpixels,
TraceFigure, BackgroundFigure,
)
def reduce_feros(config, logtable):
"""Reduce the single fiber data of FEROS.
Args:
config (:class:`configparser.ConfigParser`): Config object.
logtable (:class:`astropy.table.Table`): Table of observing log.
"""
# extract keywords from config file
section = config['data']
rawpath = section.get('rawpath')
statime_key = section.get('statime_key')
exptime_key = section.get('exptime_key')
direction = section.get('direction')
section = config['reduce']
midpath = section.get('midpath')
odspath = section.get('odspath')
figpath = section.get('figpath')
mode = section.get('mode')
fig_format = section.get('fig_format')
oned_suffix = section.get('oned_suffix')
ncores = section.get('ncores')
# create folders if not exist
if not os.path.exists(figpath): os.mkdir(figpath)
if not os.path.exists(odspath): os.mkdir(odspath)
if not os.path.exists(midpath): os.mkdir(midpath)
# determine number of cores to be used
if ncores == 'max':
ncores = os.cpu_count()
else:
ncores = min(os.cpu_count(), int(ncores))
################ parse bias ########################
bias, bias_card_lst = get_bias(config, logtable)
############### find flat groups ##################
# initialize flat_groups for single fiber
flat_groups = {}
# flat_groups = {'flat_M': [fileid1, fileid2, ...],
# 'flat_N': [fileid1, fileid2, ...]}
for logitem in logtable:
if logitem['object']=='FLAT' and logitem['binning']=='(1, 1)':
# find a proper name for this flat
flatname = '{:g}'.format(logitem['exptime'])
# add flatname to flat_groups
if flatname not in flat_groups:
flat_groups[flatname] = []
flat_groups[flatname].append(logitem)
################# Combine the flats and trace the orders ###################
# first combine the flats
for flatname, logitem_lst in flat_groups.items():
nflat = len(logitem_lst) # number of flat fieldings
flat_filename = os.path.join(midpath,
'flat_{}.fits'.format(flatname))
aperset_filename = os.path.join(midpath,
'trace_flat_{}.trc'.format(flatname))
aperset_regname = os.path.join(midpath,
'trace_flat_{}.reg'.format(flatname))
trace_figname = os.path.join(figpath,
'trace_flat_{}.{}'.format(flatname, fig_format))
        # get flat_data and flat_mask
if mode=='debug' and os.path.exists(flat_filename) \
and os.path.exists(aperset_filename):
pass
else:
            # if the above conditions are not satisfied, combine each flat
data_lst = []
head_lst = []
exptime_lst = []
print('* Combine {} Flat Images: {}'.format(nflat, flat_filename))
fmt_str = ' - {:>7s} {:^23} {:^8s} {:^7} {:^8} {:^6}'
head_str = fmt_str.format('frameid', 'FileID', 'Object', 'exptime',
'N(sat)', 'Q95')
for iframe, logitem in enumerate(logitem_lst):
# read each individual flat frame
fname = 'FEROS.{}.fits'.format(logitem['fileid'])
filename = os.path.join(rawpath, fname)
data, head = fits.getdata(filename, header=True)
exptime_lst.append(head[exptime_key])
mask = get_mask(data, head)
sat_mask = (mask&4>0)
bad_mask = (mask&2>0)
if iframe == 0:
allmask = np.zeros_like(mask, dtype=np.int16)
allmask += sat_mask
# correct overscan for flat
data, card_lst = correct_overscan(data, head)
for key, value in card_lst:
head.append((key, value))
# correct bias for flat, if has bias
if bias is None:
                    message = 'No bias. Skipped bias correction'
else:
data = data - bias
message = 'Bias corrected'
logger.info(message)
# print info
if iframe == 0:
print(head_str)
message = fmt_str.format(
'[{:d}]'.format(logitem['frameid']),
logitem['fileid'], logitem['object'],
logitem['exptime'],
logitem['nsat'], logitem['q95'])
print(message)
data_lst.append(data)
if nflat == 1:
flat_data = data_lst[0]
else:
data_lst = np.array(data_lst)
flat_data = combine_images(data_lst,
mode = 'mean',
upper_clip = 10,
maxiter = 5,
maskmode = (None, 'max')[nflat>3],
ncores = ncores,
)
            # diagnostic plot: compare one flat row before and after the
            # bad-pixel correction
            fig = plt.figure(dpi=300)
            ax = fig.gca()
            ax.plot(flat_data[2166, 0:400], lw=0.5, color='C0')
            # fix badpixels in flat
            flat_data = fix_badpixels(flat_data, bad_mask)
            ax.plot(flat_data[2166, 0:400], lw=0.5, color='C1')
            plt.show()
# get mean exposure time and write it to header
head = fits.Header()
exptime = np.array(exptime_lst).mean()
head[exptime_key] = exptime
# find saturation mask
sat_mask = allmask > nflat/2.
flat_mask = np.int16(sat_mask)*4 + np.int16(bad_mask)*2
# get exposure time normalized flats
flat_norm = flat_data/exptime
# create the trace figure
tracefig = TraceFigure()
section = config['reduce.trace']
aperset = find_apertures(flat_data, flat_mask,
transpose = True,
scan_step = section.getint('scan_step'),
minimum = section.getfloat('minimum'),
separation = section.get('separation'),
align_deg = section.getint('align_deg'),
filling = section.getfloat('filling'),
degree = section.getint('degree'),
conv_core = 20,
display = section.getboolean('display'),
fig = tracefig,
)
# save the trace figure
tracefig.adjust_positions()
title = 'Trace for {}'.format(flat_filename)
tracefig.suptitle(title, fontsize=15)
tracefig.savefig(trace_figname)
aperset.save_txt(aperset_filename)
aperset.save_reg(aperset_regname)
# do the flat fielding
        # prepare the output mid-process figures in debug mode
if mode=='debug':
figname = 'flat_aperpar_{}_%03d.{}'.format(
flatname, fig_format)
fig_aperpar = os.path.join(figpath, figname)
else:
fig_aperpar = None
# prepare the name for slit figure
figname = 'slit_flat_{}.{}'.format(flatname, fig_format)
fig_slit = os.path.join(figpath, figname)
# prepare the name for slit file
fname = 'slit_flat_{}.dat'.format(flatname)
slit_file = os.path.join(midpath, fname)
#section = config['reduce.flat']
# pack results and save to fits
hdu_lst = fits.HDUList([
fits.PrimaryHDU(flat_data, head),
fits.ImageHDU(flat_mask),
fits.ImageHDU(flat_norm),
#fits.ImageHDU(flat_sens),
#fits.BinTableHDU(flat_spec),
])
hdu_lst.writeto(flat_filename, overwrite=True)
        # now flat_data and flat_mask are prepared
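# Hedged sketch of the config entries read above (the keys are taken from the
# code; section names are real, the values below are illustrative):
#     [data]
#     rawpath = rawdata
#     exptime_key = EXPTIME
#     [reduce]
#     midpath = midproc
#     odspath = onedspec
#     figpath = images
#     fig_format = png
#     ncores = max
#     [reduce.trace]
#     scan_step = 100
#     degree = 4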
|
1656424
|
import numpy as np
import pandas as pd
import datetime
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier, GradientBoostingClassifier
from sklearn.feature_selection import VarianceThreshold, f_regression, SelectKBest
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB, GaussianNB, BernoulliNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier, BernoulliRBM
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
print('Started at ' + str(datetime.datetime.now()))
data = pd.read_csv('~/Dropbox/replays/201803101025.csv', delimiter=',')
print(data.shape)
data = data.replace([np.inf, -np.inf], np.nan)
data = data.dropna()
data = data.loc[:, (data != data.iloc[0]).any()]
print(data.shape)
# print(np.all(np.isfinite(X_train)))
# print(np.any(np.isnan(X_train)))
# print(data.columns)
# print(data.dtypes)
y = data['label']
print(y.shape)
X = data.drop('label', axis=1)
min_max_scaler = preprocessing.MinMaxScaler()
np_scaled = min_max_scaler.fit_transform(X)
X = pd.DataFrame(np_scaled)
# sel = VarianceThreshold()
# X = sel.fit_transform(X)
#
# X = SelectKBest(f_regression, k=200).fit_transform(X, y)
# X = pd.DataFrame(X)
# pca = PCA()
# pca.fit(X)
# X = pca.transform(X)
print(X.shape)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
# for row in X_train.iterrows():
# print(row)
# KNN
print('KNN')
classifier = KNeighborsClassifier(n_neighbors=30)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
# SVC
# 'linear' 0.7633
# 'poly' 0.4917
# 'rbf' 0.7512
# 'sigmoid' 0.7443
# print('SVC')
# classifier = SVC(verbose=True, kernel='linear')
# classifier.fit(X_train, y_train)
# y_pred = classifier.predict(X_test)
# GRADIENT BOOSTING
# print('Gradient Boosting')
# classifier = GradientBoostingClassifier()
# classifier.fit(X_train, y_train)
# y_pred = classifier.predict(X_test)
# ADABOOST
# print('Adaboost')
# classifier = AdaBoostClassifier()
# classifier.fit(X_train, y_train)
# y_pred = classifier.predict(X_test)
# RANDOM FOREST
# print('Random Forest')
# classifier = RandomForestClassifier()
# classifier.fit(X_train, y_train)
# y_pred = classifier.predict(X_test)
# NEURAL NET
# print('MLP')
# classifier = MLPClassifier(verbose=True, solver='adam', max_iter=10000)
# classifier.fit(X_train, y_train)
# y_pred = classifier.predict(X_test)
# NAIVE BAYES
# print("Naive Bayes")
# classifier = GaussianNB()
# classifier.fit(X_train, y_train)
# y_pred = classifier.predict(X_test)
# MULTINOMIAL NAIVE BAYES
# print('Multinomial Naive Bayes')
# classifier = MultinomialNB(fit_prior=False)
# classifier.fit(X_train, y_train)
# y_pred = classifier.predict(X_test)
# BERNOULLI NB
# print('Bernoulli Naive Bayes')
# classifier = BernoulliNB(binarize=0.1)
# classifier.fit(X_train, y_train)
# y_pred = classifier.predict(X_test)
# TREE
# print('Simple Decision Tree')
# classifier = DecisionTreeClassifier()
# classifier.fit(X_train, y_train)
# y_pred = classifier.predict(X_test)
# LOGREG solvers: 'newton-cg', 'lbfgs', 'liblinear', 'sag'
# print('Logistic Regression')
# classifier = LogisticRegression(solver='lbfgs')
# classifier.fit(X_train, y_train)
# y_pred = classifier.predict(X_test)
# SGD
# print('SVM with Stochastic Gradient Descent')
# classifier = SGDClassifier(penalty='l1')
# classifier.fit(X_train, y_train)
# y_pred = classifier.predict(X_test)
acc = accuracy_score(y_test, y_pred)
print('acc: ' + str(acc))
conf = confusion_matrix(y_test, y_pred)
print(conf)
# classifier = KNeighborsClassifier(3)
# classifier = SVC(kernel="linear", C=0.025) # 0.767352703793
# classifier = DecisionTreeClassifier(max_depth=5) # 0.75464083938
# classifier = RandomForestClassifier(max_depth=5, n_estimators=100, max_features=10)
# classifier = MLPClassifier(alpha=1) # 0.758474576271
# classifier = AdaBoostClassifier() # 0.760088781275
# classifier = GaussianNB() # 0.604721549637
|
1656445
|
import random
import numpy as np
import torch.utils.data as data
from PIL import Image
def default_loader(path):
return Image.open(path).convert('RGB')
class Reader(data.Dataset):
def __init__(self, image_list, labels_list=[], transform=None, target_transform=None, use_cache=True,
loader=default_loader):
self.images = image_list
self.loader = loader
        if len(labels_list) != 0:
assert len(image_list) == len(labels_list)
self.labels = labels_list
else:
self.labels = False
self.transform = transform
self.target_transform = target_transform
self.cache = {}
self.use_cache = use_cache
def __len__(self):
return len(self.images)
def __getitem__(self, idx):
if idx not in self.cache:
img = self.loader(self.images[idx])
if self.labels:
target = Image.open(self.labels[idx])
else:
target = None
else:
img, target = self.cache[idx]
if self.use_cache:
self.cache[idx] = (img, target)
seed = np.random.randint(2147483647)
random.seed(seed)
if self.transform is not None:
img = self.transform(img)
random.seed(seed)
        if self.labels:
            if self.target_transform is not None:
                target = self.target_transform(target)
            return np.array(img), np.array(target)
        return np.array(img)
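# Hedged usage sketch (paths are placeholders): the shared random seed above
# keeps image and target augmentations in sync, so a purely random-geometric
# transform can serve as both `transform` and `target_transform`.
#
# from torchvision import transforms
# flip = transforms.RandomHorizontalFlip()
# dataset = Reader(image_list=['img0.png'], labels_list=['mask0.png'],
#                  transform=flip, target_transform=flip)
# img, target = dataset[0]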
|
1656478
|
from tkinter import *
from BlurWindow.blurWindow import GlobalBlur
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
import sys
class BlurBG(QWidget):
def __init__(self):
super(BlurBG, self).__init__()
self.setAttribute(Qt.WA_TranslucentBackground)
self.setWindowFlag(Qt.FramelessWindowHint)
self.resize(500, 400)
self.setStyleSheet("background-color: rgba(0, 0, 0, 0)")
def configure(e: Event):
    janela.tkraise()
    mw.move(e.x, e.y)
    mw.resize(janela.winfo_width(), janela.winfo_height())
def showTk(e):
    mw.show()
    janela.tkraise()
janela = Tk()
janela.configure(bg='SystemTransparent')
janela.attributes("-transparent", True)
janela.geometry('200x200')
app = QApplication(sys.argv)
mw = BlurBG()
mw.show()
GlobalBlur(QWidget=mw,HWND=mw.winId())
app.processEvents()
janela.bind('<Configure>',configure)
janela.bind('<Unmap>', lambda e: mw.hide())
janela.bind('<Map>', showTk)
janela.mainloop()
|
1656486
|
import sys
import os
import pandas as pd
import seaborn as sns
STALE_THRESHOLD = 90 * 24 * 60 * 60 # 90 days in seconds
if len(sys.argv) < 2:
print("Path to a csv required as the first argument")
sys.exit(1)
OUTSIDER_ONLY = False
if len(sys.argv) == 3:
OUTSIDER_ONLY = sys.argv[2] == "--outsiders"
path = sys.argv[1]
data = pd.read_csv(path)
if OUTSIDER_ONLY:
data = data.drop(data[data['author'] == "MEMBER"].index)
data = data.drop(data[data['author'] == "OWNER"].index)
def is_stale(created, extracted):
return (extracted - created) > STALE_THRESHOLD
def classify_pr(pr_stats):
if pr_stats["state"] == "MERGED":
return "Successful"
elif pr_stats["state"] == "CLOSED":
return "Rejected"
elif pr_stats["state"] == "OPEN" and is_stale(
pr_stats["created_at"], pr_stats["extracted_at"]
):
return "Stale"
else:
return "Active"
pr_class = data.apply(classify_pr, axis=1)
data = data.assign(PR=pr_class)
plot_order = ["Successful", "Rejected", "Stale", "Active"]
plot = sns.countplot(
data=data, x="PR", palette="colorblind", order=plot_order
).get_figure()
repo_name = os.path.basename(path)
if repo_name.endswith(".csv"):
repo_name = repo_name[:-4]
plot.savefig(f"{repo_name}.png")
|
1656505
|
import io
import pytest
from tentaclio import urls
from tentaclio.clients import exceptions, s3_client
@pytest.fixture()
def mocked_conn(mocker):
    # the patch stays active for the duration of the test; pytest-mock
    # undoes it automatically at teardown
    yield mocker.patch.object(s3_client.S3Client, "_connect", return_value=mocker.Mock())
class TestS3Client:
@pytest.mark.parametrize(
"url,username,password,hostname,path",
[
("s3://:@s3", None, None, None, ""),
("s3://public_key:private_key@s3", "public_key", "private_key", None, ""),
("s3://:@bucket", None, None, "bucket", ""),
("s3://:@bucket/prefix", None, None, "bucket", "prefix"),
],
)
def test_parsing_s3_url(self, url, username, password, hostname, path):
client = s3_client.S3Client(url)
assert client.aws_access_key_id == username
assert client.aws_secret_access_key == password
assert client.key_name == path
assert client.bucket == hostname
@pytest.mark.parametrize(
"url,bucket,key",
[("s3://:@s3", None, None), ("s3://:@s3", "bucket", None), ("s3://:@bucket", None, None)],
)
def test_get_invalid_path(self, url, bucket, key, mocked_conn):
with s3_client.S3Client(url) as client:
with pytest.raises(exceptions.S3Error):
client.get(io.StringIO(), bucket_name=bucket, key_name=key)
@pytest.mark.parametrize("url", [("s3:///"), ("s3://")])
def test_get_buckets(self, url, mocked_conn):
with s3_client.S3Client(url) as client:
expected_buckets = ("mocked-0", "mocked-1")
bucket_names = [{"Name": bucket} for bucket in expected_buckets]
client.conn.list_buckets.return_value = {"Buckets": bucket_names}
entries = client.scandir()
expected_urls = set([str(urls.URL("s3://" + bucket)) for bucket in expected_buckets])
result_urls = set([str(entry.url) for entry in entries])
assert result_urls == expected_urls
assert all([entry.is_dir for entry in entries])
class TestKeyLister(object):
def test_files(self, mocker):
client = mocker.MagicMock(bucket="bucket", key_name="deep/key")
client.conn.get_paginator().paginate.return_value = [
{"Contents": [{"Key": "deep/key/file0.txt"}, {"Key": "deep/key/file1.txt"}]}
]
expected_urls = set(["s3://bucket/deep/key/file1.txt", "s3://bucket/deep/key/file0.txt"])
lister = list(s3_client._KeyLister(client))
        result_urls = [str(entry.url) for entry in lister]
        assert set(result_urls) == expected_urls
assert all([entry.is_file for entry in lister])
def test_folders(self, mocker):
client = mocker.MagicMock(bucket="bucket", key_name="deep/key")
client.conn.get_paginator().paginate.return_value = [
{"CommonPrefixes": [{"Prefix": "deep/key/folder0/"}, {"Prefix": "deep/key/folder1/"}]}
]
expected_urls = set(["s3://bucket/deep/key/folder0", "s3://bucket/deep/key/folder1"])
        lister = list(s3_client._KeyLister(client))
        result_urls = [str(entry.url) for entry in lister]
        assert set(result_urls) == expected_urls
        assert all([entry.is_dir for entry in lister])
def test_multiple_pages(self, mocker):
client = mocker.MagicMock(bucket="bucket", key_name="deep/key")
client.conn.get_paginator().paginate.return_value = [
{
"CommonPrefixes": [
{"Prefix": "deep/key/folder0/"},
{"Prefix": "deep/key/folder1/"},
],
"Contents": [{"Key": "deep/key/file0.txt"}, {"Key": "deep/key/file1.txt"}],
},
{"Contents": [{"Key": "deep/key/file2.txt"}]},
]
expected_urls = set(
[
"s3://bucket/deep/key/folder0",
"s3://bucket/deep/key/folder1",
"s3://bucket/deep/key/file0.txt",
"s3://bucket/deep/key/file1.txt",
"s3://bucket/deep/key/file2.txt",
]
)
        lister = list(s3_client._KeyLister(client))
        result_urls = [str(entry.url) for entry in lister]
        assert set(result_urls) == expected_urls
@pytest.mark.parametrize(
"url, expected",
(
("s3://bucket", urls.URL("s3://bucket/last_path")),
),
)
def test_build_url(url, expected, mocker):
client = s3_client.S3Client(url)
url = s3_client._build_url(client.bucket, "last_path")
assert url == expected
url = s3_client._build_url(client.bucket, "last_path/")
assert url == expected
|
1656527
|
from pyworkflow.node import VizNode, NodeException
from pyworkflow.parameters import *
import pandas as pd
import altair as alt
class GraphNode(VizNode):
"""Displays a pandas DataFrame in a visual graph.
Raises:
NodeException: any error generating Altair Chart.
"""
name = "Graph Node"
num_in = 1
num_out = 0
OPTIONS = {
"graph_type": SelectParameter(
"Graph Type",
options=["area", "bar", "line", "point"],
default="bar",
docstring="Graph viz type"
),
"mark_options": BooleanParameter(
"Specify mark options",
default=False,
docstring="Specify mark options"
),
"width": IntegerParameter(
"Mark width",
default=10,
docstring="Width of marks"
),
"height": IntegerParameter(
"Mark height",
default=10,
docstring="Height of marks"
),
"encode_options": BooleanParameter(
"Specify encoding options",
default=True,
docstring="Specify encoding options"
),
"x_axis": StringParameter(
"X-Axis",
default="a",
docstring="X-axis values"
),
"y_axis": StringParameter(
"Y-Axis",
default="average(b)",
docstring="Y-axis values"
)
}
def execute(self, predecessor_data, flow_vars):
try:
df = pd.DataFrame.from_dict(predecessor_data[0])
if flow_vars["mark_options"].get_value():
mark_options = {
"height": flow_vars["height"].get_value(),
"width": flow_vars["width"].get_value(),
}
else:
mark_options = {}
if flow_vars["encode_options"].get_value():
encode_options = {
"x": flow_vars["x_axis"].get_value(),
"y": flow_vars["y_axis"].get_value(),
}
else:
encode_options = {}
graph_type = flow_vars["graph_type"].get_value()
# Generate requested chart with options
if graph_type == "area":
chart = alt.Chart(df).mark_area(**mark_options).encode(**encode_options)
elif graph_type == "bar":
chart = alt.Chart(df).mark_bar(**mark_options).encode(**encode_options)
elif graph_type == "line":
chart = alt.Chart(df).mark_line(**mark_options).encode(**encode_options)
elif graph_type == "point":
chart = alt.Chart(df).mark_point(**mark_options).encode(**encode_options)
            else:
                # Unreachable for the SelectParameter options above, but fail
                # loudly instead of calling to_json() on None.
                raise ValueError(f'Unsupported graph type: {graph_type}')
            return chart.to_json()
except Exception as e:
print(e)
raise NodeException('graph node', str(e))
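# For orientation (a hedged sketch, independent of the pyworkflow wiring):
# with the default options above, the generated chart is equivalent to
# calling Altair directly, e.g.
#     alt.Chart(df).mark_bar().encode(x="a", y="average(b)")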
|
1656532
|
from py4web import action, request
# pls, run socketio server - look at utils/wsservers.py
# test example for python-socketio
@action("socketio/index")
@action.uses("socketio/index.html")
def index():
return dict()
@action('socketio/echo/<path:path>', method=["GET", "POST"])
def echo(path=None):
    print(path)
    print('GET from sio-server')
|
1656555
|
from diagrams import Diagram
from diagrams.generic.blank import Blank
with Diagram("example_dag", show=False):
run_this_1 = Blank("run_this_1")
run_this_2a = Blank("run_this_2a")
run_this_3 = Blank("run_this_3")
run_this_2b = Blank("run_this_2b")
run_this_1 >> run_this_2a
run_this_2a >> run_this_3
run_this_1 >> run_this_2b
run_this_2b >> run_this_3
|
1656580
|
import glob
import pickle
import os
import tqdm
data_type=1
test_set=0
processed_data_dir = "../data/raw_data/ann_data_roberta-base_512/"
trec_save_paths = glob.glob(f"data-type-{data_type}_test-set-{test_set}_ckpt-*.trec")
with open(os.path.join(processed_data_dir,'qid2offset.pickle'),'rb') as f:
qid2offset = pickle.load(f)
offset2qid = {}
for k in qid2offset:
offset2qid[qid2offset[k]]=k
with open(os.path.join(processed_data_dir,'pid2offset.pickle'),'rb') as f:
pid2offset = pickle.load(f)
offset2pid = {}
for k in pid2offset:
offset2pid[pid2offset[k]]=k
#for k in offset2qid:
# print(k,offset2qid[k])
for path in tqdm.tqdm(trec_save_paths):
with open(path) as f:
lines=f.readlines()
with open(path.replace(".trec",".formatted.trec"),"w") as f:
for line in lines:
qid , Q0, pid, rank, score, tag = line.strip().split(' ')
# print(offset2qid[int(qid)] , Q0, pid, rank, score.replace('-',''), tag)
if data_type==0:
f.write(f"{offset2qid[int(qid)]} {Q0} D{offset2pid[int(pid)]} {rank} {score.replace('-','')} {tag}\n")
else:
f.write(f"{offset2qid[int(qid)]} {Q0} {offset2pid[int(pid)]} {rank} {score.replace('-','')} {tag}\n")
# break
|
1656596
|
from ckan.plugins import toolkit as tk
def archiver_resource_show(resource_id):
data_dict = {'id': resource_id}
return tk.get_action('archiver_resource_show')(data_dict)
def archiver_is_resource_broken_html(resource):
archival = resource.get('archiver')
if not archival:
return tk.literal('<!-- No archival info for this resource -->')
extra_vars = {'resource': resource}
extra_vars.update(archival)
return tk.literal(
tk.render('archiver/is_resource_broken.html',
extra_vars=extra_vars))
def archiver_is_resource_cached_html(resource):
archival = resource.get('archiver')
if not archival:
return tk.literal('<!-- No archival info for this resource -->')
extra_vars = {'resource': resource}
extra_vars.update(archival)
return tk.literal(
tk.render('archiver/is_resource_cached.html',
extra_vars=extra_vars))
# Replacement for the core ckan helper 'format_resource_items'
# but with our own blacklist
def archiver_format_resource_items(items):
blacklist = ['archiver', 'qa']
items_ = [item for item in items
if item[0] not in blacklist]
import ckan.lib.helpers as ckan_helpers
return ckan_helpers.format_resource_items(items_)
|
1656597
|
from django import forms
from django.conf import settings
from django.contrib.admin import widgets
from django.utils.translation import gettext_lazy as _
class ExportDBForm(forms.Form):
from_date = forms.DateField(label=_('from date'),
widget=widgets.AdminDateWidget)
to_date = forms.DateField(label=_('to date'),
widget=widgets.AdminDateWidget)
def clean(self):
cleaned_data = super(ExportDBForm, self).clean()
frm, to = cleaned_data.get('from_date'), cleaned_data.get('to_date')
if frm and to:
if to < frm:
raise forms.ValidationError(
_('The to date must be later than the from date'))
diff = to - frm
if diff.days > settings.EXPORT_MAX_DAYS:
raise forms.ValidationError(
_(
'The delta between from and to date is limited to %d days') % settings.EXPORT_MAX_DAYS
)
return cleaned_data
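# Note: this form relies on a project-level setting, e.g. in settings.py
# (the value below is illustrative):
# EXPORT_MAX_DAYS = 31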
|
1656634
|
import torch
import torch.nn as nn
from lie_conv.utils import Expression
class ResidualBlock(nn.Module):
def __init__(self, module, dim=None):
super().__init__()
self.module = module
self.dim = dim
def forward(self, input):
if self.dim is None:
return input + self.module(input)
else:
input[self.dim] = self.module(input)[self.dim]
return input
class GroupLift(nn.Module):
def __init__(self, group, liftsamples=1):
super().__init__()
self.group = group
self.liftsamples = liftsamples
def forward(self, x):
return self.group.lift(x, self.liftsamples)
class GlobalPool(nn.Module):
"""computes values reduced over all spatial locations (& group elements) in the mask"""
def __init__(self, mean=False):
super().__init__()
self.mean = mean
def forward(self, x):
"""x [xyz (bs,n,d), vals (bs,n,c), mask (bs,n)]"""
if len(x) == 2:
return x[1].mean(1)
coords, vals, mask = x
summed = torch.where(mask.unsqueeze(-1), vals, torch.zeros_like(vals)).sum(1)
if self.mean:
summed_mask = mask.sum(-1).unsqueeze(-1).clamp(min=1)
summed /= summed_mask
return summed
def Swish():
return Expression(lambda x: x * torch.sigmoid(x))
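# Hedged composition sketch (dimensions are illustrative): the pieces above
# combine into a small residual MLP block.
def residual_mlp_block(channels=64):
    return ResidualBlock(nn.Sequential(
        nn.Linear(channels, channels),
        Swish(),
        nn.Linear(channels, channels),
    ))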
|
1656683
|
from setuptools import setup, find_packages
def get_version(path):
""" Parse the version number variable __version__ from a script. """
import re
string = open(path).read()
version_re = r"^__version__ = ['\"]([^'\"]*)['\"]"
version_str = re.search(version_re, string, re.M).group(1)
return version_str
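# For reference, get_version() expects the target file to contain a
# module-level line of the form (illustrative version string):
# __version__ = "0.1.0"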
setup(
name = 'grocsvs',
version = get_version("src/grocsvs/__init__.py"),
packages = find_packages('src'),
package_dir = {"": "src"},
entry_points = {
'console_scripts' : ["grocsvs = grocsvs.main:main"]
},
install_requires = ["admiral", "h5py", "networkx>=2.0", "pandas", "pybedtools",
"pyfaidx", "pysam>=0.10.0", "scipy", "ipython-cluster-helper",
"pygraphviz", "psutil"],
)
|
1656694
|
import numpy as np
import pandas as pd
import io
from sklearn.utils.validation import check_array, column_or_1d, check_consistent_length
from sklearn.utils import assert_all_finite
from sklearn.utils.multiclass import type_of_target
from ._utils import _assure_2d_array
class DoubleMLData:
"""Double machine learning data-backend.
:class:`DoubleMLData` objects can be initialized from
:class:`pandas.DataFrame`'s as well as :class:`numpy.ndarray`'s.
Parameters
----------
data : :class:`pandas.DataFrame`
The data.
y_col : str
The outcome variable.
d_cols : str or list
The treatment variable(s).
x_cols : None, str or list
The covariates.
If ``None``, all variables (columns of ``data``) which are neither specified as outcome variable ``y_col``, nor
treatment variables ``d_cols``, nor instrumental variables ``z_cols`` are used as covariates.
Default is ``None``.
z_cols : None, str or list
The instrumental variable(s).
Default is ``None``.
use_other_treat_as_covariate : bool
Indicates whether in the multiple-treatment case the other treatment variables should be added as covariates.
Default is ``True``.
force_all_x_finite : bool or str
Indicates whether to raise an error on infinite values and / or missings in the covariates ``x``.
Possible values are: ``True`` (neither missings ``np.nan``, ``pd.NA`` nor infinite values ``np.inf`` are
allowed), ``False`` (missings and infinite values are allowed), ``'allow-nan'`` (only missings are allowed).
Note that the choice ``False`` and ``'allow-nan'`` are only reasonable if the machine learning methods used
for the nuisance functions are capable to provide valid predictions with missings and / or infinite values
in the covariates ``x``.
Default is ``True``.
Examples
--------
>>> from doubleml import DoubleMLData
>>> from doubleml.datasets import make_plr_CCDDHNR2018
>>> # initialization from pandas.DataFrame
>>> df = make_plr_CCDDHNR2018(return_type='DataFrame')
>>> obj_dml_data_from_df = DoubleMLData(df, 'y', 'd')
>>> # initialization from np.ndarray
>>> (x, y, d) = make_plr_CCDDHNR2018(return_type='array')
>>> obj_dml_data_from_array = DoubleMLData.from_arrays(x, y, d)
"""
def __init__(self,
data,
y_col,
d_cols,
x_cols=None,
z_cols=None,
use_other_treat_as_covariate=True,
force_all_x_finite=True):
if not isinstance(data, pd.DataFrame):
raise TypeError('data must be of pd.DataFrame type. '
f'{str(data)} of type {str(type(data))} was passed.')
if not data.columns.is_unique:
raise ValueError('Invalid pd.DataFrame: '
'Contains duplicate column names.')
self._data = data
self.y_col = y_col
self.d_cols = d_cols
self.z_cols = z_cols
self.x_cols = x_cols
self._check_disjoint_sets_y_d_x_z()
self.use_other_treat_as_covariate = use_other_treat_as_covariate
self.force_all_x_finite = force_all_x_finite
self._binary_treats = self._check_binary_treats()
self._set_y_z()
# by default, we initialize to the first treatment variable
self.set_x_d(self.d_cols[0])
def __str__(self):
data_info = f'Outcome variable: {self.y_col}\n' \
f'Treatment variable(s): {self.d_cols}\n' \
f'Covariates: {self.x_cols}\n' \
f'Instrument variable(s): {self.z_cols}\n' \
f'No. Observations: {self.n_obs}\n'
buf = io.StringIO()
self.data.info(verbose=False, buf=buf)
df_info = buf.getvalue()
res = '================== DoubleMLData Object ==================\n' + \
'\n------------------ Data summary ------------------\n' + data_info + \
'\n------------------ DataFrame info ------------------\n' + df_info
return res
@classmethod
def from_arrays(cls, x, y, d, z=None, use_other_treat_as_covariate=True,
force_all_x_finite=True):
"""
Initialize :class:`DoubleMLData` from :class:`numpy.ndarray`'s.
Parameters
----------
x : :class:`numpy.ndarray`
Array of covariates.
y : :class:`numpy.ndarray`
Array of the outcome variable.
d : :class:`numpy.ndarray`
Array of treatment variables.
z : None or :class:`numpy.ndarray`
Array of instrumental variables.
Default is ``None``.
use_other_treat_as_covariate : bool
Indicates whether in the multiple-treatment case the other treatment variables should be added as covariates.
Default is ``True``.
force_all_x_finite : bool or str
Indicates whether to raise an error on infinite values and / or missings in the covariates ``x``.
Possible values are: ``True`` (neither missings ``np.nan``, ``pd.NA`` nor infinite values ``np.inf`` are
allowed), ``False`` (missings and infinite values are allowed), ``'allow-nan'`` (only missings are allowed).
Note that the choice ``False`` and ``'allow-nan'`` are only reasonable if the machine learning methods used
for the nuisance functions are capable to provide valid predictions with missings and / or infinite values
in the covariates ``x``.
Default is ``True``.
Examples
--------
>>> from doubleml import DoubleMLData
>>> from doubleml.datasets import make_plr_CCDDHNR2018
>>> (x, y, d) = make_plr_CCDDHNR2018(return_type='array')
>>> obj_dml_data_from_array = DoubleMLData.from_arrays(x, y, d)
"""
if isinstance(force_all_x_finite, str):
if force_all_x_finite != 'allow-nan':
raise ValueError("Invalid force_all_x_finite " + force_all_x_finite + ". " +
"force_all_x_finite must be True, False or 'allow-nan'.")
elif not isinstance(force_all_x_finite, bool):
raise TypeError("Invalid force_all_x_finite. " +
"force_all_x_finite must be True, False or 'allow-nan'.")
x = check_array(x, ensure_2d=False, allow_nd=False,
force_all_finite=force_all_x_finite)
d = check_array(d, ensure_2d=False, allow_nd=False)
y = column_or_1d(y, warn=True)
x = _assure_2d_array(x)
d = _assure_2d_array(d)
y_col = 'y'
if z is None:
check_consistent_length(x, y, d)
z_cols = None
else:
z = check_array(z, ensure_2d=False, allow_nd=False)
z = _assure_2d_array(z)
check_consistent_length(x, y, d, z)
if z.shape[1] == 1:
z_cols = ['z']
else:
z_cols = [f'z{i + 1}' for i in np.arange(z.shape[1])]
if d.shape[1] == 1:
d_cols = ['d']
else:
d_cols = [f'd{i+1}' for i in np.arange(d.shape[1])]
x_cols = [f'X{i+1}' for i in np.arange(x.shape[1])]
if z is None:
data = pd.DataFrame(np.column_stack((x, y, d)),
columns=x_cols + [y_col] + d_cols)
else:
data = pd.DataFrame(np.column_stack((x, y, d, z)),
columns=x_cols + [y_col] + d_cols + z_cols)
return cls(data, y_col, d_cols, x_cols, z_cols, use_other_treat_as_covariate, force_all_x_finite)
@property
def data(self):
"""
The data.
"""
return self._data
@property
def x(self):
"""
Array of covariates;
Dynamic! May depend on the currently set treatment variable;
To get an array of all covariates (independent of the currently set treatment variable)
call ``obj.data[obj.x_cols].values``.
"""
return self._X.values
@property
def y(self):
"""
Array of outcome variable.
"""
return self._y.values
@property
def d(self):
"""
Array of treatment variable;
Dynamic! Depends on the currently set treatment variable;
To get an array of all treatment variables (independent of the currently set treatment variable)
call ``obj.data[obj.d_cols].values``.
"""
return self._d.values
@property
def z(self):
"""
Array of instrumental variables.
"""
if self.z_cols is not None:
return self._z.values
else:
return None
@property
def all_variables(self):
"""
All variables available in the dataset.
"""
return self.data.columns
@property
def n_treat(self):
"""
The number of treatment variables.
"""
return len(self.d_cols)
@property
def n_instr(self):
"""
The number of instruments.
"""
if self.z_cols is not None:
n_instr = len(self.z_cols)
else:
n_instr = 0
return n_instr
@property
def n_obs(self):
"""
The number of observations.
"""
return self.data.shape[0]
@property
def binary_treats(self):
"""
Series with logical(s) indicating whether the treatment variable(s) are binary with values 0 and 1.
"""
return self._binary_treats
@property
def x_cols(self):
"""
The covariates.
"""
return self._x_cols
@x_cols.setter
def x_cols(self, value):
reset_value = hasattr(self, '_x_cols')
if value is not None:
if isinstance(value, str):
value = [value]
if not isinstance(value, list):
raise TypeError('The covariates x_cols must be of str or list type (or None). '
f'{str(value)} of type {str(type(value))} was passed.')
if not len(set(value)) == len(value):
raise ValueError('Invalid covariates x_cols: '
'Contains duplicate values.')
if not set(value).issubset(set(self.all_variables)):
raise ValueError('Invalid covariates x_cols. '
'At least one covariate is no data column.')
self._x_cols = value
else:
# x_cols defaults to all columns but y_col, d_cols and z_cols
if self.z_cols is not None:
y_d_z = set.union({self.y_col}, set(self.d_cols), set(self.z_cols))
x_cols = [col for col in self.data.columns if col not in y_d_z]
else:
y_d = set.union({self.y_col}, set(self.d_cols))
x_cols = [col for col in self.data.columns if col not in y_d]
self._x_cols = x_cols
if reset_value:
self._check_disjoint_sets()
# by default, we initialize to the first treatment variable
self.set_x_d(self.d_cols[0])
@property
def d_cols(self):
"""
The treatment variable(s).
"""
return self._d_cols
@d_cols.setter
def d_cols(self, value):
reset_value = hasattr(self, '_d_cols')
if isinstance(value, str):
value = [value]
if not isinstance(value, list):
raise TypeError('The treatment variable(s) d_cols must be of str or list type. '
f'{str(value)} of type {str(type(value))} was passed.')
if not len(set(value)) == len(value):
raise ValueError('Invalid treatment variable(s) d_cols: '
'Contains duplicate values.')
if not set(value).issubset(set(self.all_variables)):
raise ValueError('Invalid treatment variable(s) d_cols. '
'At least one treatment variable is no data column.')
self._d_cols = value
if reset_value:
self._check_disjoint_sets()
# by default, we initialize to the first treatment variable
self.set_x_d(self.d_cols[0])
@property
def y_col(self):
"""
The outcome variable.
"""
return self._y_col
@y_col.setter
def y_col(self, value):
reset_value = hasattr(self, '_y_col')
if not isinstance(value, str):
raise TypeError('The outcome variable y_col must be of str type. '
f'{str(value)} of type {str(type(value))} was passed.')
if value not in self.all_variables:
raise ValueError('Invalid outcome variable y_col. '
f'{value} is no data column.')
self._y_col = value
if reset_value:
self._check_disjoint_sets()
self._set_y_z()
@property
def z_cols(self):
"""
The instrumental variable(s).
"""
return self._z_cols
@z_cols.setter
def z_cols(self, value):
reset_value = hasattr(self, '_z_cols')
if value is not None:
if isinstance(value, str):
value = [value]
if not isinstance(value, list):
raise TypeError('The instrumental variable(s) z_cols must be of str or list type (or None). '
f'{str(value)} of type {str(type(value))} was passed.')
if not len(set(value)) == len(value):
raise ValueError('Invalid instrumental variable(s) z_cols: '
'Contains duplicate values.')
if not set(value).issubset(set(self.all_variables)):
raise ValueError('Invalid instrumental variable(s) z_cols. '
'At least one instrumental variable is no data column.')
self._z_cols = value
else:
self._z_cols = None
if reset_value:
self._check_disjoint_sets()
self._set_y_z()
@property
def use_other_treat_as_covariate(self):
"""
Indicates whether in the multiple-treatment case the other treatment variables should be added as covariates.
"""
return self._use_other_treat_as_covariate
@use_other_treat_as_covariate.setter
def use_other_treat_as_covariate(self, value):
reset_value = hasattr(self, '_use_other_treat_as_covariate')
if not isinstance(value, bool):
raise TypeError('use_other_treat_as_covariate must be True or False. '
f'Got {str(value)}.')
self._use_other_treat_as_covariate = value
if reset_value:
# by default, we initialize to the first treatment variable
self.set_x_d(self.d_cols[0])
@property
def force_all_x_finite(self):
"""
Indicates whether to raise an error on infinite values and / or missings in the covariates ``x``.
"""
return self._force_all_x_finite
@force_all_x_finite.setter
def force_all_x_finite(self, value):
reset_value = hasattr(self, '_force_all_x_finite')
if isinstance(value, str):
if value != 'allow-nan':
raise ValueError("Invalid force_all_x_finite " + value + ". " +
"force_all_x_finite must be True, False or 'allow-nan'.")
elif not isinstance(value, bool):
raise TypeError("Invalid force_all_x_finite. " +
"force_all_x_finite must be True, False or 'allow-nan'.")
self._force_all_x_finite = value
if reset_value:
# by default, we initialize to the first treatment variable
self.set_x_d(self.d_cols[0])
def _set_y_z(self):
assert_all_finite(self.data.loc[:, self.y_col])
self._y = self.data.loc[:, self.y_col]
if self.z_cols is None:
self._z = None
else:
assert_all_finite(self.data.loc[:, self.z_cols])
self._z = self.data.loc[:, self.z_cols]
def set_x_d(self, treatment_var):
"""
Function that assigns the role for the treatment variables in the multiple-treatment case.
Parameters
----------
treatment_var : str
Active treatment variable that will be set to d.
"""
if not isinstance(treatment_var, str):
raise TypeError('treatment_var must be of str type. '
f'{str(treatment_var)} of type {str(type(treatment_var))} was passed.')
if treatment_var not in self.d_cols:
raise ValueError('Invalid treatment_var. '
f'{treatment_var} is not in d_cols.')
if self.use_other_treat_as_covariate:
            # note that the following line needs to be adapted in case an intersection of x_cols and d_cols is allowed
# (see https://github.com/DoubleML/doubleml-for-py/issues/83)
xd_list = self.x_cols + self.d_cols
xd_list.remove(treatment_var)
else:
xd_list = self.x_cols
assert_all_finite(self.data.loc[:, treatment_var])
if self.force_all_x_finite:
assert_all_finite(self.data.loc[:, xd_list],
allow_nan=self.force_all_x_finite == 'allow-nan')
self._d = self.data.loc[:, treatment_var]
self._X = self.data.loc[:, xd_list]
def _check_binary_treats(self):
is_binary = pd.Series(dtype=bool, index=self.d_cols)
for treatment_var in self.d_cols:
this_d = self.data.loc[:, treatment_var]
binary_treat = (type_of_target(this_d) == 'binary')
zero_one_treat = np.all((np.power(this_d, 2) - this_d) == 0)
is_binary[treatment_var] = (binary_treat & zero_one_treat)
return is_binary
def _check_disjoint_sets(self):
# this function can be extended in inherited subclasses
self._check_disjoint_sets_y_d_x_z()
def _check_disjoint_sets_y_d_x_z(self):
y_col_set = {self.y_col}
x_cols_set = set(self.x_cols)
d_cols_set = set(self.d_cols)
if not y_col_set.isdisjoint(x_cols_set):
raise ValueError(f'{str(self.y_col)} cannot be set as outcome variable ``y_col`` and covariate in '
'``x_cols``.')
if not y_col_set.isdisjoint(d_cols_set):
raise ValueError(f'{str(self.y_col)} cannot be set as outcome variable ``y_col`` and treatment variable in '
'``d_cols``.')
        # note that the line xd_list = self.x_cols + self.d_cols in method set_x_d needs adaptation if an
        # intersection of x_cols and d_cols is allowed (see https://github.com/DoubleML/doubleml-for-py/issues/83)
if not d_cols_set.isdisjoint(x_cols_set):
raise ValueError('At least one variable/column is set as treatment variable (``d_cols``) and as covariate'
'(``x_cols``). Consider using parameter ``use_other_treat_as_covariate``.')
if self.z_cols is not None:
z_cols_set = set(self.z_cols)
if not y_col_set.isdisjoint(z_cols_set):
raise ValueError(f'{str(self.y_col)} cannot be set as outcome variable ``y_col`` and instrumental '
'variable in ``z_cols``.')
if not d_cols_set.isdisjoint(z_cols_set):
raise ValueError('At least one variable/column is set as treatment variable (``d_cols``) and '
'instrumental variable in ``z_cols``.')
if not x_cols_set.isdisjoint(z_cols_set):
raise ValueError('At least one variable/column is set as covariate (``x_cols``) and instrumental '
'variable in ``z_cols``.')
class DoubleMLClusterData(DoubleMLData):
"""Double machine learning data-backend for data with cluster variables.
:class:`DoubleMLClusterData` objects can be initialized from
:class:`pandas.DataFrame`'s as well as :class:`numpy.ndarray`'s.
Parameters
----------
data : :class:`pandas.DataFrame`
The data.
y_col : str
The outcome variable.
d_cols : str or list
The treatment variable(s).
cluster_cols : str or list
The cluster variable(s).
x_cols : None, str or list
The covariates.
If ``None``, all variables (columns of ``data``) which are neither specified as outcome variable ``y_col``, nor
treatment variables ``d_cols``, nor instrumental variables ``z_cols`` are used as covariates.
Default is ``None``.
z_cols : None, str or list
The instrumental variable(s).
Default is ``None``.
use_other_treat_as_covariate : bool
Indicates whether in the multiple-treatment case the other treatment variables should be added as covariates.
Default is ``True``.
force_all_x_finite : bool or str
Indicates whether to raise an error on infinite values and / or missings in the covariates ``x``.
Possible values are: ``True`` (neither missings ``np.nan``, ``pd.NA`` nor infinite values ``np.inf`` are
allowed), ``False`` (missings and infinite values are allowed), ``'allow-nan'`` (only missings are allowed).
Note that the choice ``False`` and ``'allow-nan'`` are only reasonable if the machine learning methods used
for the nuisance functions are capable to provide valid predictions with missings and / or infinite values
in the covariates ``x``.
Default is ``True``.
Examples
--------
>>> from doubleml import DoubleMLClusterData
>>> from doubleml.datasets import make_pliv_multiway_cluster_CKMS2021
>>> # initialization from pandas.DataFrame
>>> df = make_pliv_multiway_cluster_CKMS2021(return_type='DataFrame')
>>> obj_dml_data_from_df = DoubleMLClusterData(df, 'Y', 'D', ['cluster_var_i', 'cluster_var_j'], z_cols='Z')
>>> # initialization from np.ndarray
>>> (x, y, d, cluster_vars, z) = make_pliv_multiway_cluster_CKMS2021(return_type='array')
>>> obj_dml_data_from_array = DoubleMLClusterData.from_arrays(x, y, d, cluster_vars, z)
"""
def __init__(self,
data,
y_col,
d_cols,
cluster_cols,
x_cols=None,
z_cols=None,
use_other_treat_as_covariate=True,
force_all_x_finite=True):
# we need to set cluster_cols (needs _data) before call to the super __init__ because of the x_cols setter
if not isinstance(data, pd.DataFrame):
raise TypeError('data must be of pd.DataFrame type. '
f'{str(data)} of type {str(type(data))} was passed.')
if not data.columns.is_unique:
raise ValueError('Invalid pd.DataFrame: '
'Contains duplicate column names.')
self._data = data
self.cluster_cols = cluster_cols
self._set_cluster_vars()
super().__init__(data,
y_col,
d_cols,
x_cols,
z_cols,
use_other_treat_as_covariate,
force_all_x_finite)
self._check_disjoint_sets_cluster_cols()
def __str__(self):
data_info = f'Outcome variable: {self.y_col}\n' \
f'Treatment variable(s): {self.d_cols}\n' \
f'Cluster variable(s): {self.cluster_cols}\n' \
f'Covariates: {self.x_cols}\n' \
f'Instrument variable(s): {self.z_cols}\n' \
f'No. Observations: {self.n_obs}\n'
buf = io.StringIO()
self.data.info(verbose=False, buf=buf)
df_info = buf.getvalue()
res = '================== DoubleMLClusterData Object ==================\n' + \
'\n------------------ Data summary ------------------\n' + data_info + \
'\n------------------ DataFrame info ------------------\n' + df_info
return res
@classmethod
def from_arrays(cls, x, y, d, cluster_vars, z=None, use_other_treat_as_covariate=True,
force_all_x_finite=True):
"""
Initialize :class:`DoubleMLClusterData` from :class:`numpy.ndarray`'s.
Parameters
----------
x : :class:`numpy.ndarray`
Array of covariates.
y : :class:`numpy.ndarray`
Array of the outcome variable.
d : :class:`numpy.ndarray`
Array of treatment variables.
cluster_vars : :class:`numpy.ndarray`
Array of cluster variables.
z : None or :class:`numpy.ndarray`
Array of instrumental variables.
Default is ``None``.
use_other_treat_as_covariate : bool
Indicates whether in the multiple-treatment case the other treatment variables should be added as covariates.
Default is ``True``.
force_all_x_finite : bool or str
Indicates whether to raise an error on infinite values and / or missings in the covariates ``x``.
Possible values are: ``True`` (neither missings ``np.nan``, ``pd.NA`` nor infinite values ``np.inf`` are
allowed), ``False`` (missings and infinite values are allowed), ``'allow-nan'`` (only missings are allowed).
Note that the choice ``False`` and ``'allow-nan'`` are only reasonable if the machine learning methods used
for the nuisance functions are capable to provide valid predictions with missings and / or infinite values
in the covariates ``x``.
Default is ``True``.
Examples
--------
>>> from doubleml import DoubleMLClusterData
>>> from doubleml.datasets import make_pliv_multiway_cluster_CKMS2021
>>> (x, y, d, cluster_vars, z) = make_pliv_multiway_cluster_CKMS2021(return_type='array')
>>> obj_dml_data_from_array = DoubleMLClusterData.from_arrays(x, y, d, cluster_vars, z)
"""
dml_data = DoubleMLData.from_arrays(x, y, d, z, use_other_treat_as_covariate, force_all_x_finite)
cluster_vars = check_array(cluster_vars, ensure_2d=False, allow_nd=False)
cluster_vars = _assure_2d_array(cluster_vars)
if cluster_vars.shape[1] == 1:
cluster_cols = ['cluster_var']
else:
cluster_cols = [f'cluster_var{i + 1}' for i in np.arange(cluster_vars.shape[1])]
data = pd.concat((pd.DataFrame(cluster_vars, columns=cluster_cols), dml_data.data), axis=1)
        return cls(data, dml_data.y_col, dml_data.d_cols,
                   cluster_cols,
                   dml_data.x_cols, dml_data.z_cols,
                   dml_data.use_other_treat_as_covariate, dml_data.force_all_x_finite)
@property
def cluster_cols(self):
"""
The cluster variable(s).
"""
return self._cluster_cols
@cluster_cols.setter
def cluster_cols(self, value):
reset_value = hasattr(self, '_cluster_cols')
if isinstance(value, str):
value = [value]
if not isinstance(value, list):
raise TypeError('The cluster variable(s) cluster_cols must be of str or list type. '
f'{str(value)} of type {str(type(value))} was passed.')
if not len(set(value)) == len(value):
raise ValueError('Invalid cluster variable(s) cluster_cols: '
'Contains duplicate values.')
if not set(value).issubset(set(self.all_variables)):
raise ValueError('Invalid cluster variable(s) cluster_cols. '
'At least one cluster variable is no data column.')
self._cluster_cols = value
if reset_value:
self._check_disjoint_sets()
self._set_cluster_vars()
@property
def n_cluster_vars(self):
"""
The number of cluster variables.
"""
return len(self.cluster_cols)
@property
def cluster_vars(self):
"""
Array of cluster variable(s).
"""
return self._cluster_vars.values
@DoubleMLData.x_cols.setter
def x_cols(self, value):
if value is not None:
# this call might become much easier with https://github.com/python/cpython/pull/26194
super(self.__class__, self.__class__).x_cols.__set__(self, value)
else:
if self.z_cols is not None:
y_d_z = set.union({self.y_col}, set(self.d_cols), set(self.z_cols), set(self.cluster_cols))
x_cols = [col for col in self.data.columns if col not in y_d_z]
else:
y_d = set.union({self.y_col}, set(self.d_cols), set(self.cluster_cols))
x_cols = [col for col in self.data.columns if col not in y_d]
# this call might become much easier with https://github.com/python/cpython/pull/26194
super(self.__class__, self.__class__).x_cols.__set__(self, x_cols)
def _check_disjoint_sets(self):
# apply the standard checks from the DoubleMLData class
super(DoubleMLClusterData, self)._check_disjoint_sets()
self._check_disjoint_sets_cluster_cols()
def _check_disjoint_sets_cluster_cols(self):
# special checks for the additional cluster variables
cluster_cols_set = set(self.cluster_cols)
y_col_set = {self.y_col}
x_cols_set = set(self.x_cols)
d_cols_set = set(self.d_cols)
if not y_col_set.isdisjoint(cluster_cols_set):
raise ValueError(f'{str(self.y_col)} cannot be set as outcome variable ``y_col`` and cluster '
'variable in ``cluster_cols``.')
if not d_cols_set.isdisjoint(cluster_cols_set):
raise ValueError('At least one variable/column is set as treatment variable (``d_cols``) and '
'cluster variable in ``cluster_cols``.')
# TODO: Is the following combination allowed, or not?
if not x_cols_set.isdisjoint(cluster_cols_set):
raise ValueError('At least one variable/column is set as covariate (``x_cols``) and cluster '
'variable in ``cluster_cols``.')
if self.z_cols is not None:
z_cols_set = set(self.z_cols)
if not z_cols_set.isdisjoint(cluster_cols_set):
raise ValueError('At least one variable/column is set as instrumental variable (``z_cols``) and '
'cluster variable in ``cluster_cols``.')
def _set_cluster_vars(self):
assert_all_finite(self.data.loc[:, self.cluster_cols])
self._cluster_vars = self.data.loc[:, self.cluster_cols]
|
1656725
|
from tlxzoo.datasets import DataLoaders
from tlxzoo.module.wav2vec2 import Wav2Vec2Transform
from tlxzoo.speech.automatic_speech_recognition import AutomaticSpeechRecognition
import tensorlayerx as tlx
import numpy as np
if __name__ == '__main__':
transform = Wav2Vec2Transform(vocab_file="./demo/speech/automatic_speech_recognition/wav2vec/vocab.json")
model = AutomaticSpeechRecognition(backbone="wav2vec")
model.load_weights("./demo/speech/automatic_speech_recognition/wav2vec/model.npz")
import soundfile as sf
file = "./demo/speech/automatic_speech_recognition/wav2vec/1272-128104-0000.flac"
speech, _ = sf.read(file)
input_values, input_ids = transform(speech, "")
mask = np.ones(input_values.shape[0], dtype=np.int32)
input_values = tlx.convert_to_tensor([input_values])
pixel_mask = tlx.convert_to_tensor([mask])
logits = model(inputs=input_values, pixel_mask=pixel_mask)
predicted_ids = tlx.argmax(logits, axis=-1)
transcription = transform.ids_to_string(predicted_ids[0])
print(transcription)
|
1656737
|
import io
import json
import os.path
from typing import Any, BinaryIO, Callable, Dict, List, Optional
import numpy as np
import pandas as pd
import zstandard
from databento.common.data import BIN_COLUMNS, BIN_RECORD_MAP, DERIV_SCHEMAS
from databento.common.enums import Compression, Encoding, Schema
class Bento:
"""The abstract base class for all Bento I/O classes."""
def __init__(
self,
schema: Optional[Schema],
encoding: Optional[Encoding],
compression: Optional[Compression],
):
# Set compression
self._compression = compression or self._infer_compression()
# Set encoding
self._encoding = encoding or self._infer_encoding()
# Set schema
self._schema = schema or self._infer_schema()
self._struct_fmt = np.dtype(BIN_RECORD_MAP[self._schema])
self._struct_size = self._struct_fmt.itemsize
@property
def schema(self) -> Schema:
"""
Return the output schema.
Returns
-------
Schema
"""
return self._schema
@property
def encoding(self) -> Encoding:
"""
Return the output encoding.
Returns
-------
Encoding
"""
return self._encoding
@property
def compression(self) -> Compression:
"""
Return the output compression.
Returns
-------
Compression
"""
return self._compression
@property
def struct_fmt(self) -> np.dtype:
"""
Return the binary struct format for the schema.
Returns
-------
np.dtype
"""
return self._struct_fmt
@property
def struct_size(self) -> int:
"""
        Return the schema's binary struct size in bytes.
Returns
-------
int
"""
return self._struct_size
@property
def nbytes(self) -> int:
raise NotImplementedError() # pragma: no cover
@property
def raw(self) -> bytes:
"""
Return the raw data from the I/O stream.
Returns
-------
bytes
"""
raise NotImplementedError() # pragma: no cover
def reader(self, decompress: bool = False) -> BinaryIO:
"""
Return an I/O reader for the data.
Parameters
----------
decompress : bool
If data should be decompressed (if compressed).
Returns
-------
BinaryIO
"""
raise NotImplementedError() # pragma: no cover
def writer(self) -> BinaryIO:
"""
Return a raw I/O writer for the data.
Returns
-------
BinaryIO
"""
raise NotImplementedError() # pragma: no cover
def to_file(self, path: str) -> "FileBento":
"""
Write the data to a file at the given path.
Parameters
----------
path : str
The path to write to.
Returns
-------
FileBento
"""
with open(path, mode="wb") as f:
f.write(self.raw)
return FileBento(
path=path,
schema=self._schema,
encoding=self._encoding,
compression=self._compression,
)
def to_list(self) -> List[Any]:
"""
        Return the data as a list of records.
- BIN encoding will return a list of `np.void` mixed dtypes.
- CSV encoding will return a list of `str`.
- JSON encoding will return a list of `Dict[str, Any]`.
Returns
-------
List[Any]
"""
if self._encoding == Encoding.BIN:
return self._prepare_list_bin()
elif self._encoding == Encoding.CSV:
return self._prepare_list_csv()
elif self._encoding == Encoding.JSON:
return self._prepare_list_json()
else: # pragma: no cover (design-time error)
raise ValueError(f"invalid encoding, was {self._encoding.value}")
def to_df(self, pretty_ts: bool = False, pretty_px: bool = False) -> pd.DataFrame:
"""
Return the data as a pd.DataFrame.
Parameters
----------
pretty_ts : bool, default False
If the type of any timestamp columns should be converted from UNIX
nanosecond `int` to `pd.Timestamp` (UTC).
pretty_px : bool, default False
If the type of any price columns should be converted from `int` to
`float` at the correct scale (using the fixed precision scalar 1e-9).
Returns
-------
pd.DataFrame
"""
if self._encoding == Encoding.BIN:
df: pd.DataFrame = self._prepare_df_bin()
elif self._encoding == Encoding.CSV:
df = self._prepare_df_csv()
elif self._encoding == Encoding.JSON:
df = self._prepare_df_json()
else: # pragma: no cover (design-time error)
raise ValueError(f"invalid encoding, was {self._encoding.value}")
if pretty_ts:
df.index = pd.to_datetime(df.index, utc=True)
for column in list(df.columns):
if column.startswith("ts_"):
df[column] = pd.to_datetime(df[column], utc=True)
if pretty_px:
for column in list(df.columns):
if (
column in ("price", "open", "high", "low", "close")
or column.startswith("bid_px") # MBP
or column.startswith("ask_px") # MBP
):
df[column] = df[column] * 1e-9
return df
def replay(self, callback: Callable[[Any], None]) -> None:
"""
Pass all data records sequentially to the given callback.
Parameters
----------
callback : callable
The callback to the data handler.
"""
if self._encoding == Encoding.BIN:
self._replay_bin(callback)
elif self._encoding in (Encoding.CSV, Encoding.JSON):
self._replay_csv_or_json(callback)
else: # pragma: no cover (design-time error)
raise ValueError(f"invalid encoding, was {self._encoding.value}")
def _should_decompress(self, decompress: bool) -> bool:
if not decompress:
return False
return self._compression == Compression.ZSTD
def _infer_compression(self) -> Optional[Compression]:
# Infer by checking for zstd header
reader: BinaryIO = self.reader()
header: bytes = reader.read(4)
        if not header:
            return None
elif header == b"(\xb5/\xfd":
return Compression.ZSTD
else:
return Compression.NONE
def _infer_encoding(self) -> Optional[Encoding]:
# Infer by checking pattern of initial bytes
reader: BinaryIO = self.reader(decompress=True)
initial: bytes = reader.read(3)
        if not initial:
            return None
if initial == b"ts_":
return Encoding.CSV
elif initial == b'{"t':
return Encoding.JSON
else:
return Encoding.BIN
def _infer_schema(self) -> Optional[Schema]:
if hasattr(self, "path"):
path = self.path # type: ignore # (checked above)
else: # pragma: no cover (design-time error)
raise RuntimeError("cannot infer schema without a path to read from")
# Firstly, attempt to infer from file path
extensions: List[str] = path.split(".")
# Iterate schemas in reverse order as MBP-10 needs to be checked prior to MBP-1
        for schema in reversed(list(Schema)):
if schema.value in extensions:
return schema
raise RuntimeError(
f"unable to infer schema from `path` '{path}' "
f"(add the schema value as an extension e.g. 'my_data.mbo', "
f"or specify the schema explicitly)",
)
def _get_index_column(self) -> str:
return (
"ts_recv"
if self._schema
not in (
Schema.OHLCV_1S,
Schema.OHLCV_1M,
Schema.OHLCV_1H,
Schema.OHLCV_1D,
)
else "ts_event"
)
def _prepare_list_bin(self) -> List[np.void]:
data: bytes = self.reader(decompress=True).read()
return np.frombuffer(data, dtype=BIN_RECORD_MAP[self._schema])
def _prepare_list_csv(self) -> List[str]:
data: bytes = self.reader(decompress=True).read()
return data.decode().splitlines(keepends=False)
def _prepare_list_json(self) -> List[Dict]:
lines: List[str] = self._prepare_list_csv()
return list(map(json.loads, lines))
def _prepare_df_bin(self) -> pd.DataFrame:
df = pd.DataFrame(self.to_list())
df.set_index(self._get_index_column(), inplace=True)
# Cleanup dataframe
if self._schema == Schema.MBO:
df.drop("chan_id", axis=1, inplace=True)
df = df.reindex(columns=BIN_COLUMNS[self._schema])
df["flags"] = df["flags"] & 0xFF # Apply bitmask
df["side"] = df["side"].str.decode("utf-8")
df["action"] = df["action"].str.decode("utf-8")
elif self._schema in DERIV_SCHEMAS:
df.drop(["nwords", "type", "depth"], axis=1, inplace=True)
df = df.reindex(columns=BIN_COLUMNS[self._schema])
df["flags"] = df["flags"] & 0xFF # Apply bitmask
df["side"] = df["side"].str.decode("utf-8")
df["action"] = df["action"].str.decode("utf-8")
else:
df.drop(["nwords", "type"], axis=1, inplace=True)
return df
def _prepare_df_csv(self) -> pd.DataFrame:
data: bytes = self.reader(decompress=True).read()
df = pd.read_csv(io.BytesIO(data), index_col=self._get_index_column())
return df
def _prepare_df_json(self) -> pd.DataFrame:
jsons: List[Dict] = self.to_list()
df = pd.DataFrame.from_dict(jsons, orient="columns")
df.set_index(self._get_index_column(), inplace=True)
return df
def _replay_bin(self, callback: Callable[[Any], None]) -> None:
dtype = BIN_RECORD_MAP[self._schema]
reader: BinaryIO = self.reader(decompress=True)
while True:
raw: bytes = reader.read(self.struct_size)
record = np.frombuffer(raw, dtype=dtype)
if record.size == 0:
break
callback(record[0])
def _replay_csv_or_json(self, callback: Callable[[Any], None]) -> None:
if self._compression == Compression.NONE:
reader: BinaryIO = self.reader(decompress=True)
while True:
record: bytes = reader.readline()
if not record:
break
callback(record.decode().rstrip("\n"))
else:
for record in self.to_list():
callback(record)
class MemoryBento(Bento):
"""
Provides a data container backed by in-memory buffer streaming I/O.
Parameters
----------
schema : Schema
The data record schema.
encoding : Encoding, optional
The data encoding. If ``None`` then will be inferred.
compression : Compression, optional
The data compression mode. If ``None`` then will be inferred.
initial_bytes : bytes, optional
The initial data for the memory buffer.
"""
def __init__(
self,
schema: Schema,
encoding: Optional[Encoding] = None,
compression: Optional[Compression] = None,
initial_bytes: Optional[bytes] = None,
):
self._raw = io.BytesIO(initial_bytes=initial_bytes)
super().__init__(
schema=schema,
encoding=encoding,
compression=compression,
)
@property
def nbytes(self) -> int:
"""
Return the amount of space in bytes that the data array would use in a
contiguous representation.
Returns
-------
int
"""
return self._raw.getbuffer().nbytes
@property
def raw(self) -> bytes:
return self._raw.getvalue()
def reader(self, decompress: bool = False) -> BinaryIO:
self._raw.seek(0) # Ensure reader at start of stream
if self._should_decompress(decompress):
return zstandard.ZstdDecompressor().stream_reader(self._raw.getbuffer())
else:
return self._raw
def writer(self) -> BinaryIO:
return self._raw
class FileBento(Bento):
"""
Provides a data container backed by file streaming I/O.
Parameters
----------
path : str
The path to the data file.
schema : Schema, optional
The data record schema. If ``None`` then will be inferred.
encoding : Encoding, optional
The data encoding. If ``None`` then will be inferred.
compression : Compression, optional
The data compression mode. If ``None`` then will be inferred.
"""
def __init__(
self,
path: str,
schema: Optional[Schema] = None,
encoding: Optional[Encoding] = None,
compression: Optional[Compression] = None,
):
self._path = path
super().__init__(
schema=schema,
encoding=encoding,
compression=compression,
)
@property
def path(self) -> str:
"""
Return the path to the backing data file.
Returns
-------
str
"""
return self._path
@property
def nbytes(self) -> int:
"""
Return the amount of space occupied by the data.
Returns
-------
int
"""
return os.path.getsize(self._path)
@property
def raw(self) -> bytes:
return self.reader().read()
def reader(self, decompress: bool = False) -> BinaryIO:
f = open(self._path, mode="rb")
if self._should_decompress(decompress):
return zstandard.ZstdDecompressor().stream_reader(f)
else:
return f
def writer(self) -> BinaryIO:
return open(self._path, mode="wb")
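# A minimal usage sketch (the path below is hypothetical; schema, encoding and
# compression are inferred, with the schema read from the file extension as
# implemented in Bento._infer_schema above):
if __name__ == "__main__":
    data = FileBento(path="my_data.mbo")
    df = data.to_df(pretty_ts=True, pretty_px=True)
    print(df.head())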
|
1656764
|
from .. import common # ensure that libdyndt is loaded
from .type import make_fixed_bytes, make_fixed_string, make_struct, \
make_tuple, make_fixed_dim, make_string, make_var_dim, \
make_fixed_dim_kind, type_for
from .type import *
# Some classes making dimension construction easier
from .dim_helpers import *
from . import dynd_ctypes as ctypes
from . import json
|
1656784
|
import datetime
import unittest
from zoomus import components, util
import responses
def suite():
"""Define all the tests of the module."""
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(GetDailyReportV1TestCase))
suite.addTest(unittest.makeSuite(GetDailyReportV2TestCase))
return suite
class GetDailyReportV1TestCase(unittest.TestCase):
def setUp(self):
self.component = components.report.ReportComponent(
base_uri="http://foo.com",
config={
"api_key": "KEY",
"api_secret": "SECRET",
"version": util.API_VERSION_1,
},
)
@responses.activate
def test_can_get_daily_report(self):
responses.add(
responses.POST,
"http://foo.com/report/getdailyreport?month=01&year=2020"
"&api_key=KEY&api_secret=SECRET",
)
self.component.get_daily_report(month="01", year="2020")
def test_requires_month(self):
with self.assertRaises(ValueError) as context:
self.component.get_daily_report()
        self.assertEqual(str(context.exception), "'month' must be set")
def test_requires_year(self):
with self.assertRaises(ValueError) as context:
self.component.get_daily_report(
month=datetime.datetime.now().strftime("%m")
)
        self.assertEqual(str(context.exception), "'year' must be set")
class GetDailyReportV2TestCase(unittest.TestCase):
def setUp(self):
self.component = components.report.ReportComponentV2(
base_uri="http://foo.com", config={"api_key": "KEY", "api_secret": "SECRET"}
)
@responses.activate
def test_can_get_daily_report(self):
responses.add(responses.GET, "http://foo.com/report/daily?month=01&year=2020")
self.component.get_daily_report(month="01", year="2020")
def test_requires_month(self):
with self.assertRaisesRegex(ValueError, "'month' must be set"):
self.component.get_daily_report()
def test_requires_year(self):
with self.assertRaisesRegex(ValueError, "'year' must be set"):
self.component.get_daily_report(
month=datetime.datetime.now().strftime("%Y")
)
if __name__ == "__main__":
unittest.main()
|
1656792
|
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import pickle
def poly(x, c2, c3):
return c2*x**2 + c3*x**3
df = pd.read_excel('curved_data.xlsx', sheet_name = 'FEA (3D, c3=0.25, F=-10)')
skip_lines = 0 # -1 of what you think it should be
plt.figure()
for i in range(1): #range(int(len(df.columns)/2)):
ii = 2*i
x = np.array(df.values[skip_lines:, ii])
y = np.array(df.values[skip_lines:, ii+1])
# Remove NaN
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    keep = ~(np.isnan(x) | np.isnan(y))  # mask jointly so x and y stay paired
    x, y = x[keep], y[keep]
# Fit
popt, pcov = curve_fit(poly, x, y)
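    # popt holds the fitted coefficients (c2, c3); pcov is their covariance matrix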
print(popt)
plt.plot(x, y, 'b', label = 'Raw %i' % i)
plt.plot(x, poly(x, *popt), 'r--', label = 'Fit %i' % i)
plt.show()
|
1656852
|
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from ..settings import Settings
settings = Settings()
SQLALCHEMY_DATABASE_URL = settings.SQLALCHEMY_DATABASE_URI
if "sqlite:///" in SQLALCHEMY_DATABASE_URL:
engine = create_engine(
SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}
)
else:
engine = create_engine(SQLALCHEMY_DATABASE_URL)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
|
1656858
|
class Solution:
"""
    @param nums: a mountain sequence which increases first and then decreases
    @return: the mountain top
"""
def mountainSequence(self, nums):
# write your code here
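        # Binary search on the mountain shape: keep start and end at least one
        # apart, step toward the rising side, and compare the two survivors.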
        if not nums:
return None
start, end = 0, len(nums) - 1
while start + 1 < end:
mid = start + (end - start) // 2
if nums[mid] > nums[mid - 1] and nums[mid] > nums[mid + 1]:
return nums[mid]
elif nums[mid] > nums[mid - 1] and nums[mid] < nums[mid + 1]:
start = mid
else:
end = mid
if nums[start] > nums[end]:
return nums[start]
return nums[end]
|
1656885
|
def default_builtins():
a = str()
b = bool()
c = int()
assert a == ""
assert b == False
assert c == 0
a = max(1, 2)
print(a)
b = min(1, 2)
print(b)
|
1656911
|
class SearchPage(object):
def __init__(self, page, per_page, pages=0, total=0):
self._page = page
self._per_page = per_page
self._pages = pages
self._total = total
@property
def page(self):
return self._page
@page.setter
def page(self, page):
self._page = page
@property
def per_page(self):
return self._per_page
@per_page.setter
def per_page(self, per_page):
self._per_page = per_page
@property
def pages(self):
return self._pages
@pages.setter
def pages(self, pages):
self._pages = pages
@property
def total(self):
return self._total
@total.setter
def total(self, total):
self._total = total
|
1656925
|
from brainslug.database import AsyncTinyDB
from brainslug.remote import Remote
#: Global application state
AGENT_INFO = AsyncTinyDB()
def get_resources(loop, store, spec):
resources = dict()
for name, query in spec.items():
found = store.search(query)
if found:
# TODO: Implement Many() to return a list
agent_info = found[0]
resources[name] = Remote.from_agent_info(loop, agent_info)
else:
return None
return resources
async def wait_for_resources(loop, store, spec):
resources = get_resources(loop, store, spec)
while resources is None:
await store.wait_for_insert()
resources = get_resources(loop, store, spec)
return resources
|
1656937
|
PRIMITIVE_TYPES = (int, str, float, bool)
SEQUENCE_TYPES = (tuple, set, list)
MAPPING_TYPES = dict
ComponentName = str
TargetName = str
|
1656959
|
import numpy as np
import os
from qtpy import QtGui, QtCore, QtWidgets
import sharppy.sharptab.params as params
import sharppy.sharptab.winds as winds
import sharppy.sharptab.interp as interp
import sharppy.databases.inset_data as inset_data
from sharppy.sharptab.constants import *
## routine written by <NAME> and <NAME>
## <EMAIL> and <EMAIL>
__all__ = ['backgroundENS', 'plotENS']
class backgroundENS(QtWidgets.QFrame):
'''
Draw the background frame and lines for the Theta-E plot frame
'''
def __init__(self):
super(backgroundENS, self).__init__()
self.initUI()
def initUI(self):
## window configuration settings,
        ## such as padding, width, height, and
## min/max plot axes
self.setStyleSheet("QFrame {"
" background-color: rgb(0, 0, 0);"
" border-width: 1px;"
" border-style: solid;"
" border-color: #3399CC;}")
        fsize = np.floor(.055 * self.size().height())
self.fsize = fsize
self.plot_font = QtGui.QFont('Helvetica', fsize + 1)
self.box_font = QtGui.QFont('Helvetica', fsize)
self.plot_metrics = QtGui.QFontMetrics( self.plot_font )
self.box_metrics = QtGui.QFontMetrics(self.box_font)
self.plot_height = self.plot_metrics.xHeight() + 5
self.box_height = self.box_metrics.xHeight() + 5
self.lpad = 40; self.rpad = 40.
self.tpad = fsize * 2 + 5; self.bpad = fsize
self.wid = self.size().width() - self.rpad
self.hgt = self.size().height() - self.bpad
self.tlx = self.rpad; self.tly = self.tpad
self.brx = self.wid; self.bry = self.hgt
self.ymax = 3000.; self.ymin = 0.
self.xmax = 3000.; self.xmin = 0.
self.plotBitMap = QtGui.QPixmap(self.width()-2, self.height()-2)
self.plotBitMap.fill(self.bg_color)
self.plotBackground()
def resizeEvent(self, e):
'''
Handles the event the window is resized
'''
self.initUI()
def plotBackground(self):
'''
Handles painting the frame.
'''
## initialize a painter object and draw the frame
qp = QtGui.QPainter()
qp.begin(self.plotBitMap)
qp.setRenderHint(qp.Antialiasing)
qp.setRenderHint(qp.TextAntialiasing)
self.draw_frame(qp)
qp.end()
def setBlackPen(self, qp):
color = QtGui.QColor('#000000')
color.setAlphaF(.5)
pen = QtGui.QPen(color, 0, QtCore.Qt.SolidLine)
brush = QtGui.QBrush(QtCore.Qt.SolidPattern)
qp.setPen(pen)
qp.setBrush(brush)
return qp
def draw_frame(self, qp):
'''
Draw the background frame.
qp: QtGui.QPainter object
'''
## set a new pen to draw with
EF1_color = "#006600"
EF2_color = "#FFCC33"
EF3_color = "#FF0000"
EF4_color = "#FF00FF"
pen = QtGui.QPen(self.fg_color, 2, QtCore.Qt.SolidLine)
qp.setPen(pen)
qp.setFont(self.plot_font)
rect1 = QtCore.QRectF(1.5, 2, self.brx, self.plot_height)
qp.drawText(rect1, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter,
'Ensemble Indices')
pen = QtGui.QPen(QtCore.Qt.blue, 1, QtCore.Qt.DashLine)
qp.setPen(pen)
ytick_fontsize = self.fsize-1
y_ticks_font = QtGui.QFont('Helvetica', ytick_fontsize)
qp.setFont(y_ticks_font)
efstp_inset_data = inset_data.condSTPData()
#texts = efstp_inset_data['ytexts']
spacing = self.bry / 10.
texts = ['0','1000','2000','3000']
y_ticks = [0,1000,2000,3000]#np.arange(self.tpad, self.bry+spacing, spacing)
for i in range(len(y_ticks)):
#print y_ticks[i]
pen = QtGui.QPen(QtGui.QColor("#0080FF"), 1, QtCore.Qt.DashLine)
qp.setPen(pen)
try:
qp.drawLine(self.tlx, self.y_to_ypix(int(texts[i])), self.brx, self.y_to_ypix(int(texts[i])))
            except Exception:
continue
color = QtGui.QColor('#000000')
pen = QtGui.QPen(color, 1, QtCore.Qt.SolidLine)
qp.setPen(pen)
ypos = spacing*(i+1) - (spacing/4.)
ypos = self.y_to_ypix(int(texts[i])) - ytick_fontsize/2
xpos = self.tlx - 50/2.
rect = QtCore.QRect(xpos, ypos, 50, ytick_fontsize)
pen = QtGui.QPen(self.fg_color, 1, QtCore.Qt.SolidLine)
qp.setPen(pen)
qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, texts[i])
width = self.brx / 12
spacing = self.brx / 12
# Draw the x tick marks
qp.setFont(QtGui.QFont('Helvetica', self.fsize-1))
for i in range(np.asarray(texts).shape[0]):
pen = QtGui.QPen(QtGui.QColor("#0080FF"), 1, QtCore.Qt.DashLine)
qp.setPen(pen)
try:
qp.drawLine(self.x_to_xpix(int(texts[i])), self.tly, self.x_to_xpix(int(texts[i])),self.bry)
            except Exception:
continue
color = QtGui.QColor('#000000')
color.setAlpha(0)
pen = QtGui.QPen(color, 1, QtCore.Qt.SolidLine)
ypos = self.y_to_ypix(0)
xpos = self.x_to_xpix(float(texts[i])) - 50 / 2.
rect = QtCore.QRect(xpos, ypos, 50, ytick_fontsize)
# Change to a white pen to draw the text below the box and whisker plot
pen = QtGui.QPen(self.fg_color, 1, QtCore.Qt.SolidLine)
qp.setPen(pen)
qp.drawText(rect, QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter, texts[i])
def y_to_ypix(self, y):
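        # Linearly map a data-space y in [ymin, ymax] onto the pixel span
        # between the bottom of the frame (bry) and the top padding (tpad).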
scl1 = self.ymax - self.ymin
scl2 = self.ymin + y
#print scl1, scl2, self.bry, self.tpad, self.tly
return self.bry - (scl2 / scl1) * (self.bry - self.tpad)
def x_to_xpix(self, x):
scl1 = self.xmax - self.xmin
scl2 = self.xmax - x
return self.brx - (scl2 / scl1) * (self.brx - self.rpad)
class plotENS(backgroundENS):
'''
Plot the data on the frame. Inherits the background class that
plots the frame.
'''
def __init__(self):
self.bg_color = QtGui.QColor('#000000')
self.fg_color = QtGui.QColor('#ffffff')
self.use_left = False
super(plotENS, self).__init__()
self.prof = None
self.pc_idx = 0
self.prof_collections = []
def addProfileCollection(self, prof_coll):
# Add a profile collection to the scatter plot
self.prof_collections.append(prof_coll)
def rmProfileCollection(self, prof_coll):
# Remove a profile collection from the scatter plot
self.prof_collections.remove(prof_coll)
def setActiveCollection(self, pc_idx, **kwargs):
# Set the active profile collection that is being shown in SPCWindow.
self.pc_idx = pc_idx
prof = self.prof_collections[pc_idx].getHighlightedProf()
self.prof = prof
self.hght = prof.hght
self.clearData()
self.plotData()
self.update()
def setProf(self, prof):
# Set the current profile being viewed in the SPC window.
self.prof = prof
# Some code to show whether or not the left or right mover is being used.
#if self.use_left:
# self.stpc = prof.left_stp_cin
#else:
# self.stpc = prof.right_stp_cin
self.clearData()
self.plotBackground()
self.plotData()
self.update()
def setPreferences(self, update_gui=True, **prefs):
# Set the preferences for the inset.
self.bg_color = QtGui.QColor(prefs['bg_color'])
self.fg_color = QtGui.QColor(prefs['fg_color'])
if update_gui:
if self.use_left:
self.stpc = self.prof.left_stp_cin
else:
self.stpc = self.prof.right_stp_cin
self.clearData()
self.plotBackground()
self.plotData()
self.update()
def setDeviant(self, deviant):
# Set the variable to indicate whether or not the right or left mover is being used.
self.use_left = deviant == 'left'
if self.use_left:
self.stpc = self.prof.left_stp_cin
else:
self.stpc = self.prof.right_stp_cin
self.clearData()
self.plotBackground()
self.plotData()
self.update()
def resizeEvent(self, e):
'''
Handles when the window is resized
'''
super(plotENS, self).resizeEvent(e)
self.plotData()
def paintEvent(self, e):
super(plotENS, self).paintEvent(e)
qp = QtGui.QPainter()
qp.begin(self)
qp.drawPixmap(1, 1, self.plotBitMap)
qp.end()
def clearData(self):
'''
Handles the clearing of the pixmap
in the frame.
'''
self.plotBitMap = QtGui.QPixmap(self.width(), self.height())
self.plotBitMap.fill(self.bg_color)
def plotData(self):
'''
Handles drawing of data on the frame.
'''
if self.prof is None:
return
## this function handles painting the plot
## create a new painter obkect
qp = QtGui.QPainter()
qp.begin(self.plotBitMap)
qp.setRenderHint(qp.Antialiasing)
qp.setRenderHint(qp.TextAntialiasing)
cur_dt = self.prof_collections[self.pc_idx].getCurrentDate()
bc_idx = 0
for idx, prof_coll in enumerate(self.prof_collections):
# Draw all unhighlighed ensemble members
if prof_coll.getCurrentDate() == cur_dt:
proflist = list(prof_coll.getCurrentProfs().values())
if idx == self.pc_idx:
for prof in proflist:
self.draw_ensemble_point(qp, prof)
else:
for prof in proflist:
self.draw_ensemble_point(qp, prof)
#%bc_idx = (bc_idx + 1) % len(self.background_colors)
bc_idx = 0
for idx, prof_coll in enumerate(self.prof_collections):
# Draw all highlighted members that aren't the active one.
if idx != self.pc_idx and (prof_coll.getCurrentDate() == cur_dt or self.all_observed):
prof = prof_coll.getHighlightedProf()
self.draw_ensemble_point(qp, prof)
#bc_idx = (bc_idx + 1) % len(self.background_colors)
def draw_ensemble_point(self, qp, prof):
# Plot the profile index on the scatter plot
if 'pbl_h' not in dir(prof): # Make sure a PBL top has been found in the profile object
ppbl_top = params.pbl_top(prof)
setattr(prof, 'pbl_h', interp.to_agl(prof, interp.hght(prof, ppbl_top)))
if 'sfcpcl' not in dir(prof): # Make sure a surface parcel has been lifted in the profile object
setattr(prof, 'sfcpcl', params.parcelx(prof, flag=1 ))
#x = self.x_to_xpix()
#y = self.y_to_ypix()
color = QtCore.Qt.red
qp.setPen(QtGui.QPen(color))
qp.setBrush(QtGui.QBrush(color))
x = self.x_to_xpix(prof.pbl_h) - 50 / 2.
y = self.y_to_ypix(prof.sfcpcl.bplus) - (self.fsize-1) / 2
qp.drawEllipse(x, y, 3, 3)
return
class DrawTest(QtWidgets.QMainWindow):
def __init__(self, parent=None):
super(DrawTest, self).__init__(parent)
# x = np.asarray([1,2,3,4])
# y = np.asarray([2,2,3,4])
length = 10
x = np.random.rand(length) + np.random.randint(0, 10, length)
y = np.random.rand(length) + np.random.randint(0, 10, length)
x = np.asarray([0, 5, 10, 0], dtype=float)
y = np.asarray([0, 5, 10, 20], dtype=float)
self.frame = plotENS()
self.frame.addProfileCollection(prof_coll)
self.frame.setActiveCollection(0)
self.setCentralWidget(self.frame)
if __name__ == "__main__":
import sys
import sharppy.io.buf_decoder as buf_decoder
path = 'ftp://ftp.meteo.psu.edu/pub/bufkit/SREF/21/sref_oun.buf'
dec = buf_decoder.BufDecoder(path)
prof_coll = dec._parse()
    app = QtWidgets.QApplication(sys.argv)
mySW = DrawTest()
mySW.show()
sys.exit(app.exec_())
|
1656992
|
import io
import os
import stat
import struct
from enum import IntEnum, unique
from pathlib import Path
from typing import Dict, Optional
from structlog import get_logger
from unblob.extractor import is_safe_path
from ...file_utils import Endian, InvalidInputFormat, read_until_past, round_up
from ...models import Extractor, File, HexString, StructHandler, ValidChunk
logger = get_logger()
STRING_ALIGNMENT = 16
MAX_LINUX_PATH_LENGTH = 0xFF
MAX_UINT32 = 0x100000000
WORLD_RW = 0o666
WORLD_RWX = 0o777
ROMFS_HEADER_SIZE = 512
ROMFS_SIGNATURE = b"-rom1fs-"
@unique
class FS_TYPE(IntEnum):
HARD_LINK = 0
DIRECTORY = 1
FILE = 2
SYMLINK = 3
BLOCK_DEV = 4
CHAR_DEV = 5
SOCKET = 6
FIFO = 7
def valid_checksum(content: bytes) -> bool:
"""Apply a RomFS checksum and returns whether it's valid or not."""
total = 0
    # unaligned content will lead to unpacking errors down the line
if len(content) % 4 != 0:
return False
for i in range(0, len(content), 4):
total = (
total + struct.unpack(">L", content[i : i + 4])[0] # noqa: E203
) % MAX_UINT32
return total == 0
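# Note on the property checked above: the stored checksum field is chosen so
# that the big-endian 32-bit words of the checksummed region sum to 0 modulo
# 2**32, hence `total == 0` for an intact image.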
def get_string(file: File) -> bytes:
"""Read a 16 bytes aligned, null terminated string."""
filename = b""
counter = 0
while b"\x00" not in filename and counter < MAX_LINUX_PATH_LENGTH:
filename += file.read(STRING_ALIGNMENT)
counter += STRING_ALIGNMENT
return filename.rstrip(b"\x00")
class FileHeader(object):
addr: int
next_filehdr: int
spec_info: int
type: FS_TYPE
executable: bool
size: int
checksum: int
filename: bytes
depth: int = -1
parent: Optional["FileHeader"] = None
start_offset: int
end_offset: int
file: File
def __init__(self, addr: int, file: File):
self.addr = addr
type_exec_next = struct.unpack(">L", file.read(4))[0]
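        # RomFS packs three fields into this word: bits 0-2 hold the inode
        # type, bit 3 the executable flag, and the remaining high bits the
        # 16-byte-aligned offset of the next file header.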
self.next_filehdr = type_exec_next & ~0b1111
self.type = FS_TYPE(type_exec_next & 0b0111)
self.executable = type_exec_next & 0b1000
self.spec_info = struct.unpack(">I", file.read(4))[0]
self.size = struct.unpack(">I", file.read(4))[0]
self.checksum = struct.unpack(">I", file.read(4))[0]
self.filename = get_string(file)
self.start_offset = file.tell()
self.file = file
def valid_checksum(self) -> bool:
current_position = self.file.tell()
try:
self.file.seek(self.addr, io.SEEK_SET)
filename_len = len(self.filename)
header_size = 16 + round_up(filename_len, 16)
return valid_checksum(self.file.read(header_size))
finally:
self.file.seek(current_position, io.SEEK_SET)
@property
def content(self) -> bytes:
"""Returns the file content. Applicable to files and symlinks."""
try:
self.file.seek(self.start_offset, io.SEEK_SET)
return self.file.read(self.size)
finally:
self.file.seek(-self.size, io.SEEK_CUR)
@property
def mode(self) -> int:
"""Permission mode is assumed to be world readable if executable bit is set, and world executable otherwise.
Handle mode for both block device and character devices too.
"""
mode = WORLD_RWX if self.executable else WORLD_RW
mode |= stat.S_IFBLK if self.type == FS_TYPE.BLOCK_DEV else 0x0
mode |= stat.S_IFCHR if self.type == FS_TYPE.CHAR_DEV else 0x0
return mode
@property
def dev(self) -> int:
"""Returns raw device number if block device or character device, zero otherwise."""
if self.type in [FS_TYPE.BLOCK_DEV, FS_TYPE.CHAR_DEV]:
major = self.spec_info >> 16
minor = self.spec_info & 0xFFFF
return os.makedev(major, minor)
return 0
@property
def path(self) -> Path:
"""Returns the full path of this file, up to the RomFS root."""
current_node = self
current_path = Path()
while current_node is not None:
current_path = Path(current_node.filename.decode("utf-8")).joinpath(
current_path
)
current_node = current_node.parent
return current_path
def __repr__(self):
return (
f"FileHeader<next_filehdr:{self.next_filehdr}, type:{self.type},"
f" executable:{self.executable}, spec_info:{self.spec_info},"
f" size:{self.size}, checksum:{self.checksum}, filename:{self.filename}>"
)
class RomFSHeader(object):
signature: bytes
full_size: int
checksum: int
volume_name: bytes
eof: int
file: File
end_offset: int
inodes: Dict[int, "FileHeader"]
extract_root: Path
def __init__(
self,
file: File,
extract_root: Path,
):
self.file = file
self.file.seek(0, io.SEEK_END)
self.eof = self.file.tell()
self.file.seek(0, io.SEEK_SET)
if self.eof < ROMFS_HEADER_SIZE:
raise Exception("File too small to hold ROMFS")
self.signature = self.file.read(8)
self.full_size = struct.unpack(">I", self.file.read(4))[0]
self.checksum = struct.unpack(">I", self.file.read(4))[0]
self.volume_name = get_string(self.file)
self.header_end_offset = self.file.tell()
self.inodes = {}
self.extract_root = extract_root
def valid_checksum(self) -> bool:
current_position = self.file.tell()
try:
self.file.seek(0, io.SEEK_SET)
return valid_checksum(self.file.read(ROMFS_HEADER_SIZE))
finally:
self.file.seek(current_position, io.SEEK_SET)
def validate(self):
if self.signature != ROMFS_SIGNATURE:
raise Exception("Invalid RomFS signature")
if self.full_size > self.eof:
raise Exception("ROMFS size is greater than file size")
if not self.valid_checksum():
raise Exception("Invalid checksum")
def is_valid_addr(self, addr):
"""Validates that an inode address is valid. inodes addresses must be 16 bytes aligned and placed within the RomFS on file."""
if (self.header_end_offset <= addr <= self.eof) and (addr % 16 == 0):
return True
return False
def is_recursive(self, addr) -> bool:
        return addr in self.inodes
def recursive_walk(self, addr: int, parent: Optional[FileHeader] = None):
        while self.is_valid_addr(addr):
addr = self.walk_dir(addr, parent)
def walk_dir(self, addr: int, parent: Optional[FileHeader] = None):
self.file.seek(addr, io.SEEK_SET)
file_header = FileHeader(addr, self.file)
file_header.parent = parent
if not file_header.valid_checksum():
raise Exception(f"Invalid file CRC at addr {addr:0x}.")
logger.debug("walking dir", addr=addr, file=file_header)
if file_header.filename not in [b".", b".."]:
if file_header.type == FS_TYPE.DIRECTORY and file_header.spec_info != 0x0:
if not self.is_recursive(addr):
self.inodes[addr] = file_header
self.recursive_walk(file_header.spec_info, file_header)
self.inodes[addr] = file_header
return file_header.next_filehdr
def create_symlink(self, extract_root: Path, output_path: Path, inode: FileHeader):
target = inode.content.decode("utf-8").lstrip("/")
if target.startswith(".."):
target_path = extract_root.joinpath(output_path.parent, target).resolve()
else:
target_path = extract_root.joinpath(target).resolve()
if not is_safe_path(extract_root, target_path):
logger.warn(
"Path traversal attempt through symlink.", target_path=target_path
)
return
# we create relative paths to make the output directory portable
output_path.symlink_to(os.path.relpath(target_path, start=output_path.parent))
def create_hardlink(self, extract_root: Path, link_path: Path, inode: FileHeader):
if inode.spec_info in self.inodes:
target = str(self.inodes[inode.spec_info].path).lstrip("/")
target_path = extract_root.joinpath(target).resolve()
# we don't need to check for potential traversal given that, if the inode
# is in self.inodes, it already got verified in create_inode.
try:
os.link(target_path, link_path)
except FileNotFoundError:
logger.warn(
"Hard link target does not exist, discarding.",
target_path=target_path,
link_path=link_path,
)
except PermissionError:
logger.warn(
"Not enough privileges to create hardlink to block/char device, discarding.",
target_path=target_path,
link_path=link_path,
)
else:
logger.warn("Invalid hard link target", inode_key=inode.spec_info)
def create_inode(self, extract_root: Path, inode: FileHeader):
output_path = extract_root.joinpath(inode.path).resolve()
if not is_safe_path(extract_root, inode.path):
logger.warn("Path traversal attempt, discarding.", output_path=output_path)
return
logger.info("dumping inode", inode=inode, output_path=str(output_path))
if inode.type == FS_TYPE.HARD_LINK:
self.create_hardlink(extract_root, output_path, inode)
elif inode.type == FS_TYPE.SYMLINK:
self.create_symlink(extract_root, output_path, inode)
elif inode.type == FS_TYPE.DIRECTORY:
output_path.mkdir(mode=inode.mode, exist_ok=True)
elif inode.type == FS_TYPE.FILE:
with output_path.open("wb") as f:
f.write(inode.content)
elif inode.type in [FS_TYPE.BLOCK_DEV, FS_TYPE.CHAR_DEV]:
            os.mknod(output_path, inode.mode, inode.dev)
elif inode.type == FS_TYPE.FIFO:
os.mkfifo(output_path, inode.mode)
def dump_fs(self):
# first we create files and directories
fd_inodes = {
k: v
for k, v in self.inodes.items()
if v.type in [FS_TYPE.FILE, FS_TYPE.DIRECTORY, FS_TYPE.FIFO, FS_TYPE.SOCKET]
}
for inode in sorted(fd_inodes.values(), key=lambda inode: inode.path):
self.create_inode(self.extract_root, inode)
if os.geteuid() != 0:
logger.warn(
"root privileges are required to create block and char devices, skipping."
)
else:
# then we create devices if we have enough privileges
dev_inodes = {
k: v
for k, v in self.inodes.items()
if v.type in [FS_TYPE.BLOCK_DEV, FS_TYPE.CHAR_DEV]
}
for inode in sorted(dev_inodes.values(), key=lambda inode: inode.path):
self.create_inode(self.extract_root, inode)
# then we create links
links_inodes = {
k: v
for k, v in self.inodes.items()
if v.type in [FS_TYPE.SYMLINK, FS_TYPE.HARD_LINK]
}
for inode in sorted(links_inodes.values(), key=lambda inode: inode.path):
self.create_inode(self.extract_root, inode)
def __str__(self):
return f"signature: {self.signature}\nfull_size: {self.full_size}\nchecksum: {self.checksum}\nvolume_name: {self.volume_name}"
class RomfsExtractor(Extractor):
def extract(self, inpath: Path, outdir: Path):
with File.from_path(inpath) as f:
header = RomFSHeader(f, outdir)
header.validate()
header.recursive_walk(header.header_end_offset, None)
header.dump_fs()
class RomFSFSHandler(StructHandler):
NAME = "romfs"
PATTERNS = [
# '-rom1fs-'
HexString("2D 72 6F 6D 31 66 73 2d")
]
C_DEFINITIONS = r"""
struct romfs_header {
char magic[8];
uint32 full_size;
uint32 checksum;
}
"""
HEADER_STRUCT = "romfs_header"
EXTRACTOR = RomfsExtractor()
def calculate_chunk(self, file: File, start_offset: int) -> Optional[ValidChunk]:
        if not valid_checksum(file.read(ROMFS_HEADER_SIZE)):
            raise InvalidInputFormat("Invalid RomFS checksum.")
        file.seek(-ROMFS_HEADER_SIZE, io.SEEK_CUR)
# Every multi byte value must be in big endian order.
header = self.parse_header(file, Endian.BIG)
# The zero terminated name of the volume, padded to 16 byte boundary.
get_string(file)
# seek filesystem size (number of accessible bytes in this fs)
# from the actual end of the header
file.seek(header.full_size, io.SEEK_CUR)
# Another thing to note is that romfs works on file headers and data
# aligned to 16 byte boundaries, but most hardware devices and the block
# device drivers are unable to cope with smaller than block-sized data.
# To overcome this limitation, the whole size of the file system must be
# padded to an 1024 byte boundary.
read_until_past(file, b"\x00")
return ValidChunk(
start_offset=start_offset,
end_offset=file.tell(),
)
|
1657001
|
import unittest
import unidecode
import random
from core.lists.listutils import ListUtils
PEOPLE = [
{ "name": "Adam", "age": 60 },
{ "name": "Adam", "age": 40 },
{ "name": "Łukasz", "age": 40 },
{ "name": "Lucia", "age": 30 },
{ "name": "Lucia", "age": 48 },
{ "name": "Luca", "age": 35 },
{ "name": "Luca", "age": 20 },
{ "name": "Lucetta", "age": 21 },
{ "name": "Lucio", "age": 40 },
{ "name": "Monica", "age": 10 },
{ "name": "Monica", "age": 14 },
{ "name": "Roberto", "age": 20 },
{ "name": "Roberto", "age": 31 },
{ "name": "Stanisław", "age": 30 },
{ "name": "Bogumił", "age": 29 }
]
"""
The simplest way is to take advantage of sort-stability and do
successive sorts. For example, to sort by a primary key ascending and
a secondary key descending:
L.sort(key=lambda r: r.secondary, reverse=True)
L.sort(key=lambda r: r.primary)
A less general technique is to transform fields in a way that reverses
their comparison order:
L.sort(key=lambda r: (-r.age, r.height))  # sorts descending age and ascending height
"""
def sort_by_v1(a, criteria):
def fn(o):
props = []
for k in criteria:
prop, order = k
v = o.get(prop)
            if isinstance(v, str):
                # normalize
                v = unidecode.unidecode(v)
                if order == "desc":
                    # negate each character code so lexicographic order reverses
                    v = [-ord(c) for c in v]
            elif order == "desc":
                v = -v
props.append(v)
return tuple(props)
a.sort(key=fn)
def sort_by_v2(a, criteria):
"""
Sorts an array of items by multiple properties;
:param a:
:param criteria:
:return:
"""
# take advantage of sort-stability and do successive sorts
    # properties are assumed to be in order of importance, so sorting must run
    # from the least important to the most important property:
criteria.reverse()
for k in criteria:
prop, order = k
def fn(o):
v = o.get(prop)
if isinstance(v, str):
# normalize
v = unidecode.unidecode(v)
return v
a.sort(key=fn, reverse="desc" in order)
return a
class ArrayUtilsTestCase(unittest.TestCase):
"""
Tests for Array utilities.
"""
def test_parse_sort_by(self):
a = ListUtils.parse_sort_by("name")
self.assertEqual(a, [["name", 1]])
a = ListUtils.parse_sort_by("name desc")
self.assertEqual(a, [["name", -1]])
a = ListUtils.parse_sort_by("name, age desc")
self.assertEqual(a, [["name", 1], ["age", -1]])
a = ListUtils.parse_sort_by("name desc, age desc")
self.assertEqual(a, [["name", -1], ["age", -1]])
def test_sort_by_criteria(self):
random.shuffle(PEOPLE)
sort_by_v2(PEOPLE, [["name", "desc"], ["age", "asc"]])
self.assertEqual("Stanisław", PEOPLE[0].get("name"))
self.assertEqual(30, PEOPLE[0].get("age"))
self.assertEqual("Roberto", PEOPLE[1].get("name"))
self.assertEqual(20, PEOPLE[1].get("age"))
self.assertEqual("Roberto", PEOPLE[2].get("name"))
self.assertEqual(31, PEOPLE[2].get("age"))
self.assertEqual("Monica", PEOPLE[3].get("name"))
self.assertEqual(10, PEOPLE[3].get("age"))
self.assertEqual("Monica", PEOPLE[4].get("name"))
self.assertEqual(14, PEOPLE[4].get("age"))
self.assertEqual("Łukasz", PEOPLE[5].get("name"))
self.assertEqual(40, PEOPLE[5].get("age"))
self.assertEqual("Lucio", PEOPLE[6].get("name"))
self.assertEqual(40, PEOPLE[6].get("age"))
self.assertEqual("Lucia", PEOPLE[7].get("name"))
self.assertEqual(30, PEOPLE[7].get("age"))
self.assertEqual("Lucia", PEOPLE[8].get("name"))
self.assertEqual(48, PEOPLE[8].get("age"))
self.assertEqual("Lucetta", PEOPLE[9].get("name"))
self.assertEqual(21, PEOPLE[9].get("age"))
self.assertEqual("Luca", PEOPLE[10].get("name"))
self.assertEqual(20, PEOPLE[10].get("age"))
self.assertEqual("Luca", PEOPLE[11].get("name"))
self.assertEqual(35, PEOPLE[11].get("age"))
self.assertEqual("Bogumił", PEOPLE[12].get("name"))
self.assertEqual(29, PEOPLE[12].get("age"))
self.assertEqual("Adam", PEOPLE[13].get("name"))
self.assertEqual(40, PEOPLE[13].get("age"))
self.assertEqual("Adam", PEOPLE[14].get("name"))
self.assertEqual(60, PEOPLE[14].get("age"))
|
1657010
|
from __future__ import annotations
from typing import (
TypeVar,
Mapping,
Dict,
Sequence,
Iterator,
overload,
Any,
Iterable,
AbstractSet,
)
TYPE = TypeVar("TYPE")
KEY_TYPE = TypeVar("KEY_TYPE")
VALUE_TYPE = TypeVar("VALUE_TYPE")
class FList(Sequence[TYPE]):
def __init__(self, iterable: Iterable[TYPE]):
self._tuple = tuple(_frozen(value) for value in iterable)
self._hash = hash((type(self), self._tuple))
def __repr__(self) -> str:
return f"FList: {self}"
def __str__(self) -> str:
return str(self._tuple)
def __len__(self) -> int:
return len(self._tuple)
def __add__(self, other: Sequence[TYPE]) -> FList[TYPE]:
return FList(self._tuple + tuple(other))
def __radd__(self, other: Sequence[TYPE]) -> FList[TYPE]:
return FList(tuple(other) + self._tuple)
def __eq__(self, other) -> bool:
if not isinstance(other, FList):
return False
return self._tuple == other._tuple
@overload
def __getitem__(self, idx: int) -> TYPE:
...
@overload
def __getitem__(self, s: slice) -> FList[TYPE]:
...
def __getitem__(self, item):
if isinstance(item, slice):
return FList(self._tuple[item])
return self._tuple[item]
def __copy__(self) -> FList[TYPE]:
return self
def __deepcopy__(self, memodict={}) -> FList[TYPE]:
return self
def __hash__(self) -> int:
return self._hash
class FSet(AbstractSet[TYPE]):
def __init__(self, iterable: Iterable[TYPE]):
self._frozenset = frozenset(_frozen(value) for value in iterable)
self._hash = hash((type(self), self._frozenset))
def __repr__(self) -> str:
return f"FSet: {self}"
def __str__(self) -> str:
return str(set(self._frozenset))
def __contains__(self, x: object) -> bool:
return x in self._frozenset
def __len__(self) -> int:
return len(self._frozenset)
def __iter__(self) -> Iterator[TYPE]:
return iter(self._frozenset)
def __copy__(self) -> FSet[TYPE]:
return self
def __deepcopy__(self, memodict={}) -> FSet[TYPE]:
return self
def __hash__(self) -> int:
return self._hash
class FDict(Mapping[KEY_TYPE, VALUE_TYPE]):
def __init__(self, mapping: Mapping[KEY_TYPE, VALUE_TYPE]):
self._dict: Dict[KEY_TYPE, VALUE_TYPE] = {
key: _frozen(value) for key, value in mapping.items()
}
self._hash = hash((type(self), tuple(self._dict.items())))
def __repr__(self) -> str:
return f"FDict: {self}"
def __str__(self) -> str:
return f"{str(self._dict)}"
def __getitem__(self, k: KEY_TYPE) -> VALUE_TYPE:
return self._dict.__getitem__(k)
def __len__(self) -> int:
return self._dict.__len__()
def __iter__(self) -> Iterator[KEY_TYPE]:
return iter(self._dict)
def __eq__(self, other):
if not isinstance(other, FDict):
return False
return self._dict == other._dict
def __copy__(self) -> FDict[KEY_TYPE, VALUE_TYPE]:
return self
def __deepcopy__(self, memodict={}) -> FDict[KEY_TYPE, VALUE_TYPE]:
return self
def __hash__(self):
return self._hash
@overload
def _frozen(obj: Sequence[TYPE]) -> FList[TYPE]:
...
@overload
def _frozen(obj: AbstractSet[TYPE]) -> FSet[TYPE]:
...
@overload
def _frozen(obj: Mapping[KEY_TYPE, VALUE_TYPE]) -> FDict[KEY_TYPE, VALUE_TYPE]:
...
@overload
def _frozen(obj: Any) -> Any:
...
def _frozen(obj):
if isinstance(obj, (FList, FSet, FDict, str)):
return obj
if isinstance(obj, AbstractSet):
return FSet(obj)
if isinstance(obj, Sequence):
return FList(obj)
if isinstance(obj, Mapping):
return FDict(obj)
if _is_immutable(obj):
return obj
raise ValueError(f"Can't freeze object of type: {type(obj)}")
def _is_immutable(obj) -> bool:
try:
_ = hash(obj)
return True
except Exception:
return False
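# A minimal usage sketch of the containers above: freezing a nested structure
# makes it hashable and therefore usable as a dict key or set member.
if __name__ == "__main__":
    fd = FDict({"xs": [1, 2, 3], "tags": {"a", "b"}})
    print(fd["xs"])   # the nested list was frozen into an FList
    print(hash(fd))   # hashable thanks to the recursive _frozen conversion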
|
1657015
|
from logger import log_info
from Classes.Metadata import Metadata
from Classes.PortablePacket import PortablePacket
from timeit import default_timer as timer
from extension import write, write_debug
from colorama import Fore
from zip_utils import *
import os
import sys
home = os.path.expanduser('~')
def install_portable(packet: PortablePacket, metadata: Metadata):
if find_existing_installation(f'{packet.extract_dir}@{packet.latest_version}'):
log_info(
f'Detected an existing installation of {packet.display_name}', metadata.logfile)
write(
f'Found Existing Installation Of {packet.display_name}', 'bright_yellow', metadata)
continue_installation = confirm(
f'Would you like to reinstall {packet.display_name}?')
if not continue_installation:
sys.exit()
if packet.dependencies:
log_info(
f'Installing dependencies for {packet.display_name}', metadata.logfile)
install_dependencies(packet, metadata)
changes_environment = False
shortcuts = packet.shortcuts
extract_dir = packet.extract_dir
write_debug(
f'Downloading {packet.json_name}{packet.file_type} from {packet.url}', metadata)
log_info(
f'Downloading {packet.json_name}{packet.file_type} from {packet.url}', metadata.logfile)
show_progress_bar = not metadata.silent and not metadata.no_progress
if isinstance(packet.url, str):
download(packet, packet.url, packet.file_type, rf'{home}\electric\\' + f'{packet.extract_dir}@{packet.latest_version}',
metadata, show_progress_bar=show_progress_bar, is_zip=True)
if packet.checksum:
verify_checksum(
rf'{home}\electric\\' + f'{packet.extract_dir}@{packet.latest_version}{packet.file_type}', packet.checksum, metadata)
unzip_dir = unzip_file(f'{packet.extract_dir}@{packet.latest_version}' +
packet.file_type, f'{extract_dir}@{packet.latest_version}', packet.file_type, metadata)
elif isinstance(packet.url, list):
for idx, url in enumerate(packet.url):
if idx == 0:
download(packet, url['url'], '.zip', rf'{home}\electric\\' + f'{packet.extract_dir}@{packet.latest_version}',
metadata, show_progress_bar=show_progress_bar, is_zip=True)
unzip_dir = unzip_file(
f'{packet.extract_dir}@{packet.latest_version}' + '.zip', extract_dir, url['file-type'], metadata)
else:
write(
f'Downloading {url["file-name"]}{url["file-type"]}', 'cyan', metadata)
download(packet, url['url'], url['file-type'],
rf'{home}\electric\extras\{packet.extract_dir}@{packet.latest_version}\\{url["file-name"]}', metadata, show_progress_bar=False, is_zip=False)
if packet.pre_install:
log_info('Executing pre install code', metadata.logfile)
if packet.pre_install['type'] == 'powershell':
packet.pre_install['code'] = [l.replace('<dir>', unzip_dir.replace(
'\\\\', '\\')) for l in packet.pre_install['code']]
packet.pre_install['code'] = [l.replace('<extras>', rf'{home}\electric\extras\{packet.extract_dir}@{packet.latest_version}'.replace(
'\\\\', '\\')) for l in packet.pre_install['code']]
if not os.path.isdir(rf'{home}\electric\temp\Scripts'):
try:
os.mkdir(rf'{home}\electric\temp')
                except FileExistsError:
# temp directory already exists
pass
os.mkdir(rf'{home}\electric\temp\Scripts')
with open(rf'{home}\electric\temp\Scripts\temp.ps1', 'w+') as f:
for line in packet.pre_install['code']:
f.write(f'\n{line}')
os.system(
rf'powershell -executionpolicy bypass -File {home}\electric\temp\Scripts\temp.ps1')
write('Successfully Executed Pre-Install Code',
'bright_green', metadata)
if packet.pre_install['type'] in ['bat', 'cmd']:
packet.pre_install['code'] = [l.replace('<dir>', unzip_dir.replace(
'\\\\', '\\')) for l in packet.pre_install['code']]
packet.pre_install['code'] = [l.replace('<extras>', rf'{home}\electric\extras\{packet.extract_dir}@{packet.latest_version}'.replace(
'\\\\', '\\')) for l in packet.pre_install['code']]
if not os.path.isdir(rf'{home}\electric\temp\Scripts'):
try:
os.mkdir(rf'{home}\electric\temp')
                except FileExistsError:
# temp directory already exists
pass
os.mkdir(rf'{home}\electric\temp\Scripts')
with open(rf'{home}\electric\temp\Scripts\temp.bat', 'w+') as f:
for line in packet.pre_install['code']:
f.write(f'\n{line}')
os.system(
rf'{home}\electric\temp\Scripts\temp.bat')
write('Successfully Executed Pre-Install Code',
'bright_green', metadata)
if packet.pre_install['type'] == 'python':
            code = ''.join(l + '\n' for l in packet.pre_install['code'])
exec(code)
if packet.chdir:
dir = packet.chdir.replace('<version>', packet.latest_version)
unzip_dir += f'\\{dir}\\'
if packet.bin and isinstance(packet.bin, list):
for binary in packet.bin:
if isinstance(binary, str):
shim_dir = unzip_dir
shim = ''.join(binary.split('.')[:-1])
shim_ext = binary.split('.')[-1]
if '\\' in binary:
shim = ''.join(binary.split('\\')[-1])
shim = ''.join(shim.split('.')[:-1])
shim_ext = binary.split('.')[-1]
shim_dir += ' '.join(binary.split('\\')
[:-1]).replace(' ', '\\')
shim = shim.replace('<version>', packet.latest_version)
shim_dir = shim_dir.replace('<version>', packet.latest_version)
start = timer()
generate_shim(f'{shim_dir}', shim, shim_ext)
end = timer()
write(
f'{Fore.LIGHTCYAN_EX}Successfully Generated {shim} Shim In {round(end - start, 5)} seconds{Fore.RESET}', 'white', metadata)
else:
val = binary['file-name']
shim_dir = unzip_dir
shim = ''.join(val.split('.')[:-1])
shim_ext = val.split('.')[-1]
if '\\' in val:
shim = ''.join(val.split('\\')[-1])
shim = ''.join(shim.split('.')[:-1])
shim_ext = val.split('.')[-1]
shim_dir += ' '.join(val.split('\\')
[:-1]).replace(' ', '\\')
shim = shim.replace('<version>', packet.latest_version)
shim_dir = shim_dir.replace('<version>', packet.latest_version)
val = val.replace('<version>', packet.latest_version)
start = timer()
generate_shim(f'{shim_dir}', val.split(
'\\')[-1].split('.')[0], shim_ext, overridefilename=binary['shim-name'])
end = timer()
write(
f'{Fore.LIGHTCYAN_EX}Successfully Generated {binary["shim-name"]} Shim In {round(end - start, 5)} seconds{Fore.RESET}', 'white', metadata)
if shortcuts:
for shortcut in shortcuts:
shortcut_name = shortcut['shortcut-name']
file_name = shortcut['file-name']
log_info(
f'Creating shortcuts for {packet.display_name}', metadata.logfile)
create_start_menu_shortcut(unzip_dir, file_name, shortcut_name)
if packet.set_env:
if isinstance(packet.set_env, list):
changes_environment = True
for obj in packet.set_env:
log_info(
f'Setting environment variables for {packet.display_name}', metadata.logfile)
write(
f'Setting Environment Variable {obj["name"]}', 'bright_green', metadata)
set_environment_variable(obj['name'], obj['value'].replace(
'<install-directory>', unzip_dir).replace('\\\\', '\\'))
else:
changes_environment = True
log_info(
f'Setting environment variables for {packet.display_name}', metadata.logfile)
write(
f'Setting Environment Variable {packet.set_env["name"]}', 'bright_green', metadata)
set_environment_variable(packet.set_env['name'], packet.set_env['value'].replace(
'<install-directory>', unzip_dir).replace('\\\\', '\\'))
if changes_environment:
log_info(
'Detected change in PATH variable. Requesting `refreshenv` to be run', metadata.logfile)
write(
f'{Fore.LIGHTGREEN_EX}The PATH environment variable has changed. Run `refreshenv` to refresh your environment variables.{Fore.RESET}', 'white', metadata)
if packet.post_install:
log_info('Executing post installation code', metadata.logfile)
for line in packet.post_install:
exec(line.replace('<install-directory>', unzip_dir).replace('<extras>',
rf'{home}\electric\extras\{packet.extract_dir}@{packet.latest_version}'))
if packet.install_notes:
log_info('Found Installation Notes, Writing To Console.',
metadata.logfile)
display_notes(packet, unzip_dir, metadata)
write(
f'Successfully Installed {packet.display_name}', 'bright_magenta', metadata)
|
1657020
|
import math
def std(subject_marks, avg, n):
ans = 0
for i in subject_marks:
ans += (avg - i) ** 2
return math.sqrt(ans / (n - 1))
def cof(u, v, avg_u, avg_v, std_u, std_v, n):
ans = 0
for i in range(n):
ans += u[i] * v[i]
ans -= (n * avg_u * avg_v)
return ans / ((n - 1) * std_u * std_v)
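# The helpers above implement the sample standard deviation and the Pearson
# correlation coefficient:
#   std(x)    = sqrt( sum_i (avg - x_i)^2 / (n - 1) )
#   cof(u, v) = ( sum_i u_i*v_i - n*avg_u*avg_v ) / ( (n - 1) * std_u * std_v )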
n = int(input())
m = []
p = []
c = []
sum_m = 0
sum_c = 0
sum_p = 0
for _ in range(n):
m_, p_, c_ = map(int, input().split('\t'))
m.append(m_)
p.append(p_)
c.append(c_)
avg_m = sum(m) / n
avg_c = sum(c) / n
avg_p = sum(p) / n
std_m = std(m, avg_m, n)
std_p = std(p, avg_p, n)
std_c = std(c, avg_c, n)
print(round(cof(m, p, avg_m, avg_p, std_m, std_p, n), 2))
print(round(cof(c, p, avg_c, avg_p, std_c, std_p, n), 2))
print(round(cof(m, c, avg_m, avg_c, std_m, std_c, n), 2))
|
1657022
|
from __future__ import print_function
import pyxb
import po1
xml = open('badcontent.xml').read()
try:
order = po1.CreateFromDocument(xml, location_base='badcontent.xml')
except pyxb.ValidationError as e:
print(e.details())
|
1657078
|
from flask import g, abort
from flask_restplus import Resource
from pyinfraboxutils.ibflask import auth_required, OK
from pyinfraboxutils.ibrestplus import api
ns = api.namespace('api/v1/user',
description="Users related operations")
@ns.route('/')
class User(Resource):
@auth_required(['user'], check_project_access=False)
def get(self):
user = g.db.execute_one_dict('''
SELECT github_id, username, avatar_url, name, email, id
FROM "user"
WHERE id = %s
''', [g.token['user']['id']])
if not user:
abort(404)
return user
|
1657080
|
class Solution:
def findMaxLength(self, nums: List[int]) -> int:
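        # Map each prefix sum (counting 1 as +1 and 0 as -1) to the earliest
        # index where it occurs; seeing the same sum again means the slice in
        # between holds an equal number of 0s and 1s.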
dic = {0: -1}
ps = 0
max_length = 0
for idx, number in enumerate(nums):
if number:
ps += 1
else:
ps -= 1
if ps in dic:
max_length = max(max_length, idx - dic[ps])
else:
dic[ps] = idx
return max_length
|
1657087
|
import time
from __init__ import print_msg_box
def counting_sort(array, unit):
n = len(array)
output = [0]*n
count = [0]*10
for i in array:
pos = (i/unit)
count[int(pos%10)] +=1
for i in range(1, 10):
count[i] += count[i-1]
for i in range(n-1, -1, -1):
pos = (array[i]/unit)
output[count[int(pos%10)]-1] = array[i]
count[int(pos%10)]-=1
for i in range(n):
array[i] = output[i]
def radix_sort(array, hint = False):
start = time.time()
maxval = max(array)
unit = 1
while maxval//unit > 0:
counting_sort(array, unit)
unit*=10
end = time.time()
    if hint:
radix_sort_hint()
print("Radix Sort Runtime = {}".format(end - start))
return array
def radix_sort_hint():
message = """
Radix Sort
------------------------------------
Purpose: Sorting a given array
Method: Distributing, Non Comparing
    Time Complexity: counting sort on one digit runs in O(n+b) for base b; radix sort overall runs in O(d*(n+b)) for d digits (here b = 10).
Hint:
    Starting from the least significant digit, we counting-sort the elements on each digit in turn, multiplying the unit by 10 until it exceeds the maximum value in the array.
Radix sort uses counting sort as a sub routine to sort elements.
Pseudocode:
Counting-Sort(A, n, unit)
for j = 1 to d do
int count[10] = {0};
for i = 0 to n do
count[key of ((A[i]/unit)%10) in pass j]++
for k = 1 to 10 do
count[k] = count[k] + count[k-1]
for i = n-1 downto 0 do
            result[count[key of ((A[i]/unit)%10)]] = A[i]
            count[key of ((A[i]/unit)%10)]--
for i= n-1 to 0 do
A[i] = result[i]
end for(j)
end func
Radix-Sort(A)
unit = 1
while unit < max(A)
Counting-Sort(A, unit)
unit*=10
Visualization:
Given Array :
+-----+----+----+----+-----+----+---+----+
| 170 | 45 | 75 | 90 | 802 | 24 | 2 | 66 |
+-----+----+----+----+-----+----+---+----+
First Iteration (unit = 1):
+---+---+---+---+---+---+---+---+
| 0 | 5 | 5 | 0 | 2 | 4 | 2 | 6 |
+---+---+---+---+---+---+---+---+
Count Array
+---+---+---+---+---+---+---+---+---+---+
Count | 2 | 0 | 2 | 0 | 1 | 2 | 1 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+---+---+---+
Index | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
    Cumulative Count Array
+---+---+---+---+---+---+---+---+---+---+
| 2 | 2 | 4 | 4 | 5 | 7 | 8 | 8 | 8 | 8 |
+---+---+---+---+---+---+---+---+---+---+
    From the first iteration array, we take each value as an index into the cumulative count array, and that entry gives the position in the result array.
    Once an element is placed, the corresponding value in the cumulative count array is reduced by one.
Example - First Iteration Array -> Value : 66
Pos = (66/1)
Result[Count[int(Pos%10)]-1] = Result[Count[6]-1] = Result[7] = 66
Result Array
+---+---+---+---+---+---+---+---+---+----+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 66 |
+---+---+---+---+---+---+---+---+---+----+
    Updated Cumulative Array
+---+---+---+---+---+---+---+---+---+---+
| 2 | 2 | 4 | 4 | 5 | 7 | 8 | 8 | 8 | 7 |
+---+---+---+---+---+---+---+---+---+---+
Final Result Array after First Iteration
+-----+----+-----+---+----+----+----+----+
| 170 | 90 | 802 | 2 | 24 | 45 | 75 | 66 |
+-----+----+-----+---+----+----+----+----+
Second Iteration (unit = 10):
+---+---+---+---+---+---+---+---+
| 7 | 9 | 0 | 0 | 2 | 4 | 7 | 6 |
+---+---+---+---+---+---+---+---+
Count Array
+---+---+---+---+---+---+---+---+---+---+
Count | 2 | 0 | 1 | 0 | 1 | 0 | 1 | 2 | 0 | 1 |
+---+---+---+---+---+---+---+---+---+---+
Index | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
    Cumulative Count Array
+---+---+---+---+---+---+---+---+---+---+
| 2 | 2 | 3 | 3 | 4 | 4 | 5 | 7 | 7 | 8 |
+---+---+---+---+---+---+---+---+---+---+
Final Result Array after Second Iteration
+-----+---+----+----+----+-----+----+----+
| 802 | 2 | 24 | 45 | 66 | 170 | 75 | 90 |
+-----+---+----+----+----+-----+----+----+
Third Iteration (unit = 100):
+---+---+---+---+---+---+---+---+
| 8 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
+---+---+---+---+---+---+---+---+
Count Array
+---+---+---+---+---+---+---+---+---+---+
Count | 6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
+---+---+---+---+---+---+---+---+---+---+
Index | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
    Cumulative Count Array
+---+---+---+---+---+---+---+---+---+---+
| 6 | 7 | 7 | 7 | 7 | 7 | 7 | 7 | 8 | 8 |
+---+---+---+---+---+---+---+---+---+---+
Final Result Array after Third (And Final) Iteration
+---+----+----+----+----+----+-----+-----+
| 2 | 24 | 45 | 66 | 75 | 90 | 170 | 802 |
+---+----+----+----+----+----+-----+-----+
Learn More Here - https://en.wikipedia.org/wiki/Radix_sort
"""
print_msg_box(message)
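# A minimal usage sketch (sample input is illustrative):
if __name__ == "__main__":
    sample = [170, 45, 75, 90, 802, 24, 2, 66]
    print(radix_sort(sample))  # -> [2, 24, 45, 66, 75, 90, 170, 802]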
|
1657116
|
from setuptools import setup
setup(
name="mkdocs-autodoc",
version="0.1.2",
url="https://github.com/restaction/mkdocs-autodoc",
license="MIT",
description="Auto generate API document in MKDocs",
author="guyskk",
author_email="<EMAIL>",
keywords=["mkdocs"],
packages=["mkdocs_autodoc"],
py_modules=["magicpatch"],
package_data={
"mkdocs_autodoc": ["autodoc.jinja2"]
},
include_package_data=True,
entry_points={
"mkdocs.themes": [
"autodoc = mkdocs_autodoc",
]
},
zip_safe=False
)
|