| id | content |
|---|---|
1691476
|
from __future__ import division, with_statement
from ...._common import block_to_format, get_label_length
from ._helpers import prune_nones_dict, prune_nones_list, read_record
__all__ = [
"read",
]
def read(filename, label_length=None):
"""
Read TOUGH input file.
Parameters
----------
filename : str
Input file name.
label_length : int or None, optional, default None
Number of characters in cell labels.
Returns
-------
dict
TOUGH input parameters.
"""
    if not (label_length is None or isinstance(label_length, int)):
        raise TypeError("label_length must be an int or None")
    if isinstance(label_length, int) and not 5 <= label_length < 10:
        raise ValueError("label_length must be in the range [5, 10)")
with open(filename, "r") as f:
out = read_buffer(f, label_length)
return out
def read_buffer(f, label_length):
"""Read TOUGH input file."""
parameters = {}
# Title
line = f.readline().strip()
if line[:5] not in {"ROCKS", "ELEME", "INCON", "GENER"}:
title = [line]
while True:
line = f.readline().strip()
if not line.startswith("ROCKS"):
title.append(line)
else:
break
parameters["title"] = title[0] if len(title) == 1 else title
f.seek(0)
# Loop over blocks
    # Some blocks (INCON, INDOM, PARAM) need to rewind to the previous line, but tell() and seek() are disabled while iterating with next()
# See <https://stackoverflow.com/questions/22688505/is-there-a-way-to-go-back-when-reading-a-file-using-seek-and-calls-to-next>
fiter = iter(f.readline, "")
for line in fiter:
if line.startswith("ROCKS"):
parameters.update(_read_rocks(fiter))
elif line.startswith("RPCAP"):
rpcap = _read_rpcap(fiter)
if "default" in parameters.keys():
parameters["default"].update(rpcap)
else:
parameters["default"] = rpcap
elif line.startswith("FLAC"):
flac = _read_flac(fiter, parameters["rocks_order"])
parameters["flac"] = flac["flac"]
for k, v in flac["rocks"].items():
parameters["rocks"][k].update(v)
elif line.startswith("MULTI"):
parameters.update(_read_multi(fiter))
elif line.startswith("SOLVR"):
parameters.update(_read_solvr(fiter))
elif line.startswith("START"):
parameters["start"] = True
elif line.startswith("PARAM"):
param = _read_param(fiter, f)
parameters["options"] = param["options"]
parameters["extra_options"] = param["extra_options"]
if "default" in parameters.keys():
parameters["default"].update(param["default"])
else:
parameters["default"] = param["default"]
elif line.startswith("SELEC"):
parameters.update(_read_selec(fiter))
elif line.startswith("INDOM"):
indom = _read_indom(fiter, f)
for k, v in indom["rocks"].items():
parameters["rocks"][k].update(v)
elif line.startswith("MOMOP"):
parameters.update(_read_momop(fiter))
elif line.startswith("TIMES"):
parameters.update(_read_times(fiter))
elif line.startswith("FOFT"):
parameters.update(_read_oft(fiter, "element_history"))
elif line.startswith("COFT"):
parameters.update(_read_oft(fiter, "connection_history"))
elif line.startswith("GOFT"):
parameters.update(_read_oft(fiter, "generator_history"))
elif line.startswith("GENER"):
parameters.update(_read_gener(fiter, label_length))
elif line.startswith("DIFFU"):
parameters.update(_read_diffu(fiter, f))
elif line.startswith("OUTPU"):
parameters.update(_read_outpu(fiter))
elif line.startswith("ELEME"):
parameters.update(_read_eleme(fiter, label_length))
parameters["coordinates"] = False
elif line.startswith("COORD"):
coord = _read_coord(fiter)
for k, v in zip(parameters["elements_order"], coord):
parameters["elements"][k]["center"] = v
parameters["coordinates"] = True
elif line.startswith("CONNE"):
parameters.update(_read_conne(fiter, label_length))
elif line.startswith("INCON"):
parameters.update(_read_incon(fiter, label_length, f))
elif line.startswith("NOVER"):
parameters["nover"] = True
elif line.startswith("ENDCY"):
break
return parameters
def _read_rocks(f):
"""Read ROCKS block data."""
fmt = block_to_format["ROCKS"]
rocks = {"rocks": {}, "rocks_order": []}
while True:
line = next(f)
if line.strip():
# Record 1
data = read_record(line, fmt[1])
rock = data[0]
rocks["rocks"][rock] = {
"density": data[2],
"porosity": data[3],
"permeability": data[4] if len(set(data[4:7])) == 1 else data[4:7],
"conductivity": data[7],
"specific_heat": data[8],
}
nad = data[1]
if nad is not None:
# Record 2
line = next(f)
data = read_record(line, fmt[2])
rocks["rocks"][rock].update(
{
"compressibility": data[0],
"expansivity": data[1],
"conductivity_dry": data[2],
"tortuosity": data[3],
"klinkenberg_parameter": data[4],
"distribution_coefficient_3": data[5],
"distribution_coefficient_4": data[6],
}
)
if nad and nad > 1:
rocks["rocks"][rock].update(_read_rpcap(f))
rocks["rocks_order"].append(rock)
else:
break
rocks["rocks"] = {k: prune_nones_dict(v) for k, v in rocks["rocks"].items()}
return rocks
def _read_rpcap(f):
"""Read RPCAP block data."""
fmt = block_to_format["RPCAP"]
rpcap = {}
for key in ["relative_permeability", "capillarity"]:
line = next(f)
data = read_record(line, fmt)
if data[0] is not None:
rpcap[key] = {
"id": data[0],
"parameters": prune_nones_list(data[2:]),
}
return rpcap
def _read_flac(f, rocks_order):
"""Read FLAC block data."""
fmt = block_to_format["FLAC"]
flac = {"rocks": {}, "flac": {}}
# Record 1
line = next(f)
data = read_record(line, fmt[1])
flac["flac"]["creep"] = data[0]
flac["flac"]["porosity_model"] = data[1]
flac["flac"]["version"] = data[2]
# Additional records
for rock in rocks_order:
flac["rocks"][rock] = {}
line = next(f)
data = read_record(line, fmt[2])
flac["rocks"][rock]["permeability_model"] = {
"id": data[0],
"parameters": prune_nones_list(data[1:]),
}
line = next(f)
data = read_record(line, fmt[3])
flac["rocks"][rock]["equivalent_pore_pressure"] = {
"id": data[0],
"parameters": prune_nones_list(data[2:]),
}
flac["flac"] = prune_nones_dict(flac["flac"])
return flac
def _read_multi(f):
"""Read MULTI block data."""
fmt = block_to_format["MULTI"]
multi = {}
line = next(f)
data = read_record(line, fmt)
multi["n_component"] = data[0]
multi["isothermal"] = data[1] == data[0]
multi["n_phase"] = data[2]
return multi
def _read_solvr(f):
"""Read SOLVR block data."""
fmt = block_to_format["SOLVR"]
solvr = {}
line = next(f)
data = read_record(line, fmt)
solvr["solver"] = {
"method": data[0],
"z_precond": data[2],
"o_precond": data[4],
"rel_iter_max": data[5],
"eps": data[6],
}
return solvr
def _read_param(f, fh):
"""Read PARAM block data."""
fmt = block_to_format["PARAM"]
param = {}
# Record 1
line = next(f)
data = read_record(line, fmt[1])
param["options"] = {
"n_iteration": data[0],
"verbosity": data[1],
"n_cycle": data[2],
"n_second": data[3],
"n_cycle_print": data[4],
"temperature_dependence_gas": data[7],
"effective_strength_vapor": data[8],
}
param["extra_options"] = {
i + 1: int(x) for i, x in enumerate(data[5]) if x.isdigit()
}
# Record 2
line = next(f)
data = read_record(line, fmt[2])
param["options"].update(
{
"t_ini": data[0],
"t_max": data[1],
"t_steps": data[2],
"t_step_max": data[3],
"gravity": data[5],
"t_reduce_factor": data[6],
"mesh_scale_factor": data[7],
}
)
t_steps = int(data[2])
if t_steps >= 0.0:
param["options"]["t_steps"] = t_steps
else:
param["options"]["t_steps"] = []
for _ in range(-t_steps):
line = next(f)
data = read_record(line, fmt[3])
param["options"]["t_steps"] += prune_nones_list(data)
if len(param["options"]["t_steps"]) == 1:
param["options"]["t_steps"] = param["options"]["t_steps"][0]
# Record 3
line = next(f)
data = read_record(line, fmt[4])
param["options"].update(
{
"eps1": data[0],
"eps2": data[1],
"w_upstream": data[3],
"w_newton": data[4],
"derivative_factor": data[5],
}
)
# Record 4 and record 5 (EOS7R)
line = next(f)
data = read_record(line, fmt[5])
i = fh.tell()
try:
line = next(f)
data += read_record(line, fmt[5])
except ValueError:
fh.seek(i)
if any(x is not None for x in data):
data = prune_nones_list(data)
param["default"] = {"initial_condition": data}
else:
param["default"] = {}
# Remove Nones
param["options"] = prune_nones_dict(param["options"])
param["extra_options"] = prune_nones_dict(param["extra_options"])
return param
def _read_selec(f):
"""Read SELEC block data."""
fmt = block_to_format["SELEC"]
selec = {"selections": {}}
line = next(f)
data = read_record(line, fmt[1])
selec["selections"]["integers"] = {k + 1: v for k, v in enumerate(data)}
if selec["selections"]["integers"][1]:
selec["selections"]["floats"] = []
for _ in range(selec["selections"]["integers"][1]):
line = next(f)
data = read_record(line, fmt[2])
selec["selections"]["floats"].append(prune_nones_list(data))
selec["selections"]["integers"] = prune_nones_dict(selec["selections"]["integers"])
if selec["selections"]["integers"][1] == 1:
selec["selections"]["floats"] = selec["selections"]["floats"][0]
return selec
def _read_indom(f, fh):
"""Read INDOM block data."""
fmt = block_to_format["INDOM"]
indom = {"rocks": {}}
line = next(f)
two_lines = True
while True:
if line.strip():
# Record 1
rock = read_record(line, fmt[5])[0]
# Record 2
line = next(f)
data = read_record(line, fmt[0])
# Record 3 (EOS7R)
if two_lines:
i = fh.tell()
line = next(f)
if line.strip():
try:
data += read_record(line, fmt[0])
except ValueError:
two_lines = False
fh.seek(i)
else:
fh.seek(i)
data = prune_nones_list(data)
indom["rocks"][rock] = {"initial_condition": data}
else:
break
line = next(f)
return indom
def _read_momop(f):
"""Read MOMOP block data."""
fmt = block_to_format["MOMOP"]
line = next(f)
data = read_record(line, fmt)
momop = {
"more_options": {i + 1: int(x) for i, x in enumerate(data[0]) if x.isdigit()}
}
return momop
def _read_times(f):
"""Read TIMES block data."""
fmt = block_to_format["TIMES"]
times = {"times": []}
# Record 1
line = next(f)
data = read_record(line, fmt[1])
n_times = data[0]
# Record 2
while len(times["times"]) < n_times:
line = next(f)
data = read_record(line, fmt[2])
times["times"] += prune_nones_list(data)
if n_times == 1:
times["times"] = times["times"][0]
return times
def _read_oft(f, oft):
"""Read FOFT, COFT and GOFT blocks data."""
history = {oft: []}
while True:
line = next(f).rstrip()
if line:
history[oft].append(line)
else:
break
return history
def _read_gener(f, label_length):
"""Read GENER block data."""
fmt = block_to_format["GENER"]
gener = {"generators": {}}
line = next(f)
if not label_length:
label_length = get_label_length(line[:9])
while True:
if line.strip():
data = read_record(line, fmt[label_length])
label = data[0]
tmp = {
"name": [data[1]],
"nseq": [data[2]],
"nadd": [data[3]],
"nads": [data[4]],
"type": [data[7]],
"layer_thickness": [data[11]],
}
ltab = data[5]
if ltab and ltab > 1:
itab = data[8]
keys = ["times", "rates"]
keys += ["specific_enthalpy"] if itab else []
for key in keys:
table = []
while len(table) < ltab:
line = next(f)
data = read_record(line, fmt[0])
table += prune_nones_list(data)
tmp[key] = [table]
else:
tmp.update(
{
"times": [None],
"rates": [data[9]],
"specific_enthalpy": [data[10]],
}
)
if label in gener["generators"].keys():
for k, v in gener["generators"][label].items():
v += tmp[k]
else:
gener["generators"][label] = tmp
else:
break
line = next(f)
# Tidy up
for generator in gener["generators"].values():
for k, v in generator.items():
if len(v) == 1:
generator[k] = v[0]
else:
if all(x is None for x in v):
generator[k] = None
return {
k: {kk: prune_nones_dict(vv) for kk, vv in v.items()} for k, v in gener.items()
}
def _read_diffu(f, fh):
"""Read DIFFU block data."""
fmt = block_to_format["DIFFU"]
diffu = {"diffusion": []}
while True:
i = fh.tell()
line = next(f)
if line.split():
try:
data = read_record(line, fmt)
diffu["diffusion"].append(prune_nones_list(data))
except ValueError:
fh.seek(i)
break
else:
break
return diffu
def _read_outpu(f):
"""Read OUTPU block data."""
fmt = block_to_format["OUTPU"]
outpu = {"output": {}}
# Format
line = next(f).strip()
if line and not line.isdigit():
outpu["output"]["format"] = line if line else None
line = next(f).strip()
# Variables
names = {}
if line.isdigit():
num_vars = int(line)
outpu["output"]["variables"] = {}
for _ in range(num_vars):
line = next(f)
data = read_record(line, fmt[3])
name = data[0].lower()
tmp = prune_nones_list(data[1:])
if name not in names.keys():
names[name] = 1
outpu["output"]["variables"][name] = tmp
else:
if names[name] == 1:
outpu["output"]["variables"][name] = [
outpu["output"]["variables"][name],
tmp,
]
else:
outpu["output"]["variables"][name].append(tmp)
names[name] += 1
for k, v in outpu["output"]["variables"].items():
outpu["output"]["variables"][k] = (
None if len(v) == 0 else v[0] if len(v) == 1 else v
)
return outpu
def _read_eleme(f, label_length):
"""Read ELEME block data."""
fmt = block_to_format["ELEME"]
eleme = {"elements": {}, "elements_order": []}
line = next(f)
if not label_length:
label_length = get_label_length(line[:9])
while True:
if line.strip():
data = read_record(line, fmt[label_length])
label = data[0]
rock = data[3].strip()
eleme["elements"][label] = {
"nseq": data[1],
"nadd": data[2],
"material": int(rock) if all(r.isdigit() for r in rock) else rock,
"volume": data[4],
"heat_exchange_area": data[5],
"permeability_modifier": data[6],
"center": data[7:10],
}
eleme["elements_order"].append(label)
else:
break
line = next(f)
eleme["elements"] = {k: prune_nones_dict(v) for k, v in eleme["elements"].items()}
return eleme
def _read_coord(f):
"""Read COORD block data."""
fmt = block_to_format["COORD"]
coord = []
line = next(f)
while True:
if line.strip():
data = read_record(line, fmt)
coord.append(data)
else:
break
line = next(f)
return coord
def _read_conne(f, label_length):
"""Read CONNE block data."""
fmt = block_to_format["CONNE"]
conne = {"connections": {}, "connections_order": []}
line = next(f)
if not label_length:
label_length = get_label_length(line[:9])
while True:
if line.strip() and not line.startswith("+++"):
data = read_record(line, fmt[label_length])
label = data[0]
conne["connections"][label] = {
"nseq": data[1],
"nadd": data[2:4],
"permeability_direction": data[4],
"nodal_distances": data[5:7],
"interface_area": data[7],
"gravity_cosine_angle": data[8],
"radiant_emittance_factor": data[9],
}
conne["connections_order"].append(label)
else:
break
line = next(f)
conne["connections"] = {
k: prune_nones_dict(v) for k, v in conne["connections"].items()
}
return conne
def _read_incon(f, label_length, fh):
"""Read INCON block data."""
fmt = block_to_format["INCON"]
incon = {"initial_conditions": {}, "initial_conditions_order": []}
line = next(f)
if not label_length:
label_length = get_label_length(line[:9])
two_lines = True
while True:
if line.strip() and not line.startswith("+++"):
# Record 1
data = read_record(line, fmt[label_length])
label = data[0]
userx = prune_nones_list(data[4:9])
incon["initial_conditions"][label] = {
"porosity": data[3],
"userx": userx if userx else None,
}
# Record 2
line = next(f)
data = read_record(line, fmt[0])
# Record 3 (EOS7R)
if two_lines:
i = fh.tell()
line = next(f)
if line.strip() and not line.startswith("+++"):
try:
data += read_record(line, fmt[0])
except ValueError:
two_lines = False
fh.seek(i)
else:
fh.seek(i)
incon["initial_conditions"][label]["values"] = prune_nones_list(data)
incon["initial_conditions_order"].append(label)
else:
break
line = next(f)
incon["initial_conditions"] = {
k: prune_nones_dict(v) for k, v in incon["initial_conditions"].items()
}
return incon
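# Illustrative usage sketch, not part of the original module (this file is
# normally imported as part of its package, so the example is shown as
# comments; "INFILE" is a placeholder deck name):
#
#     parameters = read("INFILE")
#     print(sorted(parameters.keys()))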
|
1691478
|
from typing import List


class Solution:
def beautifulArray(self, N: int) -> List[int]:
result = [1]
while len(result) < N:
result = [i * 2 - 1 for i in result] + [i * 2 for i in result]
return [i for i in result if i <= N]
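if __name__ == "__main__":
    # Illustrative check, not part of the original solution: the construction
    # doubles a smaller beautiful array into an odd half and an even half, so
    # for N = 5 the chain [1] -> [1, 2] -> [1, 3, 2, 4] -> [1, 5, 3, 7, 2, 6, 4, 8]
    # filtered to values <= 5 gives [1, 5, 3, 2, 4].
    print(Solution().beautifulArray(5))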
|
1691493
|
import copy
import random
from configuration import config
from src.genotype.cdn.nodes.blueprint_node import BlueprintNode
from src.genotype.cdn.nodes.module_node import ModuleNode
from src.genotype.neat.connection import Connection
from src.genotype.neat.genome import Genome
from src.genotype.neat.mutation_record import MutationRecords
from src.genotype.neat.node import Node, NodeType
from src.genotype.neat.operators.mutators.mutation_report import MutationReport
from src.genotype.neat.operators.mutators.mutator import Mutator
class GenomeMutator(Mutator):
"""
    Performs the base set of mutations on the general Genome object
"""
def mutate(self, genome: Genome, mutation_record: MutationRecords):
        raise NotImplementedError("Implement the mutate method in all subclasses")
def mutate_base_genome(self, genome: Genome, mutation_record: MutationRecords, add_node_chance: float,
add_connection_chance: float, allow_disabling_connections: bool = True):
"""performs base neat genome mutations, as well as node and genome property mutations"""
mutation_report = MutationReport()
if random.random() < add_node_chance:
if self.add_node_mutation(genome, mutation_record):
mutation_report.nodes_added += 1
if random.random() < add_connection_chance:
if self.add_connection_mutation(genome, mutation_record):
mutation_report.connections_created += 1
if allow_disabling_connections:
"""randomly deactivates and reactivates connections"""
for connection in genome.connections.values():
orig_conn = copy.deepcopy(connection)
result = connection.mutate() # this is the call which enables/disables connections
if result.check_mutated():
"""the connection was mutated"""
# If mutation made the genome invalid then undo it
if not genome.validate():
"""
disabling the connection lead to a disconnected graph
or enabling the connection lead to a cycle
- undoing this mutation
"""
genome.connections[orig_conn.id] = orig_conn
else:
"""mutation is valid"""
if connection.enabled():
mutation_report.connections_enabled += 1
else:
mutation_report.connections_disabled += 1
for node in genome.nodes.values():
"""mutates node properties"""
mutation_report += node.mutate()
for mutagen in genome.get_all_mutagens():
"""mutates the genome level properties"""
report = mutagen.mutate()
if report is None:
raise Exception("none report returned from mutating " + mutagen.name + ", " + repr(mutagen))
mutation_report += report
return mutation_report
def add_connection_mutation(self, genome: Genome, mutation_record: MutationRecords):
"""
tries to randomly add a new connection to the genome
a new random connection can be rejected if it is already in the genome,
or it tries to connect a node to itself
or it tries to connect to an input node
        or it tries to connect from an output node
or it creates a cycle
"""
tries = 10 # TODO config option
added_connection = False
while not added_connection and tries > 0:
added_connection = self.test_and_add_connection(genome, mutation_record,
random.choice(list(genome.nodes.values())),
random.choice(list(genome.nodes.values())))
tries -= 1
return added_connection
def test_and_add_connection(self, genome: Genome, mutation_record: MutationRecords, from_node: Node, to_node: Node):
"""
Adds a connection between to nodes if possible
creates a copy genome, adds the node, checks for cycles in the copy
if no cycles, the connection is added to the original genome
:returns whether or not the candidate connection was added to the original genome
"""
copy_genome = copy.deepcopy(genome)
# Validation
if from_node.id == to_node.id:
return False
candidate_connection = (from_node.id, to_node.id)
if candidate_connection in genome.connected_nodes:
# this connection is already in the genome
return False
# else:
# print("candidate conn", candidate_connection, " not in connections:", genome.connected_nodes,
# "nodes:", genome.nodes.keys(), "connections:", genome.connections)
if from_node.node_type == NodeType.OUTPUT:
return False
if to_node.node_type == NodeType.INPUT:
return False
# Adding to global mutation dictionary
if mutation_record.connection_mut_exists(candidate_connection):
mutation_id = mutation_record.connection_mutations[candidate_connection]
else:
mutation_id = mutation_record.add_conn_mutation(candidate_connection)
# Adding new mutation
mutated_conn = Connection(mutation_id, from_node.id, to_node.id)
copy_genome.add_connection(mutated_conn)
if copy_genome.has_cycle():
# the candidate connection creates a cycle
return False
# by now the candidate connection is valid
genome.add_connection(mutated_conn)
return True
def add_node_mutation(self, genome: Genome, mutation_record: MutationRecords):
"""Adds a node on a connection and updates the relevant genome"""
tries = 10
added_node = False
while not added_node and tries > 0:
"""
tries to randomly add a new node onto a connection in the genome
            a new random node can be rejected if it is already in the genome
"""
added_node = self.test_and_add_node_on_connection(genome, mutation_record,
random.choice(list(genome.connections.values())))
tries -= 1
return added_node
def test_and_add_node_on_connection(self, genome: Genome, mutation_record: MutationRecords, connection: Connection):
n_mutations_on_conn = genome.n_mutations_on_connection(mutation_record, connection.id)
node_mutation_id = (connection.id, n_mutations_on_conn)
if mutation_record.node_mut_exists(node_mutation_id):
# this node mutation has occurred before
# ie: this connection has had a node placed already
# the id of the original node which was placed on this connection
mutated_node_id = mutation_record.node_mutations[node_mutation_id]
if mutated_node_id in genome.nodes:
raise Exception("node id already in genome, but searched for unique node id")
# the id of the connection which bridges to the new node
into_node_connection_id = mutation_record.connection_mutations[(connection.from_node_id, mutated_node_id)]
# the id of the connection which bridges from the new node
out_of_node_connection_id = mutation_record.connection_mutations[(mutated_node_id, connection.to_node_id)]
else:
            # if this mutation hasn't occurred before, it should not be in any genome
mutated_node_id = mutation_record.add_node_mutation(node_mutation_id)
if mutated_node_id in genome.nodes: # this connection has already created a new node
raise Exception("tried to mutate a node onto connection " + str(connection.id) +
" mutation (node id) given value " + str(mutated_node_id) +
" but this value is already present in the genome: " + repr(genome.nodes.keys()) +
"\nmutation record: " + repr(mutation_record))
into_node_connection_id = mutation_record.add_conn_mutation((connection.from_node_id, mutated_node_id))
out_of_node_connection_id = mutation_record.add_conn_mutation((mutated_node_id, connection.to_node_id))
TypeNode = type(list(genome.nodes.values())[0]) # node could be a blueprint, module or da node
# multiple node objects share the same id. indicating they are topologically the same
if TypeNode == BlueprintNode and config.blueprint_node_type_switch_chance > random.random():
# node switch type applies to new nodes being added to the blueprint
TypeNode = ModuleNode
mutated_node = TypeNode(mutated_node_id, NodeType.HIDDEN)
genome.add_node(mutated_node)
mutated_from_conn = Connection(into_node_connection_id, connection.from_node_id, mutated_node_id)
mutated_to_conn = Connection(out_of_node_connection_id, mutated_node_id, connection.to_node_id)
genome.add_connection(mutated_from_conn)
genome.add_connection(mutated_to_conn)
connection.enabled.set_value(False)
return True
|
1691537
|
import importlib
from . import config
import atexit
import os
HEAP_DEFAULT = 0
HEAP_UPLOAD = 1
HEAP_READBACK = 2
SHADER_BINARY_TYPE_DXIL = 0
SHADER_BINARY_TYPE_SPIRV = 1
SHADER_BINARY_TYPE_DXBC = 2
SHADER_BINARY_TYPE_MSL = 3
SHADER_BINARY_TYPE_GLSL = 4
class UnknownBackend(Exception):
pass
class BufferException(Exception):
pass
class Texture1DException(Exception):
pass
class Texture2DException(Exception):
pass
class Texture3DException(Exception):
pass
_backend = None
_discovered_devices = None
_current_device = None
def get_backend():
def debug_callback():
messages = get_current_device().get_debug_messages()
for message in messages:
print(message)
global _backend
if _backend is None:
_backend = importlib.import_module(
'compushady.backends.{0}'.format(config.wanted_backend))
if config.debug:
_backend.enable_debug()
atexit.register(debug_callback)
return _backend
def get_discovered_devices():
global _discovered_devices
if _discovered_devices is None:
_discovered_devices = get_backend().get_discovered_devices()
return _discovered_devices
def set_current_device(index):
global _current_device
_current_device = get_discovered_devices()[index]
def get_current_device():
global _current_device
if _current_device is None:
_current_device = get_best_device()
return _current_device
def get_best_device():
if 'COMPUSHADY_DEVICE' in os.environ:
return get_discovered_devices()[int(os.environ['COMPUSHADY_DEVICE'])]
return sorted(get_discovered_devices(), key=lambda x: (
x.is_hardware, x.is_discrete, x.dedicated_video_memory))[-1]
class Resource:
def copy_to(self, destination):
self.handle.copy_to(destination.handle)
@property
def size(self):
return self.handle.size
class Buffer(Resource):
def __init__(self, size, heap=HEAP_DEFAULT, stride=0, format=0, device=None):
self.device = device if device else get_current_device()
self.handle = self.device.create_buffer(heap, size, stride, format)
def upload(self, data, offset=0):
self.handle.upload(data, offset)
def upload2d(self, data, pitch, width, height, bytes_per_pixel):
return self.handle.upload2d(data, pitch, width, height, bytes_per_pixel)
def upload_chunked(self, data, stride, filler):
return self.handle.upload_chunked(data, stride, filler)
def readback(self, buffer_or_size=0, offset=0):
if isinstance(buffer_or_size, int):
return self.handle.readback(buffer_or_size, offset)
self.handle.readback_to_buffer(buffer_or_size, offset)
def readback2d(self, pitch, width, height, bytes_per_pixel):
return self.handle.readback2d(pitch, width, height, bytes_per_pixel)
class Texture1D(Resource):
def __init__(self, width, format, device=None):
self.device = device if device else get_current_device()
self.handle = self.device.create_texture1d(width, format)
@classmethod
def from_native(cls, ptr, device=None):
instance = cls.__new__(cls)
instance.device = device if device else get_current_device()
instance.handle = instance.device.create_texture1d_from_native(ptr)
return instance
@property
def width(self):
return self.handle.width
@property
def row_pitch(self):
return self.handle.row_pitch
class Texture2D(Resource):
def __init__(self, width, height, format, device=None):
self.device = device if device else get_current_device()
self.handle = self.device.create_texture2d(width, height, format)
@classmethod
def from_native(cls, ptr, width, height, format, device=None):
instance = cls.__new__(cls)
instance.device = device if device else get_current_device()
instance.handle = instance.device.create_texture2d_from_native(
ptr, width, height, format)
return instance
@property
def width(self):
return self.handle.width
@property
def height(self):
return self.handle.height
@property
def row_pitch(self):
return self.handle.row_pitch
class Texture3D(Resource):
def __init__(self, width, height, depth, format, device=None):
self.device = device if device else get_current_device()
self.handle = self.device.create_texture3d(
width, height, depth, format)
@classmethod
def from_native(cls, ptr, device=None):
instance = cls.__new__(cls)
instance.device = device if device else get_current_device()
instance.handle = instance.device.create_texture3d_from_native(ptr)
return instance
@property
def width(self):
return self.handle.width
@property
def height(self):
return self.handle.height
@property
def depth(self):
return self.handle.depth
@property
def row_pitch(self):
return self.handle.row_pitch
class Swapchain:
def __init__(self, window_handle, format, num_buffers=3, device=None):
self.device = device if device else get_current_device()
self.handle = self.device.create_swapchain(
window_handle, format, num_buffers)
@property
def width(self):
return self.handle.width
@property
def height(self):
return self.handle.height
def present(self, resource, x=0, y=0):
self.handle.present(resource.handle, x, y)
class Compute:
def __init__(self, shader, cbv=[], srv=[], uav=[], device=None):
self.device = device if device else get_current_device()
self.handle = self.device.create_compute(
shader,
cbv=[resource.handle for resource in cbv],
srv=[resource.handle for resource in srv],
uav=[resource.handle for resource in uav])
def dispatch(self, x, y, z):
self.handle.dispatch(x, y, z)
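# Minimal round-trip sketch, illustrative only (this module is imported as a
# package, so the example is shown as comments): stage bytes in an upload
# buffer, copy them through a GPU-local buffer, then read them back from a
# readback buffer. It assumes a working backend/device is available.
#
#     staging = Buffer(16, HEAP_UPLOAD)
#     storage = Buffer(16, HEAP_DEFAULT)
#     readback = Buffer(16, HEAP_READBACK)
#     staging.upload(b"hello compushady")
#     staging.copy_to(storage)
#     storage.copy_to(readback)
#     print(readback.readback(16))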
|
1691546
|
import ast
import pathlib
import yaml
from typing import Any, Dict, Optional, Set
from viewflow.adapters.python.python_adapter import DependantDataFrame
class NoDocstringError(Exception):
pass
class UnparseableDocstringError(Exception):
pass
def get_dependencies_from_function_def(node: ast.FunctionDef) -> Set[str]:
return set(
arg.arg
for arg in node.args.args
if arg.annotation is not None
and arg.annotation.id == DependantDataFrame.__name__
)
def parse_yaml_in_docstring(docstring: str):
raw_docstring_lines = docstring.split("\n")
try:
yaml_start = raw_docstring_lines.index("---")
yaml_end = raw_docstring_lines.index("---", yaml_start + 1)
yaml_header = "\n".join(raw_docstring_lines[(yaml_start + 1) : yaml_end])
return yaml.safe_load(yaml_header)
except (ValueError, yaml.scanner.ScannerError):
raise UnparseableDocstringError()
def create_view_from_function_def(function_file: pathlib.Path, node: ast.FunctionDef):
try:
docstring = ast.get_docstring(node)
except TypeError:
raise NoDocstringError()
if docstring is None:
raise NoDocstringError()
parsed_docstring = parse_yaml_in_docstring(docstring)
implicit_dependencies = get_dependencies_from_function_def(node)
explicit_dependencies = set(parsed_docstring.get("dependencies", []))
dependencies = implicit_dependencies | explicit_dependencies
return dict(
**parsed_docstring,
type="PythonToPostgresOperator",
inject_dependencies=True,
dependencies=dependencies,
depends_on=list(dependencies),
name=node.name,
python_dir=str(function_file.parent),
callable=f"{function_file.stem}.{node.name}",
content=function_file.read_text(),
)
class PythonViewParser(ast.NodeVisitor):
file: pathlib.Path
def __init__(self, file: pathlib.Path):
self.views = []
self.file = file
def visit_FunctionDef(self, node: ast.FunctionDef) -> Any:
try:
self.views.append(create_view_from_function_def(self.file, node))
except (NoDocstringError, UnparseableDocstringError):
pass
return super().generic_visit(node)
def parse_python(file: pathlib.Path) -> Optional[Dict[str, Any]]:
tree = ast.parse(file.read_text())
python_view_parser = PythonViewParser(file)
python_view_parser.visit(tree)
if len(python_view_parser.views) > 0:
return python_view_parser.views[0]
else:
return None
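if __name__ == "__main__":
    # Illustrative usage sketch, not part of the original module: parse the
    # first documented view found in a Python file and print its definition.
    # "views/my_view.py" is a placeholder path.
    print(parse_python(pathlib.Path("views/my_view.py")))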
|
1691549
|
import os

try:
    os.makedirs("./temp")
except OSError:
    # do whatever you think is appropriate if the directory cannot be created
    # https://pt.stackoverflow.com/q/170615/101
    pass
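# Note (not part of the original snippet): on Python 3.2+ the same effect can
# be obtained without the try/except by passing exist_ok=True:
#
#     os.makedirs("./temp", exist_ok=True)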
|
1691576
|
from os import listdir, mkdir
from os.path import join, split, isfile, isdir
conversions = [
('./track2-gallery-query-metadata-v2m100/test-prob-v2m100.log',
'./track2-gallery-query-metadata-v2m100/prob_v2m100.txt',
'./track2-gallery-query-metadata-v2m100/imglist_v2m100.txt'),
]
img_gline = {}
with open('test_track.txt', 'r') as f:
for gg, line in enumerate(f):
g_line = gg+1
print(g_line)
imgs = line.replace("\n", "").strip().split(" ")
for i, img in enumerate(imgs):
img_gline[img] = g_line
img_qline = {}
with open('query_track.txt', 'r') as f:
for qq, line in enumerate(f):
q_line = qq+1
print(q_line)
imgs = line.replace("\n", "").strip().split(" ")
for i, img in enumerate(imgs):
img_qline[img] = q_line
assert int(imgs[0].replace('.jpg','')) == q_line # make sure is ordered
for raw_filename, prob_filename, imglist_filename in conversions:
metadatas = []
with open(raw_filename, 'r') as f:
buf = ''
i = 0
for line in f:
line = line.strip()
if i % 4 == 0:
metadatas.append([])
i += 1
else:
buf = buf + ' ' + line
#if line[-2:] != ']]':
# continue
#print(buf)
l = buf.rfind('[[')
r = buf.find(']]')
if l == -1 and r == -1:
metadatas[-1].append(buf.strip())
elif l < r:
metadatas[-1].append(buf[l+2:r].strip())
else:
print('invalid buf: ' + buf)
buf = ''
i += 1
if len(metadatas[-1]) == 0:
metadatas = metadatas[:-1]
print('images in metadatas: %d' % len(metadatas))
prob_filename_test = prob_filename[:-4] + '_test.txt'
imglist_filename_test = imglist_filename[:-4] + '_test.txt'
f_prob = open(prob_filename_test, 'w')
f_imglist = open(imglist_filename_test, 'w')
i = 0
for img in img_gline:
f_prob.write('%d/%d image\n' % (i, len(img_gline)))
for metadata in metadatas[img_gline[img]-1 + 1052]:
f_prob.write(metadata+'\n')
f_imglist.write(img+'\n')
i+=1
f_prob.close()
f_imglist.close()
prob_filename_query = prob_filename[:-4] + '_query.txt'
imglist_filename_query = imglist_filename[:-4] + '_query.txt'
f_prob = open(prob_filename_query, 'w')
f_imglist = open(imglist_filename_query, 'w')
i = 0
for img in img_qline:
f_prob.write('%d/%d image\n' % (i, len(img_qline)))
for metadata in metadatas[img_qline[img]-1]:
f_prob.write(metadata+'\n')
f_imglist.write(img+'\n')
i+=1
f_prob.close()
f_imglist.close()
|
1691583
|
import os
import unittest
import subprocess
from fuc.api.common import FUC_PATH
from fuc import pyvcf, pybed, pyfq, pygff
vcf_file1 = f'{FUC_PATH}/data/vcf/1.vcf'
vcf_file2 = f'{FUC_PATH}/data/vcf/2.vcf'
vcf_file3 = f'{FUC_PATH}/data/vcf/3.vcf'
bed_file1 = f'{FUC_PATH}/data/bed/1.bed'
bed_file2 = f'{FUC_PATH}/data/bed/2.bed'
fq_file1 = f'{FUC_PATH}/data/fq/1.fastq'
text_file1 = f'{FUC_PATH}/data/text/1.txt'
text_file2 = f'{FUC_PATH}/data/text/2.txt'
class TestPyvcf(unittest.TestCase):
def test_shape(self):
vf = pyvcf.VcfFrame.from_file(vcf_file1)
self.assertEqual(vf.shape, (5, 4))
def test_filter_empty(self):
vf = pyvcf.VcfFrame.from_file(vcf_file2)
vf = vf.filter_empty()
self.assertEqual(vf.df.shape, (4, 11))
def test_filter_bed(self):
vf = pyvcf.VcfFrame.from_file(vcf_file1)
bf = pybed.BedFrame.from_file(f'{FUC_PATH}/data/bed/1.bed')
vf = vf.filter_bed(bf)
self.assertEqual(vf.df.shape, (3, 13))
def test_merge(self):
vf1 = pyvcf.VcfFrame.from_file(vcf_file1)
vf2 = pyvcf.VcfFrame.from_file(vcf_file2)
vf3 = vf1.merge(vf2, how='outer', format='GT:DP')
self.assertEqual(vf3.df.shape, (9, 15))
def test_calculate_concordance(self):
vf = pyvcf.VcfFrame.from_file(vcf_file1)
self.assertEqual(vf.calculate_concordance('Steven', 'Sarah'), (1, 0, 0, 3))
def test_filter_multialt(self):
vf = pyvcf.VcfFrame.from_file(vcf_file1)
vf = vf.filter_multialt()
self.assertEqual(vf.shape[0], 4)
def test_subset(self):
vf = pyvcf.VcfFrame.from_file(vcf_file1)
vf = vf.subset(['Sarah', 'John'])
self.assertEqual(len(vf.samples), 2)
class TestPybed(unittest.TestCase):
def test_intersect(self):
bf1 = pybed.BedFrame.from_file(f'{FUC_PATH}/data/bed/1.bed')
bf2 = pybed.BedFrame.from_file(f'{FUC_PATH}/data/bed/2.bed')
bf3 = pybed.BedFrame.from_file(f'{FUC_PATH}/data/bed/3.bed')
bf4 = bf1.intersect(bf2)
self.assertEqual(bf3.to_string(), bf4.to_string())
class TestPyfq(unittest.TestCase):
def test_shape(self):
qf = pyfq.FqFrame.from_file(f'{FUC_PATH}/data/fq/1.fastq')
self.assertEqual(qf.shape, (5, 4))
class TestPygff(unittest.TestCase):
def test_from_file(self):
gf = pygff.GffFrame.from_file(f'{FUC_PATH}/data/gff/fasta.gff')
self.assertEqual(gf.df.shape, (12, 9))
self.assertEqual(len(gf.fasta), 2)
class TestCli(unittest.TestCase):
def test_bfintxn(self):
result = subprocess.run(['fuc', 'bed-intxn', bed_file1, bed_file2], capture_output=True, text=True, check=True)
self.assertEqual(len(result.stdout.split('\n')), 5)
def test_bfsum(self):
result = subprocess.run(['fuc', 'bed-sum', bed_file1], capture_output=True, text=True, check=True)
self.assertTrue('Total' in result.stdout)
def test_dfmerge(self):
result = subprocess.run(['fuc', 'tbl-merge', text_file1, text_file2], capture_output=True, text=True, check=True)
self.assertTrue('Sarah' in result.stdout)
def test_dfsum(self):
result = subprocess.run(['fuc', 'tbl-sum', text_file1], capture_output=True, text=True, check=True)
self.assertTrue('max' in result.stdout)
def test_fuccompf(self):
result = subprocess.run(['fuc', 'fuc-compf', vcf_file1, vcf_file1], capture_output=True, text=True, check=True)
self.assertTrue('True' in result.stdout)
def test_fucexist(self):
result = subprocess.run(['fuc', 'fuc-exist', vcf_file1], capture_output=True, text=True, check=True)
self.assertTrue('True' in result.stdout)
def test_fucfind(self):
result = subprocess.run('fuc fuc-find "*.vcf"', shell=True, capture_output=True, text=True, check=True)
self.assertTrue('1.vcf' in result.stdout)
def test_qfcount(self):
result = subprocess.run(['fuc', 'fq-count', fq_file1], capture_output=True, text=True, check=True)
self.assertEqual(int(result.stdout.strip()), 5)
def test_qfsum(self):
result = subprocess.run(['fuc', 'fq-sum', fq_file1], capture_output=True, text=True, check=True)
self.assertTrue('# Total: 5' in result.stdout)
def test_vfmerge(self):
result = subprocess.run(['fuc', 'vcf-merge', vcf_file1, vcf_file2, '--how', 'outer'], capture_output=True, text=True, check=True)
self.assertEqual(len(result.stdout.strip().split('\n')), 10)
if __name__ == '__main__':
unittest.main()
|
1691602
|
import argparse
import sys
import time
from multiprocessing import Pool
import numpy as np
import pandas as pd
from terminaltables import *
from dataset import VideoDataSet
from ops.utils import temporal_nms
sys.path.append('./anet_toolkit/Evaluation')
import os
import pdb
import pickle
from anet_toolkit.Evaluation.eval_detection import \
compute_average_precision_detection
from ops.utils import get_configs, softmax
# options
parser = argparse.ArgumentParser(
description="Evaluate detection performance metrics")
parser.add_argument('dataset', type=str, choices=['thumos14', 'muses'])
parser.add_argument('detection_pickles', type=str, nargs='+')
parser.add_argument('--nms_threshold', type=float, default=0.4)
parser.add_argument('--no_regression', default=False, action="store_true")
parser.add_argument('-j', '--ap_workers', type=int, default=16)
parser.add_argument('--top_k', type=int, default=None)
parser.add_argument('--cls_scores', type=str, nargs='+')
parser.add_argument('--reg_scores', type=str, default=None)
parser.add_argument('--cls_top_k', type=int, default=1)
parser.add_argument('--cfg', default='data/dataset_cfg.yml')
parser.add_argument('--score_weights', type=float, default=None, nargs='+')
parser.add_argument('--min_length', type=float, default=None, help='minimum duration of proposals in seconds')
parser.add_argument('--one_iou', action='store_true')
parser.add_argument('--no_comp', action='store_true')
args = parser.parse_args()
configs = get_configs(args.dataset, args.cfg)
dataset_configs = configs['dataset_configs']
model_configs = configs["model_configs"]
num_class = model_configs['num_class']
nms_threshold = args.nms_threshold if args.nms_threshold else configs['evaluation']['nms_threshold']
top_k = args.top_k if args.top_k else configs['evaluation']['top_k']
print('---'*10)
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print("initiating evaluation of detection results {}".format(args.detection_pickles))
print('top_k={}'.format(top_k))
sys.stdout.flush()
score_pickle_list = []
for pc in args.detection_pickles:
score_pickle_list.append(pickle.load(open(pc, 'rb')))
if args.score_weights:
weights = np.array(args.score_weights) / sum(args.score_weights)
else:
weights = [1.0/len(score_pickle_list) for _ in score_pickle_list]
def merge_scores(vid):
def merge_part(arrs, index, weights):
if arrs[0][index] is not None:
return np.sum([a[index] * w for a, w in zip(arrs, weights)], axis=0)
else:
return None
arrays = [pc[vid] for pc in score_pickle_list]
act_weights = weights
comp_weights = weights
reg_weights = weights
rel_props = score_pickle_list[0][vid][0]
return rel_props, \
merge_part(arrays, 1, act_weights), \
merge_part(arrays, 2, comp_weights), \
merge_part(arrays, 3, reg_weights)
print('Merge detection scores from {} sources...'.format(len(score_pickle_list)))
detection_scores = {k: merge_scores(k) for k in score_pickle_list[0]}
print('Done.')
if 'deploy_prop_file' in dataset_configs:
prop_file = dataset_configs['deploy_prop_file']
else:
prop_file = dataset_configs['test_prop_file']
if 'deploy_online_slice' in dataset_configs:
online_slice = dataset_configs['deploy_online_slice']
else:
online_slice = dataset_configs.get('online_slice', False)
dataset = VideoDataSet(dataset_configs,
prop_file=prop_file,
ft_path=dataset_configs['train_ft_path'],
test_mode=True)
from functools import reduce
gt_lens = np.array(reduce(lambda x,y: x+y, [[(x.end_frame-x.start_frame)/6 for x in v.gt] for v in dataset.video_list]))
# pdb.set_trace()
dataset_detections = [dict() for i in range(num_class)]
def merge_all_vid_scores(pickle_list):
def merge_op(arrs, index, weights):
if arrs[0][index] is not None:
return np.sum([a[index] * w for a, w in zip(arrs, weights)], axis=0)
else:
return None
out_score_dict = {}
for vid in pickle_list[0]:
arrays = [pc[vid] for pc in pickle_list]
act_weights = weights
comp_weights = weights
reg_weights = weights
rel_props = pickle_list[0][vid][0]
out_score_dict[vid] = [rel_props, \
merge_op(arrays, 1, act_weights), \
merge_op(arrays, 2, comp_weights), \
merge_op(arrays, 3, reg_weights)]
return out_score_dict
if args.cls_scores:
print('Using classifier scores from {}'.format(args.cls_scores))
cls_score_pickle_list = []
for pc in args.cls_scores:
cls_score_pickle_list.append(pickle.load(open(pc, 'rb')))
cls_score_dict = merge_all_vid_scores(cls_score_pickle_list)
# cls_score_pc = pickle.load(open(args.cls_scores, 'rb'), encoding='bytes')
# cls_score_dict = cls_score_pc
# cls_score_dict = {os.path.splitext(os.path.basename(k.decode('utf-8')))[0]:v for k, v in cls_score_pc.items()}
else:
cls_score_dict = None
if args.reg_scores:
print('Using regression scores from {}'.format(args.reg_scores))
reg_score_dict = pickle.load(open(args.reg_scores, 'rb'))
else:
reg_score_dict = None
# generate detection results
def gen_detection_results(video_id, score_tp):
if len(score_tp[0].shape) == 3:
rel_prop = np.squeeze(score_tp[0], 0)
else:
rel_prop = score_tp[0]
# standardize regression scores
reg_scores = score_tp[3]
if reg_scores is None:
reg_scores = np.zeros((len(rel_prop), num_class, 2), dtype=np.float32)
reg_scores = reg_scores.reshape((-1, num_class, 2))
if cls_score_dict is None:
combined_scores = softmax(score_tp[1][:, :])
combined_scores = combined_scores[:,1:]
else:
combined_scores = softmax(cls_score_dict[video_id][1])[:, 1:]
if combined_scores.shape[1] < score_tp[2].shape[1]:
combined_scores = np.concatenate(
(combined_scores, np.zeros([len(combined_scores), score_tp[2].shape[1]-combined_scores.shape[1]])), axis=1)
elif combined_scores.shape[1] > score_tp[2].shape[1]:
combined_scores = combined_scores[:, :score_tp[2].shape[1]]
if not args.no_comp:
combined_scores = combined_scores * np.exp(score_tp[2])
keep_idx = np.argsort(combined_scores.ravel())[-top_k:]
# pdb.set_trace()
delete_short = args.min_length is not None
if delete_short:
print('delete short proposals')
duration = dataset.video_dict[video_id].num_frames / 6
prop_duration = duration * (rel_prop[:,1] - rel_prop[:, 0])
        non_short_prop_idx = np.where(prop_duration >= args.min_length)[0]  # keep proposals that are at least min_length long
keep_idx = [x for x in keep_idx if x // num_class in non_short_prop_idx]
# keep_prop_num = len({x//num_class for x in keep_idx})
for k in keep_idx:
cls = k % num_class
prop_idx = k // num_class
if video_id not in dataset_detections[cls]:
dataset_detections[cls][video_id] = np.array([
[rel_prop[prop_idx, 0], rel_prop[prop_idx, 1], combined_scores[prop_idx, cls],
reg_scores[prop_idx, cls, 0], reg_scores[prop_idx, cls, 1]]
])
else:
dataset_detections[cls][video_id] = np.vstack(
[dataset_detections[cls][video_id],
[rel_prop[prop_idx, 0], rel_prop[prop_idx, 1], combined_scores[prop_idx, cls],
reg_scores[prop_idx, cls, 0], reg_scores[prop_idx, cls, 1]]])
return len(keep_idx)
print("Preprocessing detections...")
orig_num_list = []
keep_num_list = []
def mean(x):
return sum(x)/len(x)
for k, v in detection_scores.items():
orig_num = len(v[0])
keep_num = gen_detection_results(k, v)
orig_num_list.append(orig_num)
keep_num_list.append(keep_num)
print('Done. {} videos, avg prop num {:.0f} => {:.0f}'.format(len(detection_scores), mean(orig_num_list), mean(keep_num_list)))
# perform NMS
print("Performing nms with thr {} ...".format(nms_threshold))
for cls in range(num_class):
dataset_detections[cls] = {
k: temporal_nms(v, nms_threshold) for k,v in dataset_detections[cls].items()
}
print("NMS Done.")
def perform_regression(detections):
t0 = detections[:, 0]
t1 = detections[:, 1]
center = (t0 + t1) / 2
duration = (t1 - t0)
new_center = center + duration * detections[:, 3]
new_duration = duration * np.exp(detections[:, 4])
new_detections = np.concatenate((
np.clip(new_center - new_duration / 2, 0, 1)[:, None], np.clip(new_center + new_duration / 2, 0, 1)[:, None], detections[:, 2:]
), axis=1)
return new_detections
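# Worked example for the regression above (illustrative): a detection
# [0.20, 0.40, 0.90, 0.10, 0.00] has center 0.30 and duration 0.20; the
# offset shifts the center to 0.30 + 0.20 * 0.10 = 0.32 and exp(0.00) = 1.0
# leaves the duration unchanged, so the regressed interval is [0.22, 0.42]
# with the score 0.90 carried over.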
# perform regression
if not args.no_regression:
print("Performing location regression...")
for cls in range(num_class):
dataset_detections[cls] = {
k: perform_regression(v) for k, v in dataset_detections[cls].items()
}
print("Regression Done.")
else:
print("Skip regresssion as requested by --no_regression")
# ravel test detections
def ravel_detections(detection_db, cls):
detection_list = []
for vid, dets in detection_db[cls].items():
detection_list.extend([[vid, cls] + x[:3] for x in dets.tolist()])
df = pd.DataFrame(detection_list, columns=["video-id", "cls","t-start", "t-end", "score"])
return df
plain_detections = [ravel_detections(dataset_detections, cls) for cls in range(num_class)]
# get gt
gt_list = []
all_gt = dataset.get_all_gt()
all_gt = pd.DataFrame(all_gt, columns=["video-id", "cls","t-start", "t-end"])
gt_by_cls = []
for cls in range(num_class):
    gt_by_cls.append(all_gt[all_gt.cls == cls].reset_index(drop=True).drop('cls', axis=1))
print(cls, len(gt_by_cls[cls]))
# pdb.set_trace()
pickle.dump(gt_by_cls, open('gt_dump.pc', 'wb'), pickle.HIGHEST_PROTOCOL)
pickle.dump(plain_detections, open('pred_dump.pc', 'wb'), pickle.HIGHEST_PROTOCOL)
print("Calling mean AP calculator from toolkit with {} workers...".format(args.ap_workers))
if args.one_iou:
iou_range = [0.5]
else:
if args.dataset == 'thumos14':
iou_range = np.arange(0.1, 1.0, 0.1)
elif args.dataset == 'muses':
iou_range = [0.3, 0.4, 0.5, 0.6, 0.7]
else:
iou_range = np.arange(0.5, 1.0, 0.05)
# raise ValueError("unknown dataset {}".format(args.dataset))
ap_values = np.zeros((num_class, len(iou_range)))
def eval_ap(iou, iou_idx, cls, gt, prediction):
    ap = compute_average_precision_detection(gt, prediction, iou)
sys.stdout.flush()
return cls, iou_idx, ap
def callback(rst):
sys.stdout.flush()
ap_values[rst[0], rst[1]] = rst[2][0]
pool = Pool(args.ap_workers)
jobs = []
for iou_idx, min_overlap in enumerate(iou_range):
for cls in range(num_class):
if len(gt_by_cls[cls]) == 0:
continue
jobs.append(pool.apply_async(eval_ap, args=([min_overlap], iou_idx, cls, gt_by_cls[cls], plain_detections[cls],),callback=callback))
pool.close()
pool.join()
print("Evaluation done.\n\n")
map_iou = ap_values.mean(axis=0)
per_cls_map = ap_values.mean(axis=1)
#
# for
display_title = "Detection Performance on {}".format(args.dataset)
display_data = [["IoU thresh"], ["mAP"]]
for i in range(len(iou_range)):
display_data[0].append("{:.02f}".format(iou_range[i]))
display_data[1].append("{:.04f}".format(map_iou[i]))
display_data[0].append('Average')
display_data[1].append("{:.04f}".format(map_iou.mean()))
table = AsciiTable(display_data, display_title)
table.justify_columns[-1] = 'right'
table.inner_footing_row_border = True
print(table.table)
# first_line = '\t'.join(['iou'], ['{:.02f}'])
print('Per-class average AP over all iou thresholds')
for i,x in enumerate(per_cls_map):
print('%.4f' % x, end='\t')
print(time.strftime('%Y-%m-%d %H:%M:%S') + ' Done')
|
1691606
|
from api.event import Event
class TaskCreatedEvent(Event):
"""
    A `task-created` event that replaces Celery's `task-sent` event.
Sent by TaskResponse for every PENDING/STARTED task.
"""
_type_ = _name_ = 'task-created'
|
1691613
|
import unittest
import comma.cpp_bindings
class test(unittest.TestCase):
def test_size(self):
self.assertEqual(comma.cpp_bindings.csv.format('d,2ub,s[5]').size(), 15)
if __name__ == '__main__':
unittest.main()
|
1691615
|
from ekphrasis.classes.preprocessor import TextPreProcessor
from ekphrasis.classes.tokenizer import SocialTokenizer
from ekphrasis.dicts.emoticons import emoticons
def ws_tokenizer(text):
return text.split()
text_processor = TextPreProcessor(
normalize=['url', 'email', 'percent', 'money', 'phone', 'user', 'time',
'date', 'number'],
annotate={"hashtag", "elongated", "allcaps", "repeated", 'emphasis',
'censored'},
all_caps_tag="wrap",
fix_text=True,
segmenter="twitter_2018",
corrector="twitter_2018",
unpack_hashtags=True,
unpack_contractions=True,
spell_correct_elong=False,
tokenizer=SocialTokenizer(lowercase=True).tokenize,
# tokenizer=ws_tokenizer,
dicts=[emoticons]
)
sentences = [
"CANT WAIT for the new season of #TwinPeaks \(^o^)/!!! #davidlynch #tvseries :))) ",
"I saw the new #johndoe movie and it suuuuucks!!! WAISTED $10... #badmovies :/",
"I saw the new #JOHNDOE movie AND IT SUCKS!!! WAISTED $10... #badmovies :/",
"@SentimentSymp: can't wait for the Nov 9 #Sentiment talks! YAAAAAAY !!! :-D http://sentimentsymposium.com/.",
"Thanks x https://t.co/ZXTcDLyDS9",
"@Calum5SOS You lil poop please follow @EmilyBain224 ☺️💕",
"Words attendees would use to describe @prosper4africa's #ALN2015! https://t.co/hmNm8AdwOh",
]
for s in sentences:
print()
print(s)
print(" ".join(text_processor.pre_process_doc(s)))
|
1691629
|
import logging
import time
from src.tasks.celery_app import celery
from src.queries.get_trending_playlists import (
make_trending_cache_key,
make_get_unpopulated_playlists,
)
from src.utils.redis_cache import pickle_and_set
from src.utils.redis_constants import trending_playlists_last_completion_redis_key
from src.trending_strategies.trending_strategy_factory import TrendingStrategyFactory
from src.trending_strategies.trending_type_and_version import TrendingType
logger = logging.getLogger(__name__)
TIME_RANGES = ["week", "month", "year"]
trending_strategy_factory = TrendingStrategyFactory()
def cache_trending(db, redis, strategy):
with db.scoped_session() as session:
for time_range in TIME_RANGES:
key = make_trending_cache_key(time_range, strategy.version)
res = make_get_unpopulated_playlists(session, time_range, strategy)()
pickle_and_set(redis, key, res)
@celery.task(name="cache_trending_playlists", bind=True)
def cache_trending_playlists(self):
"""Caches trending playlists for time period"""
db = cache_trending_playlists.db
redis = cache_trending_playlists.redis
have_lock = False
update_lock = redis.lock("cache_trending_playlists_lock", timeout=7200)
try:
have_lock = update_lock.acquire(blocking=False)
if have_lock:
trending_playlist_versions = (
trending_strategy_factory.get_versions_for_type(
TrendingType.PLAYLISTS
).keys()
)
for version in trending_playlist_versions:
logger.info(
f"cache_trending_playlists.py ({version.name} version) | Starting"
)
strategy = trending_strategy_factory.get_strategy(
TrendingType.PLAYLISTS, version
)
start_time = time.time()
cache_trending(db, redis, strategy)
end_time = time.time()
logger.info(
f"cache_trending_playlists.py ({version.name} version) | \
Finished in {end_time - start_time} seconds"
)
redis.set(trending_playlists_last_completion_redis_key, int(end_time))
else:
logger.info("cache_trending_playlists.py | Failed to acquire lock")
except Exception as e:
logger.error(
"cache_trending_playlists.py | Fatal error in main loop", exc_info=True
)
raise e
finally:
if have_lock:
update_lock.release()
|
1691641
|
import os
import pickle
import tensorflow as tf
from six.moves import cPickle
from moon.models.lstm_gen.char_rnn.model import Model
def get_sample(base_save_dir, sample_length, seed_str):
with open("sample_args.pickle", "rb") as handle:
args = pickle.load(handle)
args.save_dir = base_save_dir
args.n = sample_length
args.prime = seed_str
return sample(args)
def sample(args):
with open(os.path.join(args.save_dir, "save/", "config.pkl"), "rb") as f:
saved_args = cPickle.load(f)
with open(os.path.join(args.save_dir, "save/", "chars_vocab.pkl"), "rb") as f:
chars, vocab = cPickle.load(f)
model = Model(saved_args, training=False)
with tf.Session() as sess:
tf.global_variables_initializer().run()
saver = tf.train.Saver(tf.global_variables())
ckpt = tf.train.get_checkpoint_state(os.path.join(args.save_dir, "save/"))
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
return model.sample(sess, chars, vocab, args.n, args.prime, args.sample)
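if __name__ == "__main__":
    # Illustrative usage sketch, not part of the original module: restore a
    # trained checkpoint and sample 200 characters seeded with "The ". The
    # save directory is a placeholder and "sample_args.pickle" must exist.
    print(get_sample("moon/models/lstm_gen/char_rnn", 200, "The "))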
|
1691655
|
from smart_grasping_sandbox.smart_grasper import SmartGrasper
from tf.transformations import quaternion_from_euler
from math import pi
import time
import rospy
from math import sqrt, pow
import random
from sys import argv
sgs = SmartGrasper()
MIN_LIFT_STEPS = 5
MAX_BALL_DISTANCE = 0.6
CLOSED_HAND = {}
CLOSED_HAND["H1_F1J1"] = 0.0
CLOSED_HAND["H1_F1J2"] = 0.25
CLOSED_HAND["H1_F1J3"] = 0.4
CLOSED_HAND["H1_F2J1"] = 0.0
CLOSED_HAND["H1_F2J2"] = 0.25
CLOSED_HAND["H1_F2J3"] = 0.4
CLOSED_HAND["H1_F3J1"] = 0.0
CLOSED_HAND["H1_F3J2"] = 0.25
CLOSED_HAND["H1_F3J3"] = 0.4
JOINT_NAMES = CLOSED_HAND.keys()
class GraspQuality(object):
def __init__(self, sgs):
self.sgs = sgs
self.last_distance = None
def check_stable(self, joint_names):
current_min = 1000
positions = []
velocities = []
efforts = []
for k in range(30):
sgs.move_tip(y=0.02)
ball_distance = self.__compute_euclidean_distance()
if k > MIN_LIFT_STEPS and ball_distance < current_min:
current_min = ball_distance
if ball_distance > MAX_BALL_DISTANCE:
break
joints_positions, joints_velocity, joints_effort = self.sgs.get_current_joint_state()
new_pos = []
new_vel = []
new_eff = []
for name in joint_names:
new_pos.append(joints_positions[name])
new_vel.append(joints_velocity[name])
new_eff.append(joints_effort[name])
positions.append(new_pos)
velocities.append(new_vel)
efforts.append(new_eff)
time.sleep(0.01)
robustness = (1/(current_min - 0.18))**2
return robustness, positions, velocities, efforts
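    # Worked example for the robustness score above (illustrative): if the
    # smallest tip-to-ball distance reached while lifting is 0.28 m, then
    # robustness = (1 / (0.28 - 0.18)) ** 2 = 100; larger distances (the ball
    # slipping away) shrink the score quadratically.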
def __compute_euclidean_distance(self):
ball_pose = self.sgs.get_object_pose()
hand_pose = self.sgs.get_tip_pose()
dist = sqrt((hand_pose.position.x - ball_pose.position.x)**2 + \
(hand_pose.position.y - ball_pose.position.y)**2 + \
(hand_pose.position.z - ball_pose.position.z)**2)
return dist
quality = GraspQuality(sgs)
def experiment(grasp_distance=-0.163):
sgs.reset_world()
time.sleep(0.1)
sgs.reset_world()
time.sleep(0.1)
sgs.open_hand()
time.sleep(0.1)
sgs.open_hand()
time.sleep(0.1)
ball_pose = sgs.get_object_pose()
ball_pose.position.z += 0.5
#setting an absolute orientation (from the top)
quaternion = quaternion_from_euler(-pi/2., 0.0, 0.0)
ball_pose.orientation.x = quaternion[0]
ball_pose.orientation.y = quaternion[1]
ball_pose.orientation.z = quaternion[2]
ball_pose.orientation.w = quaternion[3]
sgs.move_tip_absolute(ball_pose)
sgs.move_tip(y=grasp_distance)
# close the grasp
sgs.check_fingers_collisions(False)
random_closed_hand = {}
for joint in CLOSED_HAND:
random_closed_hand[joint] = CLOSED_HAND[joint] + random.gauss(0.0, 0.04)
sgs.send_command(random_closed_hand, duration=1.0)
# lift slowly and check the quality
joint_names = random_closed_hand.keys()
joint_targets = random_closed_hand.values()
robustness, positions, velocities, efforts = quality.check_stable(joint_names)
rospy.loginfo("Grasp quality = " + str(robustness))
sgs.check_fingers_collisions(True)
return joint_names, joint_targets, robustness, positions, velocities, efforts
with open("/results/headers.txt", "wb") as txt_file:
headers = "experiment_number; robustness; "
for name in JOINT_NAMES:
headers += name+"_pos ; "+name+"_vel ; "+name+"_eff ; "
headers += "measurement_number\n"
txt_file.write(headers)
grasp_distances = [float(i) for i in argv[1:-1]]
number_of_tests_per_distance = int(argv[-1])
print "Running the grasp script with the distances: ", grasp_distances, " / number of tests: ", number_of_tests_per_distance
import uuid
for dist in grasp_distances:
for _ in range(number_of_tests_per_distance):
rospy.loginfo("---- grasping ["+str(uuid.uuid4().hex)+"/"+str(len(grasp_distances*number_of_tests_per_distance))+"] - dist="+str(dist))
joint_names, joint_targets, robustness, positions, velocities, efforts = experiment(dist)
with open("/results/"+str(uuid.uuid4())+".txt", "a") as txt_file:
base_line = str(uuid.uuid4().hex)+" ; "+str(robustness)
for measurement_number in range(len(positions)):
pos = positions[measurement_number]
vel = velocities[measurement_number]
eff = efforts[measurement_number]
measurement_line = base_line + " ; "
for id_name, _ in enumerate(joint_names):
measurement_line += str(pos[id_name]) + " ; " + str(vel[id_name])+" ; "+str(eff[id_name])+ " ; "
measurement_line += str(measurement_number) + "\n"
txt_file.write(measurement_line)
measurement_number += 1
|
1691658
|
import pandas as pd
import numpy as np
import plotly.graph_objs as go
import plotly.colors
from collections import OrderedDict
import requests
# default list of all countries of interest
country_default = OrderedDict([('Canada', 'CAN'), ('United States', 'USA'),
('Brazil', 'BRA'), ('France', 'FRA'), ('India', 'IND'), ('Italy', 'ITA'),
('Germany', 'DEU'), ('United Kingdom', 'GBR'), ('China', 'CHN'), ('Japan', 'JPN')])
def return_figures(countries=country_default):
"""Creates four plotly visualizations using the World Bank API
# Example of the World Bank API endpoint:
# arable land for the United States and Brazil from 1990 to 2015
# http://api.worldbank.org/v2/countries/usa;bra/indicators/AG.LND.ARBL.HA?date=1990:2015&per_page=1000&format=json
Args:
        countries (dict): mapping of country names to ISO-3 codes used to filter the data
Returns:
list (dict): list containing the four plotly visualizations
"""
# when the countries variable is empty, use the country_default dictionary
if not bool(countries):
countries = country_default
# prepare filter data for World Bank API
# the API uses ISO-3 country codes separated by ;
country_filter = list(countries.values())
country_filter = [x.lower() for x in country_filter]
country_filter = ';'.join(country_filter)
# World Bank indicators of interest for pulling data
indicators = ['AG.LND.ARBL.HA.PC', 'SP.RUR.TOTL.ZS', 'SP.RUR.TOTL.ZS', 'AG.LND.FRST.ZS']
data_frames = [] # stores the data frames with the indicator data of interest
urls = [] # url endpoints for the World Bank API
# pull data from World Bank API and clean the resulting json
# results stored in data_frames variable
for indicator in indicators:
url = 'http://api.worldbank.org/v2/countries/' + country_filter +\
'/indicators/' + indicator + '?date=1990:2015&per_page=1000&format=json'
urls.append(url)
try:
r = requests.get(url)
data = r.json()[1]
        except Exception:
            print('could not load data ', indicator)
            data = []
for i, value in enumerate(data):
value['indicator'] = value['indicator']['value']
value['country'] = value['country']['value']
data_frames.append(data)
# first chart plots arable land from 1990 to 2015 in top 10 economies
# as a line chart
graph_one = []
df_one = pd.DataFrame(data_frames[0])
# filter and sort values for the visualization
# filtering plots the countries in decreasing order by their values
df_one = df_one[(df_one['date'] == '2015') | (df_one['date'] == '1990')]
df_one.sort_values('value', ascending=False, inplace=True)
# this country list is re-used by all the charts to ensure legends have the same
# order and color
countrylist = df_one.country.unique().tolist()
for country in countrylist:
x_val = df_one[df_one['country'] == country].date.tolist()
y_val = df_one[df_one['country'] == country].value.tolist()
graph_one.append(
go.Scatter(
x = x_val,
y = y_val,
mode = 'lines',
name = country
)
)
layout_one = dict(title = 'Change in Hectares Arable Land <br> per Person 1990 to 2015',
xaxis = dict(title = 'Year',
autotick=False, tick0=1990, dtick=25),
yaxis = dict(title = 'Hectares'),
)
    # second chart plots arable land for 2015 as a bar chart
graph_two = []
df_one.sort_values('value', ascending=False, inplace=True)
df_one = df_one[df_one['date'] == '2015']
graph_two.append(
go.Bar(
x = df_one.country.tolist(),
y = df_one.value.tolist(),
)
)
layout_two = dict(title = 'Hectares Arable Land per Person in 2015',
xaxis = dict(title = 'Country',),
yaxis = dict(title = 'Hectares per person'),
)
# third chart plots percent of population that is rural from 1990 to 2015
graph_three = []
df_three = pd.DataFrame(data_frames[1])
df_three = df_three[(df_three['date'] == '2015') | (df_three['date'] == '1990')]
df_three.sort_values('value', ascending=False, inplace=True)
for country in countrylist:
x_val = df_three[df_three['country'] == country].date.tolist()
y_val = df_three[df_three['country'] == country].value.tolist()
graph_three.append(
go.Scatter(
x = x_val,
y = y_val,
mode = 'lines',
name = country
)
)
layout_three = dict(title = 'Change in Rural Population <br> (Percent of Total Population)',
xaxis = dict(title = 'Year',
autotick=False, tick0=1990, dtick=25),
yaxis = dict(title = 'Percent'),
)
# fourth chart shows rural population vs arable land as percents
graph_four = []
df_four_a = pd.DataFrame(data_frames[2])
df_four_a = df_four_a[['country', 'date', 'value']]
df_four_b = pd.DataFrame(data_frames[3])
df_four_b = df_four_b[['country', 'date', 'value']]
df_four = df_four_a.merge(df_four_b, on=['country', 'date'])
df_four.sort_values('date', ascending=True, inplace=True)
plotly_default_colors = plotly.colors.DEFAULT_PLOTLY_COLORS
for i, country in enumerate(countrylist):
current_color = []
x_val = df_four[df_four['country'] == country].value_x.tolist()
y_val = df_four[df_four['country'] == country].value_y.tolist()
years = df_four[df_four['country'] == country].date.tolist()
country_label = df_four[df_four['country'] == country].country.tolist()
text = []
        for country_name, year in zip(country_label, years):
            text.append(str(country_name) + ' ' + str(year))
graph_four.append(
go.Scatter(
x = x_val,
y = y_val,
mode = 'lines+markers',
text = text,
name = country,
                textposition = 'top center'
)
)
layout_four = dict(title = '% of Population that is Rural versus <br> % of Land that is Forested <br> 1990-2015',
xaxis = dict(title = '% Population that is Rural', range=[0,100], dtick=10),
yaxis = dict(title = '% of Area that is Forested', range=[0,100], dtick=10),
)
# append all charts
figures = []
figures.append(dict(data=graph_one, layout=layout_one))
figures.append(dict(data=graph_two, layout=layout_two))
figures.append(dict(data=graph_three, layout=layout_three))
figures.append(dict(data=graph_four, layout=layout_four))
return figures
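# Minimal usage sketch (an assumption, not part of the original app wiring): the
# figure dicts returned above can be serialized for a Plotly front end. Requires
# network access to the World Bank API.
if __name__ == '__main__':
    import json
    from plotly.utils import PlotlyJSONEncoder
    figures = return_figures()
    print(json.dumps(figures, cls=PlotlyJSONEncoder)[:200])  # preview the payload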
|
1691660
|
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import force_text
class CommentManager(models.Manager):
def for_model(self, model):
"""
QuerySet for all comments for a particular model (either an instance or
a class).
"""
ct = ContentType.objects.get_for_model(model)
qs = self.get_query_set().filter(content_type=ct)
if isinstance(model, models.Model):
qs = qs.filter(object_pk=force_text(model._get_pk_val()))
return qs
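# Usage sketch (assumes a concrete Comment model that sets `objects = CommentManager()`
# and some Article model; both names are placeholders):
#   Comment.objects.for_model(Article)           # comments for every Article
#   Comment.objects.for_model(article_instance)  # comments for one specific article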
from django.db.models.fields.related import ManyToManyField
from django.contrib.contenttypes.fields import GenericRelation
class ObjectTaskMixin(models.Model):
_object_tasks = GenericRelation('ObjectTask')
class Meta:
abstract = True
@property
def tasks(self):
from django_project.models import Task
return Task.objects.filter(objecttask_tasks__content_type=self._content_type(), objecttask_tasks__object_id=self._object_pk())
def _content_type(self):
return ContentType.objects.get_for_model(self)
def _object_pk(self):
return force_text(self._get_pk_val())
def _filter(self, model):
return self._object_tasks
#return model.objects.filter(content_type=self._content_type(), object_pk=self._object_pk())
def add_task(self, task):
from django_project.models import ObjectTask
if self._filter(ObjectTask).filter(task=task).count() == 0:
ot = ObjectTask(task=task, content_object=self)
ot.save()
def remove_task(self, task):
from django_project.models import ObjectTask
self._filter(ObjectTask).filter(task=task).delete()
def tasks_for_author(self, user):
return self.tasks.filter(author=user)
|
1691661
|
from django.contrib import admin
from reversion.admin import VersionAdmin
from symposion.teams.models import Team, Membership
admin.site.register(Team,
prepopulated_fields={"slug": ("name",)})
class MembershipAdmin(VersionAdmin):
list_display = ["team", "user", "state"]
list_filter = ["team"]
search_fields = ["user__username"]
admin.site.register(Membership, MembershipAdmin)
|
1691676
|
import json
import pytest
from verity_sdk.utils import unpack_forward_message
from verity_sdk.utils.Context import Context
from verity_sdk.protocols.Protocol import Protocol
from ..test_utils import get_test_config, cleanup
@pytest.mark.asyncio
async def test_get_message():
message = {'hello': 'world'}
context = await Context.create_with_config(await get_test_config())
packed_message = await Protocol('test-family', '0.1').get_message_bytes(context, message)
unpacked_message = json.dumps(await unpack_forward_message(context, packed_message))
assert json.dumps(message) == unpacked_message
await cleanup(context)
|
1691684
|
import cv2
import numpy as np
# initialize the smile detector once, outside the capture loop
detector = cv2.CascadeClassifier("haarcascade_smile.xml")
cap = cv2.VideoCapture(0)
while (1):
    _, frame = cap.read()
    # convert the frame to grayscale
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # perform the smile detection
    rects = detector.detectMultiScale(gray, scaleFactor=1.05,
                                      minNeighbors=5, minSize=(30, 30),
                                      flags=cv2.CASCADE_SCALE_IMAGE)
    # loop over the bounding boxes
    for (x, y, w, h) in rects:
        # draw the detection bounding box on the frame
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    # display the camera feed
    cv2.imshow('Smile Detection', frame)
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
cv2.destroyAllWindows()
cap.release()
|
1691716
|
from ps1_argonaut.files.DATFile import DATFile
class BINFile(DATFile):
suffix = 'BIN'
def __str__(self):
return 'Translated text'
|
1691725
|
import asyncio
import logging
from public_plugins.trickcord_patches import trickcord_patches
logger = logging.getLogger('gradiusbot')
logger.info("[Message Event Plugin] <trickcord_patches.py>: Trickcord Edit Event patches.")
TRICKCORD_ID = 755580145078632508
async def action(**kwargs):
event_type = kwargs['event_type']
config = kwargs['config']
spooked_role_id = config.getint('trickcord', 'spooked_role_id')
if event_type == 'edit':
before = kwargs['before']
after = kwargs['after']
# a trickortreater has left, so back to standby
if len(after.embeds) > 0 and after.embeds[0].title == 'The trick-or-treater disappeared...':
trickcord_patches.trickcord_state = 'STANDBY'
        # someone scared the trick-or-treater, back to standby
if len(after.embeds) > 0 and after.embeds[0].title == 'Oh no!':
trickcord_patches.trickcord_state = 'STANDBY'
# someone claimed the trickortreater, go to standby
elif len(after.embeds) > 0 and after.embeds[0].title == 'Happy Halloween!':
trickcord_patches.trickcord_state = 'STANDBY'
await remove_spooked(after, spooked_role_id)
elif after.author.id == 101103243991465984:
if after.content == 'DEBUG-TRICK':
print("DEBUGTRICK")
trickcord_patches.trickcord_state = 'TRICK'
elif after.content == 'DEBUG-TREAT':
print("DEBUGTREAT")
trickcord_patches.trickcord_state = 'TREAT'
elif after.content == 'DEBUG-STANDBY':
print("DEBUGSTANDBY")
trickcord_patches.trickcord_state = 'STANDBY'
elif after.content == 'UNSPOOK':
print("UNSPOOK")
await remove_spooked(after, spooked_role_id)
async def remove_spooked(message, spooked_role_id):
guild = message.guild
spooked_role = guild.get_role(spooked_role_id)
spooked_users = spooked_role.members
for member in spooked_users:
await member.remove_roles(spooked_role)
|
1691735
|
import string
import unittest
class BrailleTest(unittest.TestCase):
def testLowercase(self):
self.assertEqual(
answer("code"), "100100101010100110100010")
def testMixedcase(self):
self.assertEqual(
answer("Braille"),
"000001110000111010100000010100111000111000100010")
def testSentence(self):
self.assertEqual(
answer(
"The quick brown fox jumped over the lazy dog"),
"000001011110110010100010000000111110101001010100100100101000" +
"000000110000111010101010010111101110000000110100101010101101" +
"000000010110101001101100111100100010100110000000101010111001" +
"100010111010000000011110110010100010000000111000100000101011" +
"101111000000100110101010110110")
translate = {
'a': '100000',
'b': '110000',
'c': '100100',
'd': '100110',
'e': '100010',
'f': '110100',
'g': '110110',
'h': '110010',
'i': '010100',
'j': '010110',
'k': '101000',
'l': '111000',
'm': '101100',
'n': '101110',
'o': '101010',
'p': '111100',
'q': '111110',
'r': '111010',
's': '011100',
't': '011110',
'u': '101001',
'v': '111001',
'w': '010111',
'x': '101101',
'y': '101111',
'z': '101011',
' ': '000000',
}
def answer(plaintext):
characters = list(plaintext)
out = ""
for c in characters:
t = translate[c.lower()]
if c in string.ascii_uppercase:
t = "000001" + t
out += t
return out
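# Worked example: a capital letter is prefixed with the capitalization cell "000001",
# so answer("Hi") == "000001" + "110010" ('h') + "010100" ('i')
#                 == "000001110010010100"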
unittest.main()
|
1691758
|
import pyspark.sql.functions as F
import pyspark.sql.types as T
from typing import Callable
from pyspark.sql import SparkSession, DataFrame
# helper function for looping
def loop(op: Callable[[DataFrame], DataFrame], df: DataFrame = None) -> DataFrame:
for _ in range(10):
df = op(df)
return df
class query(object):
def __getitem__(self, name):
return getattr(self, name)
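# Usage sketch (illustrative): queries are dispatched by name through __getitem__,
# and `loop` repeats a DataFrame transformation ten times, e.g.
#   column_queries()['avg_x_double'](spark)
#   df = loop(lambda d: d.withColumn('double_a', d['double_a'] + d['double_b']), df)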
class column_queries(query):
def avg_x_double(self, spark: SparkSession) -> DataFrame:
print("Query: SELECT AVG(double_x) FROM table")
return spark.sql('SELECT AVG(double_x) FROM table')
def avg_x_plus_y_double(self, spark: SparkSession) -> DataFrame:
print("Query: SELECT AVG(double_x + double_y) FROM table")
return spark.sql('SELECT AVG(double_x + double_y) FROM table')
def sum_x_double(self, spark: SparkSession) -> DataFrame:
print("Query: SELECT SUM(double_x) FROM table")
return spark.sql('SELECT SUM(double_x) FROM table')
def sum_x_plus_y_double(self, spark: SparkSession) -> DataFrame:
print("Query: SELECT SUM(double_x + double_y) FROM table")
return spark.sql('SELECT SUM(double_x + double_y) FROM table')
def x_plus_y_double(self, spark: SparkSession) -> DataFrame:
print("Query: SELECT double_x + double_y FROM table")
return spark.sql('SELECT double_x + double_y FROM table')
class group_by_queries(query):
def group_by_sum_x(self, spark: SparkSession) -> DataFrame:
print("Query: SELECT id, SUM(double_x) FROM table GROUP BY id")
return spark.sql('SELECT id, SUM(double_x) FROM table GROUP BY id')
def group_by_avg_x(self, spark: SparkSession) -> DataFrame:
print("Query: SELECT id, AVG(double_x) FROM table GROUP BY id")
return spark.sql('SELECT id, AVG(double_x) FROM table GROUP BY id')
def group_by_sum_x_plus_y(self, spark: SparkSession) -> DataFrame:
print("Query: SELECT id, SUM(double_x + double_y) FROM table GROUP BY id")
return spark.sql('SELECT id, SUM(double_x + double_y) FROM table GROUP BY id')
def group_by_avg_x_plus_y(self, spark: SparkSession) -> DataFrame:
print("Query: SELECT id, AVG(double_x + double_y) FROM table GROUP BY id")
return spark.sql('SELECT id, AVG(double_x + double_y) FROM table GROUP BY id')
def group_by_count_asterisk(self, spark: SparkSession) -> DataFrame:
print("Query: SELECT id, COUNT(*) FROM table GROUP BY id")
return spark.sql('SELECT id, COUNT(*) FROM table GROUP BY id')
def group_by_sum_x_minus_y(self, spark: SparkSession) -> DataFrame:
print("Query: SELECT id, SUM(double_x - double_y) FROM table GROUP BY id")
return spark.sql('SELECT id, SUM(double_x - double_y) FROM table GROUP BY id')
def group_by_sum_x_plus_y_as_res_filter_res(self, spark: SparkSession) -> DataFrame:
print("Query: SELECT id, SUM(double_x + double_y) AS res FROM table GROUP BY id HAVING res > 405008")
return spark.sql("SELECT id, SUM(double_x + double_y) AS res FROM table GROUP BY id HAVING res > 405008")
class nyctaxi_queries(query):
def q1(self, spark: SparkSession) -> DataFrame:
print("""Query: SELECT pickup_datetime,dropoff_datetime,fare_amount,pickup_location_id,dropoff_location_id
FROM trips WHERE payment_type = 2
group by pickup_datetime,dropoff_datetime,fare_amount,pickup_location_id,dropoff_location_id
having fare_amount > 20.0
""")
res = spark.sql('SELECT pickup_datetime,dropoff_datetime,fare_amount,pickup_location_id,dropoff_location_id \
FROM trips WHERE payment_type = 2 \
group by pickup_datetime,dropoff_datetime,fare_amount,pickup_location_id,dropoff_location_id \
having fare_amount > 20.0')
return res
def q2(self, spark: SparkSession) -> DataFrame:
print("""Query: SELECT pickup_location_id,dropoff_location_id,payment_type, COUNT(*), AVG(fare_amount)
FROM trips group by pickup_location_id,dropoff_location_id,payment_type
""")
res = spark.sql('SELECT pickup_location_id,dropoff_location_id,payment_type, COUNT(*), AVG(fare_amount) \
FROM trips group by pickup_location_id,dropoff_location_id,payment_type')
return res
def q3(self, spark: SparkSession) -> DataFrame:
print("""Query: select pickup_location_id,dropoff_location_id,payment_type, COUNT(*), SUM(total_amount)
        from trips group by pickup_location_id,dropoff_location_id,payment_type having SUM(fare_amount + extra) < 0
""")
res = spark.sql('select pickup_location_id,dropoff_location_id,payment_type, COUNT(*), SUM(total_amount) \
from trips group by pickup_location_id,dropoff_location_id,payment_type having SUM(fare_amount + extra) < 0')
return res
def q4(self, spark: SparkSession) -> DataFrame:
print("""Query: select trips.payment_type, trips.fare_amount, trips.mta_tax, trips.trip_distance, trips.tolls_amount, cab_types.type
from trips inner join cab_types on trips.cab_type_id = cab_types.id
""")
res = spark.sql('select trips.payment_type, trips.fare_amount, trips.mta_tax, trips.trip_distance, trips.tolls_amount, cab_types.type \
from trips inner join cab_types on trips.cab_type_id = cab_types.id')
return res
def q5(self, spark: SparkSession) -> DataFrame:
print("""Query: select corr(trip_distance, total_amount) as correlation, AVG(trip_distance)
as mean_distance, AVG(total_amount) as mean_amount from trips group by pickup_location_id
""")
res = spark.sql('select corr(trip_distance, total_amount) as correlation, AVG(trip_distance) as mean_distance, AVG(total_amount) as mean_amount from trips group by pickup_location_id')
return res
def q6(self, spark: SparkSession) -> DataFrame:
print("""Query: select corr(trip_distance, total_amount) as correlation, AVG(trip_distance)
as mean_distance, AVG(total_amount) as mean_amount from trips group by payment_type
""")
res = spark.sql('select corr(trip_distance, total_amount) as correlation, AVG(trip_distance) as mean_distance, AVG(total_amount) as mean_amount from trips group by payment_type')
return res
def q7(self, spark: SparkSession) -> DataFrame:
print("""Query: select corr(trip_distance, total_amount) as correlation, AVG(trip_distance)
as mean_distance, AVG(total_amount) as mean_amount from trips group by dropoff_location_id
""")
res = spark.sql('select corr(trip_distance, total_amount) as correlation, AVG(trip_distance) as mean_distance, AVG(total_amount) as mean_amount from trips group by dropoff_location_id')
return res
# operations = {
# 'abs' : lambda df : df.select(F.abs(df['int_x'])),
# 'acos' : lambda df : df.select(F.acos(df['randn'])),
# 'acosh' : lambda df : df.select(F.acosh(df['randn'])),
# 'ascii': lambda df : df.select(F.ascii(df['prefix2'])),
# 'asin': lambda df : df.select(F.asin(df['randn1'])),
# 'asinh' : lambda df : df.select(F.asinh(df['randn1'])),
# 'atan' : lambda df : df.select(F.atan(df['randn1'])),
# 'atanh' : lambda df : df.select(F.atanh(df['randn1'])),
# 'atan2' : lambda df : df.select(F.atan2(df['randn1'], df['randn'])),
# 'base64' : lambda df : df.select(F.base64(df['value'])),
# 'bin' : lambda df : df.select(F.bin(df['int_x'])),
# 'bitwiseNOT' : lambda df : df.select(F.bitwiseNOT(df['int_x'])),
# 'cbrt' : lambda df : df.select(F.cbrt(df['randn'])),
# 'concat' : lambda df : df.select(F.concat(df['prefix2'], df['prefix4'])),
# 'concat_ws' : lambda df : df.select(F.concat_ws('-', df['prefix2'], df['prefix4'], df['double_x'])),
# 'conv' : lambda df : df.select(F.conv(df['int_x'], 10, 16)),
# 'cos' : lambda df : df.select(F.cos(df['randn'])),
# 'cosh' : lambda df : df.select(F.cosh(df['randn'])),
# 'crc32' : lambda df : df.select(F.crc32(df['value'])),
# 'degrees' : lambda df : df.select(F.degrees(df['degree'])),
# 'exp' : lambda df : df.select(F.exp(df['randn'])),
# 'expr' : lambda df : df.select(F.expr("length(double_x)")),
# 'factorial' : lambda df : df.select(F.factorial(df['small_int'])),
# 'hash' : lambda df : df.select(F.hash(df['value'])),
# 'hex' : lambda df : df.select(F.hex(df['value'])),
# 'hypot' : lambda df : df.select(F.hypot(df['int_x'], df['randn'])),
# 'levenshtein' : lambda df : df.select(F.levenshtein(df['value'],df['int_x'])),
# 'log' : lambda df : df.select(F.log(df['int_x'])),
# 'log10' : lambda df : df.select(F.log10(df['double_x'])),
# 'log1p' : lambda df : df.select(F.log1p(df['randn'])),
# 'log2' : lambda df : df.select(F.log2(df['randn1'])),
# 'md5' : lambda df : df.select(F.md5(df['value'])),
# 'pow' : lambda df : df.select(F.pow(df['randn'], df['small_int'])),
# 'radians' : lambda df : df.select(F.radians(df['degree'])),
# 'sha1' : lambda df : df.select(F.sha1(df['value'])),
# 'sha2' : lambda df : df.select(F.sha2(df['value'], 256)),
# 'signum' : lambda df : df.select(F.signum(df['int_x'])),
# 'sin' : lambda df : df.select(F.sin(df['randn'])),
# 'sinh' : lambda df : df.select(F.sinh(df['randn'])),
# 'sqrt' : lambda df : df.select(F.sqrt(df['small_int'])),
# 'tan' : lambda df : df.select(F.tan(df['randn'])),
# 'tanh' : lambda df : df.select(F.tanh(df['randn'])),
# 'xxhash64' : lambda df : df.select(F.xxhash64(df['value'])),
# 'bitwiseAND' : lambda df : df.select(df['small_int'].bitwiseAND(df['int_x'])),
# 'bitwiseOR' : lambda df : df.select(df['small_int'].bitwiseOR(df['int_x'])),
# 'bitwiseXOR' : lambda df : df.select(df['small_int'].bitwiseXOR(df['int_x'])),
# '(x+y)_double': lambda df : df.select(df['double_x'] + df['double_y']),
# '(x-y)_double': lambda df : df.select(df['double_x'] - df['double_y']),
# '(x*y)_double': lambda df : df.select(df['double_x'] * df['double_y']),
# '(x/y)_double': lambda df : df.select(df['double_x'] / df['double_y']),
# '(x+y)_int': lambda df : df.select(df['int_x'] + df['int_y']),
# '(x-y)_int': lambda df : df.select(df['int_x'] - df['int_y']),
# '(x*y)_int': lambda df : df.select(df['int_x'] * df['int_y']),
# '(x/y)_int': lambda df : df.select(df['int_x'] / df['int_y']),
# 'a=a+b_int' : lambda df : loop(lambda df : df.withColumn("int_a", df['int_a'] + df['int_b']), df),
# 'a=a+b_double' : lambda df : loop(lambda df : df.withColumn("double_a", df['double_a'] + df['double_b']), df),
# 'a=a*b_int' : lambda df : loop(lambda df : df.withColumn("int_a", df['int_a'] * df['int_b']), df),
# 'a=a*b_double' : lambda df : loop(lambda df : df.withColumn("double_a", df['double_a'] * df['double_b']), df),
# 'a=a+b*x_int' : lambda df : loop(lambda df : df.withColumn("int_a", df['int_a'] + df['int_b'] * df['int_x']), df),
# 'a=a+b*x_double' : lambda df : loop(lambda df : df.withColumn("double_a", df['double_a'] + df['double_b'] * df['double_x']), df),
# }
# aggregate = {
# 'approx_count_distinct' : lambda df : df.agg(F.approx_count_distinct(df['int_x'])),
# 'avg_double' : lambda df : df.agg(F.avg(df['double_x'])),
# 'avg(x+y)_double' : lambda df : df.agg(F.avg(df['double_y'] + df['double_x'])),
# 'avg_int' : lambda df : df.agg(F.avg(df['int_x'])),
# 'avg(x+y)_int' : lambda df : df.agg(F.avg(df['int_y'] + df['int_x'])),
# 'corr' : lambda df : df.agg(F.corr(df['double_x'], df['randn'])),
# 'count' : lambda df : df.agg(F.count(df['value'])),
# 'countDistinct' : lambda df : df.agg(F.countDistinct(df['value'], df['int_x'])),
# 'covar_pop' : lambda df : df.agg(F.covar_pop(df['int_x'], df['double_x'])),
# 'covar_samp' : lambda df : df.agg(F.covar_samp(df['randn'], df['double_x'])),
# 'kurtosis' : lambda df : df.agg(F.kurtosis(df['randn1'])),
# 'max' : lambda df : df.agg(F.max(df['double_x'])),
# 'mean' : lambda df : df.agg(F.mean(df['randn'])),
# 'min' : lambda df : df.agg(F.min(df['randn'])),
# 'percentile_approx' : lambda df : df.agg(F.percentile_approx('randn',[0.25,0.5,0.75], 100000)),
# 'skewness' : lambda df : df.agg(F.skewness(df['randn'])),
# 'stddev' : lambda df : df.agg(F.stddev(df['randn1'])),
# 'stddev_pop' : lambda df : df.agg(F.stddev_pop(df['randn1'])),
# 'stddev_samp' : lambda df : df.agg(F.stddev_samp(df['randn1'])),
# 'sum_double' : lambda df : df.agg(F.sum(df['double_x'])),
# 'sum(x+y)_double' : lambda df : df.agg(F.sum(df['double_y'] + df['double_x'])),
# 'sum_int' : lambda df : df.agg(F.sum(df['int_x'])),
# 'sum(x+y)_int' : lambda df : df.agg(F.sum(df['int_y'] + df['int_x'])),
# 'sumDistinct' : lambda df : df.agg(F.sumDistinct(df['int_x'])),
# 'var_pop' : lambda df : df.agg(F.var_pop(df['small_int'])),
# 'var_samp' : lambda df : df.agg(F.var_samp(df['int_x']))
# }
|
1691814
|
import os,sys
from tkinter import *
import tkinter.font as font
from tkinter import filedialog
class Final(Frame):
def __init__(self, parent=None, pid=0,side=LEFT, anchor=N,wt=600,ht=400,is_next=True,is_back=True,next_frame=None,back_frame=None,info_txt="",path_frm=None,path_frm2=None,frames=[],fdict=[],prefix_var=None,cytnx_dir=None):
Frame.__init__(self, parent)
self.pid = pid
self.var = IntVar()
self.path_frm = path_frm
#self.path_frm2 = path_frm2
#self.cytnx_dir = cytnx_dir
self.frames = frames
self.fd = fdict
self.prefix_var = prefix_var
self.txtvar = StringVar()
self.lbl = Label(self,textvariable=self.txtvar,anchor='w',justify=LEFT)
self.lbl['font'] = font.Font(size=16)
self.set_info_text(info_txt)
self.lbl.pack(fill="both")
self.termf = Frame(self, height=400, width=600)
self.termf.pack(fill=BOTH, expand=YES)
if is_next:
self.nbut = Button(self,text="[ install ]",command=self._action_nxt)
self.nbut.pack(side=RIGHT)
if is_back:
self.bfram = back_frame
self.bbut = Button(self,text="<- back",command=self._action_bak)
self.bbut.pack(side=RIGHT)
def _analysis(self):
str_print = "";
if(self.frames[self.fd['MKL']].state_str()=="ON"):
str_print += "[x] USE_MKL\n"
str_print += "[x] USE_OMP [force by mkl]\n"
else:
str_print += "[ ] USE_MKL\n"
if(self.frames[self.fd['OMP']].state_str()=="ON"):
str_print += "[x] USE_OMP\n"
if(self.frames[self.fd['HPTT']].state_str()=="ON"):
str_print += "[x] USE_HPTT\n"
if(self.frames[self.fd['HPTT_finetune']].state_str()=="ON"):
str_print += " [x] HPTT finetune\n"
else:
str_print += " [ ] HPTT finetune\n"
if(self.frames[self.fd['HPTT_arch']].state_str()=="AVX"):
str_print += " [x] HPTT optim option = AVX\n"
elif(self.frames[self.fd['HPTT_arch']].state_str()=="IBM"):
str_print += " [x] HPTT optim option = IBM\n"
elif(self.frames[self.fd['HPTT_arch']].state_str()=="ARM"):
str_print += " [x] HPTT optim option = ARM\n"
else:
str_print += " [ ] HPTT optim option\n"
else:
str_print += "[ ] USE_HPTT\n"
if(self.frames[self.fd['CUDA']].state_str()=="ON"):
str_print += "[x] USE_CUDA\n"
if(self.frames[self.fd['CUTT']].state_str()=="ON"):
str_print += " [x] USE_CUTT\n"
if(self.frames[self.fd['CUTT_finetune']].state_str()=="ON"):
str_print += " [x] CUTT finetune\n"
else:
str_print += " [ ] CUTT finetune\n"
else:
str_print += " [ ] USE_CUTT\n"
else:
str_print += "[ ] USE_CUDA\n"
if(self.frames[self.fd['PY']].state_str()=="ON"):
str_print += "[x] BUILD_PYTHON API\n"
else:
str_print += "[ ] BUILD_PYTHON API\n"
self.txtvar.set("Review install:\n"+str_print)
def _action_nxt(self):
print("Review install")
## analysis all flags and generate command.
strout = "cmake";
if not self.prefix_var.get()=='default':
strout += " -DCMAKE_INSTALL_PREFIX=%s"%(self.prefix_var.get())
if(self.frames[self.fd['MKL']].state_str()=="ON"):
strout += " -DUSE_MKL=on"
else:
if(self.frames[self.fd['OMP']].state_str()=="ON"):
strout += " -DUSE_OMP=on"
if(self.frames[self.fd['HPTT']].state_str()=="ON"):
strout += " -DUSE_HPTT=on"
if(self.frames[self.fd['HPTT_finetune']].state_str()=="ON"):
strout += " -DHPTT_ENABLE_FINE_TUNE=on"
if(self.frames[self.fd['HPTT_arch']].state_str()=="AVX"):
strout += " -DHPTT_ENABLE_AVX=on"
elif(self.frames[self.fd['HPTT_arch']].state_str()=="IBM"):
strout += " -DHPTT_ENABLE_IBM=on"
elif(self.frames[self.fd['HPTT_arch']].state_str()=="ARM"):
strout += " -DHPTT_ENABLE_ARM=on"
if(self.frames[self.fd['CUDA']].state_str()=="ON"):
strout += " -DUSE_CUDA=on"
if(self.frames[self.fd['CUTT']].state_str()=="ON"):
strout += " -DUSE_CUTT=on"
if(self.frames[self.fd['CUTT_finetune']].state_str()=="ON"):
strout += " -DCUTT_ENABLE_FINE_TUNE=on"
if(self.frames[self.fd['PY']].state_str()=="ON"):
strout += " -DBUILD_PYTHON=on"
else:
strout += " -DBUILD_PYTHON=off"
strout += " ../\n"
"""
strout += " %s"%(self.cytnx_dir.get())
## check:
if(len(self.cytnx_dir.get())==0):
raise ValueError("[ERROR] invalid cytnx source path.")
else:
if not os.path.exists(self.cytnx_dir.get()):
raise ValueError("[ERROR] invalid cytnx source path. cannot find path.")
"""
# hide all op buttoms
self.lbl.destroy()
self.bbut.destroy()
self.nbut.destroy()
self.path_frm.destroy()
#self.path_frm2.destroy()
f = open("ainstall.sh",'w')
f.write("echo $PWD\n")
f.write("rm -rf build\n")
f.write("mkdir build\n")
f.write("cd build\n")
f.write("echo $PWD\n")
f.write(strout)
f.write("make\n")
f.write("make install")
os.system('xterm -into %d -geometry 95x30 -s -sb -e sh ainstall.sh&' %(self.termf.winfo_id()))
#os.system('xterm -into %d -geometry 40x20 -sb -e %s &' %(self.termf.winfo_id(),"cpuinfo"))
def _action_bak(self):
self.pack_forget()
self.bfram.pack(side=TOP,fill=X)
def jump_pack(self,direction,N,start_frame):
if N>0:
raise ValueError("cannot call jump on final page.")
else:
self.set_back_frame(start_frame)
self._analysis()
self.pack(fill="both")
def state_id(self):
return self.var.get()
def set_back_frame(self,back_frame):
self.bfram = back_frame
def set_info_text(self,txt):
self.txtvar.set(txt)
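# Illustrative _action_nxt output: with MKL off, OMP on, HPTT on (fine tune + AVX),
# CUDA off and the Python API on, the cmake line written to ainstall.sh is:
#   cmake -DUSE_OMP=on -DUSE_HPTT=on -DHPTT_ENABLE_FINE_TUNE=on -DHPTT_ENABLE_AVX=on -DBUILD_PYTHON=on ../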
class Optionbar(Frame):
def __init__(self, parent=None, pid=0,picks=[], picks_js=None, side=LEFT, anchor=N,wt=600,ht=400,is_next=True,is_back=True,next_frame=None,back_frame=None,info_txt=""):
Frame.__init__(self, parent)
self.pid = pid
self.var = IntVar()
self.picks = picks
if picks_js is None:
self.picks_js = [1 for i in range(len(picks))]
else:
self.picks_js = picks_js
self.dic = dict(zip(picks,range(len(picks))))
self.txtvar = StringVar()
lbl = Label(self,textvariable=self.txtvar,anchor=W)
lbl['font'] = font.Font(size=16)
self.set_info_text(info_txt)
lbl.pack(fill='both')
for pick in picks:
chk = Radiobutton(self, text=pick, variable=self.var,value=self.dic[pick])
chk.pack(side=side, anchor=anchor, expand=YES)
if is_next:
self.nfram = next_frame
self.nbut = Button(self,text="next ->",command=self._action_nxt)
self.nbut.pack(side=RIGHT)
if is_back:
self.bfram = back_frame
self.bbut = Button(self,text="<- back",command=self._action_bak)
self.bbut.pack(side=RIGHT)
def _action_nxt(self):
self.pack_forget()
self.jump_pack('nxt',self.picks_js[self.state_id()],self)
def _action_bak(self):
self.pack_forget()
self.bfram.pack(fill='both')
def jump_pack(self,direction,N,start_frame):
if N>0:
if direction == 'nxt':
self.nfram.jump_pack('nxt',N-1,start_frame)
else:
raise ValueError("direction should be 'nxt' or 'bak'")
else:
self.set_back_frame(start_frame)
self.pack(fill='both')
def state_id(self):
return self.var.get()
def state_str(self):
return self.picks[self.var.get()]
def set_next_frame(self,next_frame):
self.nfram = next_frame
def set_back_frame(self,back_frame):
self.bfram = back_frame
def set_info_text(self,txt):
self.txtvar.set(txt)
def set_default(self,val,by_str=True):
ival = val
if by_str:
ival = self.dic[val]
self.var.set(ival)
top = Tk()
top.title("Cytnx installer")
#top.geometry("400x300")
top.resizable(False,False)
#main.mainloop()
PREFIX = None
prefix_var = StringVar()
prefix_var.set("default")
def get_prefix():
PREFIX = filedialog.askdirectory(title = "Select directory to install cytnx")
prefix_var.set(PREFIX)
print(PREFIX)
"""
CYTNX_DIR = None
cytnx_var = StringVar()
cytnx_var.set("")
def get_cytnx_dir():
CYTNX_DIR = filedialog.askdirectory(title = "Select cytnx source path")
cytnx_var.set(CYTNX_DIR)
print(CYTNX_DIR)
"""
frm = Frame(top)
pp = Label(frm,text="install path:",anchor=W)
pp.pack(side=LEFT)
p_str = Label(frm,textvariable=prefix_var,anchor=W)
p_str.pack(side=LEFT)
but_f = Button(frm,text="choose directory to install",command=get_prefix,anchor=E)
but_f.pack(side=RIGHT)
frm.pack(side=TOP,fill=X)
"""
frm2 = Frame(top)
pp2 = Label(frm2,text="cytnx source path:",anchor=W)
pp2.pack(side=LEFT)
p2_str = Label(frm2,textvariable=cytnx_var,anchor=W)
p2_str.pack(side=LEFT)
but_f = Button(frm2,text="choose cytnx source path",command=get_cytnx_dir,anchor=E)
but_f.pack(side=RIGHT)
frm2.pack(side=TOP,fill=X)
"""
ftype = []
## page mkl
ftype.append("MKL")
mkl_tk = Optionbar(top,0,['ON','OFF'],picks_js=[2,1],is_back=False)
mkl_tk.set_default('OFF')
mkl_tk.set_info_text("use mkl as linalg library? (default: OFF)\n"+
"[Note] 1. default use openblas\n"+
"[Note] 2. if ON, openmp is forced enable."
)
## page omp
ftype.append("OMP")
omp_tk = Optionbar(top,1,['ON','OFF'])
omp_tk.set_default('OFF')
omp_tk.set_info_text("accelerate using OpenMP? (default: OFF)")
## page hptt
ftype.append("HPTT")
hptt_tk = Optionbar(top,2,['ON','OFF'],picks_js=[1,3])
hptt_tk.set_default('OFF')
hptt_tk.set_info_text("accelerate tensor transpose using HPTT lib? (default: OFF)")
ftype.append("HPTT_finetune")
hptt_op2_tk = Optionbar(top,3,['ON','OFF'])
hptt_op2_tk.set_default('OFF')
hptt_op2_tk.set_info_text("build HPTT lib with optimization on current hardware? (default: OFF)")
ftype.append("HPTT_arch")
hptt_op_tk = Optionbar(top,4,['AVX','IBM','ARM','OFF'])
hptt_op_tk.set_default('OFF')
hptt_op_tk.set_info_text("build HPTT lib with additional instructions support? (default: OFF)")
## page cuda
ftype.append("CUDA")
cuda_tk = Optionbar(top,5,['ON','OFF'],picks_js=[1,2])
cuda_tk.set_default('OFF')
cuda_tk.set_info_text("install GPU(CUDA) support in cytnx? (default: OFF)")
## cutt
ftype.append("CUTT")
cutt_tk = Optionbar(top,6,['ON','OFF'],picks_js=[1,2])
cutt_tk.set_default('OFF')
cutt_tk.set_info_text("accelerate tensor transpose on GPU using cuTT lib? (default: OFF)")
ftype.append("CUTT_finetune")
cutt_op_tk = Optionbar(top,7,['ON','OFF'])
cutt_op_tk.set_default('OFF')
cutt_op_tk.set_info_text("build cuTT lib with optimization on current hardware? (default: OFF)")
## page python
ftype.append("PY")
python_tk = Optionbar(top,8,['ON','OFF'])
python_tk.set_default('ON')
python_tk.set_info_text("build python API? (default: ON)")
## final wrapping up
td = dict(zip(ftype,range(len(ftype))))
fin_tk = Final(top,10,path_frm=frm,frames=[mkl_tk,omp_tk,hptt_tk,hptt_op2_tk,hptt_op_tk,cuda_tk,cutt_tk,cutt_op_tk,python_tk],fdict=td,prefix_var=prefix_var)
fin_tk.set_info_text("Review install")
## chain:
mkl_tk.set_next_frame(omp_tk)
omp_tk.set_next_frame(hptt_tk)
hptt_tk.set_next_frame(hptt_op2_tk)
hptt_op2_tk.set_next_frame(hptt_op_tk)
hptt_op_tk.set_next_frame(cuda_tk)
cuda_tk.set_next_frame(cutt_tk)
cutt_tk.set_next_frame(cutt_op_tk)
cutt_op_tk.set_next_frame(python_tk)
python_tk.set_next_frame(fin_tk)
## visible entry point
mkl_tk.pack(side=TOP,fill="both")
top.mainloop()
exit(1)
def bool2str(bl):
if bl:
return "ON"
else:
return "OFF"
## list all the major options:
USE_MKL=False
USE_OMP=False
USE_CUDA=False
USE_CUTT=False
#CUTT_option_noalign=False
CUTT_option_finetune=False
USE_HPTT=False
HPTT_option_AVX=False
HPTT_option_IBM=False
HPTT_option_ARM=False
HPTT_option_finetune=False
BUILD_PYTHON=True
PREFIX=None
## checking linalg, and openmp.
tmp = input("[2] use mkl as linalg library (default OFF)? (Y/N):")
if(len(tmp.strip())!=0):
USE_MKL=resolve_yn(tmp)
print(" >>USE_MKL: ",USE_MKL)
print("--------------")
if(USE_MKL):
print(" -->[2a] force USE_OMP=True")
print("--------------")
else:
tmp = input("[2a] use openmp accelerate (default OFF)? (Y/N):")
if(len(tmp.strip())!=0):
USE_OMP=resolve_yn(tmp)
print(" >>USE_OMP:",USE_OMP)
print("--------------")
## checking HPTT:
tmp = input("[3] use hptt library to accelrate tensor transpose (default OFF)? (Y/N):")
if(len(tmp.strip())!=0):
USE_HPTT=resolve_yn(tmp)
print(" >>USE_HPTT: ",USE_HPTT)
print("--------------")
if USE_HPTT:
## additional options:
tmp = input("[3a] hptt option(1): fine tune for the native hardware (default OFF)? (Y/N):")
if(len(tmp.strip())!=0):
HPTT_option_finetune=resolve_yn(tmp)
print(" >>HPTT_ENABLE_FINE_TUNE:",HPTT_option_finetune)
print("--------------")
tmp = input("[3b] hptt option(2): variant options (1: AVX 2: IBM 3: ARM, default OFF)? (1,2,3 or enter for default):")
if(len(tmp.strip())!=0):
hptttype=resolve_num(tmp,{1,2,3})
if(hptttype==1):
HPTT_option_AVX=True
print(" >>HPTT_ENABLE_ABX:",HPTT_option_AVX)
elif(hptttype==2):
HPTT_option_IBM=True
print(" >>HPTT_ENABLE_IBM:",HPTT_option_IBM)
elif(hptttype==3):
HPTT_option_ARM=True
print(" >>HPTT_ENABLE_ARM:",HPTT_option_ARM)
else:
print(" *No additional options for hptt*")
print("--------------")
## checking CUDA:
tmp = input("[4] with GPU (CUDA) support (default OFF)? (Y/N):")
if(len(tmp.strip())!=0):
USE_CUDA=resolve_yn(tmp)
print(" >>USE_CUDA: ",USE_CUDA)
print("--------------")
if USE_CUDA:
## additional options:
tmp = input("[4a] cuda option(1): use cutt library to accelerate tensor transpose (default OFF)? (Y/N):")
if(len(tmp.strip())!=0):
USE_CUTT=resolve_yn(tmp)
print(" >>USE_CUTT:",USE_CUTT)
print("--------------")
if USE_CUTT:
## add-additional options:
tmp = input("[4a-1] cutt option(1): fine tune for the native hardware (default OFF)? (Y/N):")
if(len(tmp.strip())!=0):
CUTT_option_finetune=resolve_yn(tmp)
print(" >>CUTT_ENABLE_FINE_TUNE:",CUTT_option_finetune)
print("--------------")
## checking PYTHON:
tmp = input("[5] Build python API (default ON)? (Y/N):")
if(len(tmp.strip())!=0):
BUILD_PYTHON=resolve_yn(tmp)
print(" >>BUILD_PYTHON: ",BUILD_PYTHON)
print("--------------")
##=================================================================
print("*************************")
print(" Review install option ")
print("")
print(" USE_MKL: ",USE_MKL)
print(" USE_OMP: ",USE_OMP)
print(" USE_HPTT: ",USE_HPTT)
if(USE_HPTT):
print(" -- HPTT_option: ")
print(" HPTT_FINE_TUNE: ",HPTT_option_finetune)
if(HPTT_option_AVX):
print(" HPTT_ENABLE_ABX:",HPTT_option_AVX)
if(HPTT_option_IBM):
print(" HPTT_ENABLE_IBM:",HPTT_option_IBM)
if(HPTT_option_ARM):
print(" HPTT_ENABLE_ARM:",HPTT_option_ARM)
print(" USE_CUDA: ",USE_CUDA)
print(" USE_CUTT: ",USE_CUTT)
if(USE_CUTT):
print(" -- CUTT_option: ")
print(" CUTT_ENABLE_FINE_TUNE: ",CUTT_option_finetune)
print(" BUILD_PYTHON: ",BUILD_PYTHON)
print("*************************")
## generate sh file:
f = open("ainstall.sh",'w')
f.write("rm -rf build\n")
f.write("mkdir build\n")
f.write("cd build\n")
f.write("cmake")
if not PREFIX is None:
f.write(" -DCMAKE_INSTALL_PREFIX=%s"%(PREFIX))
if(USE_MKL):
f.write(" -DUSE_MKL=on")
else:
if(USE_OMP):
f.write(" -DUSE_OMP=on")
if(USE_HPTT):
f.write(" -DUSE_HPTT=on")
if(HPTT_option_finetune):
f.write(" -DHPTT_ENABLE_FINE_TUNE=on")
if(HPTT_option_AVX):
f.write(" -DHPTT_ENABLE_AVX=on")
if(HPTT_option_IBM):
f.write(" -DHPTT_ENABLE_IBM=on")
if(HPTT_option_ARM):
f.write(" -DHPTT_ENABLE_ARM=on")
if(USE_CUDA):
f.write(" -DUSE_CUDA=on")
if(USE_CUTT):
f.write(" -DUSE_CUTT=on")
if(CUTT_option_finetune):
f.write("-DCUTT_ENABLE_FINE_TUNE=on")
if(BUILD_PYTHON):
f.write(" -DBUILD_PYTHON=on")
else:
f.write(" -DBUILD_PYTHON=off")
f = open("ainstall.sh",'w')
f.write("rm -rf build\n")
f.write("mkdir build\n")
f.write("cd build\n")
f.write("cmake")
f.write(" ../\n")
f.write("make\n")
f.write("make install")
f.close()
|
1691836
|
import argparse
import os
import subprocess
import sys
from datetime import datetime
from config.Config import Config
from enums.Architectures import Arch
from utils.console import Console
from utils.utils import detect_arch
class ExportViewer:
def __init__(self, arch=Arch.x64):
self.path = str(Config().get_path("DUMPERS", f"dumpbin_{arch.value}"))
def check(self, filename, verbose=False):
Console.auto_line(f"[+] Starting Export Viewer At {datetime.utcnow()}")
if not os.path.isfile(filename):
Console.auto_line(f"[-] Export Viewer: Target File {filename} not found")
sys.exit(1)
try:
cmd = f"\"{self.path}\" -EXPORTS \"{filename}\""
# print(cmd)
output = subprocess.check_output(cmd).decode().rstrip()
if verbose:
Console.auto_line(output)
else:
do_print = False
headers = []
results = []
for line in output.split("\n"):
row = []
if line.find("ordinal") > -1 and line.find("hint") > -1:
headers = [x for x in line.rstrip("\r\n").split(" ") if x.strip() != ""]
do_print = True
continue
elif line.find("Summary") > -1:
do_print = False
if do_print:
results.append([x for x in line.rstrip("\r\n").split(" ") if x.strip() != ""])
from tabulate import tabulate
Console.auto_line(tabulate(results, headers=headers))
except subprocess.CalledProcessError as e:
Console.auto_line(f" [-] Error: {e}")
Console.auto_line(f"[+] Export Viewer Finished At {datetime.utcnow()}")
|
1691895
|
import os
from LAUG.aug.Word_Perturbation.multiwoz.multiwoz_eda import MultiwozEDA
from LAUG.aug.Word_Perturbation.multiwoz.db.slot_value_replace import MultiSourceDBLoader, MultiSourceDBLoaderArgs
from LAUG.aug.Word_Perturbation.multiwoz.util import load_json, dump_json
from LAUG import DATA_ROOT,REPO_ROOT
import json
import zipfile
def read_zipped_json(filepath, filename):
print("zip file path = ", filepath)
archive = zipfile.ZipFile(filepath, 'r')
return json.load(archive.open(filename))
class multiwoz_eda_config:
def __init__(self,):
self.multiwoz=read_zipped_json(os.path.join(DATA_ROOT, 'multiwoz','train.json.zip'),'train.json')
multiwoz_db_dir = os.path.join(DATA_ROOT, 'multiwoz', 'db')
multiwoz_multiwoz_domain_slot_map = {
('attraction', 'area'): ('attraction', 'Area'),
('attraction', 'type'): ('attraction', 'Type'),
('attraction', 'name'): ('attraction', 'Name'),
('attraction', 'address'): ('attraction', 'Addr'),
('hospital', 'department'): ('hospital', 'Department'),
('hospital', 'address'): ('hospital', 'Addr'),
('hotel', 'type'): ('hotel', 'Type'),
('hotel', 'area'): ('hotel', 'Area'),
('hotel', 'name'): ('hotel', 'Name'),
('hotel', 'address'): ('hotel', 'Addr'),
('restaurant', 'food'): ('restaurant', 'Food'),
('restaurant', 'area'): ('restaurant', 'Area'),
('restaurant', 'name'): ('restaurant', 'Name'),
('restaurant', 'address'): ('restaurant', 'Addr'),
('train', 'destination'): ('train', 'Dest'),
('train', 'departure'): ('train', 'Depart')
}
multiwoz_sgd_domain_slot_map = {
('train', 'dest'): ('train', 'to'),
('train', 'depart'): ('train', 'from'),
('hotel', 'name'): ('hotels', 'hotel_name'),
('hotel', 'addr'): ('hotels', 'address'),
('attraction', 'name'): ('travel', 'attraction_name'),
('restaurant', 'name'): ('restaurants', 'restaurant_name'),
('restaurant', 'addr'): ('restaurants', 'street_address')
}
loader_args = [MultiSourceDBLoaderArgs(multiwoz_db_dir, multiwoz_multiwoz_domain_slot_map)]
sgd_db_dir=os.path.join(REPO_ROOT,"LAUG/aug/Word_Perturbation/db/sgd-db/")
loader_args.append(MultiSourceDBLoaderArgs(
sgd_db_dir,
multiwoz_sgd_domain_slot_map
))
self.db_loader = MultiSourceDBLoader(loader_args)
def main(multiwoz_filepath, output_filepath,
sgd_db_dir=None,
alpha_sr=0.1, alpha_ri=0.1, alpha_rs=0.1, p_rd=0.1, num_aug=2,
p_slot_value_replacement=0.25):
multiwoz = load_json(multiwoz_filepath)
multiwoz_db_dir = os.path.join(DATA_ROOT, 'multiwoz', 'db')
multiwoz_multiwoz_domain_slot_map = {
('attraction', 'area'): ('attraction', 'Area'),
('attraction', 'type'): ('attraction', 'Type'),
('attraction', 'name'): ('attraction', 'Name'),
('attraction', 'address'): ('attraction', 'Addr'),
('hospital', 'department'): ('hospital', 'Department'),
('hospital', 'address'): ('hospital', 'Addr'),
('hotel', 'type'): ('hotel', 'Type'),
('hotel', 'area'): ('hotel', 'Area'),
('hotel', 'name'): ('hotel', 'Name'),
('hotel', 'address'): ('hotel', 'Addr'),
('restaurant', 'food'): ('restaurant', 'Food'),
('restaurant', 'area'): ('restaurant', 'Area'),
('restaurant', 'name'): ('restaurant', 'Name'),
('restaurant', 'address'): ('restaurant', 'Addr'),
('train', 'destination'): ('train', 'Dest'),
('train', 'departure'): ('train', 'Depart')
}
multiwoz_sgd_domain_slot_map = {
('train', 'dest'): ('train', 'to'),
('train', 'depart'): ('train', 'from'),
('hotel', 'name'): ('hotels', 'hotel_name'),
('hotel', 'addr'): ('hotels', 'address'),
('attraction', 'name'): ('travel', 'attraction_name'),
('restaurant', 'name'): ('restaurants', 'restaurant_name'),
('restaurant', 'addr'): ('restaurants', 'street_address')
}
loader_args = [MultiSourceDBLoaderArgs(multiwoz_db_dir, multiwoz_multiwoz_domain_slot_map)]
assert sgd_db_dir is not None
loader_args.append(MultiSourceDBLoaderArgs(
sgd_db_dir,
multiwoz_sgd_domain_slot_map
))
db_loader = MultiSourceDBLoader(loader_args)
eda = MultiwozEDA(multiwoz, db_loader,
slot_value_replacement_probability=p_slot_value_replacement,
alpha_sr=alpha_sr, alpha_ri=alpha_ri, alpha_rs=alpha_rs, p_rd=p_rd, num_aug=num_aug)
result = eda.augment_multiwoz_dataset('usr')
dump_json(result, output_filepath, indent=4)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--multiwoz_filepath", '--multiwoz', default='multiwoz.json')
parser.add_argument('--output_filepath', '--output', '-o', default='augmented_multiwoz.json')
parser.add_argument('--alpha_sr', type=float, default=0.1, help='probability of replacement')
parser.add_argument('--alpha_ri', type=float, default=0.1, help='probability of insertion')
parser.add_argument('--alpha_rs', type=float, default=0.1, help='probability of swap')
parser.add_argument('--p_rd', type=float, default=0.1, help="probability of deletion")
parser.add_argument('--num_aug', type=int, default=2,
help="generate `num_aug` candidates with EDA and randomly choose one dialog as augmented dialog.")
parser.add_argument('--p_slot_value_replacement', '-p_svr', type=float, default=0.25,
help='probability to replace a slot value.')
parser.add_argument('--sgd_db_dir', '--sgd', help='dir of sgd db.')
opts = parser.parse_args()
main(**vars(opts))
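# Example invocation (the script name is an assumption; paths are placeholders):
#   python multiwoz_eda_augment.py --multiwoz train.json -o augmented_multiwoz.json \
#       --sgd LAUG/aug/Word_Perturbation/db/sgd-db/ --num_aug 2 -p_svr 0.25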
|
1691930
|
from pybricks.parameters import Port
class AnalogSensor:
"""
Generic or custom analog sensor.
Args:
port (Port): Port to which the sensor is connected.
"""
def __init__(self, port: Port):
if port == Port.A or port == Port.B or port == Port.C or port == Port.D:
raise ValueError("Sensors must use Port S1, S2, S3, or S4.")
def voltage(self) -> int:
"""
Measures analog voltage.
Returns:
Analog voltage in millivolts.
"""
return 0
def resistance(self) -> int:
"""
Measures resistance.
This value is only meaningful if the analog device is a passive load such as a resistor or thermistor.
Returns:
Resistance of the analog device in ohms.
"""
return 0
def active(self):
"""
Sets sensor to active mode. This sets pin 5 of the sensor port to high.
This is used in some analog sensors to control a switch. For example, if you use the NXT Light Sensor as a custom analog sensor, this method will turn the light on. From then on, voltage() returns the raw reflected light value.
"""
...
def passive(self):
"""
Sets sensor to passive mode. This sets pin 5 of the sensor port to low.
This is used in some analog sensors to control a switch. For example, if you use the NXT Light Sensor as a custom analog sensor, this method will turn the light off. From then on, voltage() returns the raw ambient light value.
"""
...
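# Usage sketch (port choice is illustrative; this stub always returns 0):
#   sensor = AnalogSensor(Port.S1)
#   sensor.active()               # pin 5 high, e.g. NXT Light Sensor LED on
#   millivolts = sensor.voltage()
#   ohms = sensor.resistance()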
|
1691964
|
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.path
import numexpr as ne
import scipy as sp
import scipy.sparse
plt.ion()
import pybie2d
"""
Demonstrate how to use the pybie2d package to solve an interior/exterior
Laplace problem on a complicated domain using global quadrature
and boundary collections.
"""
N = 1000
NB1 = 500
NB2 = 600
NB3 = 600
# extract some functions for easy calling
squish = pybie2d.misc.curve_descriptions.squished_circle
star = pybie2d.misc.curve_descriptions.star
GSB = pybie2d.boundaries.global_smooth_boundary.global_smooth_boundary.Global_Smooth_Boundary
Grid = pybie2d.grid.Grid
PointSet = pybie2d.point_set.PointSet
Laplace_Layer_Apply = pybie2d.kernels.high_level.laplace.Laplace_Layer_Apply
Laplace_Layer_Singular_Apply = pybie2d.kernels.high_level.laplace.Laplace_Layer_Singular_Apply
Cauchy_Layer_Apply = pybie2d.kernels.high_level.cauchy.Cauchy_Layer_Apply
Find_Near_Points = pybie2d.misc.near_points.find_near_points
Pairing = pybie2d.pairing.Pairing
Boundary_Collection = pybie2d.boundaries.collection.BoundaryCollection
Evaluate_Tau = pybie2d.solvers.laplace_dirichlet.Evaluate_Tau
LaplaceDirichletSolver = pybie2d.solvers.laplace_dirichlet.LaplaceDirichletSolver
boundary1 = GSB(c=squish(NB1,r=2,b=0.3,rot=np.pi/4.0))
boundary2 = GSB(c=star(NB2,x=0.75,y=0.75,r=0.3,a=0.4,f=7,rot=np.pi/3.0))
boundary3 = GSB(c=star(NB3,x=-0.75,y=-0.75,r=0.4,a=0.05,f=11,rot=np.pi/3.0))
boundary = Boundary_Collection()
boundary.add([boundary1, boundary2, boundary3], ['i', 'e', 'e'])
boundary.amass_information()
def solution_func(x, y):
d2a = (x-0.75)**2 + (y-0.75)**2
d2b = (x+0.75)**2 + (y+0.75)**2
return ne.evaluate('log(sqrt(d2a)) + log(sqrt(d2b)) + 2*x + y')
bc1 = solution_func(boundary1.x, boundary1.y)
bc2 = solution_func(boundary2.x, boundary2.y)
bc3 = solution_func(boundary3.x, boundary3.y)
bc = np.concatenate([bc1, bc2, bc3])
def err_plot(up):
# compute the error
errorp = up - solution_func(full_grid.xg[phys], full_grid.yg[phys])
digitsp = -np.log10(np.abs(errorp)+1e-16)
digits = np.zeros_like(full_grid.xg)
digits[phys] = digitsp
mdigits = np.ma.array(digits, mask=ext)
# plot the error as a function of space (only good in interior)
fig, ax = plt.subplots(1,1)
clf = ax.imshow(mdigits[:,::-1].T, extent=[-2,2,-2,2],
cmap=mpl.cm.viridis_r)
ax.set_aspect('equal')
fig.colorbar(clf)
print('Error: {:0.2e}'.format(np.abs(errorp).max()))
################################################################################
# find physical region
full_grid = Grid([-2,2], N, [-2,2], N)
# this is hiding a lot of stuff!
phys1, ext1 = boundary1.find_interior_points(full_grid)
phys2, ext2 = boundary2.find_interior_points(full_grid)
phys3, ext3 = boundary3.find_interior_points(full_grid)
phys = full_grid.reshape(np.logical_and.reduce([phys1, ext2, ext3]))
ext = np.logical_not(phys)
################################################################################
# iteratively solve for the density
solver = LaplaceDirichletSolver(boundary, solve_type='iterative', check_close=False)
tau = solver.solve(bc, disp=True, restart=100, tol=1e-14)
################################################################################
# evaluate solution (no close corrections)
gridp = Grid([-2,2], N, [-2,2], N, mask=phys)
u = np.zeros_like(gridp.xg)
up = Evaluate_Tau(boundary, gridp, tau)
u[phys] = up
err_plot(up)
################################################################################
# make on-the-fly close corrections
|
1691967
|
import pytest
from protoactor.actor import PID
from protoactor.remote.messages import JsonMessage
from protoactor.remote.serialization import Serialization
from tests.remote.messages.protos_pb2 import DESCRIPTOR
@pytest.fixture(scope="session", autouse=True)
def register_file_descriptor():
Serialization().register_file_descriptor(DESCRIPTOR)
def test_can_serialize_and_deserialize_json_pid():
type_name = "actor.PID"
json = JsonMessage(type_name, "{ \"address\":\"123\", \"id\":\"456\"}")
data = Serialization().serialize(json, 1)
deserialized = Serialization().deserialize(type_name, data, 1)
assert deserialized.address == "123"
assert deserialized.id == "456"
def test_can_serialize_and_deserialize_json():
type_name = "remote_test_messages.Ping"
json = JsonMessage(type_name, "{ \"message\":\"Hello\"}")
data = Serialization().serialize(json, 1)
deserialized = Serialization().deserialize(type_name, data, 1)
assert deserialized.message == "Hello"
def test_can_serialize_and_deserialize_protobuf():
type_name = "actor.PID"
pid = PID(address='123', id='456')
data = Serialization().serialize(pid, 0)
deserialized = Serialization().deserialize(type_name, data, 0)
assert deserialized.address == "123"
assert deserialized.id == "456"
|
1692057
|
from django.contrib import admin
#from .models import Article, Category, BlogComment, Tag
#from .models import Image
# Register your models here.
#admin.site.register([Article, Category, BlogComment, Tag])
#admin.site.register([Image])
|
1692064
|
import numpy as np
from tgym.utils import calc_spread
def test_calc_spread():
spread_coefficients = [1, -0.1]
prices = np.array([1, 2, 10, 20])
spread_price = (-1, 1)
assert calc_spread(prices, spread_coefficients) == spread_price
|
1692083
|
import numpy as np
from mmcv.parallel import DataContainer as DC
from mmdet.datasets.builder import PIPELINES
from mmdet.datasets.pipelines import to_tensor
@PIPELINES.register_module()
class ConcatVideoReferences(object):
"""Concat video references.
    If the input list contains at least two dicts, the dicts from the 2nd one
    onwards are concatenated into a single reference dict.
Args:
results (list[dict]): List of dict that contain keys such as 'img',
'img_metas', 'gt_masks','proposals', 'gt_bboxes',
'gt_bboxes_ignore', 'gt_labels','gt_semantic_seg',
'gt_instance_ids'.
Returns:
list[dict]: The first dict of outputs is the same as the first
dict of `results`. The second dict of outputs concats the
dicts in `results[1:]`.
"""
def __call__(self, results):
assert (isinstance(results, list)), 'results must be list'
outs = results[:1]
for i, result in enumerate(results[1:], 1):
if 'img' in result:
img = result['img']
if len(img.shape) < 3:
img = np.expand_dims(img, -1)
if i == 1:
result['img'] = np.expand_dims(img, -1)
else:
outs[1]['img'] = np.concatenate(
(outs[1]['img'], np.expand_dims(img, -1)), axis=-1)
for key in ['img_metas', 'gt_masks']:
if key in result:
if i == 1:
result[key] = [result[key]]
else:
outs[1][key].append(result[key])
for key in [
'proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels',
'gt_instance_ids'
]:
if key not in result:
continue
value = result[key]
if value.ndim == 1:
value = value[:, None]
N = value.shape[0]
value = np.concatenate((np.full(
(N, 1), i - 1, dtype=np.float32), value),
axis=1)
if i == 1:
result[key] = value
else:
outs[1][key] = np.concatenate((outs[1][key], value),
axis=0)
if 'gt_semantic_seg' in result:
if i == 1:
result['gt_semantic_seg'] = result['gt_semantic_seg'][...,
None,
None]
else:
outs[1]['gt_semantic_seg'] = np.concatenate(
(outs[1]['gt_semantic_seg'],
result['gt_semantic_seg'][..., None, None]),
axis=-1)
if i == 1:
outs.append(result)
return outs
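# Behaviour sketch (shapes illustrative): given a key-frame dict plus two reference
# dicts whose 'img' arrays are (H, W, 3), the output is [key_dict, ref_dict] where
# ref_dict['img'] has shape (H, W, 3, 2) and box-like keys gain a leading column
# holding the reference index.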
@PIPELINES.register_module()
class MultiImagesToTensor(object):
"""Multi images to tensor.
1. Transpose and convert image/multi-images to Tensor.
2. Add prefix to every key in the second dict of the inputs. Then, add
these keys and corresponding values into the outputs.
Args:
ref_prefix (str): The prefix of key added to the second dict of inputs.
Defaults to 'ref'.
"""
def __init__(self, ref_prefix='ref'):
self.ref_prefix = ref_prefix
def __call__(self, results):
"""Multi images to tensor.
1. Transpose and convert image/multi-images to Tensor.
2. Add prefix to every key in the second dict of the inputs. Then, add
these keys and corresponding values into the output dict.
Args:
results (list[dict]): List of two dicts.
Returns:
dict: Each key in the first dict of `results` remains unchanged.
Each key in the second dict of `results` adds `self.ref_prefix`
as prefix.
"""
outs = []
for _results in results:
_results = self.images_to_tensor(_results)
outs.append(_results)
data = {}
data.update(outs[0])
if len(outs) == 2:
for k, v in outs[1].items():
data[f'{self.ref_prefix}_{k}'] = v
return data
def images_to_tensor(self, results):
"""Transpose and convert images/multi-images to Tensor."""
if 'img' in results:
img = results['img']
if len(img.shape) == 3:
# (H, W, 3) to (3, H, W)
img = np.ascontiguousarray(img.transpose(2, 0, 1))
else:
# (H, W, 3, N) to (N, 3, H, W)
img = np.ascontiguousarray(img.transpose(3, 2, 0, 1))
results['img'] = to_tensor(img)
if 'proposals' in results:
results['proposals'] = to_tensor(results['proposals'])
if 'img_metas' in results:
results['img_metas'] = DC(results['img_metas'], cpu_only=True)
return results
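# Behaviour sketch: with ref_prefix='ref', keys from the second dict are re-emitted
# as 'ref_img', 'ref_img_metas', 'ref_proposals', ... next to the first dict's keys.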
@PIPELINES.register_module()
class SeqDefaultFormatBundle(object):
"""Sequence Default formatting bundle.
It simplifies the pipeline of formatting common fields, including "img",
"img_metas", "proposals", "gt_bboxes", "gt_instance_ids",
"gt_match_indices", "gt_bboxes_ignore", "gt_labels", "gt_masks" and
"gt_semantic_seg". These fields are formatted as follows.
- img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)
- img_metas: (1)to DataContainer (cpu_only=True)
- proposals: (1)to tensor, (2)to DataContainer
- gt_bboxes: (1)to tensor, (2)to DataContainer
- gt_instance_ids: (1)to tensor, (2)to DataContainer
- gt_match_indices: (1)to tensor, (2)to DataContainer
- gt_bboxes_ignore: (1)to tensor, (2)to DataContainer
- gt_labels: (1)to tensor, (2)to DataContainer
- gt_masks: (1)to DataContainer (cpu_only=True)
- gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, \
(3)to DataContainer (stack=True)
Args:
ref_prefix (str): The prefix of key added to the second dict of input
list. Defaults to 'ref'.
"""
def __init__(self, ref_prefix='ref'):
self.ref_prefix = ref_prefix
def __call__(self, results):
"""Sequence Default formatting bundle call function.
Args:
results (list[dict]): List of two dicts.
Returns:
dict: The result dict contains the data that is formatted with
default bundle. Each key in the second dict of the input list
adds `self.ref_prefix` as prefix.
"""
outs = []
for _results in results:
_results = self.default_format_bundle(_results)
outs.append(_results)
data = {}
data.update(outs[0])
for k, v in outs[1].items():
data[f'{self.ref_prefix}_{k}'] = v
return data
def default_format_bundle(self, results):
"""Transform and format common fields in results.
Args:
results (dict): Result dict contains the data to convert.
Returns:
dict: The result dict contains the data that is formatted with
default bundle.
"""
if 'img' in results:
img = results['img']
if len(img.shape) == 3:
img = np.ascontiguousarray(img.transpose(2, 0, 1))
else:
img = np.ascontiguousarray(img.transpose(3, 2, 0, 1))
results['img'] = DC(to_tensor(img), stack=True)
for key in [
'proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels',
'gt_instance_ids', 'gt_match_indices'
]:
if key not in results:
continue
results[key] = DC(to_tensor(results[key]))
for key in ['img_metas', 'gt_masks']:
if key in results:
results[key] = DC(results[key], cpu_only=True)
if 'gt_semantic_seg' in results:
semantic_seg = results['gt_semantic_seg']
if len(semantic_seg.shape) == 2:
semantic_seg = semantic_seg[None, ...]
else:
semantic_seg = np.ascontiguousarray(
semantic_seg.transpose(3, 2, 0, 1))
            results['gt_semantic_seg'] = DC(
                to_tensor(semantic_seg), stack=True)
return results
def __repr__(self):
return self.__class__.__name__
@PIPELINES.register_module()
class VideoCollect(object):
"""Collect data from the loader relevant to the specific task.
Args:
keys (Sequence[str]): Keys of results to be collected in ``data``.
meta_keys (Sequence[str]): Meta keys to be converted to
``mmcv.DataContainer`` and collected in ``data[img_metas]``.
Defaults to None.
default_meta_keys (tuple): Default meta keys. Defaults to ('filename',
'ori_filename', 'ori_shape', 'img_shape', 'pad_shape',
'scale_factor', 'flip', 'flip_direction', 'img_norm_cfg',
'frame_id', 'is_video_data').
"""
def __init__(self,
keys,
meta_keys=None,
default_meta_keys=('filename', 'ori_filename', 'ori_shape',
'img_shape', 'pad_shape', 'scale_factor',
'flip', 'flip_direction', 'img_norm_cfg',
'frame_id', 'is_video_data')):
self.keys = keys
self.meta_keys = default_meta_keys
if meta_keys is not None:
if isinstance(meta_keys, str):
meta_keys = (meta_keys, )
else:
assert isinstance(meta_keys, tuple), \
'meta_keys must be str or tuple'
self.meta_keys += meta_keys
def __call__(self, results):
"""Call function to collect keys in results.
The keys in ``meta_keys`` and ``default_meta_keys`` will be converted
to :obj:mmcv.DataContainer.
Args:
results (list[dict] | dict): List of dict or dict which contains
the data to collect.
Returns:
list[dict] | dict: List of dict or dict that contains the
following keys:
- keys in ``self.keys``
- ``img_metas``
"""
results_is_dict = isinstance(results, dict)
if results_is_dict:
results = [results]
outs = []
for _results in results:
_results = self._add_default_meta_keys(_results)
_results = self._collect_meta_keys(_results)
outs.append(_results)
if results_is_dict:
outs[0]['img_metas'] = DC(outs[0]['img_metas'], cpu_only=True)
return outs[0] if results_is_dict else outs
def _collect_meta_keys(self, results):
"""Collect `self.keys` and `self.meta_keys` from `results` (dict)."""
data = {}
img_meta = {}
for key in self.meta_keys:
if key in results:
img_meta[key] = results[key]
elif key in results['img_info']:
img_meta[key] = results['img_info'][key]
data['img_metas'] = img_meta
for key in self.keys:
data[key] = results[key]
return data
def _add_default_meta_keys(self, results):
"""Add default meta keys.
We set default meta keys including `pad_shape`, `scale_factor` and
`img_norm_cfg` to avoid the case where no `Resize`, `Normalize` and
`Pad` are implemented during the whole pipeline.
Args:
results (dict): Result dict contains the data to convert.
Returns:
results (dict): Updated result dict contains the data to convert.
"""
img = results['img']
results.setdefault('pad_shape', img.shape)
results.setdefault('scale_factor', 1.0)
num_channels = 1 if len(img.shape) < 3 else img.shape[2]
results.setdefault(
'img_norm_cfg',
dict(
mean=np.zeros(num_channels, dtype=np.float32),
std=np.ones(num_channels, dtype=np.float32),
to_rgb=False))
return results
@PIPELINES.register_module()
class ToList(object):
"""Use list to warp each value of the input dict.
Args:
results (dict): Result dict contains the data to convert.
Returns:
dict: Updated result dict contains the data to convert.
"""
def __call__(self, results):
out = {}
for k, v in results.items():
out[k] = [v]
return out
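# Hedged usage sketch (assumed dataset config, not part of this module): the
# registered transforms above are normally chained by name in a sequence-style
# training pipeline, collecting per-frame keys and then bundling them so the
# reference frame's keys carry the 'ref_' prefix. The variable name is illustrative.
_EXAMPLE_SEQ_TRAIN_PIPELINE = [
    dict(type='VideoCollect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_instance_ids']),
    dict(type='SeqDefaultFormatBundle', ref_prefix='ref'),
]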
|
1692096
|
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
import argparse
import datetime
import json
import contextlib
from func_timeout import func_timeout, FunctionTimedOut
import multiprocessing
import numpy as np
import os
import sys
from job_id_pair import JobIdPair
from job_table import JobTable
import scheduler
import utils
def simulate_with_timeout(experiment_id, policy_name,
throughputs_file, cluster_spec, lam, seed, interval,
jobs_to_complete, fixed_job_duration, solver,
generate_multi_gpu_jobs,
generate_multi_priority_jobs, simulate_steady_state,
log_dir, timeout, verbose, checkpoint_threshold,
profiling_percentage, num_reference_models,
num_gpus_per_server, ideal):
lam_str = 'lambda=%f.log' % (lam)
checkpoint_file = None
if checkpoint_threshold is not None:
checkpoint_file = os.path.join(log_dir, 'lambda=%f.pickle' % lam)
cluster_spec_str = 'v100:%d|p100:%d|k80:%d' % (cluster_spec['v100'],
cluster_spec['p100'],
cluster_spec['k80'])
policy = utils.get_policy(policy_name, solver=solver, seed=seed)
if verbose:
current_time = datetime.datetime.now()
print('[%s] [Experiment ID: %2d] '
'Configuration: cluster_spec=%s, policy=%s, '
'seed=%d, lam=%f, '
'profiling_percentage=%f, '
'num_reference_models=%d' % (current_time,
experiment_id,
cluster_spec_str,
policy.name,
seed, lam,
profiling_percentage,
num_reference_models))
with open(os.path.join(log_dir, lam_str), 'w') as f:
with contextlib.redirect_stderr(f), contextlib.redirect_stdout(f):
sched = scheduler.Scheduler(
policy,
throughputs_file=throughputs_file,
seed=seed,
time_per_iteration=interval,
simulate=True,
profiling_percentage=profiling_percentage,
num_reference_models=num_reference_models)
if timeout is None:
sched.simulate(cluster_spec, lam=lam,
jobs_to_complete=jobs_to_complete,
fixed_job_duration=fixed_job_duration,
generate_multi_gpu_jobs=generate_multi_gpu_jobs,
generate_multi_priority_jobs=generate_multi_priority_jobs,
simulate_steady_state=simulate_steady_state,
checkpoint_file=checkpoint_file,
checkpoint_threshold=checkpoint_threshold,
num_gpus_per_server=num_gpus_per_server,
ideal=ideal)
average_jct = sched.get_average_jct(jobs_to_complete)
utilization = 1.0
if not ideal:
utilization = sched.get_cluster_utilization()
else:
try:
func_timeout(timeout, sched.simulate,
args=(cluster_spec,),
kwargs={
'lam': lam,
'jobs_to_complete': jobs_to_complete,
'fixed_job_duration': fixed_job_duration,
'generate_multi_gpu_jobs': generate_multi_gpu_jobs,
'generate_multi_priority_jobs': generate_multi_priority_jobs,
'simulate_steady_state': simulate_steady_state,
'checkpoint_file': checkpoint_file,
'checkpoint_threshold': checkpoint_threshold,
'num_gpus_per_server': num_gpus_per_server,
'ideal': ideal
})
average_jct = sched.get_average_jct(jobs_to_complete)
utilization = sched.get_cluster_utilization()
except FunctionTimedOut:
average_jct = float('inf')
utilization = 1.0
if verbose:
current_time = datetime.datetime.now()
print('[%s] [Experiment ID: %2d] '
'Results: average JCT=%f, utilization=%f' % (current_time,
experiment_id,
average_jct,
utilization))
sched.shutdown()
return average_jct, utilization
def main(args):
if args.window_start >= args.window_end:
        raise ValueError('Window start must be less than window end.')
if (args.throughput_lower_bound is None or
args.throughput_upper_bound is None):
raise ValueError('Throughput range must be specified.')
cutoff_throughputs = {}
if args.cutoff_throughputs_file is not None:
cutoff_throughputs = json.load(open(args.cutoff_throughputs_file, 'r'))
throughputs_file = args.throughputs_file
policy_names = args.policies
profiling_percentages = args.profiling_percentages
all_num_reference_models = args.num_reference_models
estimate_throughputs = (min(profiling_percentages) < 1.0 or
min(all_num_reference_models) < len(JobTable))
job_range = (args.window_start, args.window_end)
experiment_id = 0
with open(throughputs_file, 'r') as f:
throughputs = json.load(f)
raw_logs_dir = os.path.join(args.log_dir, 'raw_logs')
if not os.path.isdir(raw_logs_dir):
os.mkdir(raw_logs_dir)
jobs_to_complete = set()
for i in range(job_range[0], job_range[1]):
jobs_to_complete.add(JobIdPair(i, None))
all_args_list = []
for cluster_spec_str in args.cluster_spec:
cluster_spec_str_split = cluster_spec_str.split(':')
if len(cluster_spec_str_split) != 3:
raise ValueError('Invalid cluster spec %s' % (cluster_spec_str))
cluster_spec = {
'v100': int(cluster_spec_str_split[0]),
'p100': int(cluster_spec_str_split[1]),
'k80': int(cluster_spec_str_split[2]),
}
num_gpus_per_server_split = args.num_gpus_per_server.split(':')
num_gpus_per_server = {
'v100': int(num_gpus_per_server_split[0]),
'p100': int(num_gpus_per_server_split[1]),
'k80': int(num_gpus_per_server_split[2]),
}
raw_logs_cluster_spec_subdir = \
os.path.join(raw_logs_dir,
'v100=%d.p100=%d.k80=%d' % (cluster_spec['v100'],
cluster_spec['p100'],
cluster_spec['k80']))
if not os.path.isdir(raw_logs_cluster_spec_subdir):
os.mkdir(raw_logs_cluster_spec_subdir)
for policy_name in policy_names:
raw_logs_policy_subdir = os.path.join(raw_logs_cluster_spec_subdir,
policy_name)
if not os.path.isdir(raw_logs_policy_subdir):
os.mkdir(raw_logs_policy_subdir)
for profiling_percentage in profiling_percentages:
if estimate_throughputs:
profiling_percentage_str = \
'profiling_percentage=%f' % (profiling_percentage)
raw_logs_profiling_subdir = \
os.path.join(raw_logs_policy_subdir,
profiling_percentage_str)
if not os.path.isdir(raw_logs_profiling_subdir):
os.mkdir(raw_logs_profiling_subdir)
else:
raw_logs_profiling_subdir = raw_logs_policy_subdir
for i, num_reference_models in enumerate(args.num_reference_models):
if estimate_throughputs:
num_reference_models_str = \
'num_reference_models=%d' % (num_reference_models)
raw_logs_num_reference_models_subdir = \
os.path.join(raw_logs_profiling_subdir,
num_reference_models_str)
if not os.path.isdir(raw_logs_num_reference_models_subdir):
os.mkdir(raw_logs_num_reference_models_subdir)
else:
raw_logs_num_reference_models_subdir = \
raw_logs_policy_subdir
throughputs = \
list(np.linspace(args.throughput_lower_bound,
args.throughput_upper_bound,
num=args.num_data_points))
if throughputs[0] == 0.0:
throughputs = throughputs[1:]
for throughput in throughputs:
if (cluster_spec_str in cutoff_throughputs and
policy_name in cutoff_throughputs[cluster_spec_str]):
cutoff_throughput = \
cutoff_throughputs[cluster_spec_str][policy_name]
if throughput >= cutoff_throughput:
print('Throughput of %f is too high '
'for policy %s with cluster '
'spec %s.' % (throughput,
policy_name,
cluster_spec_str))
continue
lam = 3600.0 / throughput
for seed in args.seeds:
seed_str = 'seed=%d' % (seed)
raw_logs_seed_subdir = os.path.join(
raw_logs_num_reference_models_subdir,
seed_str)
if not os.path.isdir(raw_logs_seed_subdir):
os.mkdir(raw_logs_seed_subdir)
all_args_list.append((experiment_id, policy_name,
throughputs_file,
cluster_spec,
lam, seed, args.interval,
jobs_to_complete,
args.fixed_job_duration,
args.solver,
args.generate_multi_gpu_jobs,
args.generate_multi_priority_jobs,
args.simulate_steady_state,
raw_logs_seed_subdir,
args.timeout,
args.verbose,
args.checkpoint_threshold,
profiling_percentage,
num_reference_models,
num_gpus_per_server,
args.ideal))
experiment_id += 1
if len(all_args_list) > 0:
current_time = datetime.datetime.now()
print('[%s] Running %d total experiment(s)...' % (current_time,
len(all_args_list)))
with multiprocessing.Pool(args.processes) as p:
# Sort args in order of decreasing lambda to prioritize
# short-running jobs.
all_args_list.sort(key=lambda x: x[4], reverse=True)
results = [p.apply_async(simulate_with_timeout, args_list)
for args_list in all_args_list]
results = [result.get() for result in results]
else:
raise ValueError('No work to be done!')
if __name__=='__main__':
parser = argparse.ArgumentParser(
description='Sweep through lambda values')
fixed_range = parser.add_argument_group('Sweep over fixed range')
parser.add_argument('-l', '--log-dir', type=str, default='logs',
help='Log directory')
parser.add_argument('-s', '--window-start', type=int, default=0,
help='Measurement window start (job ID)')
parser.add_argument('-e', '--window-end', type=int, default=5000,
help='Measurement window end (job ID)')
parser.add_argument('-t', '--timeout', type=int, default=None,
help='Timeout (in seconds) for each run')
parser.add_argument('-j', '--processes', type=int, default=None,
help=('Number of processes to use in pool '
'(use as many as available if not specified)'))
parser.add_argument('-p', '--policies', type=str, nargs='+',
default=utils.get_available_policies(),
help='List of policies to sweep')
parser.add_argument('-c', '--cluster-spec', type=str, nargs='+',
default=['25:0:0', '12:12:0', '16:8:0', '8:8:8'],
help=('Cluster specification in the form of '
'#v100s:#p100s:#k80s'))
    parser.add_argument('--num_gpus_per_server', type=str, default='1:1:1',
                        help=('Number of GPUs per server in the form of '
                              '#v100s:#p100s:#k80s'))
parser.add_argument('--seeds', type=int, nargs='+',
default=[0, 1, 2, 3, 4],
help='List of random seeds')
parser.add_argument('-i', '--interval', type=int, default=360,
help='Interval length (in seconds)')
parser.add_argument('-f', '--fixed-job-duration', type=int, default=None,
help=('If set, fixes the duration of all jobs to the '
'specified value (in seconds)'))
parser.add_argument('--cutoff-throughputs-file', type=str, default=None,
help=('If set, uses the attached cutoff_throughputs '
'JSON file in sweep to limit args run'))
parser.add_argument('--throughputs-file', type=str,
default='simulation_throughputs.json',
help='Oracle throughputs file')
parser.add_argument('-m', '--generate-multi-gpu-jobs', action='store_true',
default=False,
help=('If set, generates multi-GPU jobs according to '
'a pre-defined distribution'))
parser.add_argument('--generate-multi-priority-jobs', action='store_true',
default=False,
help=('If set, generates some jobs with higher priority'))
parser.add_argument('--simulate-steady-state', action='store_true',
default=False,
help=('If set, adds as many jobs as there are workers '
'before beginning the simulation.'))
parser.add_argument('--solver', type=str, choices=['ECOS', 'GUROBI', 'SCS'],
default='ECOS', help='CVXPY solver')
parser.add_argument('-v', '--verbose', action='store_true', default=True,
help='Verbose')
parser.add_argument('--checkpoint-threshold', type=int, default=None,
help=('Checkpoint threshold, None if checkpointing is '
'disabled. Checkpoint is created after this '
'job ID is added.'))
parser.add_argument('--profiling_percentages', type=float, nargs='+',
default=[1.0],
help=('Percentages of machines dedicated to profiling '
'co-located job pairs'))
parser.add_argument('--num_reference_models', type=int, nargs='+',
default=[len(JobTable)],
help=('Number of reference models to use when '
'estimating throughputs'))
parser.add_argument('--ideal', action='store_true', default=False,
help='Run allocations 100%% ideally')
fixed_range.add_argument('-a', '--throughput-lower-bound', type=float,
default=None,
help=('Lower bound for throughput interval to '
'sweep'))
fixed_range.add_argument('-b', '--throughput-upper-bound', type=float,
default=None,
help=('Upper bound for throughput interval to '
'sweep'))
fixed_range.add_argument('-n', '--num-data-points', type=int, default=20,
help='Number of data points to sweep through')
args = parser.parse_args()
main(args)
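# Hedged example invocation (script name and flag values are assumptions, not from
# this file):
#   python sweep_lambdas.py -l logs -s 0 -e 5000 -c 36:36:36 \
#       -a 0.5 -b 6.0 -n 10 --seeds 0 1 2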
|
1692185
|
from nose.tools import eq_
import openxc.version
def test_get_version():
version = openxc.version.get_version()
eq_(type(version), str)
|
1692219
|
import math
import numpy as np
class Robot:
def __init__(self, wheel_base, track_width, wheel_radius, max_v, max_w):
# w1<--track width--> w2
# ^ |
# | |
# wb |
# | |
# v |
# w3 ---------------- w4
self.wheel_base = wheel_base
self.track_width = track_width
self.wheel_radius = wheel_radius
self.max_v = max_v
self.max_w = max_w
wb = self.wheel_base/2.0
tw = self.track_width/2.0
r = self.wheel_radius
T = np.array([[1,-1,-(tw+wb)],
[1,1,(tw+wb)],
[1,1,-(tw+wb)],
[1,-1,(tw+wb)]])
self.inverse_transform_matrix=(1/r)*T
        self.max_wheel_speed = np.max(np.abs(np.matmul(
            self.inverse_transform_matrix, np.array([[1.0], [1.0], [0.0]]))))
def compute_motor_velocities(input, robot, max_value=255):
    """Map a body-frame velocity command [vx, vy, w] to per-wheel motor commands."""
    motor_velocities = np.zeros(4)
    if len(input) < 3:
        return motor_velocities
    robot_velocity = np.array([[input[0]], [input[1]], [input[2]]])
    raw_velocities = np.matmul(robot.inverse_transform_matrix, robot_velocity)
    if np.max(np.abs(raw_velocities)) == 0.0:
        return motor_velocities
    # Scale so that the fastest achievable wheel speed maps to max_value.
    for i in range(len(raw_velocities)):
        motor_velocities[i] = raw_velocities[i] * max_value / robot.max_wheel_speed
    return motor_velocities
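# Hedged usage sketch (geometry and command values are assumptions, not from the
# original source): build a robot and map a body-frame velocity command [vx, vy, w]
# to raw per-wheel motor commands.
if __name__ == "__main__":
    demo_robot = Robot(wheel_base=0.20, track_width=0.15, wheel_radius=0.03,
                       max_v=1.0, max_w=2.0)
    print(compute_motor_velocities([0.5, 0.0, 0.2], demo_robot))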
|
1692228
|
from bentoml.yatai.grpc_interceptor.prom_server_interceptor import (
PromServerInterceptor,
ServiceLatencyInterceptor,
)
__all__ = [
"ServiceLatencyInterceptor",
"PromServerInterceptor",
]
|
1692251
|
import requests
import json
import datetime
import azure.functions as func
import base64
import hmac
import hashlib
import os
import logging
import re
from .state_manager import StateManager
customer_id = os.environ['WorkspaceID']
shared_key = os.environ['WorkspaceKey']
slack_api_bearer_token = os.environ['SlackAPIBearerToken']
logAnalyticsUri = os.environ.get('logAnalyticsUri')
log_type = 'SlackAudit'
slack_uri_audit = "https://api.slack.com/audit/v1/logs"
offset_limit = 1000
connection_string = os.environ['AzureWebJobsStorage']
if ((logAnalyticsUri in (None, '') or str(logAnalyticsUri).isspace())):
logAnalyticsUri = 'https://' + customer_id + '.ods.opinsights.azure.com'
pattern = r"https:\/\/([\w\-]+)\.ods\.opinsights\.azure.([a-zA-Z\.]+)$"
match = re.match(pattern,str(logAnalyticsUri))
if(not match):
raise Exception("Invalid Log Analytics Uri.")
def action_mapping(event):
action_id = event["action"]
action_dict = {
"workspace_created": "A workspace in an organization was created.",
"workspace_deleted": "A workspace in an organization was deleted.",
"workspace_accepted_migration": "An administrator on a workspace has accepted an invitation to migrate to a Grid organization.",
"workspace_declined_migration": "An administrator on a workspace has declined an invitation to migrate to a Grid organization.",
"migration_scheduled": "A migration was scheduled.",
"organization_verified": "Slack has confirmed the identity of your organization. The organization will now be denoted with a verified badge.",
"organization_unverified": "Slack has flagged a change in your organization’s identity and has unverified it. The organization will no longer be denoted with a verified badge.",
"organization_public_url_updated": "Your organization’s public URL has been changed.",
"organization_created": "An Enterprise Grid organization was created.",
"organization_deleted": "An Enterprise Grid organization was deleted.",
"organization_accepted_migration": "The Org Owner accepted a workspace invitation to join their organization.",
"organization_declined_migration": "The Org Owner declined a workspace invitation to join their organization.",
"billing_address_added": "A billing address was added. Includes a details parameter noting the timestamp the TOS was accepted.",
"emoji_added": "An emoji was added. Includes a details parameter with the name of the emoji.",
"emoji_removed": "An emoji was removed. Includes a details parameter with the name of the emoji.",
"emoji_aliased": "An emoji was given an alias. Includes a details parameter with the name of the alias.",
"emoji_renamed": "An emoji was renamed. Includes a details parameter with the previous and new names of the emoji.",
"message_tombstoned": "A message was tombstoned.",
"message_restored": "A message was restored.",
"manual_export_started": "A workspace admin or owner has started a standard export on a workspace.",
"manual_export_completed": "A standard export on a workspace has finished.",
"corporate_exports_approved": "The corporate export feature has been approved for use on a workspace.",
"corporate_exports_enabled": "The corporate export feature has been enabled for a workspace.",
"scheduled_export_started": "A scheduled corporate export has started.",
"scheduled_export_completed": "A scheduled corporate export has finished.",
"channels_export_started": "A channel export has begun.",
"channels_export_completed": "A channel export is complete.",
"pref.allow_calls": "A preference indicating whether Slack Calls can be used in this workspace has changed.",
"pref.allow_message_deletion": "Someone altered this workspace's settings around whether messages can be deleted or not.",
"pref.app_dir_only": "Whether only Slack App Directory apps can be installed or not in this workspace has changed.",
"pref.app_whitelist_enabled": "Someone's carefully carved or culled the list of apps this workspace has whitelisted.",
"pref.can_receive_shared_channels_invites": "Whether this workspace can receive invites to share channels with other workspaces has changed.",
"pref.commands_only_regular": "The setting determining whether restricted users are restricted from using slash commands was changed.",
"pref.custom_tos": "This workspace's settings on having a custom terms of service have changed.",
"pref.disallow_public_file_urls": "This workspace has modified their public file URL settings for files uploaded within it.",
"pref.dm_retention_changed": "The direct message (DM) retention setting changed. Includes a details parameter noting the previous and new values.",
"pref.dnd_enabled": "Do not disturb settings have been enabled for a workspace.",
"pref.dnd_end_hour": "The exact ending hour for workspace do not disturb settings has been set. Work hard and go home.",
"pref.dnd_start_hour": "The exact starting hour for workspace do not disturb settings has been set. Hopefully everyone is awake and ready to work by then.",
"pref.emoji_only_admins": "Someone modified the list of emoji-administrating admins, so you know who stole the cookies from the cookie jar.",
"pref.enterprise_default_channels": "Someone modified the list of default channels across the enterprise grid.",
"pref.enterprise_team_creation_request": "Someone has requested that your organization allow a new workspace to be created.",
"pref.file_retention_changed": "The file retention setting changed. Includes a details parameter noting the previous and new values.",
"pref.msg_edit_window_mins": "Someone edited the edit messaging window for a workspace!",
"pref.private_channel_retention_changed": "The group (private channel) retention setting changed. Includes a details parameter noting the previous and new values.",
"pref.public_channel_retention_changed": "The channel retention setting type changed. Includes a details parameter noting the previous and new values.",
"pref.retention_override_changed": "The retention override setting, allowing workspace members to set their own retention period for private channels and DMs, changed. Includes a details parameter noting the previous and new values.",
"pref.sign_in_with_slack_disabled": "This workspace changed their preference around allowing Sign in with Slack.",
"pref.slackbot_responses_disabled": "The settings around whether Slackbot's witty responses are enabled or disabled changed.",
"pref.slackbot_responses_only_admins": "There's a secret cabal of admins for those witty Slackbot responses and that list was changed.",
"pref.sso_setting_changed": "The Single Sign On (SSO) restriction changed. Includes a details parameter noting the previous and new values.",
"pref.stats_only_admins": "The list of admins that can work with workspace statistics only has changed.",
"pref.two_factor_auth_changed": "The two-factor authentication requiremented changed. Includes a details parameter noting the previous and new values.",
"pref.username_policy": "A workspace's username policy preference changed.",
"pref.who_can_archive_channels": "Who can archive channels indeed?",
"pref.who_can_create_delete_user_groups": "The list of who can create or delete user groups changed.",
"pref.who_can_create_private_channels": "It's like a who's who of who can create private channels, and it changed.",
"pref.who_can_create_public_channels": "The same as above, but for public channels.",
"pref.who_can_edit_user_groups": "The list of those who can edit user groups changed.",
"pref.who_can_manage_channel_posting_prefs": "Someone's been changing who can manage channel posting preferences",
"pref.who_can_manage_ext_shared_channels": "The list of who can manage externally shared channels has changed for this workspace.",
"pref.who_can_manage_guests": "The list of who can manage guests now has changed for this workspace.",
"pref.who_can_manage_shared_channels": "Settings around who can remove users from shared channels has changed for a workspace.",
"pref.who_can_remove_from_private_channels": "Settings around who can remove users from private channels has changed for a workspace.",
"pref.who_can_remove_from_public_channels": "Settings around who can remove users from public channels has changed for a workspace.",
"ekm_enrolled": "The workspace is now enrolled/managed by EKM.",
"ekm_unenrolled": "The workspace is no longer enrolled or managed by EKM.",
"ekm_key_added": "An EKM key was added for the workspace.",
"ekm_key_removed": "An EKM key was removed for the workspace.",
"ekm_clear_cache_set": "A revocation event has triggered a new TTL for cached date in this workspace.",
"ekm_logging_config_set": "Logging settings for this workspace's EKM configuration have changed.",
"ekm_slackbot_enroll_notification_sent": "Slack sent notifications about this workspace being enrolled in EKM.",
"ekm_slackbot_unenroll_notification_sent": "Slack sent notifications about this workspace no longer being enrolled in EKM.",
"ekm_slackbot_rekey_notification_sent": "Slack sent notifications about this workspace's EKM configuration being rekeyed.",
"ekm_slackbot_logging_notification_sent": "Slack sent notifications about logging changes to EKM in this workspace.",
"user_channel_join": "A user has joined a channel. The user field in this action contains a team identifier so that you can see which team the joining user comes from (useful for externally shared channels).",
"user_channel_leave": "A user has left a channel. This action contains a team identifier so that you can see which team the departing user comes from (useful for externally shared channels).",
"guest_channel_join": "A guest user has joined a channel. This action contains a team identifier so that you can see which team the joining guest comes from (useful for externally shared channels).",
"guest_channel_leave": "A guest user has left a channel. This action contains a team identifier so that you can see which team the departing guest comes from (useful for externally shared channels).",
"guest_created": "A guest was invited to a channel. This action contains a team identifier so that you can see which team the inviting user comes from.",
"channel_moved": "A channel has been moved to a different workspace.",
"public_channel_created": "A public channel was created.",
"private_channel_created": "A private channel was created.",
"public_channel_archive": "A public channel was archived.",
"private_channel_archive": "A private channel was archived.",
"public_channel_unarchive": "A public channel was unarchived.",
"private_channel_unarchive": "A private channel was unarchived.",
"public_channel_deleted": "A public channel was deleted.",
"private_channel_deleted": "A private channel was deleted.",
"mpim_converted_to_private": "A multi-party direct message was converted to a private channel.",
"public_channel_converted_to_private": "A channel which was once public is now private.",
"channel_email_address_created": "An email forwarding address was created for a channel.",
"channel_email_address_deleted": "An email forwarding address was deleted from channel.",
"external_shared_channel_connected": "A shared channel with another workspace has been connected with this one.",
"external_shared_channel_disconnected": "A shared channel with another workspace is no longer connected with this one.",
"external_shared_channel_reconnected": "A previously connected and then disconnected shared channel with another workspace is once again shared with this one.",
"external_shared_channel_invite_sent": "An invitation to join a shared channel was sent.",
"external_shared_channel_invite_accepted": "An invitation to join a shared channel was accepted! Nice.",
"external_shared_channel_invite_approved": "An invitation to join a shared channel was approved by an admin.",
"external_shared_channel_invite_created": "An invitation url to join a shared channel was created.",
"external_shared_channel_invite_declined": "An invitation to join a shared channel was declined.",
"external_shared_channel_invite_expired": "An invitation to join a shared channel expired.",
"external_shared_channel_invite_revoked": "An invitation to join a shared channel was revoked.",
"role_change_to_owner": "A team member was made an owner.",
"role_change_to_admin": "A team member was made an admin.",
"role_change_to_user": "A team member was a user.",
"role_change_to_guest": "A team member was made a guest.",
"owner_transferred": "An owner was transferred.",
"user_created": "A team member was created.",
"user_deactivated": "A team member was deactivated.",
"user_reactivated": "A team member was reactivated after having been deactivated.",
"user_login_failed": "A team member login failed",
"guest_created": "A guest was created.",
"guest_deactivated": "A guest was deactivated.",
"guest_reactivated": "A guest was reactivated after having been deactivated.",
"guest_expiration_set": "A guest had an account expiration time set.",
"guest_expired": "A guest was deactivated when the expiration time was reached.",
"guest_expiration_cleared": "A guest had an expiration time cleared (before this time arrived).",
"user_login": "A team member logged in.",
"user_logout": "A team member logged out.",
"custom_tos_accepted": "A team member accepted a custom terms of service agreement.",
"app_approved": "On workspaces that have admin approved apps enabled, an app has been approved but not yet installed.",
"app_restricted": "On workspaces that have admin approved apps enabled, an app has been restricted and cannot be installed.",
"app_installed": "An app has been installed. If a custom integration had been disabled, this event will also be triggered if it is re-enabled.",
"app_scopes_expanded": "An app has been granted additional access to resources on a workspace, via OAuth scopes. For most apps, this requires a re-install. For workspace apps, this may also happen via the permissions API.",
"app_resources_added": "Workspace apps have the ability to request access to a specific resource on a workspace, such as a channel or a DM, including wildcard resources (such as all public channels). This event is triggered when access has been granted.",
"app_uninstalled": "A Slack app was uninstalled.",
"app_token_preserved": "An app's token was preserved instead of revoked, usually due to an app owner or installer being removed from an organization.",
"file_downloaded": "A file was downloaded.",
"file_downloaded_blocked": "A file was blocked from being downloaded.",
"file_uploaded": "A file was uploaded. This action contains a team identifier so that you can see which team the uploading user comes from (useful for externally shared channels).",
"file_public_link_created": "A public link was created for a file. This action contains a team identifier so that you can see which team the creating user comes from (useful for externally shared channels).",
"file_public_link_revoked": "A public link was revoked from a file. This action contains a team identifier so that you can see which team the revoking user comes from (useful for externally shared channels).",
"file_shared": "A file was shared in another channel.",
"workflow_created": "A workflow has been created.",
"workflow_deleted": "A workflow has been deleted.",
"workflow_published": "A workflow has been published.",
"workflow_unpublished": "A workflow has been unpublished.",
"workflow_responses_csv_download": "A user downloaded a workflow’s responses as a CSV file."
}
if action_id in action_dict.keys():
action_desc = action_dict[action_id]
event["action_description"] = action_desc
return event
def process_events(events_obj):
map_result = map(action_mapping, events_obj)
to_list = list(map_result)
element_count = len(to_list)
global global_element_count, oldest, latest
if element_count > 0:
post_status_code = post_data(json.dumps(to_list))
if post_status_code is not None:
global_element_count = global_element_count + element_count
def build_signature(customer_id, shared_key, date, content_length, method, content_type, resource):
x_headers = 'x-ms-date:' + date
string_to_hash = method + "\n" + str(content_length) + "\n" + content_type + "\n" + x_headers + "\n" + resource
bytes_to_hash = bytes(string_to_hash, encoding="utf-8")
decoded_key = base64.b64decode(shared_key)
encoded_hash = base64.b64encode(hmac.new(decoded_key, bytes_to_hash, digestmod=hashlib.sha256).digest()).decode()
authorization = "SharedKey {}:{}".format(customer_id,encoded_hash)
return authorization
def post_data(body):
method = 'POST'
content_type = 'application/json'
resource = '/api/logs'
rfc1123date = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
content_length = len(body)
signature = build_signature(customer_id, shared_key, rfc1123date, content_length, method, content_type, resource)
uri = logAnalyticsUri + resource + '?api-version=2016-04-01'
headers = {
'content-type': content_type,
'Authorization': signature,
'Log-Type': log_type,
'x-ms-date': rfc1123date
}
response = requests.post(uri,data=body, headers=headers)
if (response.status_code >= 200 and response.status_code <= 299):
return response.status_code
else:
        logging.warning("Events are not processed into Azure. Response code: {}".format(response.status_code))
return None
def get_result_request(params):
try:
r = requests.get(url=slack_uri_audit,
headers={'Accept': 'application/json',
"Authorization": "Bearer "+ slack_api_bearer_token
},
params=params)
if r.status_code == 200:
if "entries" in r.json():
result = r.json()["entries"]
if len(result) > 0:
logging.info("Processing {} events".format(len(result)))
process_events(result)
else:
logging.info("There are no entries from the output.")
#check next_page cursor
if "response_metadata" in r.json():
if "next_cursor" in r.json()["response_metadata"]:
if r.json()["response_metadata"]["next_cursor"] == "":
return None
else:
return r.json()["response_metadata"]["next_cursor"]
else:
return None
else:
return None
elif r.status_code == 401:
logging.error("The authentication credentials are incorrect or missing. Error code: {}".format(r.status_code))
elif r.status_code == 403:
logging.error("The user does not have the required permissions. Error code: {}".format(r.status_code))
        else:
            logging.error("Something went wrong. Error code: {}".format(r.status_code))
    except Exception as err:
        logging.error("Something went wrong. Exception error text: {}".format(err))
def generate_date():
current_time = datetime.datetime.utcnow().replace(second=0, microsecond=0) - datetime.timedelta(minutes=10)
state = StateManager(connection_string=connection_string)
past_time = state.get()
if past_time is not None:
logging.info("The last time point is: {}".format(past_time))
else:
logging.info("There is no last time point, trying to get events for last hour.")
past_time = (current_time - datetime.timedelta(minutes=60)).strftime("%s")
state.post(current_time.strftime("%s"))
return (past_time, current_time.strftime("%s"))
def main(mytimer: func.TimerRequest) -> None:
if mytimer.past_due:
logging.info('The timer is past due!')
logging.info('Starting program')
global global_element_count
global_element_count = 0
oldest, latest = generate_date()
logging.info("Start processing events to Azure Sentinel. Time period: from {} to {}.".format(datetime.datetime.fromtimestamp(int(oldest)).strftime("%Y-%m-%dT%H:%M:%SZ"),
datetime.datetime.fromtimestamp(int(latest)).strftime("%Y-%m-%dT%H:%M:%SZ")))
params = {
"limit": offset_limit,
"oldest": oldest,
"latest": latest
}
next_cursor = get_result_request(params)
while next_cursor is not None:
params = {
"limit": offset_limit,
"oldest": oldest,
"latest": latest,
"cursor": next_cursor
}
next_cursor = get_result_request(params)
logging.info("Processed {} events to Azure Sentinel. Time period: from {} to {}.".format(global_element_count, datetime.datetime.fromtimestamp(int(oldest)).strftime("%Y-%m-%dT%H:%M:%SZ"),
datetime.datetime.fromtimestamp(int(latest)).strftime("%Y-%m-%dT%H:%M:%SZ")))
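if __name__ == "__main__":
    # Hedged local sketch (dummy workspace ID, key and body; a real run also needs the
    # environment variables read at import time): shows how the Log Analytics
    # SharedKey authorization header is derived from the request parameters.
    demo_key = base64.b64encode(b"dummy-workspace-key").decode()
    demo_body = json.dumps([{"action": "user_login"}])
    demo_date = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
    print(build_signature("00000000-0000-0000-0000-000000000000", demo_key, demo_date,
                          len(demo_body), 'POST', 'application/json', '/api/logs'))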
|
1692282
|
import pytest
from sitri import Sitri
from sitri.providers.contrib.system import SystemConfigProvider
@pytest.fixture(scope="module")
def test_sitri():
return Sitri(config_provider=SystemConfigProvider(prefix="test"))
|
1692294
|
class Stack(object):
def __init__(self):
"""
initialize your data structure here.
"""
self.q1 = []
self.q2 = []
def push(self, x):
"""
:type x: int
:rtype: nothing
"""
self.q1.append(x)
def pop(self):
"""
:rtype: nothing
"""
while len(self.q1) > 1:
item = self.q1.pop(0)
self.q2.append(item)
val = self.q1.pop(0)
self.q1, self.q2 = self.q2, self.q1
return val
def top(self):
"""
:rtype: int
"""
while len(self.q1) > 1:
item = self.q1.pop(0)
self.q2.append(item)
val = self.q1.pop(0)
self.q2.append(val)
self.q1, self.q2 = self.q2, self.q1
return val
def empty(self):
"""
:rtype: bool
"""
return len(self.q1) == 0
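# Hedged usage sketch: LIFO behaviour of the two-queue stack.
if __name__ == "__main__":
    s = Stack()
    s.push(1)
    s.push(2)
    print(s.top())    # 2
    print(s.pop())    # 2
    print(s.empty())  # False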
|
1692325
|
from functools import wraps
from flask_login import login_required
from werkzeug.exceptions import abort
from sarna.model.enums import UserType
valid_auditors = {UserType.manager, UserType.trusted_auditor, UserType.auditor}
valid_trusted = {UserType.manager, UserType.trusted_auditor}
valid_managers = {UserType.manager}
valid_admins = {UserType.admin}
def admin_required(func):
from sarna.core.auth import current_user
needs_accounts = valid_admins
setattr(func, 'needs_accounts', needs_accounts)
@wraps(func)
@login_required
def decorated_view(*args, **kwargs):
if current_user.user_type not in needs_accounts:
abort(403)
else:
return func(*args, **kwargs)
return decorated_view
def manager_required(func):
from sarna.core.auth import current_user
needs_accounts = valid_managers
setattr(func, 'needs_accounts', needs_accounts)
@wraps(func)
@login_required
def decorated_view(*args, **kwargs):
if current_user.user_type not in needs_accounts:
abort(403)
else:
return func(*args, **kwargs)
return decorated_view
def trusted_required(func):
from sarna.core.auth import current_user
needs_accounts = valid_trusted
setattr(func, 'needs_accounts', needs_accounts)
@wraps(func)
@login_required
def decorated_view(*args, **kwargs):
if current_user.user_type not in needs_accounts:
abort(403)
else:
return func(*args, **kwargs)
return decorated_view
def auditor_required(func):
from sarna.core.auth import current_user
needs_accounts = valid_auditors
setattr(func, 'needs_accounts', needs_accounts)
@wraps(func)
@login_required
def decorated_view(*args, **kwargs):
if current_user.user_type not in needs_accounts:
abort(403)
else:
return func(*args, **kwargs)
return decorated_view
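# Hedged usage sketch (assumed Flask view, not part of this module): each decorator
# stacks on top of login_required and returns 403 for users outside the allowed roles.
#
#     @app.route('/admin')
#     @admin_required
#     def admin_panel():
#         return 'admins only'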
|
1692362
|
import zipfile
import sys
import os
import ase.io
from datetime import datetime
def extract_file(zipname, file_to_unzip, extract_to):
with zipfile.ZipFile(zipname, 'r') as traj_zip:
traj_zip.extract(file_to_unzip, extract_to)
def main():
"""
Given a directory containing adsorbate subdirectories, loops through all
runs and merges intermediate checkpoints into a single, full trajectory.
"""
    #TODO: Improve efficiency for when dealing with larger systems
root_dir = sys.argv[1]
all_adsorbates = os.listdir(root_dir)
for adsorbate in all_adsorbates:
try:
# Check if directory is an adsorbate
int(adsorbate)
        except Exception:
            # Skip directory names that are not numeric adsorbate IDs
            continue
runs = os.listdir(os.path.join(root_dir, adsorbate))
for run in runs:
specific_names = os.listdir(os.path.join(root_dir, adsorbate, run))
# Redundant directory?
for fair_name in specific_names:
current_dir = os.path.join(root_dir, adsorbate, run, fair_name)
checkpoint_dir = os.path.join(current_dir, "checkpoints")
ordered_files = []
if os.path.isdir(checkpoint_dir):
pass
else:
print(current_dir)
if os.path.isdir(checkpoint_dir):
# Sort checkpoint files
checkpoint_files = os.listdir(checkpoint_dir)
                    sorted_checkpoints = sorted(
                        checkpoint_files,
                        key=lambda x: datetime.strptime(x[11:-4],
                                                        "%Y-%m-%dT%H:%M:%S.%f"))
for idx, checkpoint in enumerate(sorted_checkpoints):
# Extract vasprun.xml file from each checkpoint file
cp_name = checkpoint_dir+f"/{checkpoint}"
extract_file(cp_name, "vasprun.xml", current_dir)
saved_name = current_dir+f"/checkpoint_{idx}"
os.rename(current_dir+"/vasprun.xml", saved_name)
ordered_files.append(saved_name)
# Extract vasprun.xml file from final checkpoint
final_name = current_dir+"/relaxation_outputs.zip"
extract_file(final_name, "vasprun.xml", current_dir)
saved_name = current_dir+f"/checkpoint_{idx+1}"
os.rename(current_dir+"/vasprun.xml", saved_name)
# Read xml files and construct full ase trajectory file
ordered_files.append(saved_name)
for idx, traj in enumerate(ordered_files):
if idx==0:
full_traj = ase.io.read(filename=traj, index=':', format='vasp-xml')
else:
full_traj += ase.io.read(filename=traj, index='1:', format='vasp-xml')
if idx==len(ordered_files)-1:
ase.io.write(current_dir+f"/{fair_name}_{adsorbate}_full.traj", full_traj)
os.remove(traj)
else:
# No checkpoint run
# Read xml file and construct ase trajectory
final_name = current_dir+"/relaxation_outputs.zip"
extract_file(final_name, "vasprun.xml", current_dir)
full_traj = ase.io.read(filename=current_dir+"/vasprun.xml",
index=':', format='vasp-xml')
ase.io.write(current_dir+f"/{fair_name}_{adsorbate}_full.traj",
full_traj)
os.remove(current_dir+'/vasprun.xml')
if __name__ == "__main__":
main()
|
1692366
|
import numpy as np
import cv2
segmentation_colors = np.array([[0, 0, 0],
[255, 191, 0],
[192, 67, 251]], dtype=np.uint8)
detection_color = (191, 255, 0)
label = "car"
ORIGINAL_HORIZON_POINTS = np.float32([[571, 337], [652, 337]])
num_horizon_points = 0
new_horizon_points = []
def util_draw_seg(seg_map, image, alpha = 0.5):
# Convert segmentation prediction to colors
color_segmap = cv2.resize(image, (seg_map.shape[1], seg_map.shape[0]))
color_segmap[seg_map>0] = segmentation_colors[seg_map[seg_map>0]]
# Resize to match the image shape
color_segmap = cv2.resize(color_segmap, (image.shape[1],image.shape[0]))
# Fuse both images
if(alpha == 0):
combined_img = np.hstack((image, color_segmap))
else:
combined_img = cv2.addWeighted(image, alpha, color_segmap, (1-alpha),0)
return combined_img
# Ref: https://github.com/datvuthanh/HybridNets/blob/d43b0aa8de2a1d3280084270d29cf4c7abf640ae/utils/plot.py#L52
def util_draw_detections(boxes, scores, image, text=True):
tl = int(round(0.0015 * max(image.shape[0:2]))) # line thickness
tf = max(tl, 1) # font thickness
for box, score in zip(boxes, scores):
c1, c2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
cv2.rectangle(image, c1, c2, detection_color, thickness=tl)
if text:
s_size = cv2.getTextSize(str('{:.0%}'.format(score)), 0, fontScale=float(tl) / 3, thickness=tf)[0]
t_size = cv2.getTextSize(label, 0, fontScale=float(tl) / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0] + s_size[0] + 15, c1[1] - t_size[1] - 3
cv2.rectangle(image, c1, c2, detection_color, -1) # filled
cv2.putText(image, '{}: {:.0%}'.format(label, score), (c1[0], c1[1] - 2), 0, float(tl) / 3, [0, 0, 0],
thickness=tf, lineType=cv2.FONT_HERSHEY_SIMPLEX)
return image
def util_draw_bird_eye_view(seg_map, horizon_points=ORIGINAL_HORIZON_POINTS):
img_h, img_w = seg_map.shape[:2]
bird_eye_view_w, bird_eye_view_h = (img_h, img_h)
offset = bird_eye_view_w/2.5
bird_eye_view_points = np.float32([[offset, bird_eye_view_h], [bird_eye_view_w - offset, bird_eye_view_h],
[offset, 0], [bird_eye_view_w - offset, 0]])
    image_points = np.vstack((np.float32([[0, img_h], [img_w, img_h]]), horizon_points))
M = cv2.getPerspectiveTransform(image_points, bird_eye_view_points)
bird_eye_seg_map = cv2.warpPerspective(seg_map, M, (bird_eye_view_w, bird_eye_view_h))
return bird_eye_seg_map
# Ref: https://github.com/datvuthanh/HybridNets/blob/d43b0aa8de2a1d3280084270d29cf4c7abf640ae/utils/utils.py#L615
def transform_boxes(boxes, anchors):
y_centers_a = (anchors[:, 0] + anchors[:, 2]) / 2
x_centers_a = (anchors[:, 1] + anchors[:, 3]) / 2
ha = anchors[:, 2] - anchors[:, 0]
wa = anchors[:, 3] - anchors[:, 1]
w = np.exp(boxes[:, 3]) * wa
h = np.exp(boxes[:, 2]) * ha
y_centers = boxes[:, 0] * ha + y_centers_a
x_centers = boxes[:, 1] * wa + x_centers_a
ymin = y_centers - h / 2.
xmin = x_centers - w / 2.
ymax = y_centers + h / 2.
xmax = x_centers + w / 2.
return np.vstack((xmin, ymin, xmax, ymax)).T
# Ref: https://python-ai-learn.com/2021/02/14/nmsfast/
def iou_np(box, boxes, area, areas):
x_min = np.maximum(box[0], boxes[:,0])
y_min = np.maximum(box[1], boxes[:,1])
x_max = np.minimum(box[2], boxes[:,2])
y_max = np.minimum(box[3], boxes[:,3])
w = np.maximum(0, x_max - x_min + 1)
h = np.maximum(0, y_max - y_min + 1)
intersect = w*h
iou_np = intersect / (area + areas - intersect)
return iou_np
# Ref: https://python-ai-learn.com/2021/02/14/nmsfast/
def nms_fast(bboxes, scores, iou_threshold=0.5):
areas = (bboxes[:,2] - bboxes[:,0] + 1) \
* (bboxes[:,3] - bboxes[:,1] + 1)
sort_index = np.argsort(scores)
i = -1
while(len(sort_index) >= 1 - i):
max_scr_ind = sort_index[i]
ind_list = sort_index[:i]
iou = iou_np(bboxes[max_scr_ind], bboxes[ind_list], \
areas[max_scr_ind], areas[ind_list])
del_index = np.where(iou >= iou_threshold)
sort_index = np.delete(sort_index, del_index)
i -= 1
bboxes = bboxes[sort_index]
scores = scores[sort_index]
return bboxes, scores
def get_horizon_points(image):
cv2.namedWindow("Get horizon points", cv2.WINDOW_NORMAL)
cv2.setMouseCallback("Get horizon points", get_horizon_point)
# Draw horizontal line
image = cv2.line(image, (0,image.shape[0]//2),
(image.shape[1],image.shape[0]//2),
(0, 0, 251), 1)
cv2.imshow("Get horizon points", image)
num_lines = 0
while True:
if (num_lines == 0) and (num_horizon_points == 1):
image = cv2.line(image, (0,image.shape[0]),
(new_horizon_points[0][0], new_horizon_points[0][1]),
(192, 67, 251), 3)
image = cv2.circle(image, (new_horizon_points[0][0], new_horizon_points[0][1]),
5, (251, 191, 67), -1)
cv2.imshow("Get horizon points", image)
num_lines += 1
elif(num_lines == 1) and (num_horizon_points == 2):
image = cv2.line(image, (image.shape[1],image.shape[0]),
(new_horizon_points[1][0], new_horizon_points[1][1]),
(192, 67, 251), 3)
image = cv2.circle(image, (new_horizon_points[1][0], new_horizon_points[1][1]),
5, (251, 191, 67), -1)
cv2.imshow("Get horizon points", image)
num_lines += 1
break
cv2.waitKey(100)
cv2.waitKey(1000)
cv2.destroyWindow("Get horizon points")
horizon_points = np.float32(new_horizon_points)
print(f"horizon_points = np.{repr(horizon_points)}")
return horizon_points
def get_horizon_point(event,x,y,flags,param):
global num_horizon_points, new_horizon_points
if event == cv2.EVENT_LBUTTONDBLCLK:
new_horizon_points.append([x,y])
num_horizon_points += 1
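if __name__ == "__main__":
    # Hedged self-check (dummy boxes, not from the original source): the two heavily
    # overlapping boxes collapse to the higher-scoring one, the distant box survives.
    demo_boxes = np.array([[10, 10, 50, 50], [12, 12, 52, 52], [200, 200, 240, 240]],
                          dtype=np.float32)
    demo_scores = np.array([0.9, 0.8, 0.7], dtype=np.float32)
    kept_boxes, kept_scores = nms_fast(demo_boxes, demo_scores, iou_threshold=0.5)
    print(kept_boxes, kept_scores)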
|
1692404
|
import io
import glob
import os
import pytest
from readme_renderer.markdown import render, variants
MD_FIXTURES = [
(fn, os.path.splitext(fn)[0] + ".html", variant)
for variant in variants
for fn in glob.iglob(
os.path.join(
os.path.dirname(__file__),
"fixtures",
"test_" + variant + "*.md"
)
)
]
@pytest.mark.parametrize(
("md_filename", "html_filename", "variant"),
MD_FIXTURES,
)
def test_md_fixtures(md_filename, html_filename, variant):
# Get our Markup
with io.open(md_filename, encoding='utf-8') as f:
md_markup = f.read()
# Get our expected
with io.open(html_filename, encoding="utf-8") as f:
expected = f.read()
assert render(md_markup, variant=variant) == expected
def test_missing_variant():
assert render('Hello', variant="InvalidVariant") is None
|
1692407
|
import unittest
from katas.kyu_7.rule_of_divisibility_by_7 import seven
class SevenTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(seven(483), (42, 1))
def test_equals_2(self):
self.assertEqual(seven(371), (35, 1))
def test_equals_3(self):
self.assertEqual(seven(1603), (7, 2))
def test_equals_4(self):
self.assertEqual(seven(477557101), (28, 7))
def test_equals_5(self):
self.assertEqual(seven(0), (0, 0))
|
1692415
|
import os
import sys
import pytest
from sonic_platform_base.sonic_sfp import sfputilhelper
@pytest.fixture(scope="class")
def setup_class(request):
# Configure the setup
test_dir = os.path.dirname(os.path.realpath(__file__))
request.cls.port_config_file = os.path.join(test_dir, 'port_config.ini')
@pytest.mark.usefixtures("setup_class")
class TestSfpUtilHelper(object):
port_config_file = None
def test_read_port_mappings(self):
PORT_LIST = [
"Ethernet0",
"Ethernet4",
"Ethernet8",
"Ethernet12",
"Ethernet16",
"Ethernet20",
"Ethernet24",
"Ethernet28",
"Ethernet32",
"Ethernet36",
"Ethernet40",
"Ethernet44",
"Ethernet48"
]
sfputil_helper = sfputilhelper.SfpUtilHelper()
sfputil_helper.read_porttab_mappings(self.port_config_file, 0)
logical_port_list = sfputil_helper.logical
assert len(logical_port_list) == len(PORT_LIST)
for logical_port_name in logical_port_list:
assert logical_port_name in PORT_LIST
|
1692432
|
from tqdm import tqdm
class DLProgress(tqdm):
""" Class to show progress on dataset download """
# Progress bar code adapted from a Udacity machine learning project.
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
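# Hedged usage sketch (placeholder URL, not from the original source): `hook` matches
# the urllib reporthook signature, so it can drive the progress bar during a download.
if __name__ == "__main__":
    from urllib.request import urlretrieve
    with DLProgress(unit='B', unit_scale=True, miniters=1, desc='dataset') as pbar:
        urlretrieve('https://example.com/dataset.zip', 'dataset.zip', pbar.hook)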
|
1692458
|
from kafka.structs import TopicPartition
from typing import Dict
from typing import List
from typing import Optional
class KafkaConsumer:
def __init__(
self,
bootstrap_servers: List[str],
enable_auto_commit: Optional[bool] = True,
group_id: Optional[str] = None,
):
...
def assign(self, parts: List[TopicPartition]) -> None:
...
def seek_to_beginning(self, partition: TopicPartition) -> None:
...
def end_offsets(self, parts: List[TopicPartition]) -> Dict[TopicPartition, int]:
...
class KafkaClient:
topic_partitions: Dict[str, List[int]]
def close(self) -> None:
...
def topics(self) -> List[str]:
...
|
1692459
|
from db import connect
from db.model import Basic, Decision
from sqlalchemy import and_
class DecisionFactory:
@staticmethod
def get_by_user(user_id):
"""
Get by user id
:param int user_id: user id
:return: decisions
:rtype: list[Decision]
"""
session = connect()
columns = [
Decision,
Basic.title_id,
Basic.title_type,
Basic.description,
Basic.image_url
]
query = session \
.query(*columns) \
.join(Basic, Decision.title_id == Basic.title_id) \
.filter(Decision.user_id == user_id)
try:
return query.all()
finally:
session.close()
@staticmethod
def save(decision):
"""
Save decision
:param Decision decision: decision object
"""
session = connect()
try:
session.add(decision)
session.commit()
finally:
session.close()
@staticmethod
def remove(user_id, title_id):
"""
Remove decision
:param int user_id: user id
:param str title_id: title id
"""
session = connect()
filters = [
Decision.user_id == user_id,
Decision.title_id == title_id
]
try:
session.query(Decision).filter(and_(*filters)).delete()
session.commit()
finally:
session.close()
|
1692469
|
import scipy.ndimage as scnd
import scipy.optimize as sio
import numpy as np
import stemtool as st
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib_scalebar.scalebar as mpss
import matplotlib.offsetbox as mploff
import matplotlib.gridspec as mpgs
import matplotlib as mpl
class atomic_dpc(object):
"""
Atomic Resolution DPC estimation
Parameters
----------
Data_4D: ndarray
Four-dimensional dataset where the first two
dimensions are real space scanning dimensions,
        while the last two dimensions are the Fourier
space electron diffraction patterns
Data_ADF: ndarray
Simultaneously collected two-dimensional ADF-STEM
image
calib_pm: float
Real space pixel calibration in picometers
voltage: float
Microscope accelerating voltage in kV
aperture: float
The probe forming condenser aperture in milliradians
Notes
-----
This class function takes in a 4D-STEM image, and a simultaneously
collected atomic resolution ADF-STEM image. Based on the accelerating
voltage and the condenser aperture this calculates the center of mass
(C.O.M.) shifts in the central undiffracted beam. Using the idea that
    the curl of the beam shift vectors should be minimized at the correct
    Fourier rotation angles, this class also corrects for rotation of the
    collected 4D-STEM data with respect to the optic axis. Using these,
    correct potential accumulation and charge accumulation maps can be
built. To prevent errors, we convert everything to SI units first.
Examples
--------
Run as:
>>> DPC = st.dpc.atomic_dpc(Data_4D, DataADF, calibration, voltage, aper)
Once the data is loaded, the ADF-STEM and the BF-STEM images could be
visualized as:
>>> DPC.show_ADF_BF()
Then the following call generates the mean CBED image, and if the show_image
call is True, shows the mean image.
>>> DPC.get_cbed(show_image = True)
The initial uncorrected DPC shifts are generated as:
>>> DPC.initial_dpc()
The corrected DPC shifts are generated:
>>> DPC.correct_dpc()
The charge map is generated through:
>>> DPC.show_charge()
While the potential map is generated though:
>>> DPC.show_potential()
If a section of the image needs to be observed, to visualize the beam shifts,
call the following:
>>> DPC.plot_color_dpc()
References
----------
.. [1] <NAME>. et al. "Atomic electric fields revealed by a quantum mechanical
approach to electron picodiffraction". Nat. Commun. 5:565303 doi: 10.1038/ncomms6653 (2014)
.. [2] Savitzky, <NAME>., <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME> et al. "py4DSTEM: a software package for
multimodal analysis of four-dimensional scanning transmission electron microscopy datasets."
arXiv preprint arXiv:2003.09523 (2020).
.. [3] Ishizuka, Akimitsu, <NAME>, <NAME>, <NAME>,
and <NAME>. "Boundary-artifact-free determination of
potential distribution from differential phase contrast signals."
Microscopy 66, no. 6 (2017): 397-405.
"""
def __init__(self, Data_4D, Data_ADF, calib_pm, voltage, aperture):
"""
Load the user defined values.
It also calculates the wavelength based on the accelerating voltage
This also loads several SI constants as the following attributes
`planck`: The Planck's constant
`epsilon0`: The dielectric permittivity of free space
`e_charge`: The charge of an electron in Coulombs
"""
self.data_adf = Data_ADF
self.data_4D = Data_4D
self.calib = calib_pm
self.voltage = voltage * 1000 # convert to volts
self.wavelength = st.sim.wavelength_ang(voltage) * (
10 ** (-10)
) # convert to meters
self.aperture = aperture / 1000 # convert to radians
self.planck = 6.62607004 * (10 ** (-34))
self.epsilon0 = 8.85418782 * (10 ** (-12))
self.e_charge = (-1) * 1.60217662 * (10 ** (-19))
e_mass = 9.109383 * (10 ** (-31))
c = 299792458
self.sigma = (
(2 * np.pi / (self.wavelength * self.voltage))
* ((e_mass * (c ** 2)) + (self.e_charge * self.voltage))
) / ((2 * e_mass * (c ** 2)) + (self.e_charge * self.voltage))
def show_ADF_BF(self, imsize=(20, 10)):
"""
The ADF-STEM image is already loaded, while the `data_bf`
attribute is obtained by summing up the 4D-STEM dataset along it's
Fourier dimensions. This is also a great checkpoint to see whether
the ADF-STEM and the BF-STEM images are the inverse of each other.
"""
self.data_bf = np.sum(self.data_4D, axis=(-1, -2))
fontsize = int(np.amax(np.asarray(imsize)))
plt.figure(figsize=imsize)
plt.subplot(1, 2, 1)
plt.imshow(self.data_adf, cmap="inferno")
scalebar = mpss.ScaleBar(self.calib / 1000, "nm")
scalebar.location = "lower right"
scalebar.box_alpha = 0
scalebar.color = "w"
plt.gca().add_artist(scalebar)
plt.axis("off")
at = mploff.AnchoredText(
"ADF-STEM", prop=dict(size=fontsize), frameon=True, loc="lower left"
)
at.patch.set_boxstyle("round, pad=0., rounding_size=0.2")
plt.gca().add_artist(at)
plt.subplot(1, 2, 2)
plt.imshow(self.data_bf, cmap="inferno")
scalebar = mpss.ScaleBar(self.calib / 1000, "nm")
scalebar.location = "lower right"
scalebar.box_alpha = 0
scalebar.color = "w"
plt.gca().add_artist(scalebar)
plt.axis("off")
at = mploff.AnchoredText(
"Summed 4D-STEM", prop=dict(size=fontsize), frameon=True, loc="lower left"
)
at.patch.set_boxstyle("round, pad=0., rounding_size=0.2")
plt.gca().add_artist(at)
plt.tight_layout()
def get_cbed(self, imsize=(15, 15), show_image=False):
"""
We calculate the mean CBED pattern by averaging the Fourier data, to
get the object attribute `cbed`. We fit this with a circle function to
obtain the object attributes:
        `beam_x`: x-coordinate of the fitted circle's center
        `beam_y`: y-coordinate of the fitted circle's center
        `beam_r`: radius of the fitted circle
        We use the calculated radius and the known aperture size to get the Fourier
        space calibration, which is stored as the `inverse` attribute.
"""
self.cbed = np.mean(self.data_4D, axis=(0, 1))
self.beam_x, self.beam_y, self.beam_r = st.util.sobel_circle(self.cbed)
self.inverse = self.aperture / (self.beam_r * self.wavelength)
if show_image:
plt.figure(figsize=imsize)
plt.imshow(self.cbed, cmap="inferno")
scalebar = mpss.ScaleBar(self.inverse, "1/m", mpss.SI_LENGTH_RECIPROCAL)
scalebar.location = "lower right"
scalebar.box_alpha = 1
scalebar.color = "k"
plt.gca().add_artist(scalebar)
plt.axis("off")
def initial_dpc(self, imsize=(30, 17), normalize=True):
"""
This calculates the initial DPC center of mass shifts by measuring
the center of mass of each image in the 4D-STEM dataset, and then
comparing that center of mass with the average disk center of the
entire dataset.
"""
qq, pp = np.mgrid[0 : self.data_4D.shape[-1], 0 : self.data_4D.shape[-2]]
yy, xx = np.mgrid[0 : self.data_4D.shape[0], 0 : self.data_4D.shape[1]]
yy = np.ravel(yy)
xx = np.ravel(xx)
        self.YCom = np.empty(self.data_4D.shape[0:2], dtype=float)
        self.XCom = np.empty(self.data_4D.shape[0:2], dtype=float)
for ii in range(len(yy)):
pattern = self.data_4D[yy[ii], xx[ii], :, :]
self.YCom[yy[ii], xx[ii]] = self.inverse * (
(np.sum(np.multiply(qq, pattern)) / np.sum(pattern)) - self.beam_y
)
self.XCom[yy[ii], xx[ii]] = self.inverse * (
(np.sum(np.multiply(pp, pattern)) / np.sum(pattern)) - self.beam_x
)
if normalize:
self.YCom = self.YCom - np.mean(self.YCom)
self.XCom = self.XCom - np.mean(self.XCom)
vm = (np.amax(np.abs(np.concatenate((self.XCom, self.YCom), axis=1)))) / (
10 ** 9
)
fontsize = int(0.9 * np.amax(np.asarray(imsize)))
sc_font = {"weight": "bold", "size": fontsize}
plt.figure(figsize=imsize)
gs = mpgs.GridSpec(imsize[1], imsize[0])
ax1 = plt.subplot(gs[0:15, 0:15])
ax2 = plt.subplot(gs[0:15, 15:30])
ax3 = plt.subplot(gs[15:17, :])
ax1.imshow(self.XCom / (10 ** 9), vmin=-vm, vmax=vm, cmap="RdBu_r")
scalebar = mpss.ScaleBar(self.calib / 1000, "nm")
scalebar.location = "lower right"
scalebar.box_alpha = 1
scalebar.color = "k"
ax1.add_artist(scalebar)
at = mploff.AnchoredText(
"Shift in X direction",
prop=dict(size=fontsize),
frameon=True,
loc="upper left",
)
at.patch.set_boxstyle("round, pad= 0., rounding_size= 0.2")
ax1.add_artist(at)
ax1.axis("off")
ax2.imshow(self.YCom / (10 ** 9), vmin=-vm, vmax=vm, cmap="RdBu_r")
scalebar = mpss.ScaleBar(self.calib / 1000, "nm")
scalebar.location = "lower right"
scalebar.box_alpha = 1
scalebar.color = "k"
ax2.add_artist(scalebar)
at = mploff.AnchoredText(
"Shift in Y direction",
prop=dict(size=fontsize),
frameon=True,
loc="upper left",
)
at.patch.set_boxstyle("round, pad= 0., rounding_size= 0.2")
ax2.add_artist(at)
ax2.axis("off")
        sb = np.zeros((10, 1000), dtype=float)
for ii in range(10):
sb[ii, :] = np.linspace(-vm, vm, 1000)
ax3.imshow(sb, cmap="RdBu_r")
ax3.yaxis.set_visible(False)
x1 = np.linspace(0, 1000, 8)
ax3.set_xticks(x1)
ax3.set_xticklabels(np.round(np.linspace(-vm, vm, 8), 2))
for axis in ["top", "bottom", "left", "right"]:
ax3.spines[axis].set_linewidth(2)
ax3.spines[axis].set_color("black")
ax3.xaxis.set_tick_params(width=2, length=6, direction="out", pad=10)
ax3.set_title(r"$\mathrm{Beam\: Shift\: \left(nm^{-1}\right)}$", **sc_font)
plt.tight_layout()
def correct_dpc(self, imsize=(30, 17)):
"""
        This corrects for the rotation angle of the pixelated detector
        with respect to the optic axis. Some pixelated detectors also flip
        the image; if an image flip is present, it is corrected as well.
        To find the correction, we compare the gradients of both the flipped
        and the unflipped DPC data at multiple rotation angles, and the value
        that has the highest relative contrast with the ADF-STEM image is taken
        as 90 degrees away from the correct angle.
"""
flips = np.zeros(4, dtype=bool)
flips[2:4] = True
chg_sums = np.zeros(4, dtype=self.XCom.dtype)
angles = np.zeros(4, dtype=self.YCom.dtype)
x0 = 90
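        # Evaluate four candidate corrections: (unflipped, flipped) x (theta - 90, theta + 90),
        # where theta minimizes angle_fun; the candidate whose ADF-weighted charge sum
        # is smallest is selected below as the rotation/flip correction.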
for ii in range(2):
to_flip = flips[2 * ii]
if to_flip:
xdpcf = np.flip(self.XCom)
else:
xdpcf = self.XCom
rho_dpc, phi_dpc = st.dpc.cart2pol(self.XCom, self.YCom)
x = sio.minimize(st.dpc.angle_fun, x0, args=(rho_dpc, phi_dpc))
min_x = x.x
sol1 = min_x - 90
sol2 = min_x + 90
chg_sums[int(2 * ii)] = np.sum(
st.dpc.charge_dpc(xdpcf, self.YCom, sol1) * self.data_adf
)
chg_sums[int(2 * ii + 1)] = np.sum(
st.dpc.charge_dpc(xdpcf, self.YCom, sol2) * self.data_adf
)
angles[int(2 * ii)] = sol1
angles[int(2 * ii + 1)] = sol2
self.angle = (-1) * angles[chg_sums == np.amin(chg_sums)][0]
self.final_flip = flips[chg_sums == np.amin(chg_sums)][0]
if self.final_flip:
xdpcf = np.fliplr(self.XCom)
else:
xdpcf = np.copy(self.XCom)
rho_dpc, phi_dpc = st.dpc.cart2pol(xdpcf, self.YCom)
self.XComC, self.YComC = st.dpc.pol2cart(
rho_dpc, (phi_dpc - (self.angle * ((np.pi) / 180)))
)
vm = (np.amax(np.abs(np.concatenate((self.XComC, self.YComC), axis=1)))) / (
10 ** 9
)
fontsize = int(0.9 * np.max(imsize))
sc_font = {"weight": "bold", "size": fontsize}
plt.figure(figsize=imsize)
gs = mpgs.GridSpec(imsize[1], imsize[0])
ax1 = plt.subplot(gs[0:15, 0:15])
ax2 = plt.subplot(gs[0:15, 15:30])
ax3 = plt.subplot(gs[15:17, :])
ax1.imshow(self.XComC / (10 ** 9), vmin=-vm, vmax=vm, cmap="RdBu_r")
scalebar = mpss.ScaleBar(self.calib / 1000, "nm")
scalebar.location = "lower right"
scalebar.box_alpha = 1
scalebar.color = "k"
ax1.add_artist(scalebar)
at = mploff.AnchoredText(
"Corrected shift in X direction",
prop=dict(size=fontsize),
frameon=True,
loc="upper left",
)
at.patch.set_boxstyle("round, pad= 0., rounding_size= 0.2")
ax1.add_artist(at)
ax1.axis("off")
ax2.imshow(self.YComC / (10 ** 9), vmin=-vm, vmax=vm, cmap="RdBu_r")
scalebar = mpss.ScaleBar(self.calib / 1000, "nm")
scalebar.location = "lower right"
scalebar.box_alpha = 1
scalebar.color = "k"
ax2.add_artist(scalebar)
at = mploff.AnchoredText(
"Corrected shift in Y direction",
prop=dict(size=fontsize),
frameon=True,
loc="upper left",
)
at.patch.set_boxstyle("round, pad= 0., rounding_size= 0.2")
ax2.add_artist(at)
ax2.axis("off")
        sb = np.zeros((10, 1000), dtype=float)
for ii in range(10):
sb[ii, :] = np.linspace(-vm, vm, 1000)
ax3.imshow(sb, cmap="RdBu_r")
ax3.yaxis.set_visible(False)
x1 = np.linspace(0, 1000, 8)
ax3.set_xticks(x1)
ax3.set_xticklabels(np.round(np.linspace(-vm, vm, 8), 2))
for axis in ["top", "bottom", "left", "right"]:
ax3.spines[axis].set_linewidth(2)
ax3.spines[axis].set_color("black")
ax3.xaxis.set_tick_params(width=2, length=6, direction="out", pad=10)
ax3.set_title(r"$\mathrm{Beam\: Shift\: \left(nm^{-1}\right)}$", **sc_font)
plt.tight_layout()
self.MomentumX = self.planck * self.XComC
self.MomentumY = self.planck * self.YComC
# assuming infinitely thin sample
self.e_fieldX = self.MomentumX / self.e_charge
self.e_fieldY = self.MomentumY / self.e_charge
def show_charge(self, imsize=(15, 17)):
"""
We calculate the charge from the corrected DPC
center of mass datasets. This is done through
Poisson's equation.
"""
fontsize = int(np.amax(np.asarray(imsize)))
# Use Poisson's equation
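        # The divergence of the measured electric field is proportional to the
        # enclosed charge (Gauss's law); calib is converted here from pm to m.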
self.charge = (
(
(np.gradient(self.e_fieldX)[1] + np.gradient(self.e_fieldY)[0])
* (self.calib * (10 ** (-12)))
)
* self.epsilon0
* 4
* np.pi
)
cm = np.amax(np.abs(self.charge))
plt.figure(figsize=imsize)
fontsize = int(0.9 * np.max(imsize))
sc_font = {"weight": "bold", "size": fontsize}
gs = mpgs.GridSpec(imsize[1], imsize[0])
ax1 = plt.subplot(gs[0:15, 0:15])
ax2 = plt.subplot(gs[15:17, :])
ax1.imshow(self.charge, vmin=-cm, vmax=cm, cmap="RdBu_r")
scalebar = mpss.ScaleBar(self.calib / 1000, "nm")
scalebar.location = "lower right"
scalebar.box_alpha = 1
scalebar.color = "k"
ax1.add_artist(scalebar)
ax1.axis("off")
at = mploff.AnchoredText(
"Charge from DPC", prop=dict(size=fontsize), frameon=True, loc="lower left"
)
at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
ax1.add_artist(at)
        sb = np.zeros((10, 1000), dtype=float)
for ii in range(10):
sb[ii, :] = np.linspace(cm / self.e_charge, -(cm / self.e_charge), 1000)
ax2.imshow(sb, cmap="RdBu_r")
ax2.yaxis.set_visible(False)
no_labels = 7
x1 = np.linspace(0, 1000, no_labels)
ax2.set_xticks(x1)
ax2.set_xticklabels(
np.round(
np.linspace(cm / self.e_charge, -(cm / self.e_charge), no_labels), 6
)
)
for axis in ["top", "bottom", "left", "right"]:
ax2.spines[axis].set_linewidth(2)
ax2.spines[axis].set_color("black")
ax2.xaxis.set_tick_params(width=2, length=6, direction="out", pad=10)
ax2.set_title(r"$\mathrm{Charge\: Density\: \left(e^{-} \right)}$", **sc_font)
plt.tight_layout()
def show_potential(self, imsize=(15, 17)):
"""
Calculate the projected potential from the DPC measurements.
This is accomplished by calculating the phase shift iteratively
        from the normalized center of mass shifts. Normalization means
        calculating the COM shifts in inverse length units and then multiplying
        them by the electron wavelength to get an energy-independent angular
        (mrad) shift, which is used to generate the phase. This phase is
        proportional to the projected potential for weak phase object
        materials (with *lots* of assumptions).
"""
fontsize = int(np.amax(np.asarray(imsize)))
self.phase = st.dpc.integrate_dpc(
self.XComC * self.wavelength, self.YComC * self.wavelength
)
self.potential = self.phase / self.sigma
pm = np.amax(np.abs(self.potential)) * (10 ** 10)
plt.figure(figsize=imsize)
fontsize = int(0.9 * np.max(imsize))
sc_font = {"weight": "bold", "size": fontsize}
gs = mpgs.GridSpec(imsize[1], imsize[0])
ax1 = plt.subplot(gs[0:15, 0:15])
ax2 = plt.subplot(gs[15:17, :])
ax1.imshow(self.potential * (10 ** 10), vmin=-pm, vmax=pm, cmap="RdBu_r")
scalebar = mpss.ScaleBar(self.calib / 1000, "nm")
scalebar.location = "lower right"
scalebar.box_alpha = 1
scalebar.color = "k"
ax1.add_artist(scalebar)
ax1.axis("off")
at = mploff.AnchoredText(
"Calculated projected potential from DPC phase",
prop=dict(size=fontsize),
frameon=True,
loc="lower left",
)
at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
ax1.add_artist(at)
        sb = np.zeros((10, 1000), dtype=float)
for ii in range(10):
sb[ii, :] = np.linspace(-pm, pm, 1000)
ax2.imshow(sb, cmap="RdBu_r")
ax2.yaxis.set_visible(False)
no_labels = 7
x1 = np.linspace(0, 1000, no_labels)
ax2.set_xticks(x1)
ax2.set_xticklabels(np.round(np.linspace(-pm, pm, no_labels), 6))
for axis in ["top", "bottom", "left", "right"]:
ax2.spines[axis].set_linewidth(2)
ax2.spines[axis].set_color("black")
ax2.xaxis.set_tick_params(width=2, length=6, direction="out", pad=10)
ax2.set_title(r"Projected Potential (VÅ)", **sc_font)
plt.tight_layout()
def plot_color_dpc(self, start_frac=0, size_frac=1, skip=2, imsize=(20, 10)):
"""
Use this to plot the corrected DPC center of mass shifts. If no variables
are passed, the arrows are overlaid on the entire image.
Parameters
----------
        start_frac: float, optional
            The starting fraction of the image from which the region with
            overlaid arrows is cut. Default is 0
        size_frac: float, optional
            The fraction of the image, measured from `start_frac`, that is
            cut out to show the overlaid arrows. Default is 1
"""
fontsize = int(np.amax(np.asarray(imsize)))
sc_font = {"weight": "bold", "size": fontsize}
mpl.rc("font", **sc_font)
cc = self.XComC + ((1j) * self.YComC)
cc_color = st.util.cp_image_val(cc)
cutstart = (np.asarray(self.XComC.shape) * start_frac).astype(int)
cut_stop = (np.asarray(self.XComC.shape) * (start_frac + size_frac)).astype(int)
ypos, xpos = np.mgrid[0 : self.YComC.shape[0], 0 : self.XComC.shape[1]]
xcut = xpos[cutstart[0] : cut_stop[0], cutstart[1] : cut_stop[1]]
ycut = np.flipud(ypos[cutstart[0] : cut_stop[0], cutstart[1] : cut_stop[1]])
dx = self.XComC[cutstart[0] : cut_stop[0], cutstart[1] : cut_stop[1]]
dy = self.YComC[cutstart[0] : cut_stop[0], cutstart[1] : cut_stop[1]]
cc_cut = cc_color[cutstart[0] : cut_stop[0], cutstart[1] : cut_stop[1]]
overlay = mpl.patches.Rectangle(
cutstart[0:2],
cut_stop[0] - cutstart[0],
cut_stop[1] - cutstart[1],
linewidth=1.5,
edgecolor="w",
facecolor="none",
)
plt.figure(figsize=imsize)
plt.subplot(1, 2, 1)
plt.imshow(cc_color)
scalebar = mpss.ScaleBar(self.calib, "pm")
scalebar.location = "lower right"
scalebar.box_alpha = 0
scalebar.color = "w"
plt.gca().add_artist(scalebar)
plt.axis("off")
at = mploff.AnchoredText(
"Center of Mass Shift",
prop=dict(size=fontsize),
frameon=True,
loc="lower left",
)
at.patch.set_boxstyle("round, pad=0., rounding_size=0.2")
plt.gca().add_artist(at)
plt.gca().add_patch(overlay)
plt.subplot(1, 2, 2)
plt.imshow(cc_cut)
plt.quiver(
xcut[::skip, ::skip] - cutstart[1],
ycut[::skip, ::skip] - cutstart[0],
dx[::skip, ::skip],
dy[::skip, ::skip],
pivot="mid",
color="w",
)
scalebar = mpss.ScaleBar(self.calib, "pm")
scalebar.location = "lower right"
scalebar.box_alpha = 0
scalebar.color = "w"
plt.gca().add_artist(scalebar)
plt.axis("off")
plt.tight_layout()
|
1692470
|
import time
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.text import get_valid_filename, slugify
from django.utils.encoding import python_2_unicode_compatible
EXTENSION_CHOICES = (
('.ini', _('INI file')),
('.yml', _('YAML file')),
('.xml', _('XML file')),
('.json', _('JSON file')),
)
@python_2_unicode_compatible
class Vassal(models.Model):
"""
Model for uWSGI Vassals
"""
title = models.CharField(
_('Name'),
max_length=250
)
name = models.CharField(
_('Filename'),
help_text=_('Name of uwsgi config file'),
max_length=255,
db_column='name',
editable=False
)
    extension = models.CharField(
        _('Extension'),
        max_length=5,  # long enough for '.json'
        default=EXTENSION_CHOICES[0][0],
        choices=EXTENSION_CHOICES
    )
config = models.TextField(
_('Config'),
help_text=_('Config file blob'),
db_column='config'
)
ts = models.FloatField(
_('Unix Timestamp'),
help_text=_('A number representing the modification time of this row in UNIX format'),
db_column='ts',
editable=False
)
uid = models.PositiveSmallIntegerField(
_('The UID of the vassal instance'),
        help_text=_('Required in Tyrant (secure multi-user hosting) mode only.'),
db_column='uid',
blank=True,
null=True
)
gid = models.PositiveSmallIntegerField(
_('The GID of the vassal instance'),
        help_text=_('Required in Tyrant (secure multi-user hosting) mode only.'),
db_column='gid',
blank=True,
null=True
)
created = models.DateTimeField(
_('Created at'),
auto_now_add=True
)
updated = models.DateTimeField(
_('Updated'),
auto_now=True,
blank=True,
null=True
)
enabled = models.BooleanField(
_('Enabled'),
default=True
)
def __str__(self):
return self.title
def save(self, *args, **kwargs):
self.name = get_valid_filename(slugify(self.title) + self.extension)
self.ts = time.time()
super(Vassal, self).save(*args, **kwargs)
class Meta:
verbose_name = _("Emperor's Vassal")
verbose_name_plural = _("Emperor's Vassals")
db_table = 'vassals'
|
1692499
|
from abc import ABCMeta, abstractmethod
class AbstractDataHandler(object):
"""Abstract Data Handler Class
The data handler is an abstract base class providing an interface for all
subsequent (inherited) data handlers (both live and historic).
The goal of a (derived) data handler object is to output a generated set of
bars for each symbol requested. This will replicate how a live strategy
would function as current market data would be sent 'down the pipe'. Thus a
historic and live system will be treated identically by the rest of the
backtesting suite.
"""
__metaclass__ = ABCMeta
def __init__(self, events, symbol_handler, price_handler):
"""Initialize parameters of the abstract data handler object."""
self.events = events
self.symbol_handler = symbol_handler
self.price_handler = price_handler
self.continue_trading = True
@abstractmethod
def update(self):
"""Objects that implement the data handler abstract base class must
implement a method for obtaining new bars from the data source. This
method places the most recently available bars onto a data structure for
access by methods and objects requiring access to the underlying
financial data.
"""
raise NotImplementedError()
@abstractmethod
def request_prices(self):
"""Request the current price of assets."""
raise NotImplementedError()
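# --- Illustrative sketch (not part of the original interface) ---
# A minimal, hypothetical historic data handler showing how the abstract base
# class above might be implemented; the class name, the `bars` argument, and the
# `latest_bar` attribute are assumptions made purely for illustration.
class ListDataHandler(AbstractDataHandler):
    """Serve pre-loaded bars from an in-memory list, one bar per update() call."""
    def __init__(self, events, symbol_handler, price_handler, bars):
        super(ListDataHandler, self).__init__(events, symbol_handler, price_handler)
        self.bars = list(bars)
        self.latest_bar = None
    def update(self):
        # Pop the next available bar; signal the end of data when exhausted.
        if self.bars:
            self.latest_bar = self.bars.pop(0)
        else:
            self.continue_trading = False
    def request_prices(self):
        # Return the most recently served bar as the current price snapshot.
        return self.latest_bar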
|
1692528
|
import ast
import csv
import datetime
import pytz
from sqlalchemy import Column, Integer, Numeric, String, Boolean, DateTime
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Device(Base):
__tablename__ = 'devices'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, nullable=False)
otaa = Column(Boolean, nullable=False)
deveui = Column(Numeric, nullable=False, unique=True)
devclass = Column(String, nullable=False)
devaddr = Column(Integer, nullable=False)
appeui = Column(Numeric, nullable=False)
nwkskey = Column(Numeric, nullable=False)
appskey = Column(Numeric, nullable=False)
tx_chan = Column(Integer, nullable=True, default=1)
tx_datr = Column(String, nullable=True)
gw_addr = Column(String, nullable=True)
fcntup = Column(Integer, nullable=False, default=0)
fcntdown = Column(Integer, nullable=False, default=0)
fcnterror = Column(Boolean, nullable=False, default=False)
created = Column(DateTime(timezone=True), nullable=False)
updated = Column(DateTime(timezone=True), nullable=False)
@classmethod
def seed(cls, session):
devices = []
# Read fields from the CSV file
with open('devices.csv') as sfile:
reader = csv.DictReader(sfile)
for line in reader:
# Convert data
d = {}
                for k, v in line.items():
if k in {'name', 'devclass'}:
d[k] = v
continue
elif k in {'devaddr', 'nwkskey', 'appskey'} and v == '':
d[k] = None
continue
else:
d[k] = ast.literal_eval(v) if v else ''
devices.append(d)
# Set timestamps as UTC
for d in devices:
now = datetime.datetime.now(tz=pytz.utc).isoformat()
d['created'] = now
d['updated'] = now
# Insert rows
session.bulk_insert_mappings(Device, devices)
@classmethod
    def clear(cls, session):
devices = session.query(Device).all()
for d in devices:
session.delete(d)
|
1692530
|
import os
import datetime
import requests, json
from django.contrib.auth.models import User
from django.core.mail import send_mail
## Global var to be used any time we need to use the range or min/max # of
## Google's category scoring scale. Names changed for more global usage.
## Ex: An accessibility score isn't "slow", it's "poor".
## Comes from Google documentation: https://developers.google.com/web/tools/lighthouse/v3/scoring
## Categories #s updated in 3.1.1
##
##
GOOGLE_SCORE_SCALE = {
"poor": {
"min": 0,
"max": 49
},
"average": {
"min": 50,
"max": 89
},
"good": {
"min": 90,
"max": 100
}
}
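##
## Illustrative helper (a sketch, not part of the original module): maps a numeric
## Lighthouse category score to its bucket name using GOOGLE_SCORE_SCALE above.
## The function name is an assumption for illustration only.
##
def getScoreScaleName(score):
    for scaleName, bounds in GOOGLE_SCORE_SCALE.items():
        if bounds["min"] <= score <= bounds["max"]:
            return scaleName
    return None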
##
## User timings is an array of generic non-property-named objects.
## To get the one you want, you have to loop thru the array and find the one with the name you want.
## This is a common function that will do that for you.
## Simply pass the name of the timing you want, and the ARRAY OF OBJECTS to loop thru.
## @return {object} The required timing object, from which you can then get the values (duration, start, etc.).
##
##
def getUserTimingValue(timingName, userTimingsObject=None, userTimingsArray=None):
    ## Allows us to pass the top-level Lighthouse report "user-timing" object
    ## in here and handle it with a single try/except.
    ## Otherwise, assume an array of objects.
returnObject = {
"name": "",
"duration": 0,
"startTime": 0,
"timingType": ""
}
try:
userTimings = userTimingsObject['details']['items']
except Exception as ex:
userTimings = userTimingsArray
## Try and find the user-timing they requested, else return an empty one.
try:
for item in userTimings:
if item['name'] == timingName:
returnObject = item
break
except Exception as ex:
pass
## Debug
#print(returnObject)
return returnObject
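##
## Illustrative usage (a sketch; the timing name and report structure below are
## assumptions for illustration only):
##   audit = lighthouseReport['audits']['user-timings']
##   fcp = getUserTimingValue('first-contentful-paint', userTimingsObject=audit)
##   print(fcp['duration'], fcp['startTime'])
##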
##
## Takes the HTTP error code passed and the message and pushes
## a message to the Slack web hook URL for our room.
##
##
def sendSlackAlert (errorCode, msg):
slackUrl = os.getenv('DJANGO_SLACK_ALERT_URL', '')
payload = {"text": "*[PageLab] %s just happened* \n%s" % (errorCode, msg)}
if slackUrl:
r = requests.post(slackUrl, json=payload)
##
## Generic "send email" method, accepts three simple arguements.
## NOTE: send_mail function requires email to be array.
##
##
def sendEmailNotification(sendToArr, emailTitle, emailBody):
## Debug and console print instead of actually sending while testing:
#print("[FRM notification] " + emailTitle, sendToArr, emailBody)
#return
## If no emails were setup or passed to us, then we can't send anything.
if len(sendToArr) == 0:
return;
try:
send_mail(
"[PageLab notification] %s" % emailTitle,
emailBody,
"<EMAIL>",
sendToArr,
fail_silently=True,
html_message='<div style="font-family:sans-serif;">%s</div>' % emailBody
)
except:
#TODO: LOG THIS as an error so we know if email sending is failing.
pass
##
## Takes a LighthouseRun queryset and creates data object used by the line chart
## on the report detail page to chart the score history.
##
##
def createHistoricalScoreChartData(LighthouseRunQueryset):
## Setup arrays of data for the line chart.
## Each object is an array that is simply passed to D3 and each represents a line on the chart.
lineChartData = {
'dates': ['x'],
'perfScores': ['Performance'],
'a11yScores': ['Accessibility '],
'seoScores': ['SEO'],
}
## Safety: IF there are actually any items in the inbound queryset, add them as data points.
if LighthouseRunQueryset is not None and LighthouseRunQueryset.count() > 0:
## Get list of field values as array data and add to our arrays setup above for each line.
lhRunsPerfScores = LighthouseRunQueryset.values_list('performance_score', flat=True)
lhRunsA11yScores = LighthouseRunQueryset.values_list('accessibility_score', flat=True)
lhRunsSeoScores = LighthouseRunQueryset.values_list('seo_score', flat=True)
## Add the data values array for each line we want to chart.
lineChartData['perfScores'].extend(list(lhRunsPerfScores))
lineChartData['a11yScores'].extend(list(lhRunsA11yScores))
lineChartData['seoScores'].extend(list(lhRunsSeoScores))
## Add dates, formatted, as x-axis array data.
for runData in LighthouseRunQueryset:
lineChartData['dates'].append(runData.created_date.strftime('%d-%m-%Y'))
## This is the exact specific data object this chart uses.
## We just echo this out to the JS. No further processing needed.
## It's all here, nice tight bundle and makes the page JS real clean.
data = {
'x': 'x',
'xFormat': '%d-%m-%Y',
'type': 'spline',
'columns': [
lineChartData['dates'],
lineChartData['perfScores'],
lineChartData['a11yScores'],
lineChartData['seoScores']
]
}
return data
## *** FUTURE FEATURE ***
##
## Will be used with date pickers UI to allow user to select start/stop date range
## of data they want charted and in the data table and other places.
##
##
## Takes a LighthouseRun queryset (for a given URL) and filters it to a given date scope.
## This is used in several views, so it's here. Also allows us to write a test for it.
## Use cases:
## Show chart/data with ALL lighthouse runs.
## Show chart/data with runs from the past X # days.
## Show chart/data with runs from Sept 5 to Oct 24.
## Show chart/data with runs up until Oct 16.
## Show chart/data with runs from Oct 17 and later.
##
##
def lighthouseRunsByDate(LighthouseRunQueryset, startDate=None, endDate=None):
if startDate is not None:
LighthouseRunQueryset = LighthouseRunQueryset.filter(created_date__gt=startDate)
if endDate is not None:
LighthouseRunQueryset = LighthouseRunQueryset.filter(created_date__lt=endDate)
return LighthouseRunQueryset
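##
## Illustrative usage (a sketch; the queryset and date variables are assumptions
## for illustration only):
##   runs_last_30_days = lighthouseRunsByDate(runs_qs, startDate=thirty_days_ago)
##   runs_in_september = lighthouseRunsByDate(runs_qs, startDate=sept_1, endDate=oct_1)
##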
|
1692548
|
import cmath
if __name__ == '__main__':
s = input() #input a complex number
print(abs(complex(s))) #output the distance in polar coordinate
print(cmath.phase(complex(s))) #output the angle in polar coordinate
|
1692605
|
load(
"//reason/private:extensions.bzl",
"CMXA_EXT",
"CMI_EXT",
"CMO_EXT",
"CMX_EXT",
"C_EXT",
"H_EXT",
"MLI_EXT",
"ML_EXT",
"O_EXT",
)
load(
"//reason/private:providers.bzl",
"MlCompiledModule",
)
load(
":utils.bzl",
"TARGET_BYTECODE",
"TARGET_NATIVE",
"select_compiler",
)
def ocaml_compile_library(
ctx,
arguments,
c_sources,
ml_sources,
outputs,
runfiles,
sorted_sources,
toolchain,
):
"""
Compile a given set of OCaml .ml and .mli sources to their .cmo, .cmi, and
.cmx counterparts.
"""
ctx.actions.run_shell(
inputs=runfiles,
outputs=outputs,
tools=[
toolchain.ocamlc,
toolchain.ocamlopt,
],
command="""\
#!/bin/bash
# Compile .cmi and .cmo files
{_ocamlc} {arguments} $(cat {ml_sources})
# Compile .cmx files
{_ocamlopt} {arguments} $(cat {ml_sources}) {c_sources}
mkdir -p {output_dir}
# C sources will be compiled and put at the top level
find . -maxdepth 1 \
-name "*.o" \
-exec cp {{}} {output_dir}/ \;
find {source_dir} \
-name "*.cm*" \
-exec cp {{}} {output_dir}/ \;
find {source_dir} \
-name "*.o" \
-exec cp {{}} {output_dir}/ \;
cp -f $(cat {ml_sources}) {output_dir}/;
""".format(
_ocamlc=toolchain.ocamlc.path,
_ocamlopt=toolchain.ocamlopt.path,
arguments=" ".join(arguments),
c_sources=" ".join([c.path for c in c_sources]),
ml_sources=sorted_sources.path,
output_dir=outputs[0].dirname,
source_dir=ml_sources[0].dirname,
),
mnemonic="OCamlCompileLib",
progress_message="Compiling ({_in}) to ({out})".format(
_in=", ".join([s.basename for s in ml_sources] +
[c.basename for c in c_sources]),
out=", ".join([s.basename for s in outputs]),
),
)
def ocaml_compile_binary(
ctx,
arguments,
base_libs,
binfile,
c_deps,
c_sources,
deps,
ml_sources,
runfiles,
sorted_sources,
target,
toolchain,
):
"""
Compile a given set of OCaml .ml and .mli sources to a single binary file
Args:
ctx: the context argument from the rule invoking this macro
arguments: a list of string representing the compiler flags
base_libs: a list of target objects from the OCaml stdlib to link against
binfile: the binary file target
c_deps: a list of transitive C dependency targets
c_sources: depset of C sources for this binary
deps: a list of transitive ML dependency targets
ml_sources: a depset of ML sources for this binary
runfiles: list of all the files that need to be present at runtime
sorted_sources: a file target with ML sources in topological order
target: whether to compile to a native or bytecode binary
toolchain: the OCaml toolchain
"""
compiler = select_compiler(toolchain, target)
# Native binaries expect .cmx files while bytecode binaries expect .cmo
expected_object_ext = CMX_EXT
if target == TARGET_BYTECODE:
expected_object_ext = CMO_EXT
dep_libs = []
for d in deps:
name = d.basename
if ML_EXT in name or MLI_EXT in name:
dep_libs.extend([d])
# Extract all .cmxa baselib dependencies to include in linking
stdlib_libs = []
for baselib in base_libs:
if CMXA_EXT in baselib.basename:
stdlib_libs += [baselib]
ctx.actions.run_shell(
inputs=runfiles,
outputs=[binfile],
tools=[
toolchain.ocamlc,
toolchain.ocamlopt,
toolchain.ocamldep,
],
command="""\
#!/bin/bash
# Run ocamldep on all of the ml and mli dependencies for this binary
{_ocamldep} \
-sort \
$(echo {dep_libs} | tr " " "\n" | grep ".ml*") \
> .depend.all
# Extract only the compiled cmx files to use as input for the compiler
cat .depend.all \
| tr " " "\n" \
| grep ".ml$" \
| sed "s/\.ml.*$/{expected_object_ext}/g" \
| xargs \
> .depend.cmx
{_compiler} {arguments} \
{c_objs} \
{base_libs} \
$(cat .depend.cmx) $(cat {ml_sources}) {c_sources}
mkdir -p {output_dir}
find {source_dir} -name "{pattern}" -exec cp {{}} {output_dir}/ \;
""".format(
_compiler=compiler.path,
_ocamldep=toolchain.ocamldep.path,
arguments=" ".join(arguments),
base_libs=" ".join([b.path for b in stdlib_libs]),
c_objs=" ".join([o.path for o in c_deps]),
c_sources=" ".join([c.path for c in c_sources]),
expected_object_ext=expected_object_ext,
dep_libs=" ".join([l.path for l in dep_libs]),
ml_sources=sorted_sources.path,
output_dir=binfile.dirname,
pattern=binfile.basename,
source_dir=ml_sources[0].dirname,
),
mnemonic="OCamlCompileBin",
progress_message="Compiling ({_in}) to ({out})".format(
_in=", ".join([s.basename for s in ml_sources] +
[c.basename for c in c_sources]),
out=binfile.basename),
)
|
1692633
|
import sublime
from os import path
import urllib
import json
import subprocess
import re
from .settings import get_setting
server_addr = "http://127.0.0.1:15155"
cli = 'padawan'
server_command = 'padawan-server'
class Server:
def start(self):
subprocess.Popen(
server_command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
def stop(self):
try:
self.sendRequest('kill', {})
return True
except Exception:
return False
def restart(self):
if self.stop():
self.start()
def sendRequest(self, command, params, data=''):
timeout = get_setting("padawan_timeout", 0.5)
addr = server_addr + "/"+command+"?" + urllib.parse.urlencode(params)
response = urllib.request.urlopen(
addr,
data.encode("utf8"),
timeout
)
result = json.loads(response.read().decode("utf8"))
if "error" in result:
raise ValueError(result["error"])
return result
class Editor:
def getView(self):
return sublime.active_window().active_view()
def log(self, message):
print(message)
def notify(self, message):
self.getView().set_status("PadawanStatus", message)
def progress(self, progress):
bars = int(progress / 5)
bars_str = ''
for i in range(20):
if i < bars:
bars_str += '='
else:
bars_str += ' '
bars_str = '[' + bars_str + ']'
message = "Progress {0} {1}%".format(bars_str, str(progress))
self.getView().set_status("PadawanProgress", message)
return
def error(self, error):
self.notify(error)
def callAfter(self, timeout, callback):
def Notifier():
if callback():
sublime.set_timeout(Notifier, timeout)
sublime.set_timeout(Notifier, timeout)
server = Server()
editor = Editor()
pathError = '''padawan command was not found in your $PATH. Please \
make sure you have installed the padawan.php package and \
configured your $PATH'''
class PadawanClient:
def GetCompletion(self, filepath, line_num, column_num, contents):
curPath = self.GetProjectRoot(filepath)
params = {
'filepath': filepath.replace(curPath, ""),
'line': line_num,
'column': column_num,
'path': curPath
}
result = self.DoRequest('complete', params, contents)
if not result:
return {"completion": []}
return result
def SaveIndex(self, filepath):
return self.DoRequest('save', {'filepath': filepath})
def DoRequest(self, command, params={}, data=''):
try:
return server.sendRequest(command, params, data)
except urllib.request.URLError:
editor.error("Padawan.php is not running")
except Exception as e:
editor.error("Error occured {0}".format(e))
return False
def AddPlugin(self, plugin):
composer = get_setting("padawan_composer", "composer")
composerCommand = composer + ' global require '
command = '{0} {2} && {1} plugin add {2}'.format(
composerCommand,
cli,
plugin
)
stream = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
def OnAdd(retcode):
if not retcode:
server.restart()
editor.notify("Plugin installed")
else:
if retcode == 127:
editor.error(pathError)
editor.error("Plugin installation failed")
def LogAdding():
retcode = stream.poll()
if retcode is not None:
return OnAdd(retcode)
line = stream.stdout.readline().decode("ascii")
editor.log(line)
return True
editor.callAfter(1e-4, LogAdding)
def RemovePlugin(self, plugin):
composer = get_setting("padawan_composer", "composer")
composerCommand = composer + ' global remove'
command = '{0} {1}'.format(
composerCommand,
plugin
)
stream = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
def onRemoved():
subprocess.Popen(
'{0}'.format(
cli + ' plugin remove ' + plugin
),
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
).wait()
self.RestartServer()
return editor.notify("Plugin removed")
def LogRemoving():
retcode = stream.poll()
if retcode is not None:
return onRemoved()
line = stream.stdout.readline().decode("ascii")
editor.log(line)
return True
editor.callAfter(1e-4, LogRemoving)
def GetInstalledPlugins(self):
return self.DoRequest("plugins")["plugins"]
def Generate(self, filepath):
curPath = self.GetProjectRoot(filepath)
stream = subprocess.Popen(
'cd ' + curPath + ' && ' + cli + ' generate',
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
def onGenerationEnd(retcode):
if retcode > 0:
if retcode == 127:
editor.error(pathError)
else:
editor.error("Error occured, code: {0}".format(
str(retcode)
))
return
server.restart()
editor.progress(100)
editor.notify("Index generated")
def ProcessGenerationPoll():
retcode = stream.poll()
if retcode is not None:
return onGenerationEnd(retcode)
line = stream.stdout.readline().decode("utf8")
errorMatch = re.search('Error: (.*)', line)
if errorMatch is not None:
retcode = 1
editor.error("{0}".format(
errorMatch.group(1).replace("'", "''")
))
return
match = re.search('Progress: ([0-9]+)', line)
if match is None:
return True
progress = int(match.group(1))
editor.progress(progress)
return True
editor.callAfter(1e-4, ProcessGenerationPoll)
def StartServer(self):
server.start()
def StopServer(self):
server.stop()
def RestartServer(self):
server.restart()
def GetProjectRoot(self, filepath):
curPath = path.dirname(filepath)
while curPath != '/' and not path.exists(
path.join(curPath, 'composer.json')
):
curPath = path.dirname(curPath)
if curPath == '/':
curPath = path.dirname(filepath)
return curPath
client = PadawanClient()
|
1692658
|
import unittest
import numpy as np
import pandas as pd
import os
import sys
sys.path.insert(0, os.path.abspath('../../../'))
from mastml.plots import Scatter, Histogram
class TestPlots(unittest.TestCase):
def test_scatter(self):
X = pd.Series(np.random.uniform(low=0.0, high=100, size=(50,)))
y = pd.Series(np.random.uniform(low=0.0, high=100, size=(50,)))
Scatter().plot_predicted_vs_true(y_true=X,
y_pred=y,
savepath=os.getcwd(),
x_label='TEST_scatter',
data_type='test',)
self.assertTrue(os.path.exists('parity_plot_test.png'))
os.remove('parity_plot_test.png')
return
def test_histogram(self):
X = pd.Series(np.random.uniform(low=0.0, high=100, size=(50,)))
Histogram().plot_histogram(df=X,
savepath=os.getcwd(),
file_name='TEST_hist',
x_label='TEST_hist')
self.assertTrue(os.path.exists('TEST_hist.png'))
self.assertTrue(os.path.exists('TEST_hist.xlsx'))
self.assertTrue(os.path.exists('TEST_hist_statistics.xlsx'))
os.remove('TEST_hist.png')
os.remove('TEST_hist.xlsx')
os.remove('TEST_hist_statistics.xlsx')
return
def test_residual_histogram(self):
X = pd.Series(np.random.uniform(low=0.0, high=100, size=(50,)))
y = pd.Series(np.random.uniform(low=0.0, high=100, size=(50,)))
Histogram().plot_residuals_histogram(y_true=X,
y_pred=y,
savepath=os.getcwd())
self.assertTrue(os.path.exists('residual_histogram.png'))
self.assertTrue(os.path.exists('residual_histogram.xlsx'))
self.assertTrue(os.path.exists('residual_histogram_statistics.xlsx'))
os.remove('residual_histogram.png')
os.remove('residual_histogram.xlsx')
os.remove('residual_histogram_statistics.xlsx')
return
if __name__ == '__main__':
unittest.main()
|
1692673
|
import pytest
from sentinelhub import SentinelHubSession
@pytest.mark.sh_integration
def test_session():
session = SentinelHubSession()
token = session.token
headers = session.session_headers
for item in [token, headers]:
assert isinstance(item, dict)
for key in ['access_token', 'expires_in', 'expires_at']:
assert key in token
same_token = session.token
assert token['access_token'] == same_token['access_token'], 'The token has been refreshed'
token['expires_at'] = 0
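    # Forcing expiry so that the next access to session.token triggers a refresh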
new_token = session.token
assert token['access_token'] != new_token['access_token'], 'The token has not been refreshed'
|
1692710
|
self.description = "Sysupgrade with sync packages having absurd epochs"
versions = (
"1234327518932650063289125782697890:1.0-1",
"1234327518932650063289125782697891:0.9-1",
"1234327518932650063289125782697891:1.0-1",
"1234327518932650063289125782697891:1.1-1",
"1234327518932650063289125782697892:1.0-1",
)
pkgvers = [(n, versions[n]) for n in range(len(versions))]
for k, v in pkgvers:
sp = pmpkg("pkg_%d" % k, v)
self.addpkg2db("sync", sp)
for k, v in pkgvers:
lp = pmpkg("pkg_%d" % k, versions[2])
self.addpkg2db("local", lp)
self.args = "-Su"
self.addrule("PACMAN_RETCODE=0")
for k, v in pkgvers:
right_ver = versions[max(k, 2)]
self.addrule("PKG_VERSION=pkg_%d|%s" % (k, right_ver))
|
1692728
|
import unittest
from mock import MagicMock
from src.domain.input_text.input_text_processor import InputTextProcessor
from src.domain.interaction.information_phase import InformationPhase
from src.domain.interaction.transition_to_question_answering_phase import TransitionToQuestionAnsweringPhase
class TestInformationPhase(unittest.TestCase):
_SOME_NON_SWITCHING_INPUT_TEXT = "some input text"
_SOME_RESPONSE = "some response"
def setUp(self):
self.information_phase = InformationPhase()
self.input_text_processor_mock = MagicMock(spec=InputTextProcessor)
def test__given__non_switching_input_text__when__fetching_next_interaction_phase__then__returns_itself(self):
expected_next_phase = self.information_phase
actual_next_phase = self.information_phase.fetch_next_interaction_phase(self._SOME_NON_SWITCHING_INPUT_TEXT)
self.assertEqual(expected_next_phase, actual_next_phase)
def test__given__switching_input_text__when__fetching_next_interaction_phase__then__returns_transition_to_question_answering_phase(
self
):
expected_next_phase = TransitionToQuestionAnsweringPhase()
actual_next_phase = self.information_phase.fetch_next_interaction_phase(
InformationPhase.SWITCHING_TO_QUESTION_ANSWERING_PHASE_MESSAGE
)
self.assertEqual(expected_next_phase, actual_next_phase)
def test__when__processing_input_text__then__input_text_processor_processes_context_statement_and_returns_appropriate_response(
self
):
expected_response = self._SOME_RESPONSE
self.input_text_processor_mock.process_context_statement.return_value = expected_response
actual_response = self.information_phase.process_input_text(
self._SOME_NON_SWITCHING_INPUT_TEXT, self.input_text_processor_mock
)
self.input_text_processor_mock.process_context_statement.assert_called_once_with(
self._SOME_NON_SWITCHING_INPUT_TEXT
)
self.assertEqual(expected_response, actual_response)
|
1692779
|
import torch
from tqdm import tqdm
# MLP + Positional Encoding
class Decoder(torch.nn.Module):
def __init__(self, input_dims = 3, internal_dims = 128, output_dims = 4, hidden = 5, multires = 2):
super().__init__()
self.embed_fn = None
if multires > 0:
embed_fn, input_ch = get_embedder(multires)
self.embed_fn = embed_fn
input_dims = input_ch
net = (torch.nn.Linear(input_dims, internal_dims, bias=False), torch.nn.ReLU())
for i in range(hidden-1):
net = net + (torch.nn.Linear(internal_dims, internal_dims, bias=False), torch.nn.ReLU())
net = net + (torch.nn.Linear(internal_dims, output_dims, bias=False),)
self.net = torch.nn.Sequential(*net)
def forward(self, p):
if self.embed_fn is not None:
p = self.embed_fn(p)
out = self.net(p)
return out
def pre_train_sphere(self, iter):
print ("Initialize SDF to sphere")
loss_fn = torch.nn.MSELoss()
optimizer = torch.optim.Adam(list(self.parameters()), lr=1e-4)
for i in tqdm(range(iter)):
p = torch.rand((1024,3), device='cuda') - 0.5
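            # Target: signed distance to a sphere of radius 0.3 centred at the origin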
ref_value = torch.sqrt((p**2).sum(-1)) - 0.3
output = self(p)
loss = loss_fn(output[...,0], ref_value)
optimizer.zero_grad()
loss.backward()
optimizer.step()
print("Pre-trained MLP", loss.item())
# Positional Encoding from https://github.com/yenchenlin/nerf-pytorch/blob/1f064835d2cca26e4df2d7d130daa39a8cee1795/run_nerf_helpers.py
class Embedder:
def __init__(self, **kwargs):
self.kwargs = kwargs
self.create_embedding_fn()
def create_embedding_fn(self):
embed_fns = []
d = self.kwargs['input_dims']
out_dim = 0
if self.kwargs['include_input']:
embed_fns.append(lambda x : x)
out_dim += d
max_freq = self.kwargs['max_freq_log2']
N_freqs = self.kwargs['num_freqs']
if self.kwargs['log_sampling']:
freq_bands = 2.**torch.linspace(0., max_freq, steps=N_freqs)
else:
freq_bands = torch.linspace(2.**0., 2.**max_freq, steps=N_freqs)
for freq in freq_bands:
for p_fn in self.kwargs['periodic_fns']:
embed_fns.append(lambda x, p_fn=p_fn, freq=freq : p_fn(x * freq))
out_dim += d
self.embed_fns = embed_fns
self.out_dim = out_dim
def embed(self, inputs):
return torch.cat([fn(inputs) for fn in self.embed_fns], -1)
def get_embedder(multires):
embed_kwargs = {
'include_input' : True,
'input_dims' : 3,
'max_freq_log2' : multires-1,
'num_freqs' : multires,
'log_sampling' : True,
'periodic_fns' : [torch.sin, torch.cos],
}
embedder_obj = Embedder(**embed_kwargs)
embed = lambda x, eo=embedder_obj : eo.embed(x)
return embed, embedder_obj.out_dim
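# Illustrative usage (a sketch): with multires=2, 3-D points are mapped to
# 3 (identity) + 3 * 2 freqs * 2 periodic fns = 15 features, e.g.
#   embed_fn, out_dim = get_embedder(2)
#   feats = embed_fn(torch.rand(1024, 3))   # feats.shape == (1024, 15); out_dim == 15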
|
1692783
|
import argparse
import os
import torch
import yaml
from train.train import train
os.environ['NCCL_LL_THRESHOLD'] = '0'
parser = argparse.ArgumentParser(description='Train model on multiple cards')
parser.add_argument('--config', help='path to yaml config file')
parser.add_argument('--local_rank', type=int, help='local gpu id')
args = parser.parse_args()
config = yaml.safe_load(open(args.config))
torch.distributed.init_process_group(backend='nccl', init_method='env://')
config['local_rank'] = args.local_rank
torch.cuda.set_device(args.local_rank)
train(config)
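# Illustrative launch command (a sketch; the script and config file names are
# assumptions for illustration only):
#   python -m torch.distributed.launch --nproc_per_node=4 train_dist.py --config config.yml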
|
1692791
|
from unittest import TestCase
from torch.nn import BCELoss
from tqdm import tqdm
from experiments import Experiment
from experiments.environment import get_env
from experiments.utils import flatten
from models.transformers import JointBERT
from wiki.data_helpers import JointBERTWikiDataHelper
class ExperimentTest(TestCase):
def test_cls_init(self):
env = get_env()
exp = Experiment(
# random_seed=0,
epochs=1,
model_cls='models.transformers.JointBERT',
model_params={
'bert_model_path': env['bert_dir'] + '/bert-base-cased',
'labels_count': 3,
},
loss_func_cls='torch.nn.BCELoss', # loss,
model_output_to_loss_input=lambda ys: ys.double(),
data_helper_cls='wiki.data_helpers.JointBERTWikiDataHelper',
data_helper_params={
'wiki_relations_path': '../wiki/relations.csv',
'wiki_articles_path': '../wiki/docs.pickle',
'labels': ['employer', 'country_of_citizenship'],
# 'employer' # 'capital' # 'country_of_citizenship' #'educated_at' # 'opposite_of'
'label_col': 'relation_name',
'negative_sampling_ratio': 1.,
'train_test_split': 0.7,
'max_seq_length': 512,
'train_batch_size': 4,
'test_batch_size': 4,
'bert_model_path': '/Volumes/data/repo/data/bert/bert-base-cased',
# 'bert_tokenizer_cls': '',
'bert_tokenizer_params': {
'do_lower_case': False,
},
'df_limit': 3,
},
tqdm_cls='tqdm.tqdm',
output_dir='../output',
)
assert isinstance(exp.model, JointBERT)
assert isinstance(exp.data_helper, JointBERTWikiDataHelper)
assert isinstance(exp.loss_func, BCELoss)
assert tqdm == exp.tqdm_cls
print(flatten(exp.to_dict()))
exp.run()
|
1692837
|
from .AutomatonGenerators import generate_random_dfa, generate_random_mealy_machine, generate_random_moore_machine, generate_random_markov_chain
from .AutomatonGenerators import generate_random_mdp, generate_random_ONFSM
from .FileHandler import save_automaton_to_file, load_automaton_from_file, visualize_automaton
from .ModelChecking import model_check_experiment, mdp_2_prism_format, model_check_properties, get_properties_file, get_correct_prop_values, compare_automata
from ..automata.StochasticMealyMachine import smm_to_mdp_conversion
from .BenchmarkSULs import *
from .DataHandler import *
|
1692838
|
from enum import Enum
class CriminalCase:
pass
class CriminalOffence(Enum):
# List from https://www.cps.gov.uk/sites/default/files/
# documents/publications/annex_1a_table_of_offences_scheme_c.pdf
DANGEROUS_DRIVING = "Dangerous driving"
ENDANGERING_AN_AIRCRAFT = "Endangering an aircraft"
FALSE_ACCOUNTING = "False accounting"
IMPERSONATING_CUSTOMS_OFFICER = "Impersonating Customs officer"
KEEPING_A_DISORDERLY_HOUSE = "Keeping a disorderly house"
CORRUPTION_IN_PUBLIC_OFFICE = "Corruption in public office"
CUTTING_AWAY_BUOYS_ETC = "Cutting away buoys etc"
FALSE_EVIDENCE_BEFORE_EUROPEAN_COURT = "False evidence before European Court"
FIRING_ON_REVENUE_VESSEL = "Firing on Revenue vessel"
FRAUDULENT_EVASION_OF_AGRICULTURAL_LEVY = "Fraudulent evasion of agricultural levy"
OBSTRUCTING_ENGINE_OR_CARRIAGE_ON_RAILWAY = (
"Obstructing engine or carriage on railway"
)
class PNCId:
def __init__(self, value: str):
self.value = value
class Suspect:
def __init__(self, offence: CriminalOffence):
self.offence = offence
class PoliceInvestigation:
def __init__(self, pnc_id: PNCId, suspect: Suspect):
if not pnc_id:
raise ValueError("You must provide a PNC Id")
if not suspect:
raise ValueError("You must provide a suspect")
self.pnc_id = pnc_id
self.suspects = {suspect}
class PreChargeDecision:
pass
|
1692844
|
def create_dictionary_from_lexicon(path, punctuation_marks):
words = ["<unk>", "</s>"]
with open(path, 'r') as f:
for line in f:
word = line.strip()
if word not in punctuation_marks:
words.append(word)
return dict(zip(words, range(len(words))))
def create_phone_dictionary_from_lexicon(nonsilence_phones_path, silence_phones_path):
phones = []
for path in [nonsilence_phones_path, silence_phones_path]:
with open(path, 'r') as f:
for line in f:
phones.append(line.strip())
return dict(zip(phones, range(len(phones))))
def create_lexicon(path):
lexicon = dict()
with open(path, 'r') as f:
for line in f:
(word, pronunciation) = line.strip().split(None, 1)
lexicon[word] = pronunciation.split()
return lexicon
def create_dictionary_from_punctuation_marks(punctuation_marks):
punctuation_marks = ["<SPACE>"] + punctuation_marks + ["</s>"]
return dict(zip(punctuation_marks, range(len(punctuation_marks))))
|
1692849
|
import os
import copy
from continual_rl.policies.policy_base import PolicyBase
from continual_rl.policies.impala.impala_policy_config import ImpalaPolicyConfig
from continual_rl.policies.impala.impala_environment_runner import ImpalaEnvironmentRunner
from continual_rl.policies.impala.nets import ImpalaNet
from continual_rl.policies.impala.torchbeast.monobeast import Monobeast
from continual_rl.utils.utils import Utils
class ImpalaPolicy(PolicyBase):
"""
With IMPALA, the parallelism is the point, so rather than splitting it up into compute_action and train like normal,
just let the existing IMPALA implementation handle it all.
This policy is now basically a container for the Monobeast object itself, which holds persistent information
(e.g. the model and the replay buffers).
"""
def __init__(self, config: ImpalaPolicyConfig, observation_space, action_spaces, impala_class: Monobeast = None,
policy_net_class: ImpalaNet = None):
super().__init__()
self._config = config
self._action_spaces = action_spaces
model_flags = self._create_model_flags()
if impala_class is None:
impala_class = Monobeast
if policy_net_class is None:
policy_net_class = ImpalaNet
self.impala_trainer = impala_class(model_flags, observation_space, action_spaces, policy_net_class)
def _create_model_flags(self):
"""
Finishes populating the config to contain the rest of the flags used by IMPALA in the creation of the model.
"""
# torchbeast will change flags, so copy it so config remains unchanged for other tasks.
flags = copy.deepcopy(self._config)
flags.savedir = str(self._config.output_dir)
return flags
def get_environment_runner(self, task_spec):
return ImpalaEnvironmentRunner(self._config, self)
def compute_action(self, observation, task_id, action_space_id, last_timestep_data, eval_mode):
pass
def train(self, storage_buffer):
pass
def save(self, output_path_dir, cycle_id, task_id, task_total_steps):
self.impala_trainer.save(output_path_dir)
def load(self, output_path_dir):
self.impala_trainer.load(output_path_dir)
|
1692873
|
try:
from rgbmatrix import graphics
except ImportError:
from RGBMatrixEmulator import graphics
from data.config.color import Color
from data.config.layout import Layout
from data.standings import Division, League
from utils import center_text_position
def render_standings(canvas, layout: Layout, colors: Color, division: Division, stat):
league = division.name[:2] # al or nl
__fill_bg(canvas, colors, league)
if canvas.width > 32:
__render_static_wide_standings(canvas, layout, colors, division, league)
else:
return __render_rotating_standings(canvas, layout, colors, division, stat, league)
def __render_rotating_standings(canvas, layout, colors, division, stat, league):
coords = layout.coords("standings")
font = layout.font("standings")
divider_color = get_standings_color_node(colors, "divider", league)
stat_color = get_standings_color_node(colors, "stat", league)
team_stat_color = get_standings_color_node(colors, "team.stat", league)
team_name_color = get_standings_color_node(colors, "team.name", league)
team_elim_color = get_standings_color_node(colors, "team.elim", league)
team_clinched_color = get_standings_color_node(colors, "team.clinched", league)
offset = coords["offset"]
graphics.DrawLine(canvas, 0, 0, coords["width"], 0, divider_color)
graphics.DrawText(canvas, font["font"], coords["stat_title"]["x"], offset, stat_color, stat.upper())
graphics.DrawLine(canvas, coords["divider"]["x"], 0, coords["divider"]["x"], coords["height"], divider_color)
for team in division.teams:
graphics.DrawLine(canvas, 0, offset, coords["width"], offset, divider_color)
team_text = "{:3s}".format(team.team_abbrev)
stat_text = str(getattr(team, stat))
color = team_elim_color if team.elim else (team_clinched_color if team.clinched else team_name_color)
graphics.DrawText(canvas, font["font"], coords["team"]["name"]["x"], offset, color, team_text)
color = team_elim_color if team.elim else (team_clinched_color if team.clinched else team_stat_color)
graphics.DrawText(canvas, font["font"], coords["team"]["record"]["x"], offset, color, stat_text)
offset += coords["offset"]
def __render_static_wide_standings(canvas, layout, colors, division, league):
coords = layout.coords("standings")
font = layout.font("standings")
divider_color = get_standings_color_node(colors, "divider", league)
team_stat_color = get_standings_color_node(colors, "team.stat", league)
team_name_color = get_standings_color_node(colors, "team.name", league)
team_elim_color = get_standings_color_node(colors, "team.elim", league)
team_clinched_color = get_standings_color_node(colors, "team.clinched", league)
start = coords.get("start", 0)
offset = coords["offset"]
graphics.DrawLine(canvas, 0, start, coords["width"], start, divider_color)
graphics.DrawLine(
canvas, coords["divider"]["x"], start, coords["divider"]["x"], start + coords["height"], divider_color
)
offset += start
for team in division.teams:
graphics.DrawLine(canvas, 0, offset, coords["width"], offset, divider_color)
color = team_elim_color if team.elim else (team_clinched_color if team.clinched else team_name_color)
team_text = team.team_abbrev
graphics.DrawText(canvas, font["font"], coords["team"]["name"]["x"], offset, color, team_text)
record_text = "{}-{}".format(team.w, team.l)
record_text_x = center_text_position(record_text, coords["team"]["record"]["x"], font["size"]["width"])
if "-" in str(team.gb):
gb_text = " - "
else:
gb_text = "{:>4s}".format(str(team.gb))
gb_text_x = coords["team"]["games_back"]["x"] - (len(gb_text) * font["size"]["width"])
color = team_elim_color if team.elim else (team_clinched_color if team.clinched else team_stat_color)
graphics.DrawText(canvas, font["font"], record_text_x, offset, color, record_text)
graphics.DrawText(canvas, font["font"], gb_text_x, offset, color, gb_text)
offset += coords["offset"]
def __fill_bg(canvas, colors, league: str):
bg_color = get_standings_color_node(colors, "background", league)
canvas.Fill(bg_color.red, bg_color.green, bg_color.blue)
def get_standings_color_node(colors, node_name, league):
# try the league-specific color node.
# If not present, go with the standard "standings"
try:
return colors.graphics_color(f"standings.{league.lower()}.{node_name}")
except KeyError:
return colors.graphics_color(f"standings.{node_name}")
def render_bracket(canvas, layout, colors, league: League):
__fill_bg(canvas, colors, league.name)
coords = layout.coords("standings.postseason")
font = layout.font("standings")
team_name_color = get_standings_color_node(colors, "team.name", league.name)
divider_color = get_standings_color_node(colors, "divider", league.name)
matchup_gap = coords["matchup_y_gap"]
winner_offset = matchup_gap // 2
series_gap = coords["series_x_gap"]
char_width = font["size"]["width"] + 2
wc_x = coords["wc_x_start"]
wc_y = coords["wc_y_start"]
ds_x = wc_x + series_gap
ds_a_y = wc_y + winner_offset
ds_b_y = coords["ds_b_y_start"]
lcs_x = ds_x + series_gap
champ_y = (ds_b_y + ds_a_y) // 2 + winner_offset
champ_x = lcs_x + series_gap
# draw bracket lines
# wc divider
graphics.DrawLine(canvas, wc_x, wc_y, wc_x + series_gap - char_width // 2, wc_y, divider_color)
# drop down
graphics.DrawLine(canvas, ds_x - char_width // 2, wc_y, ds_x - char_width // 2, ds_a_y, divider_color)
# ds a divider
graphics.DrawLine(
canvas, ds_x - char_width // 2, ds_a_y, ds_x + series_gap - char_width // 2, ds_a_y, divider_color
)
# connect to lcs
graphics.DrawLine(
canvas, lcs_x - char_width // 2, ds_a_y, lcs_x - char_width // 2, ds_a_y + winner_offset, divider_color
)
# ds b divider
graphics.DrawLine(canvas, ds_x, ds_b_y, ds_x + series_gap - char_width // 2, ds_b_y, divider_color)
# connect to lcs
graphics.DrawLine(
canvas, lcs_x - char_width // 2, ds_b_y, lcs_x - char_width // 2, ds_b_y - winner_offset, divider_color
)
    # lcs horizontals
graphics.DrawLine(
canvas,
lcs_x - char_width // 2,
ds_a_y + winner_offset,
lcs_x + series_gap - char_width // 2,
ds_a_y + winner_offset,
divider_color,
)
graphics.DrawLine(
canvas,
lcs_x - char_width // 2,
ds_b_y - winner_offset,
lcs_x + series_gap - char_width // 2,
ds_b_y - winner_offset,
divider_color,
)
# champ lines
graphics.DrawLine(
canvas,
champ_x - char_width // 2,
ds_a_y + winner_offset,
champ_x - char_width // 2,
ds_b_y - winner_offset,
divider_color,
)
graphics.DrawLine(
canvas, champ_x - char_width // 2, champ_y - winner_offset, champ_x, champ_y - winner_offset, divider_color,
)
# draw bracket text
# wc teams
graphics.DrawText(canvas, font["font"], wc_x, wc_y, team_name_color, league.wc2)
graphics.DrawText(canvas, font["font"], wc_x, wc_y + matchup_gap, team_name_color, league.wc1)
# DS A teams
graphics.DrawText(canvas, font["font"], ds_x, ds_a_y, team_name_color, league.wc_winner)
graphics.DrawText(canvas, font["font"], ds_x, ds_a_y + matchup_gap, team_name_color, league.ds_one)
# DS B
graphics.DrawText(canvas, font["font"], ds_x, ds_b_y, team_name_color, league.ds_three)
graphics.DrawText(canvas, font["font"], ds_x, ds_b_y + matchup_gap, team_name_color, league.ds_two)
# LCS
graphics.DrawText(canvas, font["font"], lcs_x, ds_a_y + winner_offset, team_name_color, league.l_two)
graphics.DrawText(canvas, font["font"], lcs_x, ds_b_y + winner_offset, team_name_color, league.l_one)
# league champ
graphics.DrawText(canvas, font["font"], champ_x + 1, champ_y, team_name_color, league.champ)
|
1692925
|
import torch
from typing import List
from yacs.config import CfgNode
from torch.nn import functional as F
from .jenson_shannon_divergence import jensen_shannon_divergence
from .build import LOSS_REGISTRY
@LOSS_REGISTRY.register('weighted_focal_loss')
class WeightedFocalLoss(torch.nn.Module):
"""
If dysfunctional, likely incorrect implementation
"""
def __init__(self, loss_cfg: CfgNode, num_classes: int, weights: List, **kwargs):
"""
:param num_classes: number of classes
:param weights: default class weights
:param gamma: gamma for focal loss
"""
super(WeightedFocalLoss, self).__init__()
self.class_weights = torch.nn.Parameter(torch.tensor(weights), requires_grad=False)
self.dummy_eyes = torch.nn.Parameter(torch.eye(num_classes), requires_grad=False)
focal_loss_cfg = loss_cfg.FOCAL_LOSS
self.gamma = focal_loss_cfg.GAMMA
def forward(self, logits, labels, js_divergence=False):
if js_divergence:
logits, logits_aug1, logits_aug2 = torch.chunk(logits, 3, dim=0)
loss = jensen_shannon_divergence(logits, logits_aug1, logits_aug2)
else:
loss = 0
targets = self.dummy_eyes[labels]
alpha = self.class_weights[labels]
ce_loss = F.binary_cross_entropy_with_logits(
logits, targets, reduction="none"
)
# https://github.com/richardaecn/class-balanced-loss/blob/master/src/cifar_main.py#L226-L266
        # The expression below is equivalent to the usual focal-loss modulator:
        #   p = logits.sigmoid()
        #   pt = p * targets + (1 - p) * (1 - targets)
        #   modulator = (1 - pt) ** gamma
        # because, for targets in {0, 1},
        #   targets == 1:  1 - pt = 1 - p = exp(-logits - log1p(exp(-logits)))
        #   targets == 0:  1 - pt = p     = exp(-log1p(exp(-logits)))
        # so (1 - pt) ** gamma = exp(-gamma * targets * logits - gamma * log1p(exp(-logits))),
        # which is the numerically stable form computed here.
modulator = torch.exp(
-self.gamma * targets * logits - self.gamma * torch.log1p(torch.exp(-1 * logits)))
loss = modulator * ce_loss
loss = loss.sum(dim=1)
loss = loss * alpha
avg_loss = loss.mean()
preds = torch.argmax(logits.float(), dim=1)
corrects = (labels == preds)
acc = torch.sum(corrects) / (len(corrects) + 0.0)
return avg_loss, acc
|
1692947
|
from polyphony import testbench
def ifexp01(x, y):
return True if x == y else False
@testbench
def test():
assert False == ifexp01(0, 1)
assert True == ifexp01(1, 1)
assert False == ifexp01(True, False)
test()
|
1692956
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .utils import *
from .affine_net import *
from .momentum_net import *
import mermaid.module_parameters as pars
import mermaid.model_factory as py_mf
import mermaid.utils as py_utils
from functools import partial
from mermaid.libraries.functions.stn_nd import STNFunction_ND_BCXYZ
class MermaidNet(nn.Module):
"""
this network is an end to end system for momentum generation and mermaid registration
include the following parts
1 . (optional) affine net the affine network is used to affine the source and target image
2. the momentum generation net work, this network is a u-net like encoder decoder
3. the mermaid part, an map-based registration model would be called from the Mermaid tookit
In detail of implementation, we should take care of the memory issue, one possible solution is using low-resolution mapping and then upsampling the transformation map
1. affine network, this is a pretrained network, so only the forward model is used,
in current design, the input and output of this net is not downsampled
2. momentum generation net, this is a trainable network, but we would have a low-res factor to train it at a low-resolution
the input may still at original resolution (for high quality interpolation), but the size during the computation and of the output are determined by the low-res factor
3. mermaid part, this is an non-parametric unit, where should call from the mermaid, and the output transformation map should be upsampled to the
full resolution size. All momentum based mermaid registration method should be supported. (todo support velcoity methods)
so the input and the output of each part should be
1. affine: input: source, target, output: s_warped, affine_map
2. momentum: input: init_warped_source, target, output: low_res_mom
3. mermaid: input: s, low_res_mom, low_res_initial_map output: map, warped_source
pay attention in Mermaid toolkit, the image intensity and identity transformation coord are normalized into [0,1],
while in networks the intensity and identity transformation coord are normalized into [-1,1],
todo use the coordinate system consistent with mermaid [0,1]
"""
def __init__(self, img_sz=None, opt=None):
super(MermaidNet, self).__init__()
opt_mermaid = opt['tsk_set']['reg']['mermaid_net']
low_res_factor = opt['tsk_set']['reg'][('low_res_factor',1.,"factor of low-resolution map")]
batch_sz = opt['tsk_set']['batch_sz']
self.record_path = opt['tsk_set']['path']['record_path']
"""record path of the task"""
self.is_train = opt['tsk_set'][('train',False,'if is in train mode')]
"""if is in train mode"""
self.epoch = 0
"""the current epoch"""
self.using_physical_coord = opt_mermaid[('using_physical_coord',False,'use physical coordinate system')]
"""'use physical coordinate system"""
self.loss_type = opt['tsk_set']['loss'][('type','lncc',"the similarity measure type, support list: 'l1','mse','ncc','lncc'")]
"""the similarity measure supported by the mermaid: 'ssd','ncc','ncc_positive','ncc_negative', 'lncc', 'omt'"""
self.compute_inverse_map = opt['tsk_set']['reg'][('compute_inverse_map', False,"compute the inverse transformation map")]
"""compute the inverse transformation map"""
self.mermaid_net_json_pth = opt_mermaid[('mermaid_net_json_pth','',"the path for mermaid settings json")]
"""the path for mermaid settings json"""
self.sym_factor = opt_mermaid[('sym_factor',500,'factor on symmetric loss')]
"""factor on symmetric loss"""
self.epoch_activate_sym = opt_mermaid[('epoch_activate_sym',-1,'epoch activate the symmetric loss')]
"""epoch activate the symmetric loss"""
self.epoch_activate_multi_step = opt_mermaid[('epoch_activate_multi_step',-1,'epoch activate the multi-step')]
"""epoch activate the multi-step"""
self.reset_lr_for_multi_step = opt_mermaid[('reset_lr_for_multi_step',False,'if True, reset learning rate when multi-step begins')]
"""if True, reset learning rate when multi-step begins"""
self.lr_for_multi_step = opt_mermaid[('lr_for_multi_step',opt['tsk_set']['optim']['lr']/2,'if reset_lr_for_multi_step, reset learning rate when multi-step begins')]
"""if reset_lr_for_multi_step, reset learning rate when multi-step begins"""
        self.multi_step = opt_mermaid[('num_step',2,'number of steps in the multi-step mode')]
        """number of steps in the multi-step mode"""
        self.using_affine_init = opt_mermaid[('using_affine_init',True,'if true, deploy an affine network before mermaid-net')]
        """if true, deploy an affine network before mermaid-net"""
self.load_trained_affine_net = opt_mermaid[('load_trained_affine_net',True,'if true load_trained_affine_net; if false, the affine network is not initialized')]
"""if true load_trained_affine_net; if false, the affine network is not initialized"""
        self.affine_init_path = opt_mermaid[('affine_init_path','',"the path of the trained affine network")]
        """the path of the trained affine network"""
self.affine_resoltuion = opt_mermaid[('affine_resoltuion',[-1,-1,-1],"the image resolution input for affine")]
self.affine_refine_step = opt_mermaid[('affine_refine_step', 5, "the multi-step num in affine refinement")]
"""the multi-step num in affine refinement"""
self.optimize_momentum_network = opt_mermaid[('optimize_momentum_network',True,'if true, optimize the momentum network')]
"""if true optimize the momentum network"""
        self.epoch_list_fixed_momentum_network = opt_mermaid[('epoch_list_fixed_momentum_network',[-1],'list of epochs during which the momentum network is fixed')]
        """list of epochs during which the momentum network is fixed"""
        self.epoch_list_fixed_deep_smoother_network = opt_mermaid[('epoch_list_fixed_deep_smoother_network',[-1],'list of epochs during which the deep smoother network is fixed')]
        """list of epochs during which the deep smoother network is fixed"""
self.clamp_momentum = opt_mermaid[('clamp_momentum',False,'clamp_momentum')]
"""if true, clamp_momentum"""
self.clamp_thre =opt_mermaid[('clamp_thre',1.0,'clamp momentum into [-clamp_thre, clamp_thre]')]
"""clamp momentum into [-clamp_thre, clamp_thre]"""
self.use_adaptive_smoother = False
self.print_loss_every_n_iter = 10 if self.is_train else 1
self.using_sym_on = True if self.is_train else False
if self.clamp_momentum:
print("Attention, the clamp momentum is on")
##### TODO the sigma also need to be set like sqrt(batch_sz) ##########
batch_sz = batch_sz if not self.using_sym_on else batch_sz*2
self.img_sz = [batch_sz, 1] + img_sz
self.affine_resoltuion = [batch_sz, 1]+ self.affine_resoltuion
self.dim = len(img_sz)
self.standard_spacing = 1. / (np.array(img_sz) - 1)
""" here we define the standard spacing measures the image coord from 0 to 1"""
spacing_to_refer = opt['dataset'][('spacing_to_refer',[1, 1, 1],'the physical spacing in numpy coordinate, only activate when using_physical_coord is true')]
self.spacing = normalize_spacing(spacing_to_refer, img_sz) if self.using_physical_coord else 1. / (
np.array(img_sz) - 1)
self.spacing = normalize_spacing(self.spacing, self.input_img_sz) if self.using_physical_coord else self.spacing
self.spacing = np.array(self.spacing) if type(self.spacing) is not np.ndarray else self.spacing
self.low_res_factor = low_res_factor
self.momentum_net = MomentumNet(low_res_factor,opt_mermaid)
if self.using_affine_init:
self.init_affine_net(opt)
else:
print("Attention, the affine net is not used")
self.mermaid_unit_st = None
self.init_mermaid_env()
self.print_count = 0
self.print_every_epoch_flag = True
self.n_batch = -1
self.inverse_map = None
def check_if_update_lr(self):
"""
        check if the learning rate needs to be updated; in mermaid net this is used to adjust the lr for multi-step training
        :return: if the lr should be updated, return True and the new lr, else return False and None
"""
if self.epoch == self.epoch_activate_multi_step and self.reset_lr_for_multi_step:
lr = self.lr_for_multi_step
self.reset_lr_for_multi_step = False
print("the lr is change into {} due to the activation of the multi-step".format(lr))
return True, lr
else:
return False, None
def init_affine_net(self,opt):
"""
        initialize the affine network; if an affine_init_path is given, load the affine model from that path.
:param opt: ParameterDict, task setting
:return:
"""
self.affine_net = AffineNetSym(self.img_sz[2:],opt)
self.affine_param = None
self.affine_net.compute_loss = False
        self.affine_net.epoch_activate_sym = 1e7 # todo: fix this unnatural setting
self.affine_net.set_step(self.affine_refine_step)
model_path = self.affine_init_path
if self.load_trained_affine_net and self.is_train:
checkpoint = torch.load(model_path, map_location='cpu')
self.affine_net.load_state_dict(checkpoint['state_dict'])
self.affine_net.cuda()
print("Affine model is initialized!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
else:
print("The Affine model is added, but not initialized, this should only take place when a complete checkpoint (including affine model) will be loaded")
self.affine_net.eval()
def set_cur_epoch(self,epoch=-1):
"""
set current epoch
:param epoch:
:return:
"""
if self.epoch !=epoch+1:
self.print_every_epoch_flag=True
self.epoch = epoch+1
def set_loss_fn(self, loss_fn):
"""
set loss function (disabled)
:param loss_fn:
:return:
"""
pass
def save_cur_mermaid_settings(self,params):
"""
        save the mermaid settings into the task record folder
:param params:
:return:
"""
saving_path = os.path.join(self.record_path,'nonp_setting.json')
params.write_JSON(saving_path, save_int=False)
params.write_JSON_comments(saving_path.replace('.json','_comment.json'))
def init_mermaid_env(self):
"""
        set up the mermaid environment
        * save the settings into the record folder
        * initialize the model, criterion and related variables
"""
spacing = self.spacing
params = pars.ParameterDict()
params.load_JSON( self.mermaid_net_json_pth) #''../easyreg/cur_settings_svf.json')
print(" The mermaid setting from {} included:".format(self.mermaid_net_json_pth))
print(params)
model_name = params['model']['registration_model']['type']
use_map = params['model']['deformation']['use_map']
compute_similarity_measure_at_low_res = params['model']['deformation'][
('compute_similarity_measure_at_low_res', False, 'to compute Sim at lower resolution')]
params['model']['registration_model']['similarity_measure']['type'] =self.loss_type
params.print_settings_off()
self.mermaid_low_res_factor = self.low_res_factor
smoother_type = params['model']['registration_model']['forward_model']['smoother']['type']
self.use_adaptive_smoother =smoother_type=='learned_multiGaussianCombination'
lowResSize = None
lowResSpacing = None
##
if self.mermaid_low_res_factor == 1.0 or self.mermaid_low_res_factor == [1., 1., 1.]:
self.mermaid_low_res_factor = None
self.lowResSize = self.img_sz
self.lowResSpacing = spacing
##
if self.mermaid_low_res_factor is not None:
lowResSize = get_res_size_from_size(self.img_sz, self.mermaid_low_res_factor)
lowResSpacing = get_res_spacing_from_spacing(spacing, self.img_sz, lowResSize)
self.lowResSize = lowResSize
self.lowResSpacing = lowResSpacing
if self.mermaid_low_res_factor is not None:
# computes model at a lower resolution than the image similarity
if compute_similarity_measure_at_low_res:
mf = py_mf.ModelFactory(lowResSize, lowResSpacing, lowResSize, lowResSpacing)
else:
mf = py_mf.ModelFactory(self.img_sz, spacing, lowResSize, lowResSpacing)
else:
# computes model and similarity at the same resolution
mf = py_mf.ModelFactory(self.img_sz, spacing, self.img_sz, spacing)
model, criterion = mf.create_registration_model(model_name, params['model'], compute_inverse_map=self.compute_inverse_map)
if use_map:
# create the identity map [0,1]^d, since we will use a map-based implementation
_id = py_utils.identity_map_multiN(self.img_sz, spacing)
self.identityMap = torch.from_numpy(_id).cuda()
if self.mermaid_low_res_factor is not None:
# create a lower resolution map for the computations
lowres_id = py_utils.identity_map_multiN(lowResSize, lowResSpacing)
self.lowResIdentityMap = torch.from_numpy(lowres_id).cuda()
resize_affine_input = all([sz != -1 for sz in self.affine_resoltuion[2:]])
if resize_affine_input:
self.affine_spacing = get_res_spacing_from_spacing(spacing, self.img_sz, self.affine_resoltuion)
affine_id = py_utils.identity_map_multiN(self.affine_resoltuion, self.affine_spacing)
self.affineIdentityMap = torch.from_numpy(affine_id).cuda()
self.lowRes_fn = partial(get_resampled_image, spacing=spacing, desiredSize=lowResSize, zero_boundary=False,identity_map=self.lowResIdentityMap)
self.mermaid_unit_st = model.cuda()
self.criterion = criterion
self.mermaid_unit_st.associate_parameters_with_module()
self.save_cur_mermaid_settings(params)
def get_loss(self):
"""
get the overall loss
:return:
"""
return self.overall_loss
def __cal_sym_loss(self,rec_phiWarped):
"""
        compute the symmetric loss
        :math:`loss_{sym} = \|(\varphi^{st})^{-1} \circ (\varphi^{ts})^{-1} - id\|_{2}^{2}`
        :param rec_phiWarped: the transformation map, including both directions (s-t and t-s stacked in the batch dimension)
        :return: mean(:math:`loss_{sym}`)
"""
trans1 = STNFunction_ND_BCXYZ(self.spacing,zero_boundary=False)
trans2 = STNFunction_ND_BCXYZ(self.spacing,zero_boundary=False)
st_map = rec_phiWarped[:self.n_batch]
ts_map = rec_phiWarped[self.n_batch:]
identity_map = self.identityMap[0:self.n_batch]
trans_st = trans1(identity_map,st_map)
trans_st_ts = trans2(trans_st,ts_map)
return torch.mean((identity_map- trans_st_ts)**2)
def do_criterion_cal(self, ISource, ITarget,cur_epoch=-1):
"""
get the loss according to mermaid criterion
:param ISource: Source image with full size
:param ITarget: Target image with full size
:param cur_epoch: current epoch
        :return: overall loss (includes sim, reg, and optionally sym), similarity loss and the regularization loss
"""
        # todo: the image does not necessarily need to be normalized to [0,1] here; keeping [-1,1] would be fine
ISource = (ISource + 1.) / 2.
ITarget = (ITarget + 1.) / 2.
loss_overall_energy, sim_energy, reg_energy = self.criterion(self.identityMap, self.rec_phiWarped, ISource,
ITarget, self.low_moving,
self.mermaid_unit_st.get_variables_to_transfer_to_loss_function(),
None)
if not self.using_sym_on:
if self.print_count % self.print_loss_every_n_iter == 0 and cur_epoch>=0:
print('the loss_over_all:{} sim_energy:{}, reg_energy:{}'.format(loss_overall_energy.item(),
sim_energy.item(),
reg_energy.item()))
else:
sym_energy = self.__cal_sym_loss(self.rec_phiWarped)
sym_factor = self.sym_factor # min(sigmoid_explode(cur_epoch,static=1, k=8)*0.01*gl_sym_factor,1.*gl_sym_factor) #static=5, k=4)*0.01,1) static=10, k=10)*0.01
loss_overall_energy = loss_overall_energy + sym_factor * sym_energy
if self.print_count % self.print_loss_every_n_iter == 0 and cur_epoch >= 0:
print('the loss_over_all:{} sim_energy:{},sym_factor: {} sym_energy: {} reg_energy:{}'.format(
loss_overall_energy.item(),
sim_energy.item(),
sym_factor,
sym_energy.item(),
reg_energy.item()))
if self.step_loss is not None:
self.step_loss += loss_overall_energy
loss_overall_energy = self.step_loss
if self.cur_step<self.step-1:
self.print_count -= 1
self.print_count += 1
return loss_overall_energy, sim_energy, reg_energy
def set_mermaid_param(self,mermaid_unit,criterion, s, t, m,s_full=None):
"""
        set variables that need to be passed into the mermaid model and the mermaid criterion
:param mermaid_unit: model created by mermaid
:param criterion: criterion create by mermaid
:param s: source image (can be downsampled)
:param t: target image (can be downsampled)
:param m: momentum (can be downsampled)
:param s_full: full resolution image ( to get better sampling results)
:return:
"""
mermaid_unit.set_dictionary_to_pass_to_integrator({'I0': s, 'I1': t,'I0_full':s_full})
criterion.set_dictionary_to_pass_to_smoother({'I0': s, 'I1': t,'I0_full':s_full})
mermaid_unit.m = m
criterion.m = m
def __freeze_param(self,params):
"""
        freeze the given parameters during training
        :param params: the parameters to be frozen
:return:
"""
for param in params:
param.requires_grad = False
def __active_param(self,params):
"""
        activate the frozen parameters
:param params: the parameters to be activated
:return:
"""
for param in params:
param.requires_grad = True
def get_inverse_map(self,use_01=False):
"""
get the inverse map
        :param use_01: if true, get the map in [0,1], else in [-1,1]
:return: the inverse map
"""
if use_01 or self.inverse_map is None:
return self.inverse_map
else:
return self.inverse_map*2-1
def init_mermaid_param(self,s):
"""
initialize the mermaid parameters
:param s: source image taken as adaptive smoother input
:return:
"""
if self.use_adaptive_smoother:
if self.epoch in self.epoch_list_fixed_deep_smoother_network:
#self.mermaid_unit_st.smoother._enable_force_nn_gradients_to_zero_hooks()
self.__freeze_param(self.mermaid_unit_st.smoother.ws.parameters())
else:
self.__active_param(self.mermaid_unit_st.smoother.ws.parameters())
if self.mermaid_low_res_factor is not None:
if s.shape[0]==self.lowResIdentityMap.shape[0]:
low_s= get_resampled_image(s, self.spacing, self.lowResSize, 1, zero_boundary=True, identity_map=self.lowResIdentityMap)
else:
n_batch = s.shape[0]
lowResSize = self.lowResSize.copy()
lowResSize[0] = n_batch
low_s = get_resampled_image(s, self.spacing, lowResSize, 1, zero_boundary=True,
identity_map=self.lowResIdentityMap[0:n_batch])
return low_s
else:
return None
def do_mermaid_reg(self,mermaid_unit,criterion, s, t, m, phi,low_s=None,low_t=None,inv_map=None):
"""
        perform the mermaid registration unit
:param s: source image
:param t: target image
:param m: initial momentum
:param phi: initial deformation field
:param low_s: downsampled source
:param low_t: downsampled target
        :param inv_map: the inverse map
:return: warped image, transformation map
"""
if self.mermaid_low_res_factor is not None:
self.set_mermaid_param(mermaid_unit,criterion,low_s, low_t, m,s)
if not self.compute_inverse_map:
maps = mermaid_unit(self.lowRes_fn(phi), low_s, variables_from_optimizer={'epoch':self.epoch})
else:
maps, inverse_maps = mermaid_unit(self.lowRes_fn(phi), low_s,phi_inv=self.lowRes_fn(inv_map), variables_from_optimizer={'epoch':self.epoch})
desiredSz = self.img_sz
rec_phiWarped = get_resampled_image(maps, self.lowResSpacing, desiredSz, 1,zero_boundary=False,identity_map=self.identityMap)
if self.compute_inverse_map:
self.inverse_map = get_resampled_image(inverse_maps, self.lowResSpacing, desiredSz, 1,
zero_boundary=False,identity_map=self.identityMap)
else:
self.set_mermaid_param(mermaid_unit,criterion,s, t, m,s)
if not self.compute_inverse_map:
maps = mermaid_unit(phi, s, variables_from_optimizer={'epoch':self.epoch})
else:
maps, self.inverse_map = mermaid_unit(phi, s,phi_inv = inv_map, variables_from_optimizer = {'epoch': self.epoch})
rec_phiWarped = maps
rec_IWarped = py_utils.compute_warped_image_multiNC(s, rec_phiWarped, self.spacing, 1,zero_boundary=True)
self.rec_phiWarped = rec_phiWarped
return rec_IWarped, rec_phiWarped
def __get_momentum(self):
momentum = self.mermaid_unit_st.m[:self.n_batch]
return momentum
def __get_adaptive_smoother_map(self):
"""
        get the adaptive smoother weight map from the spatially-variant regularizer model
        supported weighting types: 'sqrt_w_K_sqrt_w' and 'w_K_w'
        for weighting type 'w_K_w'
        :math:`\sigma^{2}(x)=\sum_{i=0}^{N-1} w^2_{i}(x) \sigma_{i}^{2}`
        for weighting type 'sqrt_w_K_sqrt_w'
        :math:`\sigma^{2}(x)=\sum_{i=0}^{N-1} w_{i}(x) \sigma_{i}^{2}`
        :return: adaptive smoother weight map :math:`\sigma`
"""
adaptive_smoother_map = self.mermaid_unit_st.smoother.get_deep_smoother_weights()
weighting_type = self.mermaid_unit_st.smoother.weighting_type
if not self.using_sym_on:
adaptive_smoother_map = adaptive_smoother_map.detach()
else:
adaptive_smoother_map = adaptive_smoother_map[:self.n_batch].detach()
gaussian_weights = self.mermaid_unit_st.smoother.get_gaussian_weights()
gaussian_weights = gaussian_weights.detach()
print(" the current global gaussian weight is {}".format(gaussian_weights))
gaussian_stds = self.mermaid_unit_st.smoother.get_gaussian_stds()
gaussian_stds = gaussian_stds.detach()
print(" the current global gaussian stds is {}".format(gaussian_stds))
        view_sz = [1] + [len(gaussian_stds)] + [1] * self.dim
gaussian_stds = gaussian_stds.view(*view_sz)
if weighting_type == 'w_K_w':
adaptive_smoother_map = adaptive_smoother_map**2 # todo add if judgement, this is true only when we use w_K_W
smoother_map = adaptive_smoother_map*(gaussian_stds**2)
smoother_map = torch.sqrt(torch.sum(smoother_map,1,keepdim=True))
#_,smoother_map = torch.max(adaptive_smoother_map.detach(),dim=1,keepdim=True)
self._display_stats(smoother_map.float(),'statistic for max_smoother map')
return smoother_map
def _display_stats(self, Ia, iname):
"""
        statistical analysis of a variable: print min, mean, max and std
:param Ia: the input variable
:param iname: variable name
:return:
"""
Ia_min = Ia.min().detach().cpu().numpy()
Ia_max = Ia.max().detach().cpu().numpy()
Ia_mean = Ia.mean().detach().cpu().numpy()
Ia_std = Ia.std().detach().cpu().numpy()
print('{}:after: [{:.2f},{:.2f},{:.2f}]({:.2f})'.format(iname, Ia_min,Ia_mean,Ia_max,Ia_std))
def get_extra_to_plot(self):
"""
plot extra image, i.e. the initial weight map of rdmm model
:return: extra image, name
"""
if self.use_adaptive_smoother:
# the last step adaptive smoother is returned, todo add the first stage smoother
            return self.__get_adaptive_smoother_map(), 'Initial_weight'
else:
return self.__get_momentum(), "Momentum"
def __transfer_return_var(self,rec_IWarped,rec_phiWarped,affine_img):
"""
        normalize the image into [0,1] and the map into [-1,1]
:param rec_IWarped: warped image
:param rec_phiWarped: transformation map
:param affine_img: affine image
:return:
"""
return (rec_IWarped).detach(), (rec_phiWarped * 2. - 1.).detach(), ((affine_img+1.)/2.).detach()
def affine_forward(self,moving, target=None):
if self.using_affine_init:
with torch.no_grad():
toaffine_moving, toaffine_target = moving, target
resize_affine_input = all([sz != -1 for sz in self.affine_resoltuion[2:]])
if resize_affine_input:
toaffine_moving = get_resampled_image(toaffine_moving, self.spacing, self.affine_resoltuion, identity_map=self.affineIdentityMap)
toaffine_target = get_resampled_image(toaffine_target, self.spacing, self.affine_resoltuion, identity_map=self.affineIdentityMap)
affine_img, affine_map, affine_param = self.affine_net(toaffine_moving, toaffine_target)
self.affine_param = affine_param
affine_map = (affine_map + 1) / 2.
inverse_map = None
if self.compute_inverse_map:
inverse_map = self.affine_net.get_inverse_map(use_01=True)
if resize_affine_input:
affine_img = py_utils.compute_warped_image_multiNC(moving, affine_map, self.spacing, 1,
zero_boundary=True, use_01_input=True)
if self.using_physical_coord:
for i in range(self.dim):
affine_map[:, i] = affine_map[:, i] * self.spacing[i] / self.standard_spacing[i]
if self.compute_inverse_map:
for i in range(self.dim):
inverse_map[:, i] = inverse_map[:, i] * self.spacing[i] / self.standard_spacing[i]
self.inverse_map = inverse_map
else:
num_b = moving.shape[0]
affine_map = self.identityMap[:num_b].clone()
if self.compute_inverse_map:
self.inverse_map = self.identityMap[:num_b].clone()
affine_img = moving
return affine_img, affine_map
def single_forward(self, moving, target=None):
"""
single step mermaid registration
:param moving: moving image with intensity [-1,1]
:param target: target image with intensity [-1,1]
:return: warped image with intensity[0,1], transformation map [-1,1], affined image [0,1] (if no affine trans used, return moving)
"""
affine_img, affine_map = self.affine_forward(moving,target)
record_is_grad_enabled = torch.is_grad_enabled()
if not self.optimize_momentum_network or self.epoch in self.epoch_list_fixed_momentum_network:
torch.set_grad_enabled(False)
if self.print_every_epoch_flag:
if self.epoch in self.epoch_list_fixed_momentum_network:
print("In this epoch, the momentum network is fixed")
if self.epoch in self.epoch_list_fixed_deep_smoother_network:
print("In this epoch, the deep smoother deep network is fixed")
self.print_every_epoch_flag = False
input = torch.cat((affine_img, target), 1)
m = self.momentum_net(input)
if self.clamp_momentum:
m=m.clamp(max=self.clamp_thre,min=-self.clamp_thre)
moving = (moving + 1) / 2.
target = (target + 1) / 2.
self.low_moving = self.init_mermaid_param(moving)
self.low_target = self.init_mermaid_param(target)
torch.set_grad_enabled(record_is_grad_enabled)
rec_IWarped, rec_phiWarped = self.do_mermaid_reg(self.mermaid_unit_st,self.criterion,moving, target, m, affine_map,self.low_moving, self.low_target,self.inverse_map)
self.rec_phiWarped = rec_phiWarped
if self.using_physical_coord:
rec_phiWarped_tmp = rec_phiWarped.detach().clone()
for i in range(self.dim):
rec_phiWarped_tmp[:, i] = rec_phiWarped[:, i] * self.standard_spacing[i] / self.spacing[i]
rec_phiWarped = rec_phiWarped_tmp
self.overall_loss,_,_= self.do_criterion_cal(moving, target, cur_epoch=self.epoch)
return self.__transfer_return_var(rec_IWarped, rec_phiWarped, affine_img)
def sym_forward(self, moving, target=None):
"""
symmetric single step mermaid registration
the "source" is concatenated by source and target, the "target" is concatenated by target and source
then the single_forward is called
:param moving: moving image with intensity [-1,1]
:param target: target image with intensity [-1,1]
:return: warped image with intensity[0,1], transformation map [-1,1], affined image [0,1] (if no affine trans used, return moving)
"""
self.n_batch = moving.shape[0]
moving_sym = torch.cat((moving, target), 0)
target_sym = torch.cat((target, moving), 0)
rec_IWarped_st, rec_phiWarped_st, affine_img_st = self.single_forward(moving_sym, target_sym)
return rec_IWarped_st[:self.n_batch],rec_phiWarped_st[:self.n_batch], affine_img_st[:self.n_batch]
def mutli_step_forward(self, moving,target=None):
"""
        multi-step mermaid registration
:param moving: moving image with intensity [-1,1]
:param target: target image with intensity [-1,1]
:return: warped image with intensity[0,1], transformation map [-1,1], affined image [0,1] (if no affine trans used, return moving)
"""
self.step_loss = None
affine_img, affine_map = self.affine_forward(moving,target)
warped_img = affine_img
init_map = affine_map
rec_IWarped = None
rec_phiWarped = None
moving_n = (moving + 1) / 2. # [-1,1] ->[0,1]
target_n = (target + 1) / 2. # [-1,1] ->[0,1]
self.low_moving = self.init_mermaid_param(moving_n)
self.low_target = self.init_mermaid_param(target_n)
for i in range(self.step):
self.cur_step = i
record_is_grad_enabled = torch.is_grad_enabled()
if not self.optimize_momentum_network or self.epoch in self.epoch_list_fixed_momentum_network:
torch.set_grad_enabled(False)
if self.print_every_epoch_flag:
if self.epoch in self.epoch_list_fixed_momentum_network:
print("In this epoch, the momentum network is fixed")
if self.epoch in self.epoch_list_fixed_deep_smoother_network:
print("In this epoch, the deep regularizer network is fixed")
self.print_every_epoch_flag = False
input = torch.cat((warped_img, target), 1)
m = self.momentum_net(input)
if self.clamp_momentum:
m=m.clamp(max=self.clamp_thre,min=-self.clamp_thre)
torch.set_grad_enabled(record_is_grad_enabled)
rec_IWarped, rec_phiWarped = self.do_mermaid_reg(self.mermaid_unit_st,self.criterion,moving_n, target_n, m, init_map,self.low_moving, self.low_target, self.inverse_map)
warped_img = rec_IWarped * 2 - 1 # [0,1] -> [-1,1]
init_map = rec_phiWarped # [0,1]
self.rec_phiWarped = rec_phiWarped
if i < self.step - 1:
self.step_loss, _, _ = self.do_criterion_cal(moving, target, self.epoch)
if self.using_physical_coord:
rec_phiWarped_tmp = rec_phiWarped.detach().clone()
for i in range(self.dim):
rec_phiWarped_tmp[:, i] = rec_phiWarped[:, i] * self.standard_spacing[i] / self.spacing[i]
rec_phiWarped = rec_phiWarped_tmp
self.overall_loss,_,_= self.do_criterion_cal(moving, target, cur_epoch=self.epoch)
return self.__transfer_return_var(rec_IWarped, rec_phiWarped, affine_img)
def mutli_step_sym_forward(self,moving, target= None):
"""
symmetric multi-step mermaid registration
the "source" is concatenated by source and target, the "target" is concatenated by target and source
then the multi-step forward is called
:param moving: moving image with intensity [-1,1]
:param target: target image with intensity [-1,1]
:return: warped image with intensity[0,1], transformation map [-1,1], affined image [0,1] (if no affine trans used, return moving)
"""
moving_sym = torch.cat((moving, target), 0)
target_sym = torch.cat((target, moving), 0)
rec_IWarped, rec_phiWarped, affine_img = self.mutli_step_forward(moving_sym, target_sym)
return rec_IWarped[:self.n_batch], rec_phiWarped[:self.n_batch], affine_img[:self.n_batch]
def get_affine_map(self,moving, target):
"""
compute affine map from the affine registration network
:param moving: moving image [-1, 1]
:param target: target image [-1, 1]
        :return: affine transformation map
"""
with torch.no_grad():
affine_img, affine_map, _ = self.affine_net(moving, target)
return affine_map
def get_step_config(self):
"""
        check if the multi-step / symmetric forward should be activated
:return:
"""
if self.is_train:
self.step = self.multi_step if self.epoch > self.epoch_activate_multi_step else 1
self.using_sym_on = True if self.epoch> self.epoch_activate_sym else False
else:
self.step = self.multi_step
self.using_sym_on = False
def forward(self, moving, target, moving_mask=None, target_mask=None):
"""
forward the mermaid registration model
:param moving: moving image intensity normalized in [-1,1]
:param target: target image intensity normalized in [-1,1]
:return: warped image with intensity[0,1], transformation map [-1,1], affined image [0,1] (if no affine trans used, return moving)
"""
self.get_step_config()
self.n_batch = moving.shape[0]
if self.using_sym_on:
if not self.print_count:
print(" The mermaid network is in multi-step and symmetric mode, with step {}".format(self.step))
return self.mutli_step_sym_forward(moving,target)
else:
if not self.print_count:
print(" The mermaid network is in multi-step mode, with step {}".format(self.step))
return self.mutli_step_forward(moving, target)
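# --- Illustrative sketch (not part of MermaidNet) ----------------------------------
# The class above repeatedly converts between the network convention ([-1, 1] for
# intensities and maps) and the mermaid convention ([0, 1]), and defines a "standard
# spacing" of 1/(size-1) for a [0, 1] coordinate grid. A minimal round-trip check of
# those conversions, for clarity only; the sizes below are made up.
if __name__ == "__main__":
    import numpy as np

    x_net = np.linspace(-1., 1., 5)      # network convention [-1, 1]
    x_mermaid = (x_net + 1.) / 2.        # mermaid convention [0, 1]
    assert np.allclose(x_net, x_mermaid * 2. - 1.)

    img_sz = [80, 192, 192]              # example X, Y, Z size
    standard_spacing = 1. / (np.array(img_sz) - 1)
    print(x_mermaid, standard_spacing)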
|
1692964
|
from direct.directnotify import DirectNotifyGlobal
from pandac.PandaModules import ModelNode, LODNode, NodePath
from pandac.PandaModules import TextureStage
from pandac.PandaModules import AnimControlCollection, Character, PartSubset
from pirates.ship import ShipGlobals
from pirates.ship import ShipBlueprints
MastSubset = PartSubset()
MastSubset.addExcludeJoint('def_sail*')
MastSubset.addExcludeJoint('def_ladder*')
SailSubset = PartSubset()
SailSubset.addExcludeJoint('transform')
SailSubset.addExcludeJoint('def_mast*')
SailSubset.addExcludeJoint('def_ladder*')
SailSubset.addIncludeJoint('def_sail*')
HitMastSubset = PartSubset()
HitMastSubset.addIncludeJoint('transform')
HitMastSubset.addIncludeJoint('def_ladder*')
HitMastSubset.addIncludeJoint('def_mast*')
HitMastSubset.addExcludeJoint('def_sail*')
MissingAnims = {ShipGlobals.SKEL_WARSHIPL3: ('tiedup', 'rolldown', 'rollup'),ShipGlobals.SKEL_INTERCEPTORL3: ('tiedup', 'rolldown', 'rollup')}
SailReplace = {ShipGlobals.QUEEN_ANNES_REVENGE: 0}
class ShipFactory():
notify = DirectNotifyGlobal.directNotify.newCategory('ShipFactory')
def __init__(self, phasedLoading=False):
self.wantProws = config.GetBool('want-sprits', 0)
self.hulls = {}
self.texInfo = ({}, {}, {})
self.models = {}
self.mastSets = {}
ShipBlueprints.setupWheel()
ShipBlueprints.setupShipTextures()
self.preprocessMast(ShipGlobals.Masts.Main_Tri)
self.preprocessMast(ShipGlobals.Masts.Fore_Tri)
self.preprocessHull(ShipGlobals.INTERCEPTORL1)
self.preprocessMast(ShipGlobals.Masts.Skel_Main_A)
self.preprocessMast(ShipGlobals.Masts.Skel_Main_B)
self.preprocessMast(ShipGlobals.Masts.Skel_Tri)
self.preprocessMast(ShipGlobals.Masts.Skel_Fore)
self.preprocessMast(ShipGlobals.Masts.Skel_Aft)
self.preprocessHull(ShipGlobals.SKEL_INTERCEPTORL3)
self.preprocessHull(ShipGlobals.SKEL_WARSHIPL3)
if not phasedLoading:
self.handlePhase4()
self.handlePhase5()
self.baseLayer = TextureStage('base')
self.colorLayer = TextureStage('color')
self.logoLayer = TextureStage('logo')
self.logoLayerNoColor = TextureStage('logoNoColor')
self.logoLayerNoColorInv = TextureStage('logoNoColorInverse')
self.logoLayerInv = TextureStage('logoInverse')
self.vertLayer = TextureStage('vertex')
self.colorLayer.setSort(1)
self.colorLayer.setCombineRgb(TextureStage.CMReplace, TextureStage.CSTexture, TextureStage.COSrcColor)
self.colorLayer.setCombineAlpha(TextureStage.CMReplace, TextureStage.CSTexture, TextureStage.COSrcAlpha)
self.colorLayer.setTexcoordName('uvColor')
self.logoLayer.setSort(2)
self.logoLayer.setCombineRgb(TextureStage.CMInterpolate, TextureStage.CSTexture, TextureStage.COSrcColor, TextureStage.CSPrevious, TextureStage.COSrcColor, TextureStage.CSTexture, TextureStage.COSrcAlpha)
self.logoLayer.setCombineAlpha(TextureStage.CMReplace, TextureStage.CSPrevious, TextureStage.COSrcAlpha)
self.logoLayer.setTexcoordName('uvLogo')
self.logoLayerInv.setSort(2)
self.logoLayerInv.setCombineRgb(TextureStage.CMInterpolate, TextureStage.CSTexture, TextureStage.COOneMinusSrcColor, TextureStage.CSPrevious, TextureStage.COSrcColor, TextureStage.CSTexture, TextureStage.COSrcAlpha)
self.logoLayerInv.setCombineAlpha(TextureStage.CMReplace, TextureStage.CSPrevious, TextureStage.COSrcAlpha)
self.logoLayerInv.setTexcoordName('uvLogo')
self.logoLayerNoColor.setSort(2)
self.logoLayerNoColor.setCombineRgb(TextureStage.CMInterpolate, TextureStage.CSTexture, TextureStage.COSrcColor, TextureStage.CSConstant, TextureStage.COSrcColor, TextureStage.CSTexture, TextureStage.COSrcAlpha)
self.logoLayerNoColor.setCombineAlpha(TextureStage.CMReplace, TextureStage.CSPrevious, TextureStage.COSrcAlpha)
self.logoLayerNoColor.setTexcoordName('uvLogo')
self.logoLayerNoColor.setColor((1, 1, 1, 1))
self.logoLayerNoColorInv.setSort(2)
self.logoLayerNoColorInv.setCombineRgb(TextureStage.CMInterpolate, TextureStage.CSTexture, TextureStage.COOneMinusSrcColor, TextureStage.CSConstant, TextureStage.COSrcColor, TextureStage.CSTexture, TextureStage.COSrcAlpha)
self.logoLayerNoColorInv.setCombineAlpha(TextureStage.CMReplace, TextureStage.CSPrevious, TextureStage.COSrcAlpha)
self.logoLayerNoColorInv.setTexcoordName('uvLogo')
self.logoLayerNoColorInv.setColor((1, 1, 1, 1))
self.vertLayer.setSort(3)
self.vertLayer.setCombineRgb(TextureStage.CMModulate, TextureStage.CSPrevious, TextureStage.COSrcColor, TextureStage.CSPrimaryColor, TextureStage.COSrcColor)
self.vertLayer.setCombineAlpha(TextureStage.CMReplace, TextureStage.CSPrimaryColor, TextureStage.COSrcAlpha)
self.baseLayer.setSort(4)
self.baseLayer.setCombineRgb(TextureStage.CMModulate, TextureStage.CSTexture, TextureStage.COSrcColor, TextureStage.CSPrevious, TextureStage.COSrcColor)
self.baseLayer.setCombineAlpha(TextureStage.CMModulate, TextureStage.CSTexture, TextureStage.COSrcAlpha, TextureStage.CSPrevious, TextureStage.COSrcAlpha)
def handlePhase4(self):
self.preprocessHull(ShipGlobals.INTERCEPTORL2)
self.preprocessHull(ShipGlobals.INTERCEPTORL3)
self.preprocessMast(ShipGlobals.Masts.Main_Square)
self.preprocessMast(ShipGlobals.Masts.Aft_Tri)
self.preprocessMast(ShipGlobals.Masts.Fore_Multi)
self.preprocessHull(ShipGlobals.WARSHIPL1)
self.preprocessHull(ShipGlobals.WARSHIPL2)
self.preprocessHull(ShipGlobals.WARSHIPL3)
self.preprocessHull(ShipGlobals.MERCHANTL1)
self.preprocessHull(ShipGlobals.MERCHANTL2)
self.preprocessHull(ShipGlobals.MERCHANTL3)
self.preprocessHull(ShipGlobals.BRIGL1)
self.preprocessHull(ShipGlobals.BRIGL2)
self.preprocessHull(ShipGlobals.BRIGL3)
self.preprocessHull(ShipGlobals.QUEEN_ANNES_REVENGE)
self.sprits = ShipBlueprints.preprocessSprits()
def handlePhase5(self):
self.preprocessHull(ShipGlobals.BLACK_PEARL)
self.preprocessHull(ShipGlobals.GOLIATH)
self.preprocessHull(ShipGlobals.SHIP_OF_THE_LINE)
self.preprocessHull(ShipGlobals.EL_PATRONS_SHIP)
def handlePhase6(self):
self.preprocessHull(ShipGlobals.CARRACKL1)
self.preprocessHull(ShipGlobals.CARRACKL2)
self.preprocessHull(ShipGlobals.CARRACKL3)
def preprocessMast(self, mastClass):
self.mastSets[mastClass] = ShipBlueprints.generateMastCache(mastClass)
def preprocessHull(self, modelClass):
self.hulls[modelClass] = ShipBlueprints.generateHullCache(modelClass)
def getHull(self, modelClass, custom):
return self.hulls[modelClass].getHullAsset(custom)
def getShip(self, shipClass, style=ShipGlobals.Styles.Undefined, logo=ShipGlobals.Logos.Undefined, hullDesign=None, detailLevel=2, wantWheel=True, hullMaterial=None, sailMaterial=None, sailPattern=None, prowType=None, invertLogo=False):
from pirates.ship import Ship
modelClass = ShipGlobals.getModelClass(shipClass)
shipConfig = ShipGlobals.getShipConfig(shipClass)
if style == ShipGlobals.Styles.Undefined:
style = shipConfig['defaultStyle']
complexCustomization = 0
if sailPattern or sailMaterial or hullMaterial or SailReplace.has_key(shipClass):
complexCustomization = 1
if not prowType:
prowType = shipConfig['prow']
if not hullMaterial:
hullMaterial = style
if not sailMaterial:
if SailReplace.has_key(shipClass):
sailMaterial = SailReplace[shipClass]
else:
sailMaterial = style
if not sailPattern:
sailPattern = style
shipHullTexture = ShipBlueprints.getShipTexture(hullMaterial)
shipTextureSail = ShipBlueprints.getShipTexture(sailMaterial)
logoTex = None
if logo:
logoTex = ShipBlueprints.getLogoTexture(logo)
sailPatternTex = None
if sailPattern:
sailPatternTex = ShipBlueprints.getSailTexture(sailPattern)
self.notify.debug('%s %s' % (sailPattern, logo))
if logo == ShipGlobals.Logos.Undefined:
logo = shipConfig['sailLogo']
if logo in ShipGlobals.MAST_LOGO_PLACEMENT_LIST:
placeLogos = 1
else:
placeLogos = 0
if modelClass <= ShipGlobals.INTERCEPTORL3:
mastHax = True
else:
mastHax = False
customHull = hullDesign is not None
customMasts = logo != 0 or sailPattern != 0
hull = self.getHull(modelClass, customHull)
breakAnims = {}
metaAnims = {}
hitAnims = {}
root = NodePath('Ship')
hull.locators.reparentTo(root)
charRoot = root.attachNewNode(Character('ShipChar'))
collisions = root.attachNewNode('collisions')
lodNode = charRoot.attachNewNode(LODNode('lod'))
if detailLevel == 0:
lodNode.node().addSwitch(200, 0)
lodNode.node().addSwitch(800, 200)
lodNode.node().addSwitch(100000, 800)
high = lodNode.attachNewNode('high')
low = lodNode.attachNewNode('low')
med = NodePath('med')
superlow = lodNode.attachNewNode('superlow')
else:
if detailLevel == 1:
lodNode.node().addSwitch(300, 0)
lodNode.node().addSwitch(1000, 300)
lodNode.node().addSwitch(2000, 1000)
lodNode.node().addSwitch(100000, 2000)
high = lodNode.attachNewNode('high')
med = lodNode.attachNewNode('med')
low = lodNode.attachNewNode('low')
superlow = lodNode.attachNewNode('superlow')
else:
lodNode.node().addSwitch(750, 0)
lodNode.node().addSwitch(3000, 750)
lodNode.node().addSwitch(8000, 3000)
lodNode.node().addSwitch(100000, 8000)
high = lodNode.attachNewNode('high')
med = lodNode.attachNewNode('med')
low = lodNode.attachNewNode('low')
superlow = lodNode.attachNewNode('superlow')
mastSetup = ShipGlobals.getMastSetup(shipClass)
for data in [(0, 'location_mainmast_0'), (1, 'location_mainmast_1'), (2, 'location_mainmast_2'), (3, 'location_aftmast*'), (4, 'location_foremast*')]:
mastData = mastSetup.get(data[0])
if mastData:
mast = self.mastSets[mastData[0]].getMastSet(mastData[1] - 1, customMasts)
mastRoot = hull.locators.find('**/%s' % data[1]).getTransform(hull.locators)
model = NodePath(mast.charRoot)
model.setTransform(mastRoot)
if complexCustomization:
model.setTexture(shipTextureSail)
useLogoTex = logoTex
if placeLogos:
mastNum = data[0]
if mastNum not in ShipGlobals.MAST_LOGO_PLACEMENT.get(modelClass):
useLogoTex = None
charBundle = mast.charRoot.getBundle(0)
if data[0] < 3:
for side in ['left', 'right']:
ropeNode = hull.locators.find('**/location_ropeLadder_%s_%s' % (side, data[0]))
if ropeNode:
transform = ropeNode.getTransform(NodePath(mast.charRoot))
charBundle.findChild('def_ladder_0_%s' % side).applyFreeze(transform)
if sailPatternTex and useLogoTex:
for node in model.findAllMatches('**/sails'):
node.setTextureOff(TextureStage.getDefault())
node.setTexture(self.colorLayer, sailPatternTex)
if invertLogo:
node.setTexture(self.logoLayerInv, logoTex)
else:
node.setTexture(self.logoLayer, logoTex)
node.setTexture(self.vertLayer, shipTextureSail)
node.setTexture(self.baseLayer, shipTextureSail)
else:
if sailPatternTex:
for node in model.findAllMatches('**/sails'):
node.setTextureOff(TextureStage.getDefault())
node.setTexture(self.colorLayer, sailPatternTex)
node.setTexture(self.vertLayer, shipTextureSail)
node.setTexture(self.baseLayer, shipTextureSail)
elif useLogoTex:
for node in model.findAllMatches('**/sails'):
node.setTextureOff(TextureStage.getDefault())
if invertLogo:
node.setTexture(self.logoLayerNoColorInv, logoTex)
else:
node.setTexture(self.logoLayerNoColor, logoTex)
node.setTexture(self.vertLayer, shipTextureSail)
node.setTexture(self.baseLayer, shipTextureSail)
model.flattenLight()
if detailLevel == 0:
model.find('**/low').copyTo(high)
model.find('**/low').copyTo(low)
model.find('**/superlow').copyTo(superlow)
else:
if detailLevel == 1:
model.find('**/med').copyTo(high)
model.find('**/med').copyTo(med)
low.node().stealChildren(model.find('**/low').node())
superlow.node().stealChildren(model.find('**/superlow').node())
elif detailLevel == 2:
high.node().stealChildren(model.find('**/high').node())
med.node().stealChildren(model.find('**/med').node())
low.node().stealChildren(model.find('**/low').node())
superlow.node().stealChildren(model.find('**/superlow').node())
mastRoot = mast.collisions.find('**/collision_masts')
if modelClass > ShipGlobals.INTERCEPTORL3 or data[0] != 3:
mastCode = str(data[0])
mastRoot.setTag('Mast Code', mastCode)
else:
mastRoot.setName('colldision_sub_mast')
mastRoot.reparentTo(collisions.find('**/collision_masts'))
mastCode = '0'
for coll in mast.collisions.findAllMatches('**/collision_sail_*'):
coll.setName('Sail-%s' % data[0])
coll.setTag('Mast Code', mastCode)
for coll in mast.collisions.findAllMatches('**/sail_*'):
coll.setName('Sail-%s' % data[0])
coll.setTag('Mast Code', mastCode)
collisions.node().stealChildren(mast.collisions.node())
charBundle = mast.charRoot.getBundle(0)
if mastHax and data[0] == 3:
breakAnims[0][0].storeAnim(charBundle.loadBindAnim(loader.loader, mast.breakAnim[0], -1, MastSubset, True), '1')
breakAnims[0][1].storeAnim(charBundle.loadBindAnim(loader.loader, mast.breakAnim[1], -1, MastSubset, True), '1')
tempHit = hitAnims[0]
tempHit[0].storeAnim(charBundle.loadBindAnim(loader.loader, mast.hitAnim, -1, HitMastSubset, True), '1')
tempHit[1].storeAnim(charBundle.loadBindAnim(loader.loader, mast.hitAnim, -1, PartSubset(), True), '1')
else:
breakAnims[data[0]] = (
AnimControlCollection(), AnimControlCollection())
breakAnims[data[0]][0].storeAnim(charBundle.loadBindAnim(loader.loader, mast.breakAnim[0], -1, MastSubset, True), '0')
breakAnims[data[0]][1].storeAnim(charBundle.loadBindAnim(loader.loader, mast.breakAnim[1], -1, MastSubset, True), '0')
tempHit = [AnimControlCollection(), AnimControlCollection()]
tempHit[0].storeAnim(charBundle.loadBindAnim(loader.loader, mast.hitAnim, -1, HitMastSubset, True), '0')
tempHit[1].storeAnim(charBundle.loadBindAnim(loader.loader, mast.hitAnim, -1, PartSubset(), True), '0')
hitAnims[data[0]] = tempHit
for anim, fileName in mast.metaAnims.iteritems():
if anim not in metaAnims:
metaAnims[anim] = AnimControlCollection()
if anim not in MissingAnims.get(modelClass, []):
ac = charBundle.loadBindAnim(loader.loader, fileName, -1, SailSubset, True)
if ac:
metaAnims[anim].storeAnim(ac, str(metaAnims[anim].getNumAnims()))
charRoot.node().combineWith(mast.charRoot)
if self.wantProws and prowType:
highSprit, medSprit, lowSprit = self.sprits[prowType].getAsset()
transform = hull.locators.find('**/location_bowsprit').getTransform(hull.locators)
highSprit.setTransform(transform)
medSprit.setTransform(transform)
lowSprit.setTransform(transform)
highSprit.reparentTo(hull.geoms[0])
medSprit.reparentTo(hull.geoms[1])
lowSprit.reparentTo(hull.geoms[2])
if wantWheel:
shipWheel = ShipBlueprints.getWheel()
wheelPoint = hull.locators.find('**/location_wheel;+s').getTransform(hull.locators)
shipWheel.setTransform(wheelPoint)
shipWheel.flattenLight()
shipWheel.find('**/collisions').copyTo(collisions)
hull.geoms[0].node().stealChildren(shipWheel.find('**/high').node())
hull.geoms[1].node().stealChildren(shipWheel.find('**/med').node())
hull.geoms[2].node().stealChildren(shipWheel.find('**/low').node())
if complexCustomization:
hull.geoms[0].setTexture(shipHullTexture)
hull.geoms[0].flattenLight()
hull.geoms[1].setTexture(shipHullTexture)
hull.geoms[1].flattenLight()
hull.geoms[2].setTexture(shipHullTexture)
hull.geoms[2].flattenLight()
hull.geoms[3].setTexture(shipHullTexture)
hull.geoms[3].flattenLight()
high.attachNewNode(ModelNode('non-animated')).node().stealChildren(hull.geoms[0].node())
med.attachNewNode(ModelNode('non-animated')).node().stealChildren(hull.geoms[1].node())
low.attachNewNode(ModelNode('non-animated')).node().stealChildren(hull.geoms[2].node())
superlow.attachNewNode(ModelNode('non-animated')).node().stealChildren(hull.geoms[3].node())
collisions.node().stealChildren(hull.collisions.node())
hull.locators.stash()
charRoot.flattenStrong()
ship = Ship.Ship(shipClass, root, breakAnims, hitAnims, metaAnims, collisions, hull.locators)
if not complexCustomization:
ship.char.setTexture(shipHullTexture)
return ship
def getAIShip(self, shipClass):
from pirates.ship import ShipAI
modelClass = ShipGlobals.getModelClass(shipClass)
hull = self.getHull(modelClass, 0)
root = NodePath('Ship')
collisions = root.attachNewNode('collisions')
mastSetup = ShipGlobals.getMastSetup(shipClass)
for data in [(0, 'location_mainmast_0'), (1, 'location_mainmast_1'), (2, 'location_mainmast_2'), (3, 'location_aftmast*'), (4, 'location_foremast*')]:
mastData = mastSetup.get(data[0])
if mastData:
mast = self.mastSets[mastData[0]].getMastSet(mastData[1] - 1)
model = NodePath(mast.charRoot)
model.setPos(hull.locators.find('**/%s' % data[1]).getPos(hull.locators))
model.setHpr(hull.locators.find('**/%s' % data[1]).getHpr(hull.locators))
model.setScale(hull.locators.find('**/%s' % data[1]).getScale(hull.locators))
if modelClass > ShipGlobals.INTERCEPTORL3 or data[0] != 3:
mastCode = str(data[0])
else:
mastCode = '0'
mast.collisions.find('**/collision_masts').setTag('Mast Code', mastCode)
collisions.node().stealChildren(mast.collisions.node())
collisions.node().stealChildren(hull.collisions.node())
hull.locators.reparentTo(root)
ship = ShipAI.ShipAI(root, collisions, hull.locators)
ship.modelRoot.setTag('Mast Code', str(255))
ship.modelRoot.setTag('Hull Code', str(255))
return ship
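# --- Illustrative sketch (not part of ShipFactory) ----------------------------------
# getShip above builds its detail levels with LODNode switches. Assuming standard
# Panda3D LODNode semantics, the i-th addSwitch(far, near) call defines the
# camera-distance band [near, far) in which the i-th child attached to the LOD node
# is shown. The distances below are the detailLevel == 2 values used above.
if __name__ == "__main__":
    from pandac.PandaModules import LODNode, NodePath

    demoRoot = NodePath('lodDemo')
    demoLod = demoRoot.attachNewNode(LODNode('lod'))
    demoLod.node().addSwitch(750, 0)        # child 0 ("high"): 0-750 units
    demoLod.node().addSwitch(3000, 750)     # child 1 ("med"): 750-3000 units
    demoLod.node().addSwitch(8000, 3000)    # child 2 ("low"): 3000-8000 units
    demoLod.node().addSwitch(100000, 8000)  # child 3 ("superlow"): beyond 8000 units
    for name in ('high', 'med', 'low', 'superlow'):
        demoLod.attachNewNode(name)
    print(demoLod.node())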
|
1692996
|
from relogic.logickit.scorer.scorer import Scorer
from relogic.logickit.utils.utils import softmax, sigmoid
import torch.nn.functional as F
import torch
from tqdm import tqdm
import os
import subprocess
import json
class RecallScorer(Scorer):
def __init__(self, label_mapping, topk, correct_label='1', dump_to_file=None):
super(RecallScorer, self).__init__()
self.label_mapping = label_mapping
self._inv_label_mapping = {v: k for k, v in label_mapping.items()}
self._examples = []
self._preds = []
self.topk = topk
self.correct_label = correct_label
        # Default to no dumping so _get_results can safely test these attributes.
        self.dump_to_file_path = None
        self.dump_to_file_handler = None
        if dump_to_file:
            self.dump_to_file_path = os.path.join(dump_to_file["output_dir"], dump_to_file["task_name"] + "_dump.json")
def update(self, mbs, predictions, loss, extra_args):
super(RecallScorer, self).update(mbs, predictions, loss, extra_args)
for example, preds in zip(mbs.examples, predictions):
self._examples.append(example)
self._preds.append(preds)
def get_loss(self):
return 0
def _get_results(self):
if self.dump_to_file_path:
self.dump_to_file_handler = open(self.dump_to_file_path, 'w')
self._n_hit_left, self._n_hit_right, self._n_total_left, self._n_total_right = 0, 0, 0, 0
pred_collection = [{}, {}] # forward direction and backward direction
gold_collection = [{}, {}]
for example, preds in zip(self._examples, self._preds):
prob = preds[self.label_mapping[self.correct_label]].item()
query_id, candidate_id, direction = example.guid.split('-')
direction = int(direction)
if direction == 1:
query_id, candidate_id = candidate_id, query_id
if self.dump_to_file_handler:
self.dump_to_file_handler.write("{} {} {} {}\n".format(query_id, candidate_id, direction, prob))
if query_id not in pred_collection[direction]:
pred_collection[direction][query_id] = []
pred_collection[direction][query_id].append((candidate_id, prob))
# if example.label == self.correct_label:
# gold_collection[direction][query_id] = candidate_id
gold_query_id, gold_candidate_id, gold_direction = example.gold_pair.split('-')
gold_direction = int(gold_direction)
if gold_direction == 1:
gold_query_id, gold_candidate_id = gold_candidate_id, gold_query_id
gold_collection[gold_direction][gold_query_id] = gold_candidate_id
if len(pred_collection[0]) != len(gold_collection[0]) or len(pred_collection[1]) != len(gold_collection[1]):
            raise ValueError("The query size in pred collection {}|{} is different from gold collection {}|{}".format(
len(pred_collection[0]), len(pred_collection[1]), len(gold_collection[0]), len(gold_collection[1])))
for d in range(2):
for query_id in pred_collection[d]:
sorted_list = sorted(pred_collection[d][query_id], key=lambda x: x[1], reverse=True)
candidate_ids = [x[0] for x in sorted_list][:self.topk]
if gold_collection[d][query_id] in candidate_ids:
if d == 0:
self._n_hit_left += 1
else:
self._n_hit_right += 1
self._n_total_left = len(gold_collection[0])
self._n_total_right = len(gold_collection[1])
if self.dump_to_file_path:
self.dump_to_file_handler.close()
return [("hits_left", self._n_hit_left),
("hits_right", self._n_hit_right),
("total_left", self._n_total_left),
("total_right", self._n_total_right),
("recall_left", self._n_hit_left / self._n_total_left),
("recall_right", self._n_hit_right / self._n_total_right)]
class CartesianMatchingRecallScorer(Scorer):
def __init__(self, topk, qrels_file_path, dump_to_file=None):
super(CartesianMatchingRecallScorer, self).__init__()
self.topk = topk
self.qrels_file_path = qrels_file_path
if dump_to_file:
self.dump_to_file_path = os.path.join(dump_to_file["output_dir"], dump_to_file["task_name"] + "_dump.json")
self.dump_to_file_handler = open(self.dump_to_file_path, 'w')
else:
print("You need to specify the dump_to_file path for the pair matching task")
exit()
def update(self, mbs, reprs, loss, extra_args):
super(CartesianMatchingRecallScorer, self).update(mbs, reprs, loss, extra_args)
for example, repr in zip(mbs.examples, reprs):
self.dump_to_file_handler.write(json.dumps({
"guid": example.guid,
"feature": " ".join([str(f) for f in repr.cpu().data.numpy()])}) + "\n")
def get_loss(self):
return 0
def _get_results(self):
self.dump_to_file_handler.close()
dir = os.path.abspath(os.path.dirname(__file__))
recall_eval_path = os.path.join(dir, '..', '..', '..', 'evals', 'pair_matching', 'entity_align_eval.py')
eval_out = subprocess.check_output(["python", recall_eval_path, "-e", self.dump_to_file_path, "-g", self.qrels_file_path])
eval_out_lines = str(eval_out, 'utf-8').split('\n')
results = []
for line in eval_out_lines:
if line.startswith("Hits@1:"):
score = float(line.strip().split(" ")[1].strip("%"))
results.append(score)
return [("recall_left", results[0]),
("recall_right", results[1])]
# class CartesianMatchingRecallScorer(Scorer):
# def __init__(self, topk, dump_to_file=None):
# super(CartesianMatchingRecallScorer, self).__init__()
# self.topk = topk
# self.left = {}
# self.right = {}
# self.left_to_right_gold = {}
# self.right_to_left_gold = {}
# if dump_to_file:
# self.dump_to_file_path = os.path.join(dump_to_file["output_dir"], dump_to_file["task_name"] + "_dump.json")
#
#
# def update(self, mbs, reprs, loss, extra_args):
# super(CartesianMatchingRecallScorer, self).update(mbs, reprs, loss, extra_args)
# for example, repr in zip(mbs.examples, reprs):
# left_id, right_id, direc = example.guid.split('-')
# if direc == "0":
# self.left[left_id] = repr
# else:
# self.right[right_id] = repr
# left_id, right_id, direc = example.gold_pair.split('-')
# if direc == "0":
# self.left_to_right_gold[left_id] = right_id
# else:
# self.right_to_left_gold[right_id] = left_id
#
# def get_loss(self):
# return 0
#
# def _get_results(self):
# if self.dump_to_file_path:
# self.dump_to_file_handler = open(self.dump_to_file_path, 'w')
#
# self._n_hit_left, self._n_hit_right, self._n_total_left, self._n_total_right = 0, 0, 0, 0
# if self.dump_to_file_path:
# for key in self.left:
# self.dump_to_file_handler.write("{}\t".format(key) + " ".join([str(f) for f in self.left[key].cpu().data.numpy()]) + '\n')
# for key in self.right:
# self.dump_to_file_handler.write("{}\t".format(key) + " ".join([str(f) for f in self.right[key].cpu().data.numpy()]) + '\n')
# left_to_right_distance = {}
# right_to_left_distance = {}
# print("Evaluating left to right")
# for left_id in self.left.keys():
# left_to_right_distance[left_id] = {}
# for right_id in self.right.keys():
# left_to_right_distance[left_id][right_id] = torch.sum(torch.abs(self.left[left_id] - self.right[right_id])).item()
# print("Evaluating right to left")
# for right_id in self.right.keys():
# right_to_left_distance[right_id] = {}
# for left_id in self.left.keys():
# right_to_left_distance[right_id][left_id] = torch.sum(torch.abs(self.right[right_id] - self.left[left_id])).item()
#
# for left_id in self.left.keys():
# sorted_list = sorted(left_to_right_distance[left_id].items(), key=lambda x: x[1])
# candidate_ids = [x[0] for x in sorted_list][:self.topk]
# if self.left_to_right_gold[left_id] in candidate_ids:
# self._n_hit_left += 1
# # if dump_to_file:
# # for right_id, dist in sorted_list:
# # fout.write("{}\t{}\t{}\n".format(left_id, right_id, dist))
# for right_id in self.right.keys():
# sorted_list = sorted(right_to_left_distance[right_id].items(), key=lambda x: x[1])
# candidate_ids = [x[0] for x in sorted_list][:self.topk]
# if self.right_to_left_gold[right_id] in candidate_ids:
# self._n_hit_right += 1
# # if dump_to_file:
# # for left_id, dist in sorted_list:
# # fout.write("{}\t{}\t{}\n".format(right_id, left_id, dist))
#
# if self.dump_to_file_path:
# self.dump_to_file_handler.close()
#
# self._n_total_left = len(self.left_to_right_gold)
# self._n_total_right = len(self.right_to_left_gold)
# return [("hits_left", self._n_hit_left),
# ("hits_right", self._n_hit_right),
# ("total_left", self._n_total_left),
# ("total_right", self._n_total_right),
# ("recall_left", self._n_hit_left / self._n_total_left),
# ("recall_right", self._n_hit_right / self._n_total_right)]
class RetrievalScorer(Scorer):
"""
"""
def __init__(self, label_mapping, qrels_file_path, correct_label='1', dump_to_file=None, regression=False):
super(RetrievalScorer, self).__init__()
self.label_mapping = label_mapping
self._inv_label_mapping = {v: k for k, v in label_mapping.items()}
self._examples = []
self._preds = []
self.correct_label = correct_label
self.qrels_file_path = qrels_file_path
self.regression = regression
        # Because we need to leverage trec_eval to calculate the scores, dump_to_file cannot be None
if dump_to_file:
self.dump_to_file_path = os.path.join(dump_to_file["output_dir"], dump_to_file["task_name"] + "_dump.json")
self.dump_to_file_handler = open(self.dump_to_file_path, 'w')
else:
print("You need to specify the dump_to_file path for the retrieval task")
exit()
def update(self, mbs, predictions, loss, extra):
super(RetrievalScorer, self).update(mbs, predictions, loss, extra)
# TODO: we are going to migrate the interface !
predictions = predictions["logits"]
for example, preds in zip(mbs.examples, predictions):
self._examples.append(example)
self._preds.append(preds.data.cpu().numpy())
def get_loss(self):
return 0
def _get_results(self):
topic_doc_collection = {}
for example, preds in zip(self._examples, self._preds):
if self.regression:
score = sigmoid(preds)[0]
else:
preds = softmax(preds)
score = preds[self.label_mapping[self.correct_label]]
text_a_id, text_b_id = example.guid.split('|')
if text_a_id not in topic_doc_collection:
topic_doc_collection[text_a_id] = {}
topic_doc_collection[text_a_id][text_b_id] = max(topic_doc_collection[text_a_id].get(text_b_id, 0), score)
for text_a_id in topic_doc_collection:
for text_b_id in topic_doc_collection[text_a_id]:
score = topic_doc_collection[text_a_id][text_b_id]
self.dump_to_file_handler.write("{} Q0 {} 0 {} rerank\n".format(text_a_id, text_b_id, score))
self.dump_to_file_handler.flush()
self.dump_to_file_handler.close()
dir = os.path.abspath(os.path.dirname(__file__))
trec_eval_path = os.path.join(dir, '..', '..', '..', 'evals', 'trec_eval', 'trec_eval.9.0.4/trec_eval')
trec_out = subprocess.check_output([trec_eval_path, self.qrels_file_path , self.dump_to_file_path])
trec_out_lines = str(trec_out, 'utf-8').split('\n')
mean_average_precision = float(trec_out_lines[5].split('\t')[-1])
# mean_reciprocal_rank = float(trec_out_lines[9].split('\t')[-1])
# p_30 = float(trec_out_lines[25].split('\t')[-1])
return [("map", mean_average_precision)]
|
1693001
|
from common.diagrams import Diagram
from common.definitions import G_PROTEIN_SEGMENTS
from residue.models import Residue
from residue.models import ResidueGenericNumber
from residue.models import ResidueNumberingScheme
from django.utils.safestring import mark_safe
from math import cos, sin, pi, floor,sqrt
from datetime import datetime
from collections import OrderedDict
class DrawGproteinPlot(Diagram):
def __init__(self, residue_list, protein_class, protein_name, nobuttons = None):
self.nobuttons = 'gprotein'
self.type = 'snakeplot'
plot_data = {}
plot_data['direction'] = [0, 0, 1, 0, 1, 0, 1, 0] # 0: EC->IC, 1: IC->EC
plot_data['helixRadius'] = 70
self.receptorId = protein_name
self.family = protein_class
self.output = ''
# FIXME DO PUREIMAGE
# $pureImage = isset($_GET['pureimage']) && $_GET['pureimage'] == 'TRUE' ? TRUE : FALSE;
# get sequence, baldwin, and bw information of this receptor
self.sequence = residue_list
self.segments = {}
self.segments_full = OrderedDict()
i = 0
for r in self.sequence:
if r.protein_segment:
segment = str(r.protein_segment.slug)
elif r.segment_slug: # from family alignment
segment = str(r.segment_slug)
if segment not in self.segments:
self.segments[segment] = []
self.segments_full[segment] = r.protein_segment
label = ''
displaylabel = ''
if r.generic_number:
label = r.generic_number.label
elif hasattr(r, 'family_generic_number'):
label = r.family_generic_number
if r.display_generic_number: displaylabel = r.display_generic_number.label
displaylabel = r.amino_acid + str(r.sequence_number) + " \n " + displaylabel
if hasattr(r, 'frequency'):
displaylabel = displaylabel + "\n" + r.frequency
self.segments[segment].append([r.sequence_number, r.amino_acid,label,displaylabel])
i += 1
# for helix_num in range(1,2): #FIX for missing generic numbers
# rs = self.segments['H5']
# for i in range(0,len(rs)):
# if not rs[i][2]:
# if i+1<len(rs): #if there is a next one
# if rs[i+1][2]: #if it has generic number
# number = str(int(rs[i+1][2].split('x')[1])-1)
# rs[i][2] = str(helix_num) + "x" + number
# print(rs[i][2])
self.helixWidth = 85 # Width of helix
self.resNumPerRow = 4 # Residue number per row in helix
self.angleDeg = 22.0 # Angle size of each helix turn
self.residue_radius = 12 # Radius of the residue circle
# svg image padding offset
self.offsetX = 0 #-200
self.offsetY = 0 #-50
# margin between two helixes
self.margin = 10
# highest and lowest bound of this svg
self.high =0
self.low = 0
# keep track of max Y positions of intra/extra loops
self.maxY = {'bottom':0,'top':0}
self.maxX = {'left':0,'right':0}
# helices length
# helicesLength = Svg::getSnakePlotHelicesLength($baldwin, $helixWidth, $angleDeg) #FIXME
# top and bottom residue coords in each helix
self.TBCoords = {}
self.output = ""
self.traceoutput = ""
self.helixoutput = ""
# Draw sheets and helices
self.count = 1
self.count_sheet = 0
for s in G_PROTEIN_SEGMENTS['Full']:
if s in self.segments_full:
if self.segments_full[s].category=='helix':
self.helixoutput += self.drawSnakePlotHelix(s)
self.count += 1
elif self.segments_full[s].category=='sheet':
self.helixoutput += self.drawSnakePlotSheet(s)
self.count += 1
self.count_sheet += 1
# Draw loops
self.count = 0
for s in G_PROTEIN_SEGMENTS['Full']:
if s in self.segments_full and self.segments_full[s].category=='loop':
#pass
self.drawSnakePlotLoop(s)
else:
self.count += 1
def __str__(self):
self.output = "<g id=snake transform='translate(0, " + str(-self.low+ self.offsetY) + ")'>" + self.traceoutput+self.output+self.helixoutput+self.drawToolTip() + "</g>"; #for resizing height
return mark_safe(self.create(self.output,self.maxX['right']+30,self.high-self.low+self.offsetY*2,"snakeplot", self.nobuttons))
def drawSnakePlotHelix(self, segment):
rs = self.segments[segment]
helix_num = self.count
self.TBCoords[helix_num] = {}
if helix_num%2!=0: rs.reverse() # reverse direction for odd-numbered helices, which run from inside to outside
output_residues = []
res_num = len(self.segments[segment])
output_residue_in = ''
output_residue_out = ''
output_trace = ''
startX = self.helixWidth+self.offsetX+(self.margin+self.helixWidth)*(helix_num-1)-(self.count_sheet*20)
startY = self.offsetY
row_length = 3
row_pos = 0
row = 0
prevGeneric = '0.0.0'
bulgeX = 0
bulgeY = 0
bulge = 0
skip = 0
indentX = -self.residue_radius+3
indentY = 3
for i in range(0,res_num):
prevGeneric_number = prevGeneric.split('.')[2]
currGeneric_number = rs[i][2].split('.')[2]
if ((helix_num%2==0 and prevGeneric_number+'1'==currGeneric_number) or (helix_num%2!=0 and str(int(prevGeneric_number)-1)+'1'==currGeneric_number)) and i!=0:
bulge = 1
if row_pos==0: # if first in row, use space for bulge
bulgeY = 5
bulgeX = 7
else:
bulgeY = 5
bulgeX = 5
row_length+=1
elif i!=0 and ((helix_num%2!=0 and int(prevGeneric_number)-1!= int(currGeneric_number)) or (helix_num%2==0 and int(prevGeneric_number)+1!= int(currGeneric_number))):
skip = 1
if row_pos!=0 and row_pos+1<row_length:
nextX =round(startX-(row_pos+1)*self.residue_radius*1.5+indentX+bulgeX)
nextY = round(startY+row*self.residue_radius*2.4+(row_pos+1)*self.residue_radius*0.5+indentY+bulgeY)
output_trace += "<line x1="+str(prevX)+" y1="+str(prevY)+" x2="+str(nextX)+" y2="+str(nextY)+" stroke='grey' fill='none' stroke-width='1' stroke-dasharray='1,1' />"
row_pos +=1
elif row_pos+1==row_length:
row+=1
row_pos=0
row_length = 3 if row_length == 4 else 4
else:
row_pos +=1
# move left as you go down a row
x = round(startX-row_pos*self.residue_radius*1.6+indentX+bulgeX)
# Move down with right amount
y = round(startY+row*self.residue_radius*2.4+row_pos*self.residue_radius*0.5+indentY+bulgeY)
output_residue = self.DrawResidue(x,y,rs[i][1], rs[i][0], rs[i][3], self.residue_radius)
if x<self.maxX['left']: self.maxX['left'] = x
if x>self.maxX['right']: self.maxX['right'] = x
row_pos += 1
if bulge==1:
if row_pos==1: # if first in row, use space for bulge
bulgeY = -3
bulgeX = 10
else:
bulgeY = -3
bulgeX = 7
rs[i][2] = prevGeneric # make it the prev one, to catch missing ones correctly
bulge = 0
if row_length==3:
output_residue_in += output_residue
else:
output_residue_out += output_residue
output_residues.append(output_residue)
if i==0: self.TBCoords[helix_num]['top'] = [x,y]
if i==res_num-1: self.TBCoords[helix_num]['bottom'] = [x,y]
if (row_pos==1 and row!=0) or (skip==1 and row_pos==2): # if need for trace
if row_length==3: points = "M "+str(prevX)+" "+str(prevY)+" Q"+str(prevX-40)+" "+str(prevY+30)+", "+str(x-21)+" "+str(y-8)+" T"+str(x)+" "+str(y)
if row_length>=4: points = "M "+str(prevX)+" "+str(prevY)+" Q"+str(prevX-40)+" "+str(prevY+30)+", "+str(x-24)+" "+str(y-7)+" T"+str(x)+" "+str(y)
output_trace += "<path d='" + points + "' stroke='grey' fill='none' stroke-width='2' />"
# alternate between 4 and 3 res per row
if row_length>3 and row_pos>=row_length:
row_length=3
row_pos = 0
row += 1
bulgeX = 0
bulgeY = 0
indentX = -self.residue_radius+3
indentY = 3
elif row_length==3 and row_pos>=3:
row_length=4
row_pos = 0
row += 1
bulgeX = 0
bulgeY = 0
indentX = 0
indentY = 0
skip = 0
prevX = x
prevY = y
prevGeneric = rs[i][2]
temp = ''
if helix_num%2!=0: output_residues.reverse()
for res in output_residues:
temp += res
return output_trace+temp
def drawSnakePlotSheet(self, segment):
rs = self.segments[segment]
helix_num = self.count
self.TBCoords[helix_num] = {}
if helix_num%2!=0: rs.reverse() # reverse direction for odd-numbered helices, which run from inside to outside
output_residues = []
res_num = len(self.segments[segment])
output_residue_in = ''
output_residue_out = ''
output_trace = ''
startX = 50+self.offsetX+(self.margin+self.helixWidth)*(helix_num-1)-(self.count_sheet*20)
startY = self.offsetY
row_length = 3
row_pos = 0
row = 0
prevGeneric = '0.0.0'
bulgeX = 0
bulgeY = 0
bulge = 0
skip = 0
indentX = -self.residue_radius+3
indentY = 3
for i in range(0,res_num):
prevGeneric_number = prevGeneric.split('.')[2]
currGeneric_number = rs[i][2].split('.')[2]
if (helix_num%2==0 and prevGeneric_number+'1'==currGeneric_number) or (helix_num%2!=0 and str(int(prevGeneric_number)-1)+'1'==currGeneric_number):
bulge = 1
if row_pos==0: # if first in row, use space for bulge
bulgeY = 5
bulgeX = 7
else:
bulgeY = 5
bulgeX = 5
row_length+=1
elif i!=0 and ((helix_num%2!=0 and int(prevGeneric_number)-1!= int(currGeneric_number)) or (helix_num%2==0 and int(prevGeneric_number)+1!= int(currGeneric_number))):
skip = 1
if row_pos!=0 and row_pos+1<row_length:
nextX =round(startX-(row_pos+1)*self.residue_radius*1.5+indentX+bulgeX)
nextY = round(startY+row*self.residue_radius*2.4+(row_pos+1)*self.residue_radius*0.5+indentY+bulgeY)
#output_trace += "<line x1="+str(prevX)+" y1="+str(prevY)+" x2="+str(nextX)+" y2="+str(nextY)+" stroke='grey' fill='none' stroke-width='1' stroke-dasharray='1,1' />"
row_pos +=1
elif row_pos+1==row_length:
row+=1
row_pos=0
row_length = 3 if row_length == 4 else 4
else:
row_pos +=1
# move left as you go down a row
x = round(startX) #+indentX+bulgeX
# Move down with right amount
y = round(startY+i*self.residue_radius*1.5)
output_residue = self.DrawResidueSquare(x,y,rs[i][1], rs[i][0], rs[i][3], self.residue_radius)
if x<self.maxX['left']: self.maxX['left'] = x
if x>self.maxX['right']: self.maxX['right'] = x
row_pos += 1
if bulge==1:
if row_pos==1: # if first in row, use space for bulge
bulgeY = -3
bulgeX = 10
else:
bulgeY = -3
bulgeX = 7
rs[i][2] = prevGeneric # make it the prev one, to catch missing ones correctly
bulge = 0
if row_length==3:
output_residue_in += output_residue
else:
output_residue_out += output_residue
output_residues.append(output_residue)
if i==0: self.TBCoords[helix_num]['top'] = [x,y]
if i==res_num-1: self.TBCoords[helix_num]['bottom'] = [x,y]
if (row_pos==1 and row!=0) or (skip==1 and row_pos==2): # if need for trace
if row_length==3: points = "M "+str(prevX)+" "+str(prevY)+" Q"+str(prevX-40)+" "+str(prevY+30)+", "+str(x-21)+" "+str(y-8)+" T"+str(x)+" "+str(y)
if row_length>=4: points = "M "+str(prevX)+" "+str(prevY)+" Q"+str(prevX-40)+" "+str(prevY+30)+", "+str(x-24)+" "+str(y-7)+" T"+str(x)+" "+str(y)
# output_trace += "<path d='" + points + "' stroke='grey' fill='none' stroke-width='2' />"
# alternate between 4 and 3 res per row
if row_length>3 and row_pos>=row_length:
row_length=3
row_pos = 0
row += 1
bulgeX = 0
bulgeY = 0
indentX = -self.residue_radius+3
indentY = 3
elif row_length==3 and row_pos>=3:
row_length=4
row_pos = 0
row += 1
bulgeX = 0
bulgeY = 0
indentX = 0
indentY = 0
skip = 0
prevX = x
prevY = y
prevGeneric = rs[i][2]
temp = ''
if helix_num%2!=0: output_residues.reverse()
for res in output_residues:
temp += res
return output_trace+temp
def drawSnakePlotLoop(self, segment):
y_offset = 50
font_size = 12
font_family = 'courier'
bezier_pull = 80
name = segment
x_at_max_y = 0
rs = self.segments[segment] # get residues
start = 1
res_before = []
res_helix = []
res_after = []
if self.count % 2 == 0:
position = 'bottom'
orientation = 1
else:
position = 'top'
orientation = -1
if self.count not in self.TBCoords:
return 0
if self.count+1 not in self.TBCoords:
return 0
# Get positions of two linking residues from each helix
x1 = self.TBCoords[self.count][position][0]
y1 = self.TBCoords[self.count][position][1]
x2 = self.TBCoords[self.count+1][position][0]
y2 = self.TBCoords[self.count+1][position][1]
boxX = (x1+x2)/2 # midway between
if position=='top':
boxY = min(y1,y2)-y_offset # over helix
y_indent = -1*bezier_pull
if position=='bottom':
boxY = max(y1,y2)+y_offset # over helix
y_indent = bezier_pull
points = str(x1)+","+str(y1)+" "+str(boxX)+","+str(boxY)+" "+str(x2)+","+str(y2)
points2 = "M "+str(x1)+" "+str(y1)+" Q"+str(boxX)+" "+str(boxY+y_indent)+" "+str(x2)+" "+str(y2)
# Getting midpoint of Bezier curve http://www.svgbasics.com/curves.html
Dx = ((x1+boxX)/2)
Ex = ((x2+boxX)/2)
Fx = (Dx+Ex)/2
Dy = ((y1+boxY+y_indent)/2)
Ey = ((y2+boxY+y_indent)/2)
Fy = (Dy+Ey)/2
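# Sanity check of the midpoint construction above: for a quadratic Bezier with
# endpoints P0=(x1,y1), P2=(x2,y2) and control point P1=(boxX, boxY+y_indent),
# B(0.5) = (P0 + 2*P1 + P2)/4, which is exactly the (Fx, Fy) computed here.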
#JUST SIMPLE
#self.output += "<path class='"+name+" short' d='" + points2 + "' stroke='black' fill='none' stroke-width='2' />"
# self.output += "<rect onclick='toggleLoop(\"."+name+"\",\"short\");' class='"+name+" short' x="+str(Fx-18)+" y="+str(Fy-13)+" rx=5 ry=5 width='35' height='20' stroke='black' fill='white' stroke-width='1' style2='fill:red;stroke:black;stroke-width:5;opacity:0.5'/>"
# self.output += str("<text onclick='toggleLoop(\"."+name+"\",\"short\");' class='"+name+" short' x="+str(Fx)+" y="+str(Fy)+" text-anchor='middle' font-size="+str(font_size)+" font-family='"+font_family+"'>"+name+"</text>")
y_indent = y_indent*len(rs)/5 # scale y_indent roughly with the number of residues in the loop
loop_long_length = 0
super_loop_long_length = 40
between_residues = 18
length_of_residues_in_loop = len(rs)*between_residues-self.residue_radius
length = self.lengthbezier([x1,y1],[boxX,boxY+y_indent],[x2,y2],0.001)
if len(rs)<super_loop_long_length:
tries = 0 # adjust size
while abs(length-length_of_residues_in_loop-70)>5:
# print(abs(length-length_of_residues_in_loop+100),length,length_of_residues_in_loop,tries)
if length-length_of_residues_in_loop-70>5:
y_indent *=0.9
else:
y_indent *=1.1
length = self.lengthbezier([x1,y1],[boxX,boxY+y_indent],[x2,y2],0.001)
tries += 1
if tries>100:
break
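# At this point (assuming the loop above converged) y_indent has been scaled so
# that the quadratic Bezier arc is roughly 70 px longer than the space needed by
# the loop residues; 'pos' below then walks along that arc in steps of
# 'between_residues' pixels.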
pos = (length-length_of_residues_in_loop)/2 # get start pos
indentX = 0
indentY2 = 0
prev_where = [x1,y1]
# make rounded arc
points2 = "M "+str(x1)+" "+str(y1)+" Q"+str(boxX)+" "+str(boxY+y_indent)+" "+str(x2)+" "+str(y2)
labelbox = self.wherebezier([x1,y1],[boxX,boxY+y_indent],[x2,y2],0.001,length/2)
labelbox[1][1] += orientation*40
self.output += "<path class='"+name+"' d='" + points2 + "' stroke='black' fill='none' stroke-width='2' />"
max_y = y1
for i in range(0,len(rs)):
r = rs[i]
where = self.wherebezier([x1,y1],[boxX,boxY+y_indent],[x2,y2],0.001,pos)
self.output += self.DrawResidue(where[1][0],where[1][1],r[1], r[0], r[3], self.residue_radius-1,name)
pos += between_residues
if where[1][1]>self.high: self.high = where[1][1]
if where[1][1]<self.low: self.low = where[1][1]
prev_where = where[1][0],where[1][1]
if orientation==-1:
if where[1][1]<self.maxY[position]: self.maxY[position] = where[1][1]
else:
if where[1][1]>self.maxY[position]: self.maxY[position] = where[1][1]
if orientation==-1:
if where[1][1]<max_y:
max_y = where[1][1]
x_at_max_y = where[1][0]
else:
if where[1][1]>max_y:
max_y = where[1][1]
x_at_max_y = where[1][0]
x_at_max_y = where[1][0]
if orientation==1:
max_y = max_y+25
else:
max_y = max_y-20
self.output += "<rect onclick='toggleLoop(\"."+name+"\",\"long\");' class='"+name+"' x="+str(x_at_max_y-18)+" y="+str(max_y-13)+" rx=5 ry=5 width='35' height='20' stroke='black' fill='white' stroke-width='1' style2='fill:red;stroke:black;stroke-width:5;opacity:0.5'/>"
self.output += str("<text onclick='toggleLoop(\"."+name+"\",\"long\");' class='"+name+"' x="+str(x_at_max_y)+" y="+str(max_y)+" text-anchor='middle' font-size="+str(font_size)+" font-family='"+font_family+"'>"+name+"</text>")
|
1693007
|
from django.urls import path
from .views import show_foilaw
urlpatterns = [
path("<slug:slug>/", show_foilaw, name="publicbody-foilaw-show"),
]
|
1693027
|
from CvPythonExtensions import *
import CvUtil
gc = CyGlobalContext()
class CvPediaProject:
def __init__(self, main):
self.iProject = -1
self.top = main
self.X_INFO_PANE = self.top.X_PEDIA_PAGE
self.Y_INFO_PANE = self.top.Y_PEDIA_PAGE
self.W_INFO_PANE = 380 #290
self.H_INFO_PANE = 120
self.W_ICON = 100
self.H_ICON = 100
self.X_ICON = self.X_INFO_PANE + 10
self.Y_ICON = self.Y_INFO_PANE + 10
self.ICON_SIZE = 64
self.X_INFO_TEXT = self.X_INFO_PANE + 110
self.Y_INFO_TEXT = self.Y_ICON + 15
self.W_INFO_TEXT = self.W_INFO_PANE - 70
self.H_INFO_TEXT = self.H_INFO_PANE - 20
self.X_REQUIRES = self.X_INFO_PANE + self.W_INFO_PANE + 10
self.W_REQUIRES = self.top.R_PEDIA_PAGE - self.X_REQUIRES
self.H_REQUIRES = 110
self.Y_REQUIRES = self.Y_INFO_PANE + self.H_INFO_PANE - self.H_REQUIRES
self.X_DETAILS = self.X_INFO_PANE
self.Y_DETAILS = self.Y_INFO_PANE + self.H_INFO_PANE + 10
self.W_DETAILS = self.top.R_PEDIA_PAGE - self.X_DETAILS
self.H_DETAILS = 210
self.X_HISTORY = self.X_DETAILS
self.W_HISTORY = self.top.R_PEDIA_PAGE - self.X_HISTORY
self.Y_HISTORY = self.Y_DETAILS + self.H_DETAILS + 10
self.H_HISTORY = self.top.B_PEDIA_PAGE - self.Y_HISTORY
def interfaceScreen(self, iProject):
self.iProject = iProject
screen = self.top.getScreen()
self.placeInfo()
self.placeRequires()
self.placeDetails()
self.placeHistory()
def placeInfo(self):
screen = self.top.getScreen()
panel = self.top.getNextWidgetName()
info = gc.getProjectInfo(self.iProject)
screen.addPanel(self.top.getNextWidgetName(), "", "", False, False, self.X_INFO_PANE, self.Y_INFO_PANE, self.W_INFO_PANE, self.H_INFO_PANE, PanelStyles.PANEL_STYLE_BLUE50)
screen.addPanel(self.top.getNextWidgetName(), "", "", False, False, self.X_ICON, self.Y_ICON, self.W_ICON, self.H_ICON, PanelStyles.PANEL_STYLE_MAIN)
screen.addDDSGFC(self.top.getNextWidgetName(), info.getButton(), self.X_ICON + self.W_ICON / 2 - self.ICON_SIZE / 2, self.Y_ICON + self.H_ICON / 2 - self.ICON_SIZE / 2, self.ICON_SIZE, self.ICON_SIZE, WidgetTypes.WIDGET_GENERAL, -1, -1)
screen.addListBoxGFC(panel, "", self.X_INFO_TEXT, self.Y_INFO_TEXT, self.W_INFO_TEXT, self.H_INFO_TEXT, TableStyles.TABLE_STYLE_EMPTY)
screen.enableSelect(panel, False)
screen.appendListBoxString(panel, u"<font=4b>" + info.getDescription() + u"</font>", WidgetTypes.WIDGET_GENERAL, 0, 0, CvUtil.FONT_LEFT_JUSTIFY)
screen.appendListBoxString(panel, u"<font=3>Project</font>", WidgetTypes.WIDGET_GENERAL, 0, 0, CvUtil.FONT_LEFT_JUSTIFY)
if info.getProductionCost() >= 0:
if self.top.iActivePlayer == -1:
iCost = (info.getProductionCost() * gc.getDefineINT('PROJECT_PRODUCTION_PERCENT')) / 100
else:
iCost = gc.getActivePlayer().getProjectProductionNeeded(self.iProject)
szCost = u"Cost: %d%c" % (iCost, gc.getYieldInfo(YieldTypes.YIELD_PRODUCTION).getChar())
screen.appendListBoxString(panel, u"<font=3>" + szCost + u"</font>", WidgetTypes.WIDGET_GENERAL, 0, 0, CvUtil.FONT_LEFT_JUSTIFY)
def placeRequires(self):
screen = self.top.getScreen()
panel = self.top.getNextWidgetName()
info = gc.getProjectInfo(self.iProject)
screen.addPanel(panel, CyTranslator().getText("TXT_KEY_PEDIA_REQUIRES", ()), "", False, True, self.X_REQUIRES, self.Y_REQUIRES, self.W_REQUIRES, self.H_REQUIRES, PanelStyles.PANEL_STYLE_BLUE50)
screen.attachLabel(panel, "", " ")
iTech = info.getTechPrereq()
if iTech > -1:
screen.attachImageButton(panel, "", gc.getTechInfo(iTech).getButton(), GenericButtonSizes.BUTTON_SIZE_CUSTOM, WidgetTypes.WIDGET_PEDIA_JUMP_TO_TECH, iTech, 1, False)
iAnyoneProjectPrereq = info.getAnyoneProjectPrereq()
if iAnyoneProjectPrereq != -1:
screen.attachImageButton(panel, "", gc.getProjectInfo(iAnyoneProjectPrereq).getButton(), GenericButtonSizes.BUTTON_SIZE_CUSTOM, WidgetTypes.WIDGET_PEDIA_JUMP_TO_PROJECT, iAnyoneProjectPrereq, 1, False)
for iProject in range(gc.getNumProjectInfos()):
if info.getProjectsNeeded(iProject) > 0:
screen.attachImageButton(panel, "", gc.getProjectInfo(iProject).getButton(), GenericButtonSizes.BUTTON_SIZE_CUSTOM, WidgetTypes.WIDGET_PEDIA_JUMP_TO_PROJECT, iProject, 1, False)
def placeDetails(self):
screen = self.top.getScreen()
panel = self.top.getNextWidgetName()
text = self.top.getNextWidgetName()
info = gc.getProjectInfo(self.iProject)
screen.addPanel(panel, CyTranslator().getText("TXT_KEY_PEDIA_DETAILS", ()), "", True, False, self.X_DETAILS, self.Y_DETAILS, self.W_DETAILS, self.H_DETAILS, PanelStyles.PANEL_STYLE_BLUE50)
szText = CyGameTextMgr().getProjectHelp(self.iProject, True, None)[1:]
screen.addMultilineText(text, szText, self.X_DETAILS + 5, self.Y_DETAILS + 30, self.W_DETAILS - 10, self.H_DETAILS - 35, WidgetTypes.WIDGET_GENERAL, -1, -1, CvUtil.FONT_LEFT_JUSTIFY)
def placeHistory(self):
screen = self.top.getScreen()
panel = self.top.getNextWidgetName()
text = self.top.getNextWidgetName()
info = gc.getProjectInfo(self.iProject)
screen.addPanel(panel, CyTranslator().getText("TXT_KEY_CIVILOPEDIA_HISTORY", ()), "", True, True, self.X_HISTORY, self.Y_HISTORY, self.W_HISTORY, self.H_HISTORY, PanelStyles.PANEL_STYLE_BLUE50 )
szText = info.getCivilopedia()
screen.addMultilineText(text, szText, self.X_HISTORY + 10, self.Y_HISTORY + 30, self.W_HISTORY - 20, self.H_HISTORY - 40, WidgetTypes.WIDGET_GENERAL, -1, -1, CvUtil.FONT_LEFT_JUSTIFY)
def handleInput (self, inputClass):
return 0
|
1693029
|
import torch
import torch.nn as nn
from uninas.modules.heads.abstract import AbstractHead
from uninas.utils.args import Argument
from uninas.utils.shape import Shape
from uninas.register import Register
@Register.network_head()
class PwClassificationHead(AbstractHead):
"""
Network output, pixel-wise
act fun, dropout, conv1x1
"""
@classmethod
def args_to_add(cls, index=None) -> [Argument]:
""" list arguments to add to argparse when this class (or a child class) is chosen """
return super().args_to_add(index) + [
Argument('act_fun', default='relu', type=str, help='act fun of the 1x1 convolution', choices=Register.act_funs.names()),
Argument('bias', default='True', type=str, help='add a bias', is_bool=True),
Argument('dropout', default=0.0, type=float, help='initial dropout probability'),
]
def set_dropout_rate(self, p=None):
if p is not None:
self.head_module[-2].p = p
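# Note: head_module is built below as Sequential(act_fun, Dropout, Conv2d),
# so index -2 addresses the Dropout layer whose probability is updated here.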
def _build(self, s_in: Shape, s_out: Shape) -> Shape:
ops = [
Register.act_funs.get(self.act_fun)(inplace=False),
nn.Dropout(p=self.dropout),
nn.Conv2d(s_in.num_features(), s_out.num_features(), 1, 1, 0, bias=self.bias),
]
self.head_module = nn.Sequential(*ops)
return self.probe_outputs(s_in)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.head_module(x)
|
1693043
|
import os
import pickle as pkl
import numpy as np
import scipy.io as scio
import SimpleITK as sitk
from sklearn.preprocessing import normalize
from hyperg.utils import minmax_scale
from hyperg.utils import print_log
DATA_DIR = os.path.join(os.path.dirname(__file__), 'datasets')
def load_myocardium(test_idx=[4]):
heart_seg_dir = os.path.join(DATA_DIR, 'myocardiumSeg')
ori = os.listdir(os.path.join(heart_seg_dir, 'ori'))
X = []
y = []
for name in ori:
ori_img = sitk.ReadImage(os.path.join(heart_seg_dir, "ori/{}".format(name)))
ori_ary = minmax_scale(sitk.GetArrayFromImage(ori_img).squeeze()) # (y, x)
X.append(ori_ary)
seg_img = sitk.ReadImage(os.path.join(heart_seg_dir, "seg/{}".format(name)))
seg_ary = sitk.GetArrayFromImage(seg_img).squeeze()
y.append(seg_ary)
X = np.stack(X)
y = np.stack(y)
training_idx = [i for i in range(X.shape[0]) if i not in test_idx]
X_train = X[training_idx]
X_test = X[test_idx]
y_train = y[training_idx]
y_test = y[test_idx]
return X_train, X_test, y_train, y_test
def load_modelnet(selected_mod):
print_log("selected mod:{}".format(str(selected_mod)))
modelnet40_dir = os.path.join(DATA_DIR, "modelnet40")
X_train = pkl.load(open(os.path.join(modelnet40_dir, 'modelnet_train_fts.pkl'), 'rb'))
X_test = pkl.load(open(os.path.join(modelnet40_dir, 'modelnet_test_fts.pkl'), 'rb'))
y_train = pkl.load(open(os.path.join(modelnet40_dir, 'modelnet_train_lbls.pkl'), 'rb'))
y_test = pkl.load(open(os.path.join(modelnet40_dir, 'modelnet_test_lbls.pkl'), 'rb'))
X_train = [X_train[imod] for imod in selected_mod]
X_test = [X_test[imod] for imod in selected_mod]
if len(selected_mod) == 1:
X_train = X_train[0]
X_test = X_test[0]
return X_train, X_test, np.array(y_train), np.array(y_test)
def load_MSRGesture3D(i_train=2, i_test = 0):
msr_gesture_dir = os.path.join(DATA_DIR, "MSRGesture3D")
data = scio.loadmat(os.path.join(msr_gesture_dir, 'MSRGesture3D.mat'))
all_indices = scio.loadmat(os.path.join(msr_gesture_dir, 'MSRGesture3DTrainIndex.mat'))['trainIndex']
i_indices = all_indices[i_test, i_train].reshape(-1)
X = data['X']
X = normalize(X)
y = np.array(data['Y'], dtype=int).reshape(-1)
y = y - np.min(y)
X_train = X[i_indices == 1]
X_test = X[i_indices == 0]
y_train = y[i_indices == 1]
y_test = y[i_indices == 0]
return X_train, X_test, y_train, y_test
if __name__ == "__main__":
pass
|
1693070
|
from app import db
from app.models.base_model import BaseEntity
class Contact(db.Model, BaseEntity):
__tablename__ = 'contact'
name = db.Column(db.String(256))
email = db.Column(db.String(200), unique=True)
phone_nr = db.Column(db.String(64))
location_id = db.Column(db.Integer, db.ForeignKey('location.id'))
location = db.relationship('Location',
backref=db.backref('contacts', lazy='dynamic'))
|
1693157
|
import pytest
from torch.nn import Embedding as _Embedding
from neuralpy.layers.sparse import Embedding
def test_embedding_should_throw_type_error():
with pytest.raises(TypeError):
Embedding()
@pytest.mark.parametrize(
"num_embeddings, embedding_dim, padding_idx, \
max_norm, norm_type, scale_grad_by_freq, sparse, name",
[
# Checking num_embeddings validations
("invalid", 80, 4, 5.6, 4.5, False, False, None),
(False, 80, 4, 5.6, 4.5, False, False, None),
# Checking embedding_dim validations
(300, "invalid", 4, 5.6, 4.5, False, False, None),
(300, False, 4, 5.6, 4.5, False, False, None),
# Checking padding_idx validations
(300, 80, "invalid", 5.6, 4.5, False, False, None),
(300, 80, 234.53, 5.6, 4.5, False, False, None),
# Checking max_norm validations
(300, 80, 4, False, 4.5, False, False, None),
(300, 80, 4, 5, 4.5, False, False, None),
(300, 80, 4, "invalid", 4.5, False, False, None),
# Checking norm_type validations
(300, 80, 4, 5.4, False, False, False, None),
(300, 80, 4, 5.4, 4, False, False, None),
(300, 80, 4, 5.4, "test", False, False, None),
# Checking scale_grad_by_freq validations
(300, 80, 4, 5.4, 5.6, 234, False, None),
(300, 80, 4, 5.4, 4.4, "test", False, None),
# Checking sparse validations
(300, 80, 4, 5.4, 4.4, False, 324, None),
(300, 80, 4, 5.4, 4.4, False, "test", None),
# Checking name
(300, 80, 4, 5.4, 4.4, False, False, False),
(300, 80, 4, 5.4, 4.4, False, False, ""),
(300, 80, 4, 5.4, 4.4, False, False, 24),
],
)
def test_embedding_should_throw_value_error(
num_embeddings,
embedding_dim,
padding_idx,
max_norm,
norm_type,
scale_grad_by_freq,
sparse,
name,
):
with pytest.raises(ValueError):
Embedding(
num_embeddings=num_embeddings,
embedding_dim=embedding_dim,
padding_idx=padding_idx,
max_norm=max_norm,
norm_type=norm_type,
scale_grad_by_freq=scale_grad_by_freq,
sparse=sparse,
name=name,
)
# Possible Values
num_embeddingss = [1, 3]
embedding_dims = [2, 1]
padding_idxs = [3, None]
max_norms = [1.0, None]
norm_types = [2.0, 1.0]
scale_grad_by_freqs = [True, False]
sparses = [False, True]
names = ["test", None]
@pytest.mark.parametrize(
"num_embeddings, embedding_dim, padding_idx, \
max_norm, norm_type, scale_grad_by_freq, sparse, name",
[
(
num_embeddings,
embedding_dim,
padding_idx,
max_norm,
norm_type,
scale_grad_by_freq,
sparse,
name,
)
for num_embeddings in num_embeddingss
for embedding_dim in embedding_dims
for padding_idx in padding_idxs
for max_norm in max_norms
for norm_type in norm_types
for scale_grad_by_freq in scale_grad_by_freqs
for sparse in sparses
for name in names
],
)
def test_embedding_get_layer_method(
num_embeddings,
embedding_dim,
padding_idx,
max_norm,
norm_type,
scale_grad_by_freq,
sparse,
name,
):
x = Embedding(
num_embeddings=num_embeddings,
embedding_dim=embedding_dim,
padding_idx=padding_idx,
max_norm=max_norm,
norm_type=norm_type,
scale_grad_by_freq=scale_grad_by_freq,
sparse=sparse,
name=name,
)
prev_input_dim = (6,)
x.set_input_dim(prev_input_dim, "embedding")
details = x.get_layer()
assert isinstance(details, dict) is True
assert details["layer_details"] == (embedding_dim,)
assert details["name"] == name
assert issubclass(details["layer"], _Embedding) is True
assert isinstance(details["keyword_arguments"], dict) is True
assert details["keyword_arguments"]["num_embeddings"] == num_embeddings
assert details["keyword_arguments"]["embedding_dim"] == embedding_dim
assert details["keyword_arguments"]["padding_idx"] == padding_idx
assert details["keyword_arguments"]["max_norm"] == max_norm
assert details["keyword_arguments"]["norm_type"] == norm_type
assert details["keyword_arguments"]["scale_grad_by_freq"] == scale_grad_by_freq
assert details["keyword_arguments"]["sparse"] == sparse
|
1693169
|
import tensorflow as tf
import math
def fully_connected(input_, output_nodes, name, stddev=0.02):
with tf.variable_scope(name):
input_shape = input_.get_shape()
input_nodes = input_shape[-1]
w = tf.get_variable('w', [input_nodes, output_nodes],
initializer=tf.truncated_normal_initializer(stddev=stddev))
biases = tf.get_variable('b', [output_nodes],
initializer=tf.constant_initializer(0.0))
res = tf.matmul(input_, w) + biases
return res
# 1d CONVOLUTION WITH DILATION
def conv1d(input_, output_channels,
dilation = 1, filter_width = 1, causal = False,
name = "dilated_conv"):
with tf.variable_scope(name):
w = tf.get_variable('w', [1, filter_width, input_.get_shape()[-1], output_channels ],
initializer=tf.truncated_normal_initializer(stddev=0.02))
b = tf.get_variable('b', [output_channels ],
initializer=tf.constant_initializer(0.0))
if causal:
padding = [[0, 0], [(filter_width - 1) * dilation, 0], [0, 0]]
padded = tf.pad(input_, padding)
input_expanded = tf.expand_dims(padded, dim = 1)
out = tf.nn.atrous_conv2d(input_expanded, w, rate = dilation, padding = 'VALID') + b
else:
input_expanded = tf.expand_dims(input_, dim = 1)
out = tf.nn.atrous_conv2d(input_expanded, w, rate = dilation, padding = 'SAME') + b
return tf.squeeze(out, [1])
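# With causal=True the input is left-padded along the time axis by
# (filter_width - 1) * dilation, so each output step only sees current and past
# inputs; with causal=False, 'SAME' padding centres the receptive field instead.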
def layer_normalization(x, name, epsilon=1e-8, trainable = True):
with tf.variable_scope(name):
shape = x.get_shape()
beta = tf.get_variable('beta', [ int(shape[-1])],
initializer=tf.constant_initializer(0), trainable=trainable)
gamma = tf.get_variable('gamma', [ int(shape[-1])],
initializer=tf.constant_initializer(1), trainable=trainable)
mean, variance = tf.nn.moments(x, axes=[len(shape) - 1], keep_dims=True)
x = (x - mean) / tf.sqrt(variance + epsilon)
return gamma * x + beta
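# Standard layer normalization over the last axis:
# y = gamma * (x - mean) / sqrt(var + epsilon) + beta, with learnable gamma/beta.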
def byetenet_residual_block(input_, dilation, layer_no,
residual_channels, filter_width,
causal = True, train = True):
block_type = "decoder" if causal else "encoder"
block_name = "bytenet_{}_layer_{}_{}".format(block_type, layer_no, dilation)
with tf.variable_scope(block_name):
input_ln = layer_normalization(input_, name="ln1", trainable = train)
relu1 = tf.nn.relu(input_ln)
conv1 = conv1d(relu1, residual_channels, name = "conv1d_1")
conv1 = layer_normalization(conv1, name="ln2", trainable = train)
relu2 = tf.nn.relu(conv1)
dilated_conv = conv1d(relu2, residual_channels,
dilation, filter_width,
causal = causal,
name = "dilated_conv"
)
print(dilated_conv)
dilated_conv = layer_normalization(dilated_conv, name="ln3", trainable = train)
relu3 = tf.nn.relu(dilated_conv)
conv2 = conv1d(relu3, 2 * residual_channels, name = 'conv1d_2')
return input_ + conv2
def init_weight(dim_in, dim_out, name=None, stddev=1.0):
return tf.Variable(tf.truncated_normal([dim_in, dim_out], stddev=stddev/math.sqrt(float(dim_in))), name=name)
def init_bias(dim_out, name=None):
return tf.Variable(tf.zeros([dim_out]), name=name)
|
1693171
|
import cv2
import os
from hdn.core.config import cfg
'''This script is used to transform the POT dataset from video to image frames.'''
if __name__ == "__main__":
import os, shutil
result_path = cfg.BASE.PROJ_PATH + 'experiments/tracker_homo_config/results/POT/HDN'
for i in os.listdir(result_path):
origin_name = os.path.join(result_path, i)
new_name = origin_name[:-4]+'_HDN'+origin_name[-4:]
print('origin_name', origin_name)
print('new_name', new_name)
os.rename(origin_name, new_name)
|
1693173
|
import re
import pytest
from django.core.exceptions import ImproperlyConfigured
from precise_bbcode.bbcode import BBCodeParserLoader
from precise_bbcode.bbcode import get_parser
from precise_bbcode.bbcode.defaults.placeholder import _color_re
from precise_bbcode.bbcode.defaults.placeholder import _email_re
from precise_bbcode.bbcode.defaults.placeholder import _number_re
from precise_bbcode.bbcode.defaults.placeholder import _simpletext_re
from precise_bbcode.bbcode.defaults.placeholder import _text_re
from precise_bbcode.bbcode.exceptions import InvalidBBCodePlaholder
from precise_bbcode.bbcode.placeholder import BBCodePlaceholder
from precise_bbcode.bbcode.regexes import url_re
from precise_bbcode.bbcode.tag import BBCodeTag
from precise_bbcode.placeholder_pool import PlaceholderAlreadyRegistered
from precise_bbcode.placeholder_pool import PlaceholderNotRegistered
from precise_bbcode.placeholder_pool import placeholder_pool
from precise_bbcode.tag_pool import tag_pool
class SizeTag(BBCodeTag):
name = 'siz'
definition_string = '[siz={RANGE=4,7}]{TEXT}[/siz]'
format_string = '<span style="font-size:{RANGE=4,7}px;">{TEXT}</span>'
class ErroredSizeTag(BBCodeTag):
name = 's2'
definition_string = '[s2={RANGE=a,7}]{TEXT}[/s2]'
format_string = '<span style="font-size:{RANGE=a,7}px;">{TEXT}</span>'
class DayTag(BBCodeTag):
name = 'day'
definition_string = (
'[day]{CHOICE=monday,tuesday,wednesday,thursday,friday,saturday,sunday}[/day]'
)
format_string = '<h5>{CHOICE=monday,tuesday,wednesday,thursday,friday,saturday,sunday}</h5>'
class FooPlaceholder(BBCodePlaceholder):
name = 'foo'
pattern = re.compile(r'^[\d]*$')
class DummyPlaceholder(BBCodePlaceholder):
name = 'dummy'
pattern = re.compile(r'^[\w]*$')
class FooBBCodeTag(BBCodeTag):
name = 'xyz'
definition_string = '[xyz]{FOO}[/xyz]'
format_string = '<span class="foo">{FOO}</span>'
class DummyBBCodeTag(BBCodeTag):
name = 'dummy'
definition_string = '[dummy]{DUMMY}[/dummy]'
format_string = '<span class="dummy">{DUMMY}</span'
@pytest.mark.django_db
class TestPlaceholderPool(object):
TAGS_TESTS = (
('[xyz]hello world![/xyz]', '[xyz]hello world![/xyz]'),
('[xyz]12[/xyz]', '<span class="foo">12</span>'),
)
def setup_method(self, method):
self.parser = get_parser()
def test_should_raise_if_a_placeholder_is_registered_twice(self):
# Setup
number_of_placeholders_before = len(placeholder_pool.get_placeholders())
placeholder_pool.register_placeholder(FooPlaceholder)
# Run & check
# Let's add it a second time. We should catch an exception
with pytest.raises(PlaceholderAlreadyRegistered):
placeholder_pool.register_placeholder(FooPlaceholder)
# Let's make sure we have the same number of tags as before
placeholder_pool.unregister_placeholder(FooPlaceholder)
number_of_placeholders_after = len(placeholder_pool.get_placeholders())
assert number_of_placeholders_before == number_of_placeholders_after
def test_cannot_register_placeholders_with_incorrect_parent_classes(self):
# Setup
number_of_placeholders_before = len(placeholder_pool.get_placeholders())
# Run & check
with pytest.raises(ImproperlyConfigured):
class ErrnoneousPlaceholder:
pass
placeholder_pool.register_placeholder(ErrnoneousPlaceholder)
number_of_placeholders_after = len(placeholder_pool.get_placeholders())
assert number_of_placeholders_before == number_of_placeholders_after
def test_cannot_unregister_a_non_registered_placeholder(self):
# Setup
number_of_placeholders_before = len(placeholder_pool.get_placeholders())
# Run & check
with pytest.raises(PlaceholderNotRegistered):
placeholder_pool.unregister_placeholder(FooPlaceholder)
number_of_placeholders_after = len(placeholder_pool.get_placeholders())
assert number_of_placeholders_before == number_of_placeholders_after
def test_placeholders_can_be_used_with_tags(self):
# Setup
parser_loader = BBCodeParserLoader(parser=self.parser)
placeholder_pool.register_placeholder(FooPlaceholder)
placeholder_pool.register_placeholder(DummyPlaceholder)
tag_pool.register_tag(FooBBCodeTag)
tag_pool.register_tag(DummyBBCodeTag)
parser_loader.init_bbcode_placeholders()
parser_loader.init_bbcode_tags()
# Run & check
for bbcodes_text, expected_html_text in self.TAGS_TESTS:
result = self.parser.render(bbcodes_text)
assert result == expected_html_text
placeholder_pool.unregister_placeholder(FooPlaceholder)
placeholder_pool.unregister_placeholder(DummyPlaceholder)
class TestPlaceholder(object):
DEFAULT_PLACEHOLDERS_RE_TESTS = {
'text': {
're': _text_re,
'tests': (
'hello world',
'hello\nworld',
' hello world ',
'http://asdf.xxxx.yyyy.com/vvvvv/PublicPages/Login.aspx?ReturnUrl=%2fvvvvv%2f'
'(<EMAIL>/qwertybean)',
'12902',
'Lorem ipsum dolor sit amet, consectetur adipiscing elit. Integer pretium, mi ac '
'"molestie ornare, urna sem fermentum erat, malesuada interdum sapien turpis sit '
'amet eros.\nPhasellus quis mi velit. Cras porttitor dui faucibus rhoncus '
'fringilla. Cras non fringilla est. \nCurabitur sollicitudin nisi quis sem '
'sodales, quis blandit massa rhoncus. Nam porta at lacus semper gravida.\n',
'안녕하세요!',
)
},
'simpletext': {
're': _simpletext_re,
'tests': (
'hello world',
'slugify-u-21',
'hello91',
)
},
'url': {
're': url_re,
'tests': (
'http://foo.com/blah_blah',
'(Something like http://foo.com/blah_blah)',
'http://foo.com/blah_blah_(wikipedia)',
'http://foo.com/more_(than)_one_(parens)',
'(Something like http://foo.com/blah_blah_(wikipedia))',
'http://foo.com/blah_(wikipedia)#cite-1',
'http://foo.com/blah_(wikipedia)_blah#cite-1',
'http://foo.com/(something)?after=parens',
'http://foo.com/blah_blah.',
'http://foo.com/blah_blah/.',
'<http://foo.com/blah_blah>',
'<http://foo.com/blah_blah/>',
'http://foo.com/blah_blah,',
'http://www.extinguishedscholar.com/wpglob/?p=364.',
'<tag>http://example.com</tag>',
'Just a www.example.com link.',
'http://example.com/something?with,commas,in,url, but not at end',
'bit.ly/foo',
'http://asdf.xxxx.yyyy.com/vvvvv/PublicPages/Login.aspx?ReturnUrl=%2fvvvvv%2f'
'(<EMAIL>/qwertybean)',
'http://something.xx:8080'
)
},
'email': {
're': _email_re,
'tests': (
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'"<EMAIL>"<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'<EMAIL> ',
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'bob.jones@[1.1.1.1]',
'<EMAIL>',
'<ab@cd.ef>',
'<NAME> <ab@cd.ef>',
'<NAME> <ab@[1.1.1.111]>',
'blah@127.0.0.1',
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'u-s_e.r1@s-ub2.domain-name.museum:8080',
)
},
'color': {
're': _color_re,
'tests': (
'red',
'blue',
'pink',
'#FFFFFF',
'#fff000',
'#FFF',
'#3089a2',
)
},
'number': {
're': _number_re,
'tests': (
'12',
'1289101',
'-121',
'89.12',
'100000000000001',
'10000000000000,1',
'-12,1990000000000000001',
)
}
}
DEFAULT_PLACEHOLDERS_TESTS = (
('[siz=4]hello world![/siz]', '<span style="font-size:4px;">hello world!</span>'),
('[siz=5]hello world![/siz]', '<span style="font-size:5px;">hello world!</span>'),
('[siz=6]hello world![/siz]', '<span style="font-size:6px;">hello world!</span>'),
('[siz=7]hello world![/siz]', '<span style="font-size:7px;">hello world!</span>'),
('[siz=3]hello world![/siz]', '[siz=3]hello world![/siz]'),
('[siz=8]hello world![/siz]', '[siz=8]hello world![/siz]'),
('[siz=test]hello world![/siz]', '[siz=test]hello world![/siz]'),
('[day]tuesday[/day]', '<h5>tuesday</h5>'),
('[day]monday[/day]', '<h5>monday</h5>'),
('[day]sunday[/day]', '<h5>sunday</h5>'),
('[day]sun[/day]', '[day]sun[/day]'),
('[day]test, test[/day]', '[day]test, test[/day]'),
)
ERRORED_DEFAULT_PLACEHOLDERS_TESTS = (
('[s2=4]hello world![/s2]', '[s2=4]hello world![/s2]'),
)
def setup_method(self, method):
self.parser = get_parser()
self.parser.add_bbcode_tag(SizeTag)
self.parser.add_bbcode_tag(ErroredSizeTag)
self.parser.add_bbcode_tag(DayTag)
def test_regexes_provided_by_default_are_valid(self):
# Run & check
for _, re_tests in self.DEFAULT_PLACEHOLDERS_RE_TESTS.items():
for test in re_tests['tests']:
assert re.search(re_tests['re'], test) is not None
def test_provided_by_default_are_valid(self):
for bbcodes_text, expected_html_text in self.DEFAULT_PLACEHOLDERS_TESTS:
result = self.parser.render(bbcodes_text)
assert result == expected_html_text
def test_provided_by_default_cannot_be_rendered_if_they_are_not_used_correctly(self):
for bbcodes_text, expected_html_text in self.ERRORED_DEFAULT_PLACEHOLDERS_TESTS:
result = self.parser.render(bbcodes_text)
assert result == expected_html_text
def test_that_are_invalid_should_raise_at_runtime(self):
# Run & check
with pytest.raises(InvalidBBCodePlaholder):
class InvalidePlaceholder1(BBCodePlaceholder):
pass
with pytest.raises(InvalidBBCodePlaholder):
class InvalidePlaceholder2(BBCodePlaceholder):
delattr(BBCodePlaceholder, 'name')
with pytest.raises(InvalidBBCodePlaholder):
class InvalidePlaceholder3(BBCodePlaceholder):
name = 'bad placeholder name'
with pytest.raises(InvalidBBCodePlaholder):
class InvalidePlaceholder4(BBCodePlaceholder):
name = 'correctname'
pattern = 'incorrect pattern'
|
1693203
|
from __future__ import division
import pickle
import numpy
import time
def run_tti_sim(model, T, max_dt=None,
intervention_start_pct_infected=0, average_introductions_per_day=0,
testing_cadence='everyday', pct_tested_per_day=1.0, test_falseneg_rate='temporal',
testing_compliance_symptomatic=[None], max_pct_tests_for_symptomatics=1.0,
testing_compliance_traced=[None], max_pct_tests_for_traces=1.0,
testing_compliance_random=[None], random_testing_degree_bias=0,
tracing_compliance=[None], num_contacts_to_trace=None, pct_contacts_to_trace=1.0, tracing_lag=1,
isolation_compliance_symptomatic_individual=[None], isolation_compliance_symptomatic_groupmate=[None],
isolation_compliance_positive_individual=[None], isolation_compliance_positive_groupmate=[None],
isolation_compliance_positive_contact=[None], isolation_compliance_positive_contactgroupmate=[None],
isolation_lag_symptomatic=1, isolation_lag_positive=1, isolation_lag_contact=0, isolation_groups=None,
cadence_testing_days=None, cadence_cycle_length=28, temporal_falseneg_rates=None, backlog_skipped_intervals=False
):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Testing cadences involve a repeating 28 day cycle starting on a Monday
# (0:Mon, 1:Tue, 2:Wed, 3:Thu, 4:Fri, 5:Sat, 6:Sun, 7:Mon, 8:Tues, ...)
# For each cadence, testing is done on the day numbers included in the associated list.
if(cadence_testing_days is None):
cadence_testing_days = {
'everyday': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27],
'workday': [0, 1, 2, 3, 4, 7, 8, 9, 10, 11, 14, 15, 16, 17, 18, 21, 22, 23, 24, 25],
'semiweekly': [0, 3, 7, 10, 14, 17, 21, 24],
'weekly': [0, 7, 14, 21],
'biweekly': [0, 14],
'monthly': [0],
'cycle_start': [0]
}
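# Example (hypothetical values): with testing_cadence='weekly' and
# cadence_cycle_length=28, a simulation time t=17.3 falls on cadence day
# int(17.3 % 28) == 17, which is not in [0, 7, 14, 21], so no scheduled
# random/tracing tests run that day (symptomatic self-testing still happens).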
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(temporal_falseneg_rates is None):
temporal_falseneg_rates = {
model.E: {0: 1.00, 1: 1.00, 2: 1.00, 3: 1.00},
model.I_pre: {0: 0.25, 1: 0.25, 2: 0.22},
model.I_sym: {0: 0.19, 1: 0.16, 2: 0.16, 3: 0.17, 4: 0.19, 5: 0.22, 6: 0.26, 7: 0.29, 8: 0.34, 9: 0.38, 10: 0.43, 11: 0.48, 12: 0.52, 13: 0.57, 14: 0.62, 15: 0.66, 16: 0.70, 17: 0.76, 18: 0.79, 19: 0.82, 20: 0.85, 21: 0.88, 22: 0.90, 23: 0.92, 24: 0.93, 25: 0.95, 26: 0.96, 27: 0.97, 28: 0.97, 29: 0.98, 30: 0.98, 31: 0.99},
model.I_asym: {0: 0.19, 1: 0.16, 2: 0.16, 3: 0.17, 4: 0.19, 5: 0.22, 6: 0.26, 7: 0.29, 8: 0.34, 9: 0.38, 10: 0.43, 11: 0.48, 12: 0.52, 13: 0.57, 14: 0.62, 15: 0.66, 16: 0.70, 17: 0.76, 18: 0.79, 19: 0.82, 20: 0.85, 21: 0.88, 22: 0.90, 23: 0.92, 24: 0.93, 25: 0.95, 26: 0.96, 27: 0.97, 28: 0.97, 29: 0.98, 30: 0.98, 31: 0.99},
model.Q_E: {0: 1.00, 1: 1.00, 2: 1.00, 3: 1.00},
model.Q_pre: {0: 0.25, 1: 0.25, 2: 0.22},
model.Q_sym: {0: 0.19, 1: 0.16, 2: 0.16, 3: 0.17, 4: 0.19, 5: 0.22, 6: 0.26, 7: 0.29, 8: 0.34, 9: 0.38, 10: 0.43, 11: 0.48, 12: 0.52, 13: 0.57, 14: 0.62, 15: 0.66, 16: 0.70, 17: 0.76, 18: 0.79, 19: 0.82, 20: 0.85, 21: 0.88, 22: 0.90, 23: 0.92, 24: 0.93, 25: 0.95, 26: 0.96, 27: 0.97, 28: 0.97, 29: 0.98, 30: 0.98, 31: 0.99},
model.Q_asym: {0: 0.19, 1: 0.16, 2: 0.16, 3: 0.17, 4: 0.19, 5: 0.22, 6: 0.26, 7: 0.29, 8: 0.34, 9: 0.38, 10: 0.43, 11: 0.48, 12: 0.52, 13: 0.57, 14: 0.62, 15: 0.66, 16: 0.70, 17: 0.76, 18: 0.79, 19: 0.82, 20: 0.85, 21: 0.88, 22: 0.90, 23: 0.92, 24: 0.93, 25: 0.95, 26: 0.96, 27: 0.97, 28: 0.97, 29: 0.98, 30: 0.98, 31: 0.99},
}
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Custom simulation loop:
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
interventionOn = False
interventionStartTime = None
timeOfLastIntervention = -1
timeOfLastIntroduction = -1
testingDays = cadence_testing_days[testing_cadence]
cadenceDayNumber = 0
tests_per_day = int(model.numNodes * pct_tested_per_day)
max_tracing_tests_per_day = int(tests_per_day * max_pct_tests_for_traces)
max_symptomatic_tests_per_day = int(tests_per_day * max_pct_tests_for_symptomatics)
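# Illustration (hypothetical numbers): numNodes=1000 and pct_tested_per_day=0.1
# give tests_per_day=100; with max_pct_tests_for_traces=0.5 and
# max_pct_tests_for_symptomatics=0.3, at most 50 tests/day go to traced contacts
# and at most 30 to symptomatic self-seekers, with the remainder used for
# random testing.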
tracingPoolQueue = [[] for i in range(tracing_lag)]
isolationQueue_symptomatic = [[] for i in range(isolation_lag_symptomatic)]
isolationQueue_positive = [[] for i in range(isolation_lag_positive)]
isolationQueue_contact = [[] for i in range(isolation_lag_contact)]
model.tmax = T
running = True
while running:
running = model.run_iteration(max_dt=max_dt)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Introduce exogenous exposures randomly:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(int(model.t)!=int(timeOfLastIntroduction)):
timeOfLastIntroduction = model.t
numNewExposures = numpy.random.poisson(lam=average_introductions_per_day)
model.introduce_exposures(num_new_exposures=numNewExposures)
if(numNewExposures > 0):
print("[NEW EXPOSURE @ t = %.2f (%d exposed)]" % (model.t, numNewExposures))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Execute testing policy at designated intervals:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(int(model.t)!=int(timeOfLastIntervention)):
cadenceDayNumbers = [int(model.t % cadence_cycle_length)]
if(backlog_skipped_intervals):
cadenceDayNumbers = [int(i % cadence_cycle_length) for i in numpy.arange(start=timeOfLastIntervention, stop=int(model.t), step=1.0)[1:]] + cadenceDayNumbers
timeOfLastIntervention = model.t
for cadenceDayNumber in cadenceDayNumbers:
currentNumInfected = model.total_num_infected()[model.tidx]
currentPctInfected = model.total_num_infected()[model.tidx]/model.numNodes
if(currentPctInfected >= intervention_start_pct_infected and not interventionOn):
interventionOn = True
interventionStartTime = model.t
if(interventionOn):
print("[INTERVENTIONS @ t = %.2f (%d (%.2f%%) infected)]" % (model.t, currentNumInfected, currentPctInfected*100))
nodeStates = model.X.flatten()
nodeTestedStatuses = model.tested.flatten()
nodeTestedInCurrentStateStatuses = model.testedInCurrentState.flatten()
nodePositiveStatuses = model.positive.flatten()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# tracingPoolQueue[0] = tracingPoolQueue[0]Queue.pop(0)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
newIsolationGroup_symptomatic = []
newIsolationGroup_contact = []
#----------------------------------------
# Isolate SYMPTOMATIC cases without a test:
#----------------------------------------
numSelfIsolated_symptoms = 0
numSelfIsolated_symptomaticGroupmate = 0
if(any(isolation_compliance_symptomatic_individual)):
symptomaticNodes = numpy.argwhere((nodeStates==model.I_sym)).flatten()
for symptomaticNode in symptomaticNodes:
if(isolation_compliance_symptomatic_individual[symptomaticNode]):
if(model.X[symptomaticNode] == model.I_sym):
numSelfIsolated_symptoms += 1
newIsolationGroup_symptomatic.append(symptomaticNode)
#----------------------------------------
# Isolate the GROUPMATES of this SYMPTOMATIC node without a test:
#----------------------------------------
if(isolation_groups is not None and any(isolation_compliance_symptomatic_groupmate)):
isolationGroupmates = next((group for group in isolation_groups if symptomaticNode in group), None)
for isolationGroupmate in isolationGroupmates:
if(isolationGroupmate != symptomaticNode):
if(isolation_compliance_symptomatic_groupmate[isolationGroupmate]):
numSelfIsolated_symptomaticGroupmate += 1
newIsolationGroup_symptomatic.append(isolationGroupmate)
#----------------------------------------
# Isolate the CONTACTS of detected POSITIVE cases without a test:
#----------------------------------------
numSelfIsolated_positiveContact = 0
numSelfIsolated_positiveContactGroupmate = 0
if(any(isolation_compliance_positive_contact) or any(isolation_compliance_positive_contactgroupmate)):
for contactNode in tracingPoolQueue[0]:
if(isolation_compliance_positive_contact[contactNode]):
newIsolationGroup_contact.append(contactNode)
numSelfIsolated_positiveContact += 1
#----------------------------------------
# Isolate the GROUPMATES of this self-isolating CONTACT without a test:
#----------------------------------------
if(isolation_groups is not None and any(isolation_compliance_positive_contactgroupmate)):
isolationGroupmates = next((group for group in isolation_groups if contactNode in group), None)
for isolationGroupmate in isolationGroupmates:
# if(isolationGroupmate != contactNode):
if(isolation_compliance_positive_contactgroupmate[isolationGroupmate]):
newIsolationGroup_contact.append(isolationGroupmate)
numSelfIsolated_positiveContactGroupmate += 1
#----------------------------------------
# Update the nodeStates list after self-isolation updates to model.X:
#----------------------------------------
nodeStates = model.X.flatten()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#----------------------------------------
# Allow SYMPTOMATIC individuals to self-seek tests
# regardless of cadence testing days
#----------------------------------------
symptomaticSelection = []
if(any(testing_compliance_symptomatic)):
symptomaticPool = numpy.argwhere((testing_compliance_symptomatic==True)
& (nodeTestedInCurrentStateStatuses==False)
& (nodePositiveStatuses==False)
& ((nodeStates==model.I_sym)|(nodeStates==model.Q_sym))
).flatten()
numSymptomaticTests = min(len(symptomaticPool), max_symptomatic_tests_per_day)
if(len(symptomaticPool) > 0):
symptomaticSelection = symptomaticPool[numpy.random.choice(len(symptomaticPool), min(numSymptomaticTests, len(symptomaticPool)), replace=False)]
#----------------------------------------
# Test individuals randomly and via contact tracing
# on cadence testing days:
#----------------------------------------
tracingSelection = []
randomSelection = []
if(cadenceDayNumber in testingDays):
#----------------------------------------
# Apply a designated portion of this day's tests
# to individuals identified by CONTACT TRACING:
#----------------------------------------
tracingPool = tracingPoolQueue.pop(0)
if(any(testing_compliance_traced)):
numTracingTests = min(len(tracingPool), min(tests_per_day-len(symptomaticSelection), max_tracing_tests_per_day))
for trace in range(numTracingTests):
traceNode = tracingPool.pop()
if((nodePositiveStatuses[traceNode]==False)
and (testing_compliance_traced[traceNode]==True)
and (model.X[traceNode] != model.R)
and (model.X[traceNode] != model.Q_R)
and (model.X[traceNode] != model.H)
and (model.X[traceNode] != model.F)):
tracingSelection.append(traceNode)
#----------------------------------------
# Apply the remainder of this day's tests to random testing:
#----------------------------------------
if(any(testing_compliance_random)):
testingPool = numpy.argwhere((testing_compliance_random==True)
& (nodePositiveStatuses==False)
& (nodeStates != model.R)
& (nodeStates != model.Q_R)
& (nodeStates != model.H)
& (nodeStates != model.F)
).flatten()
numRandomTests = max(min(tests_per_day-len(tracingSelection)-len(symptomaticSelection), len(testingPool)), 0)
testingPool_degrees = model.degree.flatten()[testingPool]
testingPool_degreeWeights = numpy.power(testingPool_degrees,random_testing_degree_bias)/numpy.sum(numpy.power(testingPool_degrees,random_testing_degree_bias))
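# random_testing_degree_bias=0 makes every eligible node equally likely
# (degree**0 == 1 for all); positive values weight selection toward
# higher-degree nodes in proportion to degree**bias.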
if(len(testingPool) > 0):
randomSelection = testingPool[numpy.random.choice(len(testingPool), numRandomTests, p=testingPool_degreeWeights, replace=False)]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#----------------------------------------
# Perform the tests on the selected individuals:
#----------------------------------------
selectedToTest = numpy.concatenate((symptomaticSelection, tracingSelection, randomSelection)).astype(int)
numTested = 0
numTested_random = 0
numTested_tracing = 0
numTested_symptomatic = 0
numPositive = 0
numPositive_random = 0
numPositive_tracing = 0
numPositive_symptomatic = 0
numIsolated_positiveGroupmate = 0
newTracingPool = []
newIsolationGroup_positive = []
for i, testNode in enumerate(selectedToTest):
model.set_tested(testNode, True)
numTested += 1
if(i < len(symptomaticSelection)):
numTested_symptomatic += 1
elif(i < len(symptomaticSelection)+len(tracingSelection)):
numTested_tracing += 1
else:
numTested_random += 1
# If the node to be tested is not infected, then the test is guaranteed negative,
# so don't bother going through with doing the test:
if(model.X[testNode] == model.S or model.X[testNode] == model.Q_S):
pass
# Also assume that latent infections are not picked up by tests:
elif(model.X[testNode] == model.E or model.X[testNode] == model.Q_E):
pass
elif(model.X[testNode] == model.I_pre or model.X[testNode] == model.Q_pre
or model.X[testNode] == model.I_sym or model.X[testNode] == model.Q_sym
or model.X[testNode] == model.I_asym or model.X[testNode] == model.Q_asym):
if(test_falseneg_rate == 'temporal'):
testNodeState = model.X[testNode][0]
testNodeTimeInState = model.timer_state[testNode][0]
if(testNodeState in list(temporal_falseneg_rates.keys())):
falseneg_prob = temporal_falseneg_rates[testNodeState][ int(min(testNodeTimeInState, max(list(temporal_falseneg_rates[testNodeState].keys())))) ]
else:
falseneg_prob = 1.00
else:
falseneg_prob = test_falseneg_rate
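# In 'temporal' mode the false-negative probability is looked up by the node's
# current compartment and the (integer) number of days spent in it, clamped to
# the last day defined in temporal_falseneg_rates; any other value of
# test_falseneg_rate is treated as a constant probability.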
if(numpy.random.rand() < (1-falseneg_prob)):
# +++++++++++++++++++++++++++++++++++++++++++++
# The tested node has returned a positive test
# +++++++++++++++++++++++++++++++++++++++++++++
numPositive += 1
if(i < len(symptomaticSelection)):
numPositive_symptomatic += 1
elif(i < len(symptomaticSelection)+len(tracingSelection)):
numPositive_tracing += 1
else:
numPositive_random += 1
# Update the node's state to the appropriate detected case state:
model.set_positive(testNode, True)
#----------------------------------------
# Add this positive node to the isolation group:
#----------------------------------------
if(isolation_compliance_positive_individual[testNode]):
newIsolationGroup_positive.append(testNode)
#----------------------------------------
# Add the groupmates of this positive node to the isolation group:
#----------------------------------------
if(isolation_groups is not None and any(isolation_compliance_positive_groupmate)):
isolationGroupmates = next((group for group in isolation_groups if testNode in group), None)
for isolationGroupmate in isolationGroupmates:
if(isolationGroupmate != testNode):
if(isolation_compliance_positive_groupmate[isolationGroupmate]):
numIsolated_positiveGroupmate += 1
newIsolationGroup_positive.append(isolationGroupmate)
#----------------------------------------
# Add this node's neighbors to the contact tracing pool:
#----------------------------------------
if(any(tracing_compliance) or any(isolation_compliance_positive_contact) or any(isolation_compliance_positive_contactgroupmate)):
if(tracing_compliance[testNode]):
testNodeContacts = list(model.G[testNode].keys())
numpy.random.shuffle(testNodeContacts)
if(num_contacts_to_trace is None):
numContactsToTrace = int(pct_contacts_to_trace*len(testNodeContacts))
else:
numContactsToTrace = num_contacts_to_trace
newTracingPool.extend(testNodeContacts[0:numContactsToTrace])
# Add the nodes to be isolated to the isolation queue:
isolationQueue_positive.append(newIsolationGroup_positive)
isolationQueue_symptomatic.append(newIsolationGroup_symptomatic)
isolationQueue_contact.append(newIsolationGroup_contact)
# Add the nodes to be traced to the tracing queue:
tracingPoolQueue.append(newTracingPool)
print("\t"+str(numTested_symptomatic) +"\ttested due to symptoms [+ "+str(numPositive_symptomatic)+" positive (%.2f %%) +]" % (numPositive_symptomatic/numTested_symptomatic*100 if numTested_symptomatic>0 else 0))
print("\t"+str(numTested_tracing) +"\ttested as traces [+ "+str(numPositive_tracing)+" positive (%.2f %%) +]" % (numPositive_tracing/numTested_tracing*100 if numTested_tracing>0 else 0))
print("\t"+str(numTested_random) +"\ttested randomly [+ "+str(numPositive_random)+" positive (%.2f %%) +]" % (numPositive_random/numTested_random*100 if numTested_random>0 else 0))
print("\t"+str(numTested) +"\ttested TOTAL [+ "+str(numPositive)+" positive (%.2f %%) +]" % (numPositive/numTested*100 if numTested>0 else 0))
print("\t"+str(numSelfIsolated_symptoms) +" will isolate due to symptoms ("+str(numSelfIsolated_symptomaticGroupmate)+" as groupmates of symptomatic)")
print("\t"+str(numPositive) +" will isolate due to positive test ("+str(numIsolated_positiveGroupmate)+" as groupmates of positive)")
print("\t"+str(numSelfIsolated_positiveContact) +" will isolate due to positive contact ("+str(numSelfIsolated_positiveContactGroupmate)+" as groupmates of contact)")
#----------------------------------------
# Update the status of nodes who are to be isolated:
#----------------------------------------
numIsolated = 0
isolationGroup_symptomatic = isolationQueue_symptomatic.pop(0)
for isolationNode in isolationGroup_symptomatic:
model.set_isolation(isolationNode, True)
numIsolated += 1
isolationGroup_contact = isolationQueue_contact.pop(0)
for isolationNode in isolationGroup_contact:
model.set_isolation(isolationNode, True)
numIsolated += 1
isolationGroup_positive = isolationQueue_positive.pop(0)
for isolationNode in isolationGroup_positive:
model.set_isolation(isolationNode, True)
numIsolated += 1
print("\t"+str(numIsolated)+" entered isolation")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
interventionInterval = (interventionStartTime, model.t)
return interventionInterval
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
1693230
|
import requests
import json
import time
import os
import sys
from contextlib import contextmanager
from subprocess import Popen, check_output, CalledProcessError
import shlex
class Droplet:
def __init__(self, session, id, i_know_what_im_doing=False):
if not i_know_what_im_doing:
raise Exception(
"You shouldn't be calling this constructor directly")
self.session = session
self.valid = True
self._id = id['id']
self._process = id['process_id']
def _new(self, *args, **kwargs):
return type(self)(
self.session, *args, i_know_what_im_doing=True, **kwargs)
def _use(self):
if not self.valid:
raise DropletConsumed('{} already used!'.format(self))
self.valid = False
return self._mk_id()
def _mk_id(self):
return {'id': self._id, 'process_id': self._process}
def _renew(self, new_id):
assert not self.valid
assert self.session.pid == new_id['process_id']
self.valid = True
self._id = new_id['id']
def move(self, loc):
result_id = self.session._rpc("move", self.session.pid, self._use(),
to_location(loc))
self._renew(result_id)
def mix(self, other):
assert isinstance(other, type(self))
result_id = self.session._rpc("mix", self.session.pid, self._use(),
other._use())
return self._new(result_id)
def combine_into(self, other):
assert isinstance(other, type(self))
result_id = self.session._rpc("combine_into", self.session.pid,
self._use(), other._use())
return self._new(result_id)
def split(self):
id1, id2 = self.session._rpc("split", self.session.pid, self._use())
return (self._new(id1), self._new(id2))
def output(self, substance):
self.session._rpc("output", self.session.pid, substance, self._use())
def volume(self):
droplets = self.session.droplets()
return droplets[self._id]['volume']
def to_location(loc):
return {'y': loc[0], 'x': loc[1]}
class RPCError(Exception):
pass
class RequestError(Exception):
pass
class SessionError(Exception):
pass
class DropletConsumed(Exception):
pass
class Session:
json_headers = {'content-type': 'application/json'}
def __init__(self, endpoint, name):
self.endpoint = endpoint
self.next_id = 0
status_check = endpoint + '/status'
max_attempts = 10
for attempt in range(max_attempts):
try:
resp = requests.get(status_check)
if resp.status_code == requests.codes.ok:
break
except Exception as exn:
msg = 'Attempt {}: could not connect to {}'.format(
attempt + 1, status_check)
if attempt == max_attempts - 1:
raise RPCError(msg) from exn
print(msg)
time.sleep(0.5)
if resp.status_code != 200:
raise RPCError(
'Something is wrong with {}: got status code {}'.format(
status_check, resp.status_code))
self.pid = self._rpc('new_process', name)
def _rpc(self, method, *args, **kwargs):
if args and kwargs:
raise RPCError('Cannot have both args and kwargs')
request_id = self.next_id
self.next_id += 1
data = {
"jsonrpc": "2.0",
"id": request_id,
"method": method,
"params": args or kwargs,
}
try:
response = requests.post(
self.endpoint + '/rpc',
headers=Session.json_headers,
data=json.dumps(data),
)
except requests.RequestException as exn:
raise RequestError(
"Error calling method {}".format(method)) from exn
if response.status_code != requests.codes.ok:
raise RequestError("Response {} from server was not OK\n{}".format(
response.status_code, response.text))
resp_json = response.json()
assert resp_json['id'] == request_id
if 'result' in resp_json:
return resp_json['result']
else:
raise SessionError(resp_json['error'])
def prelude(self, starting_dict=None):
if starting_dict is None:
starting_dict = {}
else:
starting_dict = dict(starting_dict)
starting_dict['mix'] = self.mix
starting_dict['split'] = self.split
starting_dict['create'] = self.create
starting_dict['droplets'] = self.droplets
starting_dict['_flush'] = self._flush
return starting_dict
def droplets(self):
dlist = self._rpc("droplet_info", self.pid)
return {d['id']['id']: d for d in dlist}
def _flush(self):
self._rpc("flush", self.pid)
def close(self):
self._rpc("close_process", self.pid)
def create(self, location, volume=1.0, dimensions=(1, 1), **kwargs):
droplet_class = kwargs.pop('droplet_class', Droplet)
result_id = self._rpc("create", self.pid,
to_location(location) if location else None,
volume,
to_location(dimensions) if dimensions else None)
return droplet_class(
self, result_id, **kwargs, i_know_what_im_doing=True)
def input(self, substance, volume, dimensions, **kwargs):
result_id = self._rpc("input", self.pid, substance, volume, dimensions)
return Droplet(self, result_id, **kwargs, i_know_what_im_doing=True)
def heat(self, droplet, temp, seconds, **kwargs):
result_id = self._rpc("heat", self.pid, droplet._use(), temp, seconds)
return Droplet(self, result_id, **kwargs, i_know_what_im_doing=True)
# just call the droplet methods
def move(self, droplet, *args, **kwargs):
return droplet.move(*args, **kwargs)
def mix(self, droplet, *args, **kwargs):
return droplet.mix(*args, **kwargs)
def combine_into(self, droplet, *args, **kwargs):
return droplet.combine_into(*args, **kwargs)
def split(self, droplet, *args, **kwargs):
return droplet.split(*args, **kwargs)
def output(self, substance, droplet, *args, **kwargs):
return droplet.output(substance, *args, **kwargs)
def call(cmd):
args = shlex.split(cmd)
output = check_output(args)
return output.decode('utf8').strip()
def project_path(path):
root = call('git rev-parse --show-toplevel')
return root + '/' + path
@contextmanager
def mk_session(
arch_file,
host='127.0.0.1',
port='3000',
profile='',
):
# make sure there aren't any puddle servers running now
try:
call('killall puddle-server')
except CalledProcessError:
pass
# this won't build the server, so make sure it's there
default_command = 'cargo run {profile} --bin puddle-server -- '
command = os.environ.get('PUDDLE_SERVER', default_command)
# build the server command and run it
flags = ' --static {static_dir} --address {host}:{port} --grid {arch_file}'
cmd = (command + flags).format(
cargo_toml=project_path('/src/core/Cargo.toml'),
profile=profile,
arch_file=arch_file,
static_dir=project_path('/src/web'),
host=host,
port=port,
)
print(cmd)
log_file = open('puddle.log', 'a')
popen = Popen(args=shlex.split(cmd), stdout=log_file, stderr=sys.stderr)
session = Session('http://{}:{}'.format(host, port), 'test')
yield session
# session._flush()
session.close()
popen.terminate()
popen.wait()
log_file.close()
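def _example_mix_two_droplets(arch_file):
    # Hedged usage sketch (not part of the original module): spins up a puddle
    # server for the given architecture file via mk_session, creates two
    # droplets, mixes them, and prints the resulting volume. The locations and
    # volumes below are illustrative assumptions, as is the idea that the
    # caller supplies a valid arch_file path.
    with mk_session(arch_file) as session:
        a = session.create((1, 1), volume=1.0)
        b = session.create((3, 3), volume=1.0)
        ab = a.mix(b)
        print('mixed droplet volume:', ab.volume())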
|
1693334
|
class Solution:
def longestPalindrome(self, s):
"""
:type s: str
:rtype: int
"""
from collections import Counter
out=even=sum(v for k,v in Counter(s).items() if v%2==0)
odd_big=[v for k,v in Counter(s).items() if v%2!=0 and v>1]
odd_small=[v for k,v in Counter(s).items() if v==1]
if len(odd_big)==1: out+=odd_big[0]
else:
out+=sum(odd_big)-len(odd_big)+1
if len(odd_small)==0 and len(odd_big)==0: out-=1
return out
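def _example_longest_palindrome():
    # Hedged sanity check of the counting logic above (not from the original
    # source): "abccccdd" supports a palindrome of length 7 (e.g. "dccaccd"),
    # and a single character yields 1.
    solution = Solution()
    assert solution.longestPalindrome("abccccdd") == 7
    assert solution.longestPalindrome("a") == 1
    return solution.longestPalindrome("abccccdd")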
|
1693336
|
import time
import mock
from datetime import datetime, timedelta
from chalice import logs
from chalice.awsclient import TypedAWSClient
from six import StringIO
NO_OPTIONS = logs.LogRetrieveOptions()
def message(log_message, log_stream_name='logStreamName'):
return {
'logStreamName': log_stream_name,
'message': log_message,
}
def test_can_convert_since_to_start_time():
options = logs.LogRetrieveOptions.create(
follow=True, since='2020-01-01T00:00:00',
include_lambda_messages=False)
assert options.max_entries is None
assert options.start_time == datetime(2020, 1, 1, 0, 0, 0)
assert not options.include_lambda_messages
def test_can_retrieve_all_logs():
client = mock.Mock(spec=TypedAWSClient)
log_message = message('first')
client.iter_log_events.return_value = [log_message]
retriever = logs.LogRetriever(client, 'loggroup')
messages = list(retriever.retrieve_logs(NO_OPTIONS))
expected = log_message.copy()
# We also inject a logShortId.
expected['logShortId'] = 'logStreamName'
assert messages == [expected]
def test_can_support_max_entries():
client = mock.Mock(spec=TypedAWSClient)
client.iter_log_events.return_value = [message('first'), message('second')]
retriever = logs.LogRetriever(client, 'loggroup')
messages = list(
retriever.retrieve_logs(logs.LogRetrieveOptions(max_entries=1)))
assert len(messages) == 1
assert messages[0]['message'] == 'first'
def test_can_exclude_lambda_messages():
client = mock.Mock(spec=TypedAWSClient)
client.iter_log_events.return_value = [
message('START RequestId: id Version: $LATEST'),
message('END RequestId: id'),
message('REPORT RequestId: id Duration: 0.42 ms '
'Billed Duration: 100 ms '
'Memory Size: 128 MB Max Memory Used: 19 MB'),
message('Not a lambda message'),
]
retriever = logs.LogRetriever(client, 'loggroup')
messages = list(retriever.retrieve_logs(
logs.LogRetrieveOptions(include_lambda_messages=False)))
assert len(messages) == 1
assert messages[0]['message'] == 'Not a lambda message'
def test_can_parse_short_id():
log_message = message(
'Log Message',
'2017/04/28/[$LATEST]fc219a0d613b40e9b5c58e6b8fd2320c'
)
client = mock.Mock(spec=TypedAWSClient)
client.iter_log_events.return_value = [log_message]
retriever = logs.LogRetriever(client, 'loggroup')
messages = list(retriever.retrieve_logs(
logs.LogRetrieveOptions(include_lambda_messages=False)))
assert len(messages) == 1
assert messages[0]['logShortId'] == 'fc219a'
def test_can_create_from_arn():
retriever = logs.LogRetriever.create_from_lambda_arn(
mock.sentinel.client,
'arn:aws:lambda:us-east-1:123:function:my-function'
)
assert isinstance(retriever, logs.LogRetriever)
def test_can_display_logs():
retriever = mock.Mock(spec=logs.LogRetriever)
retriever.retrieve_logs.return_value = [
{'timestamp': 'NOW', 'logShortId': 'shortId', 'message': 'One'},
{'timestamp': 'NOW', 'logShortId': 'shortId', 'message': 'Two'},
{'timestamp': 'NOW', 'logShortId': 'shortId', 'message': 'Three'},
]
stream = StringIO()
logs.display_logs(retriever, retrieve_options=NO_OPTIONS, stream=stream)
assert stream.getvalue().splitlines() == [
'NOW shortId One',
'NOW shortId Two',
'NOW shortId Three',
]
def test_can_iterate_through_all_log_events():
client = mock.Mock(spec=TypedAWSClient)
client.iter_log_events.return_value = [
{'timestamp': 'NOW', 'logShortId': 'shortId', 'message': 'One'},
{'timestamp': 'NOW', 'logShortId': 'shortId', 'message': 'Two'},
{'timestamp': 'NOW', 'logShortId': 'shortId', 'message': 'Three'},
]
event_gen = logs.LogEventGenerator(client)
assert list(event_gen.iter_log_events(
log_group_name='mygroup', options=NO_OPTIONS)) == [
{'timestamp': 'NOW', 'logShortId': 'shortId', 'message': 'One'},
{'timestamp': 'NOW', 'logShortId': 'shortId', 'message': 'Two'},
{'timestamp': 'NOW', 'logShortId': 'shortId', 'message': 'Three'},
]
def test_can_follow_log_events():
sleep = mock.Mock(spec=time.sleep)
client = mock.Mock(spec=TypedAWSClient)
client.filter_log_events.side_effect = [
# First page of results has nextToken indicating there's
# more results.
{'events': [{'eventId': '1', 'timestamp': 1},
{'eventId': '2', 'timestamp': 2},
{'eventId': '3', 'timestamp': 3}],
'nextToken': 'nextToken1'},
# Second page with no more results, also note the
# timestamps are out of order.
{'events': [{'eventId': '4', 'timestamp': 4},
{'eventId': '6', 'timestamp': 6},
{'eventId': '5', 'timestamp': 5}]},
# We then poll again with no new results for timestamp=6.
{'events': [{'eventId': '6', 'timestamp': 6}]},
# And now we get new results.
{'events': [{'eventId': '6', 'timestamp': 6},
# Same timestamp we're querying (6) but a new event.
{'eventId': '6NEW', 'timestamp': 6},
{'eventId': '7', 'timestamp': 7},
{'eventId': '8', 'timestamp': 8}]},
KeyboardInterrupt(),
]
event_gen = logs.FollowLogEventGenerator(client, sleep)
options = logs.LogRetrieveOptions(start_time=1)
assert list(event_gen.iter_log_events(
log_group_name='mygroup', options=options)) == [
{'eventId': '1', 'timestamp': 1},
{'eventId': '2', 'timestamp': 2},
{'eventId': '3', 'timestamp': 3},
{'eventId': '4', 'timestamp': 4},
# Note we don't try to sort these entries.
{'eventId': '6', 'timestamp': 6},
{'eventId': '5', 'timestamp': 5},
{'eventId': '6NEW', 'timestamp': 6},
{'eventId': '7', 'timestamp': 7},
{'eventId': '8', 'timestamp': 8},
]
assert client.filter_log_events.call_args_list == [
mock.call(log_group_name='mygroup', start_time=1),
mock.call(log_group_name='mygroup', start_time=1,
next_token='nextToken1'),
mock.call(log_group_name='mygroup', start_time=6),
mock.call(log_group_name='mygroup', start_time=6),
mock.call(log_group_name='mygroup', start_time=8),
]
def test_follow_logs_initially_empty():
sleep = mock.Mock(spec=time.sleep)
client = mock.Mock(spec=TypedAWSClient)
client.filter_log_events.side_effect = [
{'events': []},
{'events': []},
{'events': [{'eventId': '1', 'timestamp': 1},
{'eventId': '2', 'timestamp': 2},
{'eventId': '3', 'timestamp': 3}]},
KeyboardInterrupt(),
]
event_gen = logs.FollowLogEventGenerator(client, sleep)
assert list(event_gen.iter_log_events(
log_group_name='mygroup', options=NO_OPTIONS)) == [
{'eventId': '1', 'timestamp': 1},
{'eventId': '2', 'timestamp': 2},
{'eventId': '3', 'timestamp': 3},
]
def test_follow_logs_single_pages_only():
sleep = mock.Mock(spec=time.sleep)
client = mock.Mock(spec=TypedAWSClient)
client.filter_log_events.side_effect = [
{'events': [{'eventId': '1', 'timestamp': 1}]},
{'events': [{'eventId': '2', 'timestamp': 2}]},
{'events': [{'eventId': '3', 'timestamp': 3}]},
KeyboardInterrupt(),
]
event_gen = logs.FollowLogEventGenerator(client, sleep)
assert list(event_gen.iter_log_events(
log_group_name='mygroup', options=NO_OPTIONS)) == [
{'eventId': '1', 'timestamp': 1},
{'eventId': '2', 'timestamp': 2},
{'eventId': '3', 'timestamp': 3},
]
def test_follow_logs_last_page_empty():
sleep = mock.Mock(spec=time.sleep)
client = mock.Mock(spec=TypedAWSClient)
client.filter_log_events.side_effect = [
{'events': [{'eventId': '1', 'timestamp': 1},
{'eventId': '2', 'timestamp': 2},
{'eventId': '3', 'timestamp': 3}],
'nextToken': 'nextToken1'},
{'events': [{'eventId': '4', 'timestamp': 4},
{'eventId': '6', 'timestamp': 6},
{'eventId': '5', 'timestamp': 5}],
'nextToken': 'nextToken2'},
# You can sometimes get a next token but with no events.
{'events': [], 'nextToken': 'nextToken3'},
{'events': []},
{'events': [{'eventId': '7', 'timestamp': 7}]},
KeyboardInterrupt(),
]
event_gen = logs.FollowLogEventGenerator(client, sleep)
options = logs.LogRetrieveOptions(start_time=1)
assert list(event_gen.iter_log_events(
log_group_name='mygroup', options=options)) == [
{'eventId': '1', 'timestamp': 1},
{'eventId': '2', 'timestamp': 2},
{'eventId': '3', 'timestamp': 3},
{'eventId': '4', 'timestamp': 4},
{'eventId': '6', 'timestamp': 6},
{'eventId': '5', 'timestamp': 5},
{'eventId': '7', 'timestamp': 7},
]
assert client.filter_log_events.call_args_list == [
mock.call(log_group_name='mygroup', start_time=1),
mock.call(log_group_name='mygroup', start_time=1,
next_token='nextToken1'),
mock.call(log_group_name='mygroup', start_time=1,
next_token='nextToken2'),
mock.call(log_group_name='mygroup', start_time=1,
next_token='nextToken3'),
mock.call(log_group_name='mygroup', start_time=6),
mock.call(log_group_name='mygroup', start_time=7),
]
def test_follow_logs_all_pages_empty_with_pagination():
sleep = mock.Mock(spec=time.sleep)
client = mock.Mock(spec=TypedAWSClient)
client.filter_log_events.side_effect = [
{'events': [], 'nextToken': 'nextToken1'},
{'events': [], 'nextToken': 'nextToken2'},
{'events': [], 'nextToken': 'nextToken3'},
{'events': []},
KeyboardInterrupt(),
]
event_gen = logs.FollowLogEventGenerator(client, sleep)
options = logs.LogRetrieveOptions(start_time=1)
assert list(event_gen.iter_log_events(
log_group_name='mygroup', options=options)) == []
assert client.filter_log_events.call_args_list == [
mock.call(log_group_name='mygroup', start_time=1),
mock.call(log_group_name='mygroup', start_time=1,
next_token='nextToken1'),
mock.call(log_group_name='mygroup', start_time=1,
                  next_token='nextToken2'),
mock.call(log_group_name='mygroup', start_time=1,
                  next_token='nextToken3'),
# The last call should not use a next token.
mock.call(log_group_name='mygroup', start_time=1)
]
def test_follow_logs_defaults_to_ten_minutes():
# To avoid having to patch out/pass in utcnow(), we'll just make sure
# that the start_time used is more recent than 10 minutes from now.
# This is a safe assumption because we're saving the current time before
# we invoke iter_log_events().
ten_minutes = datetime.utcnow() - timedelta(minutes=10)
options = logs.LogRetrieveOptions.create(follow=True)
assert options.start_time >= ten_minutes
def test_dont_default_if_explicit_since_is_provided():
utcnow = datetime.utcnow()
options = logs.LogRetrieveOptions.create(follow=True, since=str(utcnow))
assert options.start_time == utcnow
|
1693357
|
import warnings
import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams
import treeano
import treeano.nodes as tn
fX = theano.config.floatX
@treeano.register_node("normal_sample")
class NormalSampleNode(treeano.NodeImpl):
input_keys = ("mu", "sigma")
hyperparameter_names = ("deterministic",)
def compute_output(self, network, mu_vw, sigma_vw):
deterministic = network.find_hyperparameter(["deterministic"], False)
if deterministic:
res = mu_vw.variable
else:
# TODO look at shape of both mu and sigma
shape = mu_vw.shape
if any(s is None for s in shape):
# NOTE: this uses symbolic shape - can be an issue with
# theano.clone and random numbers
# https://groups.google.com/forum/#!topic/theano-users/P7Mv7Fg0kUs
warnings.warn("using symbolic shape for random number shape, "
"which can be an issue with theano.clone")
shape = mu_vw.variable.shape
# TODO save this state so that we can seed the rng
srng = MRG_RandomStreams()
res = srng.normal(shape,
avg=mu_vw.variable,
std=sigma_vw.variable,
dtype=fX)
network.create_vw(
"default",
variable=theano.gradient.disconnected_grad(res),
shape=mu_vw.shape,
tags={"output"},
)
@treeano.register_node("normal_REINFORCE")
class NormalREINFORCECostNode(treeano.NodeImpl):
"""
cost node to implement REINFORCE algorithm
include_baseline: whether or not to include a baseline network
backprop_baseline: whether or not to backprop the baseline update to
the rest of the network
"""
hyperparameter_names = ("include_baseline",
"backprop_baseline")
input_keys = ("state", "mu", "sigma", "reward", "sampled")
def compute_output(self,
network,
state_vw,
mu_vw,
sigma_vw,
reward_vw,
sampled_vw):
# want state to have dim (batch size x size of state)
assert state_vw.ndim == 2
# want mu to have dim (batch size x number of actions)
assert mu_vw.ndim == 2
state = state_vw.variable
mu = mu_vw.variable
sigma = sigma_vw.variable
reward = reward_vw.variable
sampled = sampled_vw.variable
# create reward baseline
bias = network.create_vw(
name="bias",
is_shared=True,
shape=(),
tags={"parameter", "bias"},
default_inits=[],
).variable
weight = network.create_vw(
name="weight",
is_shared=True,
shape=(state_vw.shape[1],),
tags={"parameter", "weight"},
default_inits=[],
).variable
if not network.find_hyperparameter(["backprop_baseline"], False):
state = theano.gradient.disconnected_grad(state)
baseline = ((weight.dimshuffle("x", 0) * state).sum(axis=1)
+ bias)
if not network.find_hyperparameter(["include_baseline"], True):
# to try REINFORCE without the baseline network
baseline = baseline * 0
# TODO monitor baseline
constant_baseline = theano.gradient.disconnected_grad(baseline)
# 1 / (sigma * sqrt(2 * pi)) * exp(-1/2 * ((t - mu) / sigma)^2)
normal_pdf = (1 / (sigma * treeano.utils.as_fX(np.sqrt(2 * np.pi)))
* T.exp(-0.5 * T.sqr((sampled - mu) / sigma)))
log_normal_pdf = T.log(normal_pdf)
R = reward - constant_baseline
# take sum of log pdf
reinforce_cost = -(R * log_normal_pdf.sum(axis=1)).sum()
# TODO add parameter as weight for baseline
baseline_cost = T.sum((reward - baseline) ** 2)
network.create_vw(
name="default",
# variable=reinforce_cost,
variable=reinforce_cost + baseline_cost,
shape=(),
tags={"output", "monitor"},
)
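def _example_normal_pdf_check():
    # Hedged numeric sanity check (not part of the original module): the
    # hand-written normal pdf used in the REINFORCE cost above should agree
    # with scipy.stats.norm.pdf. scipy is an extra assumption here; it is not
    # imported elsewhere in this module.
    from scipy.stats import norm
    mu, sigma = 0.5, 2.0
    x = np.array([-1.0, 0.0, 1.5])
    hand_pdf = 1.0 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-0.5 * ((x - mu) / sigma) ** 2)
    assert np.allclose(hand_pdf, norm.pdf(x, loc=mu, scale=sigma))
    return np.log(hand_pdf)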
|
1693383
|
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
    def print_name(self):
        print("Hello my name is " + self.name)
class Dog:
def __init__(self, name):
self.name = name
self.tricks = []
def add_trick(self, trick):
self.tricks.append(trick)
class Mapping:
def __init__(self, iterable):
self.items_list = []
self.__update(iterable)
def update(self, iterable):
for item in iterable:
self.items_list.append(item)
__update = update
class MappingSubclass(Mapping):
def update(self, keys, values):
for item in zip(keys, values):
self.items_list.append(item)
def check():
p = Person("John", 36)
# p.print_name()
d = Dog('Fido')
d.add_trick('roll over')
# print(d.tricks)
m = Mapping('x')
m.update('y')
# print(m.items_list)
check()
|
1693385
|
from ..utils import TranspileTestCase
class SetComprehensionTests(TranspileTestCase):
def test_syntax(self):
self.assertCodeExecution("""
x = [1, 2, 3, 4, 5]
s = {v**2 for v in x}
print(len(s))
print(1 in s)
print(4 in s)
print(9 in s)
print(16 in s)
print(25 in s)
""")
def test_method(self):
self.assertCodeExecution("""
x = [1, 2, 3, 4, 5]
s = set(v**2 for v in x)
print(len(s))
print(1 in s)
print(4 in s)
print(9 in s)
print(16 in s)
print(25 in s)
""")
|
1693416
|
import hugectr
from mpi4py import MPI
solver = hugectr.CreateSolver(max_eval_batches = 70,
batchsize_eval = 65536,
batchsize = 65536,
lr = 0.5,
warmup_steps = 300,
vvgpu = [[0,1,2,3,4,5,6,7]],
repeat_dataset = True)
reader = hugectr.DataReaderParams(data_reader_type = hugectr.DataReaderType_t.Raw,
source = ["./train_data.bin"],
eval_source = "./test_data.bin",
num_samples = 36672493,
eval_num_samples = 4584062,
check_type = hugectr.Check_t.Non)
optimizer = hugectr.CreateOptimizer(optimizer_type = hugectr.Optimizer_t.SGD,
update_type = hugectr.Update_t.Local,
atomic_update = True)
model = hugectr.Model(solver, reader, optimizer)
model.add(hugectr.Input(label_dim = 1, label_name = "label",
dense_dim = 13, dense_name = "dense",
data_reader_sparse_param_array =
[hugectr.DataReaderSparseParam("data1", 2, False, 26)]))
model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.LocalizedSlotSparseEmbeddingOneHot,
slot_size_array = [1459, 583, 6373320, 1977439, 305, 24, 12513, 633, 3, 92719, 5681, 5666265, 3193, 27, 14986, 4209368, 10, 5652, 2173, 4, 5058596, 18, 15, 282062, 105, 141594],
workspace_size_per_gpu_in_mb = 11645,
embedding_vec_size = 128,
combiner = "sum",
sparse_embedding_name = "sparse_embedding1",
bottom_name = "data1",
optimizer = optimizer))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["dense"],
top_names = ["fc1"],
num_output=512))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
bottom_names = ["fc1"],
top_names = ["relu1"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["relu1"],
top_names = ["fc2"],
num_output=256))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
bottom_names = ["fc2"],
top_names = ["relu2"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["relu2"],
top_names = ["fc3"],
num_output=128))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
bottom_names = ["fc3"],
top_names = ["relu3"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Interaction,
bottom_names = ["relu3","sparse_embedding1"],
top_names = ["interaction1"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["interaction1"],
top_names = ["fc4"],
num_output=1024))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
bottom_names = ["fc4"],
top_names = ["relu4"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["relu4"],
top_names = ["fc5"],
num_output=1024))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
bottom_names = ["fc5"],
top_names = ["relu5"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["relu5"],
top_names = ["fc6"],
num_output=512))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
bottom_names = ["fc6"],
top_names = ["relu6"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["relu6"],
top_names = ["fc7"],
num_output=256))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
bottom_names = ["fc7"],
top_names = ["relu7"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["relu7"],
top_names = ["fc8"],
num_output=1))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.BinaryCrossEntropyLoss,
bottom_names = ["fc8", "label"],
top_names = ["loss"]))
model.compile()
model.summary()
model.fit(max_iter = 600, display = 50, eval_interval = 100, snapshot = 10000000, snapshot_prefix = "dlrm")
|
1693418
|
import pandas_flavor as pf
import pandas as pd
@pf.register_dataframe_method
def sort_column_value_order(
df: pd.DataFrame, column: str, column_value_order: dict, columns=None
) -> pd.DataFrame:
"""
This function adds precedence to certain values in a specified column, then
sorts based on that column and any other specified columns.
Functional usage syntax:
```python
import pandas as pd
import janitor as jn
df = pd.DataFrame(...)
jn.sort_column_value_order(
column,
column_value_order = {col1: number, ...}
columns
)
```
Method chaining usage syntax:
```python
import pandas as pd
import janitor
df.sort_column_value_order(
column,
column_value_order = {col1: number, ...}
columns
)
```
:param df: This is our DataFrame that we are manipulating
:param column: This is a column name as a string we are using to specify
which column to sort by
:param column_value_order: This is a dictionary of values that will
represent precedence of the values in the specified column
:param columns: This is a list of additional columns that we can sort by
:raises ValueError: raises error if chosen Column Name is not in
Dataframe, or if column_value_order dictionary is empty.
:return: This function returns a Pandas DataFrame
"""
if len(column_value_order) > 0:
if column in df.columns:
df["cond_order"] = df[column].replace(column_value_order)
if columns is None:
new_df = df.sort_values("cond_order")
del new_df["cond_order"]
else:
new_df = df.sort_values(columns + ["cond_order"])
del new_df["cond_order"]
return new_df
else:
raise ValueError("Column Name not in DataFrame")
else:
raise ValueError("column_value_order dictionary cannot be empty")
|
1693440
|
import json
import os
import platform
import sys
import threading
import traceback
from copy import deepcopy
from datetime import datetime
from string import lower
class AbstractLog(object):
# AbstractLog provides an ABC for the creation of a concrete Log class
def __init__(self, ToolProjectName, ToolProjectVersion, ToolName=None, ToolVersion=None, options={}, params={}):
# setup default options, deepcopy those passed by user, set missing with defaults, if any errors use defaults
def_opts = {'OutputDir': 'D:\\DSZOPSDisk\\logs',
'Prefix': 'concernedparent',
'Logging': True,
'Verbose': True,
'Debugging': True,
}
try:
self.options = deepcopy(options)
for k in def_opts:
if not self.options.has_key(k):
self.options[k] = def_opts[k]
except:
self.options = def_opts
self.notifyOfError('{options} passed were invalid and have been reset. Moving on.')
# setup default params, deepcopy those passed by user, set missing with defaults, if any errors use defaults
try:
def_pars = {'ToolProjectName': ToolProjectName,
'ToolProjectVersion': ToolProjectVersion,
'ToolName': ToolName if ToolName else ToolProjectName,
'ToolVersion': ToolVersion if ToolVersion else ToolProjectVersion,
'EventType': 'event',
}
except:
def_pars = {'ToolProjectName': 'Unknown', 'EventType': 'event'}
self.notifyOfError('A parameter passed was invalid. ToolProjectName set to Unknown. Moving on.')
try:
self.params = deepcopy(params)
for k in def_pars:
if not self.params.has_key(k):
self.params[k] = def_pars[k]
except:
self.params = def_pars
self.notifyOfError('{params} passed were invalid and have been reset. Moving on.')
# dispatcher will scavenge files deeper than specified path
# for the foreseeable future expect root of path to be D:\DSZOPSDisk
# by default we will try D:\DSZOPSDisk\logs and if that fails try cwd
try:
self.options['OutputDir'] = os.path.join(self.options['OutputDir'], self.options['Prefix'])
if not os.path.exists(self.options['OutputDir']):
os.mkdir(self.options['OutputDir'])
except:
try:
self.options['OutputDir'] = os.path.join(os.getcwd(), self.options['Prefix'])
if not os.path.exists(self.options['OutputDir']):
os.mkdir(self.options['OutputDir'])
except:
self.notifyOfError('Could not open a log output directory. Logging will be disabled.')
self.options['Logging'] = False
finally:
if self.options['Verbose'] and self.options['Logging']: print "Logging to " + self.options['OutputDir']
def __call__(self, ToolEvent='pulsed', ToolName=None, ToolVersion=None, Annotation=None, ToolStatus='notify', ToolProjectName=None, ToolProjectVersion=None, CommandLine=None, params={}):
try:
d = deepcopy(self.params)
for k in params:
d[k] = deepcopy(params[k])
d['ToolEvent'] = ToolEvent
d['ToolStatus'] = ToolStatus
if d.has_key('StartTime'): d['EventTime'] = d['StartTime']
else: d['EventTime'] = datetime.now()
if ToolProjectName: d['ToolProjectName'] = ToolProjectName
if ToolProjectVersion: d['ToolProjectVersion'] = ToolProjectVersion
if ToolName: d['ToolName'] = ToolName
if ToolVersion: d['ToolVersion'] = ToolVersion
if Annotation: d['Annotation'] = Annotation
if CommandLine: d['CommandLine'] = CommandLine
self.fromDICTwriteJSON(d)
except:
self.notifyOfError("Failed to generate output file. Parameters were:\n"+str(params))
def __getattr__(self,name):
return object.__getattribute__(self,lower(name))
def __setattr__(self,name,value):
object.__setattr__(self, lower(name), value)
if isinstance(self.__getattr__(lower(name)), self.AbstractLogType):
self.__getattr__(name).log = self
self.__getattr__(name).name = lower(name)
def __getitem__(self,key):
return self.__getattr__(key)
def __setitem__(self,key,value):
self.__setattr__(key, value)
def open(self): self('opened')
def close(self):
self.running = False
self('closed')
def pacemaker(self, timeout=60):
# This is a stand-alone heartbeat generator. To pulse from your own control loop,
# call your AbstractLog subclass instance event handler (e.g. AbstractLog['event']()
def __target(timeout=60):
if platform.uname()[0].lower() == "windows":
import win32con
import win32event
self.running = True
kill = win32event.CreateEvent(None, 1, 0, None)
pulse = win32event.CreateWaitableTimer(None, 0, None)
win32event.SetWaitableTimer(pulse, 0, timeout*1000, None, None, False)
while(self.running):
try:
result = win32event.WaitForMultipleObjects([kill, pulse], False, 1000)
# if kill signal received, break loop
if(result == win32con.WAIT_OBJECT_0): break
# elif timeout has passed, generate a pulse
elif(result == win32con.WAIT_OBJECT_0 + 1): self['event']()
except:
self.notifyOfError("Pacemaker shutdown. Heartbeats will not be generated.")
win32event.SetEvent(kill)
elif self.options['Verbose']: print "Pacemaker only supported in Windows at this time. "
try:
self.thread = threading.Thread(target=__target, args=(timeout,) )
self.thread.start()
except:
self.notifyOfError("Pacemaker thread exception. Heartbeats will not be generated.")
def basefilename(self):
return os.path.join(self.options['OutputDir'], self.options['Prefix'].lower()) + '.' + str(self.params['ToolProjectName']).lower() + "." + str(datetime.now().strftime("%Y%m%d%H%M%S"))
def notifyOfError(self,errorString=''):
# notifyOfError(errorString)
# Outputs reason for error and traceback stack to console.
if self.options['Verbose']: print "Error: " + errorString
exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
if self.options['Debugging']: traceback.print_exception(exceptionType, exceptionValue, exceptionTraceback, limit=3, file=sys.stdout)
class __dtencoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime): return obj.isoformat(' ')
return json.JSONEncoder.default(self, obj)
def __dumps(self, obj):
return json.dumps(obj, cls=self.__dtencoder)
def fromDICTwriteJSON(self, params={}):
# Open a new file using the logging scheme specified.
# Dump JSON output of the parameters provided.
if self.options['Logging']:
try:
filename = self.basefilename() + '.json'
out = {}
for key in params: out[lower(key)] = params[key]
with open(filename, 'a') as f:
f.write(self.__dumps(out)+'\n')
except:
self.notifyOfError("Failed to generate output file. Parameters were:\n"+str(params))
try: os.remove(filename)
except: pass
class AbstractLogType(object):
def __init__(self, params={}):
# params is a dictionary of k,v pairs.
# log is the parent concrete AbstractLog instance that called this
self.__params = deepcopy(params)
self.__queue = {}
def __call__(self, ToolEvent='executed', ToolName=None, ToolVersion=None, Annotation=None, ToolStatus='notify', ToolProjectName=None, ToolProjectVersion=None, CommandLine=None, params={}):
# allows object to be called like a function via Object(...)
# implements a generic event logger for most applications, sends heartbeat by default
# capability can be employed as-is, extended, or overridden
try:
d = self.__params
for k in self.__queue:
d[k] = self.__queue[k]
self.__queue = {}
for k in params:
d[k] = params[k]
if not d.has_key('EventType'): d['EventType'] = self.name
self.log(ToolEvent, ToolName, ToolVersion, Annotation, ToolStatus, ToolProjectName, ToolProjectVersion, CommandLine, d)
except:
self.log.notifyOfError("Failed to generate output file. Parameters were:\n"+str(params))
def __getitem__(self, key):
# get a single item by dict reference
if self.__params.has_key(key): return self.__params[key]
else: return None
def __setitem__(self, key, value):
# set a single item by dict reference
self.__params[key] = deepcopy(value)
def queue(self, params={}):
# enqueue parameters for one-time output
try:
for key in params: self.__queue[key] = deepcopy(params[key])
except:
self.log.notifyOfError("Could not set queue by dictionary.")
def set(self, params={}):
# set parameters for permanent output
try:
for key in params: self.__params[key] = deepcopy(params[key])
except:
self.log.notifyOfError("Could not set params by dictionary.")
def open(self, params={}): self('opened', params=params)
def close(self, params={}): self('closed', params=params)
def start(self): self.queue({'StartTime': datetime.now()})
def stop(self): self.queue({'StopTime': datetime.now()})
|
1693484
|
from modeltranslation.translator import translator, TranslationOptions
from .models import Language, Keyword, KeywordSet, Place, Event, Offer, License
class LanguageTranslationOptions(TranslationOptions):
fields = ('name',)
translator.register(Language, LanguageTranslationOptions)
class KeywordTranslationOptions(TranslationOptions):
fields = ('name',)
translator.register(Keyword, KeywordTranslationOptions)
class KeywordSetTranslationOptions(TranslationOptions):
fields = ('name',)
translator.register(KeywordSet, KeywordSetTranslationOptions)
class PlaceTranslationOptions(TranslationOptions):
fields = ('name', 'description', 'info_url', 'street_address', 'address_locality', 'telephone')
translator.register(Place, PlaceTranslationOptions)
class EventTranslationOptions(TranslationOptions):
fields = ('name', 'description', 'short_description', 'info_url',
'location_extra_info', 'headline', 'secondary_headline', 'provider',
'provider_contact_info')
translator.register(Event, EventTranslationOptions)
class OfferTranslationOptions(TranslationOptions):
fields = ('price', 'info_url', 'description')
translator.register(Offer, OfferTranslationOptions)
class LicenseTranslationOptions(TranslationOptions):
fields = ('name',)
translator.register(License, LicenseTranslationOptions)
|
1693512
|
from decouple import config
DATAPORTEN = {
"STUDY": {
"ENABLED": config("OW4_DP_STUDY_ENABLED", cast=bool, default=False),
"TESTING": config("OW4_DP_STUDY_TESTING", cast=bool, default=True),
"CLIENT_ID": config("OW4_DP_STUDY_CLIENT_ID", default=""),
"CLIENT_SECRET": config("OW4_DP_STUDY_CLIENT_SECRET", default=""),
"REDIRECT_URI": config("OW4_DP_STUDY_REDIRECT_URI", default=""),
"PROVIDER_URL": "https://auth.dataporten.no/oauth/token",
"SCOPES": ["openid", "userid-feide", "profile", "groups", "email"],
}
}
|
1693517
|
def test():
assert (
"patterns = list(nlp.pipe(people))" in __solution__
), "你有用list将nlp.pipe的结果变为列表吗?"
__msg__.good(
"干得漂亮!接下来我们看一个实际例子,用nlp.pipe来处理文档生成更多的元数据。"
)
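def _example_pipe_patterns():
    # Hedged sketch of the pattern the assertion above checks for (not part of
    # the original exercise): spaCy and the `people` list below are assumptions
    # made for illustration only.
    import spacy
    nlp = spacy.blank("en")
    people = ["David Bowie", "Angela Merkel", "Lady Gaga"]
    patterns = list(nlp.pipe(people))
    return patterns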
|
1693548
|
from orator.migrations import Migration
class CreateSubredditsTable(Migration):
def up(self):
"""
Run the migrations.
"""
with self.schema.create('subreddits') as table:
table.increments('id')
table.timestamps()
table.text('subreddit').unique()
table.boolean('isEnabled')
table.boolean('isSandbox')
def down(self):
"""
Revert the migrations.
"""
self.schema.drop('subreddits')
|
1693576
|
import os
import config
import models
import gamestats
import message
import addgamestats
import basehandler
from wrap import PropWrap
from wrap import DictWrap
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext import db
class GameDetail(basehandler.BaseHandler):
def get(self):
# If a game key is specified, use that. If no game key and a player
# is specified, use the last game of that player. If neither, show
# a message to the user. The common error case is a non-logged in
# player, who hasn't specified a game.
# Get player name, if any
player_name = self.request.get('p')
# Get game key. If missing, get it from the player's last game
key_name = self.request.get('g')
if key_name == '':
# No game key name. Is there a player?
if player_name == '':
message.show(self, message.GAME_NOT_FOUND)
return
obj = models.PlayerModel.get(models.playermodel_key(player_name))
if not obj:
message.show(self, message.PLAYER_NOT_FOUND)
return
key_name = obj.last_game_key_name
if key_name == '':
message.show(self, message.PLAYER_NOT_PLAYED_GAME)
return
g, game_obj = gamestats.load_from_key_name(key_name, player_name,
load_players=True)
if not g:
message.show(self, message.GAME_NOT_FOUND)
return
if self.request.get('j') == '1':
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write(g.json)
return
if self.request.get('i') == '1':
self.response.headers['Content-Type'] = 'text/plain'
for player_stat in g.player_stats:
ip = 'unknown'
if 'ip' in player_stat.__dict__ and len(player_stat.ip) != 0:
ip = player_stat.ip
did = 'unknown'
if 'did' in player_stat.__dict__ and len(player_stat.did) != 0:
did = player_stat.did
winner = ''
if player_stat.win_result == gamestats.PLAYER_RESULT_WIN:
winner = ' (winner)'
self.response.out.write('Player: %s ip: %s did: %s %s\n' % (player_stat.name, ip, did, winner))
if game_obj.dids:
self.response.out.write('\n')
self.response.out.write(game_obj.dids)
return
# Render the template and serve the response
template_values = {
'tabs': config.get_tabs(player_name),
'selected_tab': config.TAB_NONE,
'gamestats': g,
'units_built_sums': self.get_units_built_sums(g),
'computer_default_rating': gamestats.COMPUTER_DEFAULT_RATING,
'anonymous_default_rating': gamestats.ANONYMOUS_DEFAULT_RATING,
'player_default_rating': gamestats.PLAYER_DEFAULT_RATING,
'computer_avatar_url': config.COMPUTER_AVATAR_URL,
'anonymous_avatar_url': config.ANONYMOUS_AVATAR_URL,
'winner_image_url': config.WINNER_IMAGE_URL,
'chevron_image_url': config.CHEVRON_IMAGE_URL,
}
self.set_caching_headers(config.INFOPAGE_MAX_AGE_SECONDS)
self.response.headers['Content-Type'] = 'application/xhtml+xml'
path = os.path.join(os.path.dirname(__file__), 'gamedetail.xhtml')
self.response.out.write(template.render(path, template_values))
def get_units_built_sums(self, g):
units_built_sums = [0] * config.BUILT_COUNTS_MAX
for i in xrange(config.BUILT_COUNTS_MAX):
for player_stat in g.player_stats:
units_built_sums[i] += player_stat.winstats.built_counts[i]
return units_built_sums
|
1693582
|
from nose.tools import with_setup
import pandas as pd
from ..widget import utils as utils
from ..widget.encoding import Encoding
df = None
encoding = None
def _setup():
global df, encoding
records = [{u'buildingID': 0, u'date': u'6/1/13', u'temp_diff': 12, "mystr": "alejandro", "mystr2": "1"},
{u'buildingID': 1, u'date': u'6/1/13', u'temp_diff': 0, "mystr": "alejandro", "mystr2": "1"},
{u'buildingID': 2, u'date': u'6/1/14', u'temp_diff': 11, "mystr": "alejandro", "mystr2": "1"},
{u'buildingID': 0, u'date': u'6/1/15', u'temp_diff': 5, "mystr": "alejandro", "mystr2": "1.0"},
{u'buildingID': 1, u'date': u'6/1/16', u'temp_diff': 19, "mystr": "alejandro", "mystr2": "1"},
{u'buildingID': 2, u'date': u'6/1/17', u'temp_diff': 32, "mystr": "alejandro", "mystr2": "1"}]
df = pd.DataFrame(records)
encoding = Encoding(chart_type="table", x="date", y="temp_diff")
def _teardown():
pass
@with_setup(_setup, _teardown)
def test_on_render_viz():
df["date"] = pd.to_datetime(df["date"])
df["mystr2"] = pd.to_numeric(df["mystr2"])
assert utils.infer_vegalite_type(df["buildingID"]) == "Q"
assert utils.infer_vegalite_type(df["date"]) == "T"
assert utils.infer_vegalite_type(df["temp_diff"]) == "Q"
assert utils.infer_vegalite_type(df["mystr"]) == "N"
assert utils.infer_vegalite_type(df["mystr2"]) == "Q"
def test_select_x():
assert utils.select_x(None) is None
def _check(d, expected):
x = utils.select_x(d)
assert x == expected
data = dict(col1=[1.0, 2.0, 3.0], # Q
col2=['A', 'B', 'C'], # N
col3=pd.date_range('2012', periods=3, freq='A')) # T
_check(data, 'col3')
data = dict(col1=[1.0, 2.0, 3.0], # Q
col2=['A', 'B', 'C']) # N
_check(data, 'col2')
data = dict(col1=[1.0, 2.0, 3.0]) # Q
_check(data, 'col1')
# Custom order
data = dict(col1=[1.0, 2.0, 3.0], # Q
col2=['A', 'B', 'C'], # N
col3=pd.date_range('2012', periods=3, freq='A'), # T
col4=pd.date_range('2012', periods=3, freq='A')) # T
selected_x = utils.select_x(data, ['N', 'T', 'Q', 'O'])
assert selected_x == "col2"
# Len < 1
assert utils.select_x(dict()) is None
def test_select_y():
def _check(d, expected):
x = 'col1'
y = utils.select_y(d, x)
assert y == expected
data = dict(col1=[1.0, 2.0, 3.0], # Chosen X
col2=['A', 'B', 'C'], # N
col3=pd.date_range('2012', periods=3, freq='A'), # T
col4=pd.date_range('2012', periods=3, freq='A'), # T
col5=[1.0, 2.0, 3.0]) # Q
_check(data, 'col5')
data = dict(col1=[1.0, 2.0, 3.0], # Chosen X
col2=['A', 'B', 'C'], # N
col3=pd.date_range('2012', periods=3, freq='A')) # T
_check(data, 'col2')
data = dict(col1=[1.0, 2.0, 3.0], # Chosen X
col2=pd.date_range('2012', periods=3, freq='A')) # T
_check(data, 'col2')
# No data
assert utils.select_y(None, "something") is None
# Len < 2
assert utils.select_y(dict(col1=[1.0, 2.0, 3.0]), "something") is None
# No x
assert utils.select_y(df, None) is None
# Custom order
data = dict(col1=[1.0, 2.0, 3.0], # Chosen X
col2=['A', 'B', 'C'], # N
col3=pd.date_range('2012', periods=3, freq='A'), # T
col4=pd.date_range('2012', periods=3, freq='A'), # T
col5=[1.0, 2.0, 3.0], # Q
col6=[1.0, 2.0, 3.0]) # Q
selected_x = 'col1'
selected_y = utils.select_y(data, selected_x, ['N', 'T', 'Q', 'O'])
assert selected_y == 'col2'
|